hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4615a64a6186fae886d04ac19288304aa7f07a0f
| 12,878
|
py
|
Python
|
src/dl/models/decoders/long_skips/unetpp_cat_skip.py
|
okunator/Dippa
|
dcbb7056511dd6f66bcc7b095716c385d0b0a8bb
|
[
"MIT"
] | 13
|
2021-01-25T07:47:03.000Z
|
2022-01-20T16:02:51.000Z
|
src/dl/models/decoders/long_skips/unetpp_cat_skip.py
|
okunator/Dippa
|
dcbb7056511dd6f66bcc7b095716c385d0b0a8bb
|
[
"MIT"
] | 1
|
2022-02-12T15:03:23.000Z
|
2022-02-12T15:03:23.000Z
|
src/dl/models/decoders/long_skips/unetpp_cat_skip.py
|
okunator/Dippa
|
dcbb7056511dd6f66bcc7b095716c385d0b0a8bb
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from typing import List, Tuple
from .. import MultiBlockBasic
from ...modules import FixedUnpool
class CatBlock(nn.ModuleDict):
    def __init__(self) -> None:
        """
        Concatenation merge block for all the skip connections in
        unet++. Stateless: only concatenates feature maps along the
        channel dimension in ``forward``.
        """
        super(CatBlock, self).__init__()

    def forward(
            self,
            prev_feat: torch.Tensor,
            skips: List[torch.Tensor]
    ) -> torch.Tensor:
        """
        Concatenate the skip features with the previous feature map.

        Args:
        ------------
            prev_feat (torch.Tensor):
                input from the previous decoder layer
            skips (List[torch.Tensor]):
                all the features from the encoder

        Returns:
        ------------
            torch.Tensor: channel-wise concatenation of
            [*skips, prev_feat]. Shape (B, C_total, H, W).
        """
        # Copy so the caller's list is not mutated by the append below.
        merged = list(skips)
        merged.append(prev_feat)
        return torch.cat(merged, dim=1)
class UnetppCatSkipBlock(nn.Module):
    def __init__(
            self,
            skip_channel_list: List[int],
            skip_index: int,
            batch_norm: str="bn",
            activation: str="relu",
            weight_standardize: bool=False,
            preactivate: bool=False,
            n_conv_blocks: int=1,
            **kwargs) -> None:
        """
        Unet++ skip block for one level in the decoder
        https://arxiv.org/abs/1807.10165

        Supports concatenation merge policy.

        Args:
        ---------
            skip_channel_list (List[int]):
                List of the number of channels in each of the encoder
                skip tensors.
            skip_index (int):
                index of the current skip channel in skip_channel_list.
            batch_norm (str, default="bn"):
                Normalization method. One of: "bn", "bcn", None
            activation (str, default="relu"):
                Activation method. One of: "relu", "swish", "mish"
            weight_standardize (bool, default=False):
                If True, perform weight standardization
            preactivate (bool, default=False):
                If True, normalization and activation are applied before
                convolution
            n_conv_blocks (int, default=1):
                Number of basic (bn->relu->conv)-blocks inside one
                multiconv block
        """
        super(UnetppCatSkipBlock, self).__init__()

        # ignore last channels where skips are not applied
        skip_channel_list = skip_channel_list[:-1]

        if skip_index < len(skip_channel_list):
            # sub block name index — presumably the row index of the
            # x_{row}_{col} naming used in the unet++ paper; verify
            # against the decoder that consumes these names.
            sub_block_idx0 = len(skip_channel_list) - (skip_index + 1)

            self.ups = nn.ModuleDict()
            self.skips = nn.ModuleDict()
            self.conv_blocks = nn.ModuleDict()

            # number of channels of the encoder skip at this level
            current_skip_chl = skip_channel_list[skip_index]
            conv_in_chl = current_skip_chl

            for i in range(skip_index):
                # set the enc channel num: the first sub block receives
                # the (deeper) previous encoder level, subsequent ones
                # receive outputs shaped like the current level
                prev_enc_chl = current_skip_chl
                if i == 0:
                    prev_enc_chl = skip_channel_list[skip_index - 1]

                # up block for the deeper feature maps
                self.ups[f"up{i}"] = FixedUnpool()

                # merge blocks for the feature maps in the sub network
                self.skips[f"sub_skip{i + 1}"] = CatBlock()

                # conv blocks for the feature maps in the sub network;
                # the input channel count grows by one concatenated
                # tensor per sub block
                conv_in_chl += prev_enc_chl
                self.conv_blocks[f"x_{sub_block_idx0}_{i+1}"] = MultiBlockBasic(
                    in_channels=conv_in_chl,
                    out_channels=current_skip_chl,
                    n_blocks=n_conv_blocks,
                    batch_norm=batch_norm,
                    activation=activation,
                    weight_standardize=weight_standardize,
                    preactivate=preactivate
                )

        # Merge all the feature maps before the final conv in the decoder
        self.final_merge = CatBlock()

    def forward(
            self,
            x: torch.Tensor,
            idx: int,
            skips: Tuple[torch.Tensor],
            extra_skips: Tuple[torch.Tensor]=None,
            **kwargs
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """
        Args:
        ----------
            x (torch.Tensor):
                Input tensor. Shape (B, C, H, W).
            idx (int):
                running index used to get the right skip tensor(s) from
                the skips tuple for the skip connection.
            skips (Tuple[torch.Tensor]):
                Tuple of tensors generated from consecutive encoder
                blocks. Shapes (B, C, H, W).
            extra_skips (Tuple[torch.Tensor], default=None):
                Tuple of tensors generated in the previous layer's sub
                networks. In the paper, these are the middle blocks in
                the architecture schema.

        Returns:
        ----------
            A tuple of tensors: the first is the decoder branch output;
            the second is a list of subnetwork tensors needed in the
            next layer (None when idx is past the last skip level).
        """
        sub_network_tensors = None
        if idx < len(skips):
            current_skip = skips[idx]
            all_skips = [current_skip]
            for i, (up, skip, conv) in enumerate(
                zip(
                    self.ups.values(),
                    self.skips.values(),
                    self.conv_blocks.values()
                )
            ):
                # upsample the sub-network tensor from the previous layer
                prev_feat = up(extra_skips[i])
                # merge with the sub blocks computed so far
                # (reversed: most recent first)
                sub_block = skip(prev_feat, all_skips[::-1])
                sub_block = conv(sub_block)
                all_skips.append(sub_block)

            x = self.final_merge(x, all_skips)
            sub_network_tensors = all_skips

        return x, sub_network_tensors
class UnetppCatSkipBlockLight(nn.Module):
    def __init__(
            self,
            in_channels: int,
            out_channel_list: List[int],
            skip_channel_list: List[int],
            skip_index: int,
            batch_norm: str="bn",
            activation: str="relu",
            weight_standardize: bool=False,
            preactivate: bool=False,
            n_conv_blocks: int=1,
            **kwargs
    ) -> None:
        """
        Unet++ skip block for one level in the decoder
        https://arxiv.org/abs/1807.10165

        Supports concatenation merge policy. This is a light version
        which has a lower memory footprint. This is done similarly to
        the unet3+ by basically just outputting a small number of
        feature maps at every conv block which at the end sum up
        to the number of output channels of the decoder block
        (after concatenation).

        Args:
        ---------
            in_channels (int):
                The number of channels coming in from the previous head
                decoder branch
            out_channel_list (List[int]):
                List of the number of output channels in the decoder
                output tensors
            skip_channel_list (List[int]):
                List of the number of channels in each of the encoder
                skip tensors.
            skip_index (int):
                index of the current skip channel in skip_channel_list.
            batch_norm (str, default="bn"):
                Normalization method. One of: "bn", "bcn", None
            activation (str, default="relu"):
                Activation method. One of: "relu", "swish", "mish"
            weight_standardize (bool, default=False):
                If True, perform weight standardization
            preactivate (bool, default=False):
                If True, normalization and activation are applied before
                convolution
            n_conv_blocks (int, default=1):
                Number of basic (bn->relu->conv)-blocks inside one
                residual multiconv block
        """
        super(UnetppCatSkipBlockLight, self).__init__()

        # ignore last channels where skips are not applied
        skip_channel_list = skip_channel_list[:-1]

        if skip_index < len(skip_channel_list):
            # sub block name index — presumably the row index of the
            # x_{row}_{col} naming used in the unet++ paper; verify
            # against the decoder that consumes these names.
            sub_block_idx0 = len(skip_channel_list) - (skip_index + 1)

            self.ups = nn.ModuleDict()
            self.skips = nn.ModuleDict()
            self.conv_blocks = nn.ModuleDict()

            current_skip_chl = skip_channel_list[skip_index]

            # Split the decoder output channels evenly over the
            # (skip_index + 2) tensors that get concatenated; the
            # division remainder is absorbed by the post conv so the
            # channel counts add up exactly.
            # (Fixed: dropped a dead "reminder = 0" assignment that was
            # immediately overwritten by this divmod.)
            cat_channels, remainder = divmod(
                out_channel_list[skip_index], (skip_index + 2)
            )

            # pre conv for the encoder skip
            self.pre_conv = MultiBlockBasic(
                in_channels=current_skip_chl,
                out_channels=cat_channels,
                n_blocks=n_conv_blocks,
                batch_norm=batch_norm,
                activation=activation,
                weight_standardize=weight_standardize,
                preactivate=preactivate
            )

            # post conv for the incoming decoder feat map
            self.post_conv = MultiBlockBasic(
                in_channels=in_channels,
                out_channels=cat_channels + remainder,
                n_blocks=n_conv_blocks,
                batch_norm=batch_norm,
                activation=activation,
                weight_standardize=weight_standardize,
                preactivate=preactivate
            )

            # channels of the first sub-block input: an even share of
            # the previous (deeper) decoder level's output channels
            conv_in_chl = out_channel_list[skip_index - 1] // (skip_index + 1)
            for i in range(skip_index):
                prev_enc_chl = cat_channels

                # up block for the deeper feature maps
                self.ups[f"up{i}"] = FixedUnpool()

                # merge blocks for the feature maps in the sub network
                self.skips[f"sub_skip{i + 1}"] = CatBlock()

                # conv blocks for the feature maps in the sub network;
                # the input channel count grows by one concatenated
                # tensor per sub block
                conv_in_chl += prev_enc_chl
                self.conv_blocks[f"x_{sub_block_idx0}_{i + 1}"] = MultiBlockBasic(
                    in_channels=conv_in_chl,
                    out_channels=cat_channels,
                    n_blocks=n_conv_blocks,
                    batch_norm=batch_norm,
                    activation=activation,
                    weight_standardize=weight_standardize,
                    preactivate=preactivate
                )

        # Merge all the feature maps before the final conv in the decoder
        self.final_merge = CatBlock()

    def forward(
            self,
            x: torch.Tensor,
            idx: int,
            skips: Tuple[torch.Tensor],
            extra_skips: Tuple[torch.Tensor]=None,
            **kwargs
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """
        Args:
        ----------
            x (torch.Tensor):
                Input tensor. Shape (B, C, H, W).
            idx (int):
                running index used to get the right skip tensor(s) from
                the skips tuple for the skip connection.
            skips (Tuple[torch.Tensor]):
                Tuple of tensors generated from consecutive encoder
                blocks. Shapes (B, C, H, W).
            extra_skips (Tuple[torch.Tensor], default=None):
                Tuple of tensors generated in the previous layer's sub
                networks. In the paper, these are the middle blocks in
                the architecture schema.

        Returns:
        ----------
            A tuple of tensors: the first is the decoder branch output;
            the second is a list of subnetwork tensors needed in the
            next layer (None when idx is past the last skip level).
        """
        sub_network_tensors = None
        if idx < len(skips):
            # squeeze the encoder skip down to its cat_channels share
            current_skip = skips[idx]
            current_skip = self.pre_conv(current_skip)
            all_skips = [current_skip]
            for i, (up, skip, conv) in enumerate(
                zip(
                    self.ups.values(),
                    self.skips.values(),
                    self.conv_blocks.values()
                )
            ):
                # upsample the sub-network tensor from the previous layer
                prev_feat = up(extra_skips[i])
                # merge with the sub blocks computed so far
                # (reversed: most recent first)
                sub_block = skip(prev_feat, all_skips[::-1])
                sub_block = conv(sub_block)
                all_skips.append(sub_block)

            # squeeze the decoder feat map, then merge everything
            x = self.post_conv(x)
            x = self.final_merge(x, all_skips)
            sub_network_tensors = all_skips

        return x, sub_network_tensors
| 37.005747
| 82
| 0.532458
| 1,411
| 12,878
| 4.67399
| 0.140326
| 0.031691
| 0.034117
| 0.025474
| 0.798484
| 0.776042
| 0.765732
| 0.760879
| 0.749356
| 0.744049
| 0
| 0.005624
| 0.39253
| 12,878
| 348
| 83
| 37.005747
| 0.837403
| 0.374748
| 0
| 0.720238
| 0
| 0
| 0.014338
| 0.006326
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.029762
| 0
| 0.10119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1ce57468792f92b8e2ec5325a434adcf83005024
| 37
|
py
|
Python
|
structdc/_meta/__init__.py
|
KnifeMaster007/structdc
|
79bfb00527f1db7347091f220bb4b2df2da3a1b3
|
[
"MIT"
] | null | null | null |
structdc/_meta/__init__.py
|
KnifeMaster007/structdc
|
79bfb00527f1db7347091f220bb4b2df2da3a1b3
|
[
"MIT"
] | null | null | null |
structdc/_meta/__init__.py
|
KnifeMaster007/structdc
|
79bfb00527f1db7347091f220bb4b2df2da3a1b3
|
[
"MIT"
] | null | null | null |
from .structmixin import StructMixin
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e801d6086980978f1d616ea1a2f5f181926e8b0a
| 136
|
py
|
Python
|
reset.py
|
umireon/ptc-streaming-utils
|
3dd8c75ea52d2c10fc374ef211a80abf27e6a92b
|
[
"MIT"
] | null | null | null |
reset.py
|
umireon/ptc-streaming-utils
|
3dd8c75ea52d2c10fc374ef211a80abf27e6a92b
|
[
"MIT"
] | null | null | null |
reset.py
|
umireon/ptc-streaming-utils
|
3dd8c75ea52d2c10fc374ef211a80abf27e6a92b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from applib import read_ini, reset
# Reset every configured unit: (ini section name, reset code).
for section, code in (("left", 6), ("right", 6), ("hand", 7)):
    reset(read_ini(section), code)
| 22.666667
| 34
| 0.713235
| 24
| 136
| 3.875
| 0.583333
| 0.301075
| 0.387097
| 0.27957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.088235
| 136
| 5
| 35
| 27.2
| 0.717742
| 0.125
| 0
| 0
| 0
| 0
| 0.110169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e80d8beb3b54a3b3b9cde8ab18903cf0bf0aae62
| 130
|
py
|
Python
|
invoices/models/__init__.py
|
jdevera/pythoncanarias_web
|
465e8b0a054726e29b1029f1dffe11f913e40bcc
|
[
"MIT"
] | null | null | null |
invoices/models/__init__.py
|
jdevera/pythoncanarias_web
|
465e8b0a054726e29b1029f1dffe11f913e40bcc
|
[
"MIT"
] | null | null | null |
invoices/models/__init__.py
|
jdevera/pythoncanarias_web
|
465e8b0a054726e29b1029f1dffe11f913e40bcc
|
[
"MIT"
] | null | null | null |
from invoices.models.client import Client
from invoices.models.concept import Concept
from invoices.models.invoice import Invoice
| 32.5
| 43
| 0.861538
| 18
| 130
| 6.222222
| 0.388889
| 0.321429
| 0.482143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 130
| 3
| 44
| 43.333333
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
08ff003acd90e860c3a7ab4222d6a85ce77577ff
| 28
|
py
|
Python
|
src/ip2whois/__init__.py
|
ip2whois/ip2whois-python
|
79af46c585e3d1e48135b2b985ce51d9a83fced5
|
[
"MIT"
] | 1
|
2021-11-22T02:36:04.000Z
|
2021-11-22T02:36:04.000Z
|
src/ip2whois/__init__.py
|
ip2whois/ip2whois-python
|
79af46c585e3d1e48135b2b985ce51d9a83fced5
|
[
"MIT"
] | null | null | null |
src/ip2whois/__init__.py
|
ip2whois/ip2whois-python
|
79af46c585e3d1e48135b2b985ce51d9a83fced5
|
[
"MIT"
] | null | null | null |
from ip2whois.api import Api
| 28
| 28
| 0.857143
| 5
| 28
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.107143
| 28
| 1
| 28
| 28
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1c0aeb9ff15a0c1f1b4b025c2a79c72dfb77f9aa
| 26
|
py
|
Python
|
spr/core/__init__.py
|
aksr-aashish/spr
|
d63068f04fae262d351403103c8155af3f846cc9
|
[
"MIT"
] | 35
|
2021-07-16T01:27:25.000Z
|
2022-03-31T03:44:56.000Z
|
spr/core/__init__.py
|
aksr-aashish/spr
|
d63068f04fae262d351403103c8155af3f846cc9
|
[
"MIT"
] | 3
|
2021-09-02T11:18:07.000Z
|
2022-03-31T12:51:38.000Z
|
spr/core/__init__.py
|
aksr-aashish/spr
|
d63068f04fae262d351403103c8155af3f846cc9
|
[
"MIT"
] | 88
|
2021-07-16T02:21:53.000Z
|
2022-03-30T06:08:42.000Z
|
from .keyboard import ikb
| 13
| 25
| 0.807692
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1c4e2287d7223d5e5b97fec35130a31221dd19e7
| 30
|
py
|
Python
|
modytest/__init__.py
|
mohmoniem281/modytest
|
419eec1d5e85d2a2381ecd287caafbeabc57f34c
|
[
"MIT"
] | null | null | null |
modytest/__init__.py
|
mohmoniem281/modytest
|
419eec1d5e85d2a2381ecd287caafbeabc57f34c
|
[
"MIT"
] | null | null | null |
modytest/__init__.py
|
mohmoniem281/modytest
|
419eec1d5e85d2a2381ecd287caafbeabc57f34c
|
[
"MIT"
] | null | null | null |
from modytest.code import code
| 30
| 30
| 0.866667
| 5
| 30
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1c5212feea36c2efc19fa932cd43f916bcd82d98
| 54
|
py
|
Python
|
src/supervisor_console/__init__.py
|
MrWork/supervisor-console
|
52f263a03d0a2e8dbf0cd986547b05d6d19ef31a
|
[
"BSD-3-Clause"
] | 1
|
2020-10-05T16:16:10.000Z
|
2020-10-05T16:16:10.000Z
|
src/supervisor_console/__init__.py
|
MrWork/supervisor-console
|
52f263a03d0a2e8dbf0cd986547b05d6d19ef31a
|
[
"BSD-3-Clause"
] | null | null | null |
src/supervisor_console/__init__.py
|
MrWork/supervisor-console
|
52f263a03d0a2e8dbf0cd986547b05d6d19ef31a
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T16:38:48.000Z
|
2022-01-12T16:38:48.000Z
|
from .console import ProcessCommunicationEventHandler
| 27
| 53
| 0.907407
| 4
| 54
| 12.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 54
| 1
| 54
| 54
| 0.98
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1c89fcfc4af29f383c025608f6256dc4d8748208
| 35
|
py
|
Python
|
demon/__init__.py
|
iriszero/DeMoN
|
12a314d0c57908fd95d0e9c5a8ee17d3d122fb47
|
[
"Apache-2.0"
] | 1
|
2020-09-15T07:42:47.000Z
|
2020-09-15T07:42:47.000Z
|
demon/__init__.py
|
iriszero/DeMoN
|
12a314d0c57908fd95d0e9c5a8ee17d3d122fb47
|
[
"Apache-2.0"
] | null | null | null |
demon/__init__.py
|
iriszero/DeMoN
|
12a314d0c57908fd95d0e9c5a8ee17d3d122fb47
|
[
"Apache-2.0"
] | 1
|
2020-09-15T08:30:33.000Z
|
2020-09-15T08:30:33.000Z
|
from . import model, test, training
| 35
| 35
| 0.771429
| 5
| 35
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 1
| 35
| 35
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98bc55731eef1b9921ea569bf4133856ebc22e17
| 131
|
py
|
Python
|
sensors/Wind_Sensor.py
|
Atech1/Weather_Station
|
817a05083a13823316048103c6474e9a257a2792
|
[
"MIT"
] | null | null | null |
sensors/Wind_Sensor.py
|
Atech1/Weather_Station
|
817a05083a13823316048103c6474e9a257a2792
|
[
"MIT"
] | null | null | null |
sensors/Wind_Sensor.py
|
Atech1/Weather_Station
|
817a05083a13823316048103c6474e9a257a2792
|
[
"MIT"
] | null | null | null |
class WindSensor(object):
    """Base class for wind sensors.

    Concrete sensors are expected to subclass this and override
    ``read`` with a real measurement.
    """

    def __init__(self):
        # The base sensor holds no state; nothing to initialize.
        pass

    def read(self):
        """Return the current wind reading; the base stub reports 0."""
        return 0
| 14.555556
| 25
| 0.580153
| 17
| 131
| 4.235294
| 0.764706
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.343511
| 131
| 8
| 26
| 16.375
| 0.825581
| 0.152672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
98d47fafb8cc131c3148c6290416b658ced69625
| 205
|
py
|
Python
|
blog/views.py
|
vipulgupta2048/django-girls
|
5012b9b35737a1ab5e0c28a52be82dc80200848c
|
[
"MIT"
] | null | null | null |
blog/views.py
|
vipulgupta2048/django-girls
|
5012b9b35737a1ab5e0c28a52be82dc80200848c
|
[
"MIT"
] | null | null | null |
blog/views.py
|
vipulgupta2048/django-girls
|
5012b9b35737a1ab5e0c28a52be82dc80200848c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
def post_list(request):
    """Render the blog post list page (no template context yet)."""
    context = {}
    return render(request, 'blog/post_list.html', context)
| 25.625
| 53
| 0.736585
| 28
| 205
| 5.142857
| 0.785714
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005682
| 0.141463
| 205
| 7
| 54
| 29.285714
| 0.8125
| 0.219512
| 0
| 0
| 0
| 0
| 0.121019
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c72e85e6166e1ea3abab1f203358708366df40e4
| 43,449
|
py
|
Python
|
tests/test_zone_processor.py
|
bxparks/AceTimePython
|
d4eae67aa2fce307a3a683ca6e85e488383ec471
|
[
"MIT"
] | 1
|
2021-09-09T06:31:41.000Z
|
2021-09-09T06:31:41.000Z
|
tests/test_zone_processor.py
|
bxparks/AceTimePython
|
d4eae67aa2fce307a3a683ca6e85e488383ec471
|
[
"MIT"
] | null | null | null |
tests/test_zone_processor.py
|
bxparks/AceTimePython
|
d4eae67aa2fce307a3a683ca6e85e488383ec471
|
[
"MIT"
] | null | null | null |
# Copyright 2018 Brian T. Park
#
# MIT License
from typing import cast
import unittest
from datetime import datetime
from acetime.zonedb import zone_infos
from acetime.zone_processor import YearMonthTuple
from acetime.zone_processor import DateTuple
from acetime.zone_processor import Transition
from acetime.zone_processor import MatchingEra
from acetime.zone_processor import ZoneProcessor
from acetime.zone_processor import _get_interior_years
from acetime.zone_processor import _compare_era_to_year_month
from acetime.zone_processor import _era_overlaps_interval
from acetime.zone_processor import _subtract_date_tuple
from acetime.zone_processor import _normalize_date_tuple
from acetime.zone_processor import _expand_date_tuple
from acetime.zone_processor import _compare_transition_to_match_fuzzy
from acetime.zone_processor import _compare_transition_to_match
from acetime.zone_processor import _fix_transition_times
from acetime.zone_processor import MATCH_STATUS_PRIOR
from acetime.zone_processor import MATCH_STATUS_EXACT_MATCH
from acetime.zone_processor import MATCH_STATUS_WITHIN_MATCH
from acetime.zone_processor import MATCH_STATUS_FAR_FUTURE
from acetime.zonedb_types import ZonePolicy
from acetime.zonedb_types import ZoneEra
class TestZoneProcessorHelperMethods(unittest.TestCase):
    """Unit tests for the module-level helper functions of
    acetime.zone_processor."""

    def test_get_interior_years(self) -> None:
        # NOTE(review): the expected values suggest the helper returns
        # the years lying in the intersection of the two (inclusive)
        # ranges — confirm against zone_processor.
        self.assertEqual([2, 3],
                         sorted(_get_interior_years(1, 4, 2, 3)))
        self.assertEqual([2, 3],
                         sorted(_get_interior_years(0, 4, 2, 3)))
        self.assertEqual([],
                         sorted(_get_interior_years(4, 5, 2, 3)))
        self.assertEqual([],
                         sorted(_get_interior_years(0, 2, 5, 6)))
        self.assertEqual([5],
                         sorted(_get_interior_years(0, 5, 5, 6)))
        self.assertEqual([0, 1, 2],
                         sorted(_get_interior_years(0, 2, 0, 2)))
        self.assertEqual([2, 3, 4],
                         sorted(_get_interior_years(0, 4, 2, 4)))

    def test_expand_date_tuple(self) -> None:
        # Each case expects the same ('w'all, 's'tandard, 'u'tc) triple
        # regardless of which of the three forms is supplied as input.
        # Seconds relate as: w = s + delta_seconds,
        # u = s - offset_seconds (10800 / 7200 / 0 here).
        self.assertEqual((DateTuple(2000, 1, 30, 10800, 'w'),
                          DateTuple(2000, 1, 30, 7200, 's'),
                          DateTuple(2000, 1, 30, 0, 'u')),
                         _expand_date_tuple(
                             DateTuple(2000, 1, 30, 10800, 'w'),
                             offset_seconds=7200,
                             delta_seconds=3600))

        self.assertEqual((DateTuple(2000, 1, 30, 10800, 'w'),
                          DateTuple(2000, 1, 30, 7200, 's'),
                          DateTuple(2000, 1, 30, 0, 'u')),
                         _expand_date_tuple(
                             DateTuple(2000, 1, 30, 7200, 's'),
                             offset_seconds=7200,
                             delta_seconds=3600))

        self.assertEqual((DateTuple(2000, 1, 30, 10800, 'w'),
                          DateTuple(2000, 1, 30, 7200, 's'),
                          DateTuple(2000, 1, 30, 0, 'u')),
                         _expand_date_tuple(
                             DateTuple(2000, 1, 30, 0, 'u'),
                             offset_seconds=7200,
                             delta_seconds=3600))

    def test_normalize_date_tuple(self) -> None:
        # Already-normalized input passes through unchanged.
        self.assertEqual(
            DateTuple(2000, 2, 1, 0, 'w'),
            _normalize_date_tuple(DateTuple(2000, 2, 1, 0, 'w')))

        # 24h of seconds rolls over into the next day.
        self.assertEqual(
            DateTuple(2000, 2, 1, 0, 's'),
            _normalize_date_tuple(DateTuple(2000, 1, 31, 24 * 3600, 's')))

        # Negative seconds borrow from the previous day (leap year:
        # Feb 2000 has 29 days).
        self.assertEqual(
            DateTuple(2000, 2, 29, 23 * 3600, 'u'),
            _normalize_date_tuple(DateTuple(2000, 3, 1, -3600, 'u')))

    def test_subtract_date_tuple(self) -> None:
        # Returns (first - second) in seconds.
        self.assertEqual(
            -1,
            _subtract_date_tuple(
                DateTuple(2000, 1, 1, 43, 'w'),
                DateTuple(2000, 1, 1, 44, 'w'),
            )
        )

        # One day apart, minus one second.
        self.assertEqual(
            24 * 3600 - 1,
            _subtract_date_tuple(
                DateTuple(2000, 1, 2, 43, 'w'),
                DateTuple(2000, 1, 1, 44, 'w'),
            )
        )

        # Crossing a month boundary (January has 31 days).
        self.assertEqual(
            -31 * 24 * 3600 + 24 * 3600 - 1,
            _subtract_date_tuple(
                DateTuple(2000, 1, 2, 43, 'w'),
                DateTuple(2000, 2, 1, 44, 'w'),
            )
        )

    def test_compare_era_to_year_month(self) -> None:
        # Era whose UNTIL is 2000-03-01T00:00w.
        era = ZoneEra({
            'offset_seconds': 0,
            'zone_policy': '-',
            'rules_delta_seconds': 0,
            'format': 'EST',
            'until_year': 2000,
            'until_month': 3,
            'until_day': 1,
            'until_seconds': 0,
            'until_time_suffix': 'w',
        })
        # -1 / 0 / 1 as the era's UNTIL is before / at / after the
        # given (year, month).
        self.assertEqual(-1, _compare_era_to_year_month(era, 2000, 4))
        self.assertEqual(0, _compare_era_to_year_month(era, 2000, 3))
        self.assertEqual(1, _compare_era_to_year_month(era, 2000, 2))

    def test_era_overlaps_interval(self) -> None:
        # until = 2000-01-01T00:00w
        prev_era = ZoneEra({
            'offset_seconds': 0,
            'zone_policy': '-',
            'rules_delta_seconds': 0,
            'format': 'EST',
            'until_year': 2000,
            'until_month': 1,
            'until_day': 1,
            'until_seconds': 0,
            'until_time_suffix': 'w',
        })

        # until = 2000-03-01T00:00w
        era = ZoneEra({
            'offset_seconds': 0,
            'zone_policy': '-',
            'rules_delta_seconds': 0,
            'format': 'EST',
            'until_year': 2000,
            'until_month': 3,
            'until_day': 1,
            'until_seconds': 0,
            'until_time_suffix': 'w',
        })

        # Interval ends exactly where the era starts: no overlap.
        self.assertFalse((_era_overlaps_interval(
            prev_era=prev_era,
            era=era,
            start_ym=YearMonthTuple(1999, 1),
            until_ym=YearMonthTuple(2000, 1),
        )))

        # Interval starts exactly where the era ends: no overlap.
        self.assertFalse((_era_overlaps_interval(
            prev_era=prev_era,
            era=era,
            start_ym=YearMonthTuple(2000, 3),
            until_ym=YearMonthTuple(2000, 12),
        )))

        # Interval equals the era's span: overlap.
        self.assertTrue((_era_overlaps_interval(
            prev_era=prev_era,
            era=era,
            start_ym=YearMonthTuple(2000, 1),
            until_ym=YearMonthTuple(2000, 3),
        )))

        # Interval straddles the era's start: overlap.
        self.assertTrue((_era_overlaps_interval(
            prev_era=prev_era,
            era=era,
            start_ym=YearMonthTuple(1999, 12),
            until_ym=YearMonthTuple(2000, 2),
        )))
class TestCompareTransitionToMatch(unittest.TestCase):
    """Tests for _compare_transition_to_match() and its fuzzy variant."""

    # until 2001-03-01T00:00
    ZONE_ERA1: ZoneEra = {
        'offset_seconds': 0,
        'zone_policy': '-',
        'rules_delta_seconds': 0,
        'format': 'EST',
        'until_year': 2001,
        'until_month': 3,
        'until_day': 1,
        'until_seconds': 0,
        'until_time_suffix': 'w',
    }

    # until 2002-03-01T00:00
    ZONE_ERA2: ZoneEra = {
        'offset_seconds': 0,
        'zone_policy': '-',
        'rules_delta_seconds': 0,
        'format': 'EST',
        'until_year': 2002,
        'until_month': 3,
        'until_day': 1,
        'until_seconds': 0,
        'until_time_suffix': 'w',
    }

    def test_compare_exact(self) -> None:
        # Build a MatchingEra chain (prev_match -> match) so the
        # comparison has the context it needs.
        prev_match = MatchingEra(
            start_date_time=DateTuple(2000, 12, 1, 0, 'w'),
            until_date_time=DateTuple(2001, 3, 1, 0, 'w'),
            zone_era=self.ZONE_ERA1,
        )
        prev_match.last_transition = Transition(
            matching_era=prev_match,
            transition_time=DateTuple(2001, 2, 1, 0, 'w')
        )

        match = MatchingEra(
            start_date_time=DateTuple(2001, 3, 1, 0, 'w'),
            until_date_time=DateTuple(2001, 9, 1, 0, 'w'),
            zone_era=self.ZONE_ERA2,
        )
        match.prev_match = prev_match

        # prior to MatchingEra
        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2000, 1, 2, 0, 'w')
        )
        _fix_transition_times([transition])
        self.assertEqual(
            MATCH_STATUS_PRIOR,
            _compare_transition_to_match(transition, match),
        )

        # exactly at start_date_time of MatchingEra
        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2001, 3, 1, 0, 'w')
        )
        _fix_transition_times([transition])
        self.assertEqual(
            MATCH_STATUS_EXACT_MATCH,
            _compare_transition_to_match(transition, match),
        )

        # inside current MatchingEra
        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2001, 4, 1, 0, 'w')
        )
        _fix_transition_times([transition])
        self.assertEqual(
            MATCH_STATUS_WITHIN_MATCH,
            _compare_transition_to_match(transition, match),
        )

        # after MatchingEra
        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2001, 10, 1, 0, 'w')
        )
        _fix_transition_times([transition])
        self.assertEqual(
            MATCH_STATUS_FAR_FUTURE,
            _compare_transition_to_match(transition, match),
        )

    # until 2000-03-01T00:00; used by test_compare_fuzzy below
    ZONE_ERA: ZoneEra = {
        'offset_seconds': 0,
        'zone_policy': '-',
        'rules_delta_seconds': 0,
        'format': 'EST',
        'until_year': 2000,
        'until_month': 3,
        'until_day': 1,
        'until_seconds': 0,
        'until_time_suffix': 'w',
    }

    def test_compare_fuzzy(self) -> None:
        # NOTE(review): expected codes appear to be -1 (too early),
        # 1 (within the fuzzy window), 2 (past it) — confirm against
        # _compare_transition_to_match_fuzzy. The window seems to
        # extend one month before the match start.
        match = MatchingEra(
            start_date_time=DateTuple(2000, 1, 1, 0, 'w'),
            until_date_time=DateTuple(2001, 1, 1, 0, 'w'),
            zone_era=self.ZONE_ERA,
        )

        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(1999, 11, 1, 0, 'w')
        )
        self.assertEqual(-1,
                         _compare_transition_to_match_fuzzy(transition, match))

        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(1999, 12, 1, 0, 'w')
        )
        self.assertEqual(1,
                         _compare_transition_to_match_fuzzy(transition, match))

        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2000, 1, 1, 0, 'w')
        )
        self.assertEqual(1,
                         _compare_transition_to_match_fuzzy(transition, match))

        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2001, 1, 1, 0, 'w')
        )
        self.assertEqual(1,
                         _compare_transition_to_match_fuzzy(transition, match))

        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2001, 2, 1, 0, 'w')
        )
        self.assertEqual(1,
                         _compare_transition_to_match_fuzzy(transition, match))

        transition = Transition(
            matching_era=match,
            transition_time=DateTuple(2001, 3, 1, 0, 'w')
        )
        self.assertEqual(2,
                         _compare_transition_to_match_fuzzy(transition, match))
class TestZoneProcessorMatchesAndTransitions(unittest.TestCase):
    """Verify the MatchingEra and Transition objects produced by
    ZoneProcessor.init_for_year() for a selection of real timezones, each
    chosen to exercise a different corner case of the TZ database (suffix
    'w'/'s'/'u' handling, negative DST, mid-year era changes, etc).
    """

    def test_Los_Angeles(self) -> None:
        """America/Los_Angeles uses a simple US rule.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_America_Los_Angeles)
        zone_processor.init_for_year(2000)

        # One MatchingEra covering the 14-month window around year 2000.
        matches = zone_processor.matches
        self.assertEqual(1, len(matches))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('US', zone_policy['name'])

        # Three Transitions: STD, DST (spring forward), STD (fall back).
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2000, 4, 2, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(-8 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 4, 2, 3 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2000, 10, 29, 2 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(-8 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 10, 29, 1 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(-8 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)

    def test_Petersburg(self) -> None:
        """America/Indiana/Petersburg moved from central to eastern time in
        1977, then switched back in 2006, then switched back again in 2007.
        """
        zone_processor = ZoneProcessor(
            zone_infos.ZONE_INFO_America_Indiana_Petersburg
        )
        zone_processor.init_for_year(2006)

        # Two MatchingEras: fixed EST, then US rules after April 2006.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2005, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2006, 4, 2, 2 * 3600, 'w'), matches[0].until_date_time)
        self.assertEqual('-', matches[0].zone_era['zone_policy'])
        self.assertEqual(
            DateTuple(2006, 4, 2, 2 * 3600, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2007, 2, 1, 0, 'w'), matches[1].until_date_time)
        zone_policy = cast(ZonePolicy, matches[1].zone_era['zone_policy'])
        self.assertEqual('US', zone_policy['name'])

        # Three Transitions across the era change and the fall-back.
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        self.assertEqual(
            DateTuple(2005, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2006, 4, 2, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(-5 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2006, 4, 2, 2 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2006, 10, 29, 2 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(-6 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2006, 10, 29, 1 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2007, 2, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(-6 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)

    def test_London(self) -> None:
        """Europe/London uses the EU policy, which has a 'u' in the AT field.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Europe_London)
        zone_processor.init_for_year(2000)

        # One MatchingEra under the EU policy.
        matches = zone_processor.matches
        self.assertEqual(1, len(matches))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('EU', zone_policy['name'])

        # Three Transitions: GMT, BST, GMT.
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2000, 3, 26, 1 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(0 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 3, 26, 2 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2000, 10, 29, 2 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(0 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 10, 29, 1 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(0 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)

    def test_Winnipeg(self) -> None:
        """America/Winnipeg uses 'Rule Winn' until 2006 which has an 's' suffix
        in the Rule.AT field.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_America_Winnipeg)
        zone_processor.init_for_year(2005)

        # Two MatchingEras: 'Winn' rules, then 'Canada' rules from 2006.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2004, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2006, 1, 1, 0 * 3600, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('Winn', zone_policy['name'])
        self.assertEqual(
            DateTuple(2006, 1, 1, 0 * 3600, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2006, 2, 1, 0 * 3600, 'w'), matches[1].until_date_time)
        zone_policy = cast(ZonePolicy, matches[1].zone_era['zone_policy'])
        self.assertEqual('Canada', zone_policy['name'])

        # Four Transitions; the offset stays at -6h throughout.
        transitions = zone_processor.transitions
        self.assertEqual(4, len(transitions))
        self.assertEqual(
            DateTuple(2004, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2005, 4, 3, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(-6 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2005, 4, 3, 3 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2005, 10, 30, 3 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(-6 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2005, 10, 30, 2 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2006, 1, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(-6 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)
        self.assertEqual(
            DateTuple(2006, 1, 1, 0 * 3600, 'w'),
            transitions[3].start_date_time)
        self.assertEqual(
            DateTuple(2006, 2, 1, 0, 'w'), transitions[3].until_date_time)
        self.assertEqual(-6 * 3600, transitions[3].offset_seconds)
        self.assertEqual(0 * 3600, transitions[3].delta_seconds)

    def test_Moscow(self) -> None:
        """Europe/Moscow uses 's' in the Zone UNTIL field.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Europe_Moscow)
        zone_processor.init_for_year(2011)

        # Two MatchingEras split at the 2011-03-27 02:00s era change.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2010, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2011, 3, 27, 2 * 3600, 's'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('Russia', zone_policy['name'])
        self.assertEqual(
            DateTuple(2011, 3, 27, 2 * 3600, 's'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2012, 2, 1, 0, 'w'), matches[1].until_date_time)
        self.assertEqual('-', matches[1].zone_era['zone_policy'])

        # Two Transitions: UTC+3, then permanent UTC+4.
        transitions = zone_processor.transitions
        self.assertEqual(2, len(transitions))
        self.assertEqual(
            DateTuple(2010, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2011, 3, 27, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(3 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2011, 3, 27, 3 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2012, 2, 1, 0 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(4 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)

    def test_Famagusta(self) -> None:
        """Asia/Famagusta uses 'u' in the Zone UNTIL field.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Asia_Famagusta)
        zone_processor.init_for_year(2017)

        # Two MatchingEras split at 2017-10-29 01:00u.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2016, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2017, 10, 29, 1 * 3600, 'u'), matches[0].until_date_time)
        self.assertEqual('-', matches[0].zone_era['zone_policy'])
        self.assertEqual(
            DateTuple(2017, 10, 29, 1 * 3600, 'u'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2018, 2, 1, 0, 'w'), matches[1].until_date_time)
        zone_policy = cast(ZonePolicy, matches[1].zone_era['zone_policy'])
        self.assertEqual('EUAsia', zone_policy['name'])

        # Two Transitions: UTC+3, then UTC+2 under EUAsia.
        transitions = zone_processor.transitions
        self.assertEqual(2, len(transitions))
        self.assertEqual(
            DateTuple(2016, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2017, 10, 29, 4 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(3 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2017, 10, 29, 3 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2018, 2, 1, 0 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(2 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)

    def test_Santo_Domingo(self) -> None:
        """America/Santo_Domingo uses 2 ZoneEra changes in year 2000.
        """
        zone_processor = ZoneProcessor(
            zone_infos.ZONE_INFO_America_Santo_Domingo
        )
        zone_processor.init_for_year(2000)

        # Three MatchingEras: '-', 'US', then '-' again.
        matches = zone_processor.matches
        self.assertEqual(3, len(matches))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2000, 10, 29, 2 * 3600, 'w'), matches[0].until_date_time)
        self.assertEqual('-', matches[0].zone_era['zone_policy'])
        self.assertEqual(
            DateTuple(2000, 10, 29, 2 * 3600, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2000, 12, 3, 1 * 3600, 'w'), matches[1].until_date_time)
        zone_policy = cast(ZonePolicy, matches[1].zone_era['zone_policy'])
        self.assertEqual('US', zone_policy['name'])
        self.assertEqual(
            DateTuple(2000, 12, 3, 1 * 3600, 'w'), matches[2].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), matches[2].until_date_time)
        self.assertEqual('-', matches[2].zone_era['zone_policy'])

        # Three Transitions: -4h, -5h, then back to -4h.
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2000, 10, 29, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(-4 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 10, 29, 1 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2000, 12, 3, 1 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(-5 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 12, 3, 2 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(-4 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)

    def test_Moncton(self) -> None:
        """America/Moncton transitioned DST at 00:01 through 2006.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_America_Moncton)
        zone_processor.init_for_year(2006)

        # Two MatchingEras: 'Moncton' rules, then 'Canada' rules from 2007.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2005, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2007, 1, 1, 0 * 3600, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('Moncton', zone_policy['name'])
        self.assertEqual(
            DateTuple(2007, 1, 1, 0 * 3600, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2007, 2, 1, 0, 'w'), matches[1].until_date_time)
        zone_policy = cast(ZonePolicy, matches[1].zone_era['zone_policy'])
        self.assertEqual('Canada', zone_policy['name'])

        # Four Transitions; note the +60s in the 00:01 transition times.
        transitions = zone_processor.transitions
        self.assertEqual(4, len(transitions))
        self.assertEqual(
            DateTuple(2005, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2006, 4, 2, 0 * 3600 + 60, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(-4 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2006, 4, 2, 1 * 3600 + 60, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2006, 10, 29, 0 * 3600 + 60, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(-4 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2006, 10, 28, 23 * 3600 + 60, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2007, 1, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(-4 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)
        self.assertEqual(
            DateTuple(2007, 1, 1, 0 * 3600, 'w'),
            transitions[3].start_date_time)
        self.assertEqual(
            DateTuple(2007, 2, 1, 0, 'w'), transitions[3].until_date_time)
        self.assertEqual(-4 * 3600, transitions[3].offset_seconds)
        self.assertEqual(0 * 3600, transitions[3].delta_seconds)

    def test_Istanbul(self) -> None:
        """Europe/Istanbul uses an 'hh:mm' offset in the RULES field in 2015.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Europe_Istanbul)
        zone_processor.init_for_year(2015)

        # Three MatchingEras: EU, fixed ':' offset era, then EU again.
        matches = zone_processor.matches
        self.assertEqual(3, len(matches))
        self.assertEqual(
            DateTuple(2014, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2015, 10, 25, 1 * 3600, 'u'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('EU', zone_policy['name'])
        self.assertEqual(
            DateTuple(2015, 10, 25, 1 * 3600, 'u'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2015, 11, 8, 1 * 3600, 'u'), matches[1].until_date_time)
        self.assertEqual(':', matches[1].zone_era['zone_policy'])
        self.assertEqual(
            DateTuple(2015, 11, 8, 1 * 3600, 'u'), matches[2].start_date_time)
        self.assertEqual(
            DateTuple(2016, 2, 1, 0, 'w'), matches[2].until_date_time)
        zone_policy = cast(ZonePolicy, matches[2].zone_era['zone_policy'])
        self.assertEqual('EU', zone_policy['name'])

        # Four Transitions; DST is extended through the ':' era.
        transitions = zone_processor.transitions
        self.assertEqual(4, len(transitions))
        self.assertEqual(
            DateTuple(2014, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2015, 3, 29, 3 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(2 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2015, 3, 29, 4 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2015, 10, 25, 4 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(2 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2015, 10, 25, 4 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2015, 11, 8, 4 * 3600, 'w'),
            transitions[2].until_date_time)
        self.assertEqual(2 * 3600, transitions[2].offset_seconds)
        self.assertEqual(1 * 3600, transitions[2].delta_seconds)
        self.assertEqual(
            DateTuple(2015, 11, 8, 3 * 3600, 'w'),
            transitions[3].start_date_time)
        self.assertEqual(
            DateTuple(2016, 2, 1, 0, 'w'), transitions[3].until_date_time)
        self.assertEqual(2 * 3600, transitions[3].offset_seconds)
        self.assertEqual(0 * 3600, transitions[3].delta_seconds)

    def test_Dublin(self) -> None:
        """Europe/Dublin uses negative DST during Winter.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Europe_Dublin)
        zone_processor.init_for_year(2000)

        # One MatchingEra under the 'Eire' policy.
        matches = zone_processor.matches
        self.assertEqual(1, len(matches))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('Eire', zone_policy['name'])

        # Three Transitions; delta_seconds is -1h during Winter.
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        self.assertEqual(
            DateTuple(1999, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2000, 3, 26, 1 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(1 * 3600, transitions[0].offset_seconds)
        self.assertEqual(-1 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 3, 26, 2 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2000, 10, 29, 2 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(1 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2000, 10, 29, 1 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2001, 2, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(1 * 3600, transitions[2].offset_seconds)
        self.assertEqual(-1 * 3600, transitions[2].delta_seconds)

    def test_Apia(self) -> None:
        """Pacific/Apia uses a transition time of 24:00 on Dec 29, 2011,
        going from Thursday 29th December 2011 23:59:59 Hours to Saturday 31st
        December 2011 00:00:00 Hours.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Pacific_Apia)
        zone_processor.init_for_year(2011)

        # Two MatchingEras, both under the 'WS' policy, split at 24:00.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2010, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2011, 12, 29, 24 * 3600, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('WS', zone_policy['name'])
        self.assertEqual(
            DateTuple(2011, 12, 29, 24 * 3600, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2012, 2, 1, 0, 'w'), matches[1].until_date_time)
        zone_policy = cast(ZonePolicy, matches[1].zone_era['zone_policy'])
        self.assertEqual('WS', zone_policy['name'])

        # Four Transitions; Dec 30 is skipped (UTC-11h -> UTC+13h).
        transitions = zone_processor.transitions
        self.assertEqual(4, len(transitions))
        self.assertEqual(
            DateTuple(2010, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2011, 4, 2, 4 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(-11 * 3600, transitions[0].offset_seconds)
        self.assertEqual(1 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2011, 4, 2, 3 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2011, 9, 24, 3 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(-11 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2011, 9, 24, 4 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2011, 12, 30, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(-11 * 3600, transitions[2].offset_seconds)
        self.assertEqual(1 * 3600, transitions[2].delta_seconds)
        self.assertEqual(
            DateTuple(2011, 12, 31, 0 * 3600, 'w'),
            transitions[3].start_date_time)
        self.assertEqual(
            DateTuple(2012, 2, 1, 0, 'w'), transitions[3].until_date_time)
        self.assertEqual(13 * 3600, transitions[3].offset_seconds)
        self.assertEqual(1 * 3600, transitions[3].delta_seconds)

    def test_Macquarie(self) -> None:
        """Antarctica/Macquarie changes ZoneEra in 2011 using a 'w' time, but
        the ZoneRule transitions use an 's' time, which happens to coincide with
        the change in ZoneEra. The code must treat those 2 transition times as
        the same point in time.

        In TZ version 2020b (specifically commit
        6427fe6c0cca1dc0f8580f8b96348911ad051570 for github.com/eggert/tz on Thu
        Oct 1 23:59:18 2020) adds an additional ZoneEra line for 2010, changing
        this from 2 to 3. Antarctica/Macquarie stays on AEDT all year in 2010.
        """
        zone_processor = ZoneProcessor(
            zone_infos.ZONE_INFO_Antarctica_Macquarie
        )
        zone_processor.init_for_year(2010)
        matches = zone_processor.matches
        self.assertEqual(3, len(matches))
        # Match 0
        self.assertEqual(
            DateTuple(2009, 12, 1, 0, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2010, 1, 1, 0, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('AT', zone_policy['name'])
        # Match 1
        self.assertEqual(
            DateTuple(2010, 1, 1, 0, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2011, 1, 1, 0, 'w'), matches[1].until_date_time)
        self.assertEqual(':', matches[1].zone_era['zone_policy'])
        # Match 2
        self.assertEqual(
            DateTuple(2011, 1, 1, 0, 'w'), matches[2].start_date_time)
        self.assertEqual(
            DateTuple(2011, 2, 1, 0, 'w'), matches[2].until_date_time)
        zone_policy = cast(ZonePolicy, matches[2].zone_era['zone_policy'])
        self.assertEqual('AT', zone_policy['name'])
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        # Transition 0
        self.assertEqual(
            DateTuple(2009, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2010, 1, 1, 0, 'w'), transitions[0].until_date_time)
        self.assertEqual(10 * 3600, transitions[0].offset_seconds)
        self.assertEqual(1 * 3600, transitions[0].delta_seconds)
        # Transition 1
        self.assertEqual(
            DateTuple(2010, 1, 1, 0, 'w'), transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2011, 1, 1, 0, 'w'), transitions[1].until_date_time)
        self.assertEqual(10 * 3600, transitions[1].offset_seconds)
        self.assertEqual(1 * 3600, transitions[1].delta_seconds)
        # Transition 2
        self.assertEqual(
            DateTuple(2011, 1, 1, 0, 'w'), transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2011, 2, 1, 0, 'w'), transitions[2].until_date_time)
        self.assertEqual(10 * 3600, transitions[2].offset_seconds)
        self.assertEqual(1 * 3600, transitions[2].delta_seconds)

    def test_Simferopol(self) -> None:
        """Europe/Simferopol in 2014 uses a bizarre mixture of 'w' when using EU
        rules (which itself uses 'u' in the UNTIL fields), then uses 's' time to
        switch to Moscow time.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Europe_Simferopol)
        zone_processor.init_for_year(2014)

        # Three MatchingEras: EU, then two fixed-offset eras.
        matches = zone_processor.matches
        self.assertEqual(3, len(matches))
        self.assertEqual(
            DateTuple(2013, 12, 1, 0 * 3600, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2014, 3, 30, 2 * 3600, 'w'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('EU', zone_policy['name'])
        self.assertEqual(
            DateTuple(2014, 3, 30, 2 * 3600, 'w'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2014, 10, 26, 2 * 3600, 's'), matches[1].until_date_time)
        self.assertEqual('-', matches[1].zone_era['zone_policy'])
        self.assertEqual(
            DateTuple(2014, 10, 26, 2 * 3600, 's'), matches[2].start_date_time)
        self.assertEqual(
            DateTuple(2015, 2, 1, 0 * 3600, 'w'), matches[2].until_date_time)
        self.assertEqual('-', matches[2].zone_era['zone_policy'])

        # Three Transitions: UTC+2, UTC+4 (Moscow), then UTC+3.
        transitions = zone_processor.transitions
        self.assertEqual(3, len(transitions))
        self.assertEqual(
            DateTuple(2013, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2014, 3, 30, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(2 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2014, 3, 30, 4 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2014, 10, 26, 2 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(4 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)
        self.assertEqual(
            DateTuple(2014, 10, 26, 1 * 3600, 'w'),
            transitions[2].start_date_time)
        self.assertEqual(
            DateTuple(2015, 2, 1, 0 * 3600, 'w'),
            transitions[2].until_date_time)
        self.assertEqual(3 * 3600, transitions[2].offset_seconds)
        self.assertEqual(0 * 3600, transitions[2].delta_seconds)

    def test_Kamchatka(self) -> None:
        """Asia/Kamchatka uses 's' in the Zone UNTIL and Rule AT fields.
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_Asia_Kamchatka)
        zone_processor.init_for_year(2011)

        # Two MatchingEras split at the 2011-03-27 02:00s era change.
        matches = zone_processor.matches
        self.assertEqual(2, len(matches))
        self.assertEqual(
            DateTuple(2010, 12, 1, 0 * 3600, 'w'), matches[0].start_date_time)
        self.assertEqual(
            DateTuple(2011, 3, 27, 2 * 3600, 's'), matches[0].until_date_time)
        zone_policy = cast(ZonePolicy, matches[0].zone_era['zone_policy'])
        self.assertEqual('Russia', zone_policy['name'])
        self.assertEqual(
            DateTuple(2011, 3, 27, 2 * 3600, 's'), matches[1].start_date_time)
        self.assertEqual(
            DateTuple(2012, 2, 1, 0 * 3600, 'w'), matches[1].until_date_time)
        self.assertEqual('-', matches[1].zone_era['zone_policy'])

        # Two Transitions: UTC+11, then permanent UTC+12.
        transitions = zone_processor.transitions
        self.assertEqual(2, len(transitions))
        self.assertEqual(
            DateTuple(2010, 12, 1, 0, 'w'), transitions[0].start_date_time)
        self.assertEqual(
            DateTuple(2011, 3, 27, 2 * 3600, 'w'),
            transitions[0].until_date_time)
        self.assertEqual(11 * 3600, transitions[0].offset_seconds)
        self.assertEqual(0 * 3600, transitions[0].delta_seconds)
        self.assertEqual(
            DateTuple(2011, 3, 27, 3 * 3600, 'w'),
            transitions[1].start_date_time)
        self.assertEqual(
            DateTuple(2012, 2, 1, 0 * 3600, 'w'),
            transitions[1].until_date_time)
        self.assertEqual(12 * 3600, transitions[1].offset_seconds)
        self.assertEqual(0 * 3600, transitions[1].delta_seconds)
class TestZoneProcessorGetTransition(unittest.TestCase):
    """Tests for ZoneProcessor.get_transition_for_datetime()."""

    def test_get_transition_for_datetime(self) -> None:
        """A Transition must be found for a wall-clock datetime just after a
        DST transition, and also for a datetime inside the spring-forward gap
        (which does not exist on the wall clock, but a transition should still
        be returned).
        """
        zone_processor = ZoneProcessor(zone_infos.ZONE_INFO_America_Los_Angeles)
        probe_datetimes = (
            # Just after a DST transition
            datetime(2000, 4, 2, 3, 0, 0),
            # Inside the non-existent DST gap
            datetime(2000, 4, 2, 2, 59, 59),
        )
        for dt in probe_datetimes:
            self.assertIsNotNone(
                zone_processor.get_transition_for_datetime(dt))
| 39.861468
| 80
| 0.599646
| 5,159
| 43,449
| 4.85346
| 0.052723
| 0.189305
| 0.143776
| 0.114821
| 0.889373
| 0.85874
| 0.841927
| 0.812333
| 0.772755
| 0.740485
| 0
| 0.096566
| 0.278073
| 43,449
| 1,089
| 81
| 39.898072
| 0.701693
| 0.046376
| 0
| 0.665143
| 0
| 0
| 0.032697
| 0
| 0
| 0
| 0
| 0
| 0.368
| 1
| 0.026286
| false
| 0
| 0.027429
| 0
| 0.061714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c76d4f5c25355d19cb37035d76535164421a886b
| 176
|
py
|
Python
|
mitorch/datasets/__init__.py
|
shonohs/mitorch
|
567a8390cc7a600280c51bc4e2a4e1c93fefc801
|
[
"MIT"
] | null | null | null |
mitorch/datasets/__init__.py
|
shonohs/mitorch
|
567a8390cc7a600280c51bc4e2a4e1c93fefc801
|
[
"MIT"
] | 1
|
2020-06-12T05:56:54.000Z
|
2020-06-12T05:56:54.000Z
|
mitorch/datasets/__init__.py
|
shonohs/mitorch
|
567a8390cc7a600280c51bc4e2a4e1c93fefc801
|
[
"MIT"
] | null | null | null |
from .image_dataset import ImageDataset, ObjectDetectionDataset
from .factory import TransformFactory
__all__ = ['ImageDataset', 'ObjectDetectionDataset', 'TransformFactory']
| 35.2
| 72
| 0.835227
| 14
| 176
| 10.142857
| 0.642857
| 0.478873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085227
| 176
| 4
| 73
| 44
| 0.881988
| 0
| 0
| 0
| 0
| 0
| 0.284091
| 0.125
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7b28ee49cd7969c9293c6bb726a733d5c9d6974
| 22
|
py
|
Python
|
icepyx/quest/dataset_scripts/__init__.py
|
zachghiaccio/icepyx
|
4bf75bd94d8b8db15dc8a0ebf14d1ff0966c3f3a
|
[
"BSD-3-Clause"
] | 41
|
2017-11-03T01:23:56.000Z
|
2021-10-03T18:00:32.000Z
|
icepyx/quest/dataset_scripts/__init__.py
|
zachghiaccio/icepyx
|
4bf75bd94d8b8db15dc8a0ebf14d1ff0966c3f3a
|
[
"BSD-3-Clause"
] | 3
|
2021-06-08T21:03:16.000Z
|
2022-03-12T00:18:44.000Z
|
icepyx/quest/dataset_scripts/__init__.py
|
zachghiaccio/icepyx
|
4bf75bd94d8b8db15dc8a0ebf14d1ff0966c3f3a
|
[
"BSD-3-Clause"
] | 7
|
2017-11-09T11:05:30.000Z
|
2020-03-21T17:09:42.000Z
|
from .dataset import *
| 22
| 22
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7b8f075130251b732846dcc79b2d444825dd95c
| 20,069
|
py
|
Python
|
tests/test_async_client.py
|
cenobites/flask-jsonrpc
|
5d930b192859ae24a58b1ceb4273897df2e77cb9
|
[
"BSD-3-Clause"
] | 236
|
2015-01-07T10:38:45.000Z
|
2022-03-31T12:08:35.000Z
|
tests/test_async_client.py
|
cenobites/flask-jsonrpc
|
5d930b192859ae24a58b1ceb4273897df2e77cb9
|
[
"BSD-3-Clause"
] | 107
|
2015-01-22T11:35:28.000Z
|
2022-03-30T07:19:25.000Z
|
tests/test_async_client.py
|
Pandinosaurus/flask-jsonrpc
|
3897575fcb7106c665e2217a5762f1131a7dd536
|
[
"BSD-3-Clause"
] | 69
|
2015-01-05T08:29:36.000Z
|
2022-03-27T17:34:10.000Z
|
# Copyright (c) 2020-2021, Cenobit Technologies, Inc. http://cenobit.es/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Cenobit Technologies nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import pytest
# Skip this entire test module when the optional async dependency is missing.
pytest.importorskip('asgiref')
# Marker for tests that need the async/await support of Python >= 3.7.
# NOTE(review): not applied to any test visible in this chunk — confirm usage.
pyminversion = pytest.mark.skipif(sys.version_info < (3, 7), reason='requires python3.7 or higher')
def test_app_greeting(async_client):
    """jsonrpc.greeting succeeds with no params, positional params and named params."""
    cases = [
        (None, 'Hello Flask JSON-RPC'),
        (['Python'], 'Hello Python'),
        ({'name': 'Flask'}, 'Hello Flask'),
    ]
    for params, expected in cases:
        payload = {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting'}
        if params is not None:
            payload['params'] = params
        response = async_client.post('/api', json=payload)
        assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': expected}
        assert response.status_code == 200
def test_app_greeting_raise_parse_error(async_client):
    """Malformed request bodies must yield a -32700 Parse error with id None.

    Covers three failure modes: wrong content type (form-encoded), a
    single-quoted (non-JSON) body, and a truncated JSON batch body.
    """
    # Form-encoded body: rejected for its mime type before JSON parsing starts.
    rv = async_client.post('/api', data={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting'})
    assert rv.json == {
        'id': None,
        'jsonrpc': '2.0',
        'error': {
            'code': -32700,
            'data': {
                'message': 'Invalid mime type for JSON: application/x-www-form-urlencoded, '
                'use header Content-Type: application/json'
            },
            'message': 'Parse error',
            'name': 'ParseError',
        },
    }
    assert rv.status_code == 400
    # Correct content type but a single-quoted body that is not valid JSON.
    rv = async_client.post(
        '/api',
        data="{'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting'}",
        headers={'Content-Type': 'application/json'},
    )
    assert rv.json == {
        'id': None,
        'jsonrpc': '2.0',
        'error': {
            'code': -32700,
            'data': {'message': 'Invalid JSON: b"{\'id\': 1, \'jsonrpc\': \'2.0\', \'method\': \'jsonrpc.greeting\'}"'},
            'message': 'Parse error',
            'name': 'ParseError',
        },
    }
    assert rv.status_code == 400
    # Truncated batch body: parsing fails, so one error object (not a batch) comes back.
    rv = async_client.post(
        '/api',
        data="""[
            {'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['Flask'], 'id': '1'},
            {'jsonrpc': '2.0', 'method'
        ]""",
        headers={'Content-Type': 'application/json'},
    )
    assert rv.json == {
        'id': None,
        'jsonrpc': '2.0',
        'error': {
            'code': -32700,
            'data': {
                'message': 'Invalid JSON: b"[\\n            {\'jsonrpc\': '
                "'2.0', 'method': 'jsonrpc.greeting', 'params': "
                "['Flask'], 'id': '1'},\\n            "
                "{'jsonrpc': '2.0', 'method'\\n        "
                ']"'
            },
            'message': 'Parse error',
            'name': 'ParseError',
        },
    }
    assert rv.status_code == 400
def test_app_greeting_raise_invalid_request_error(async_client):
    """A request without a 'method' member is rejected with -32600 Invalid Request."""
    response = async_client.post('/api', json={'id': 1, 'jsonrpc': '2.0'})
    expected_error = {
        'code': -32600,
        'data': {'message': "Invalid JSON: {'id': 1, 'jsonrpc': '2.0'}"},
        'message': 'Invalid Request',
        'name': 'InvalidRequestError',
    }
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'error': expected_error}
    assert response.status_code == 400
def test_app_greeting_raise_invalid_params_error(async_client):
    """jsonrpc.greeting rejects malformed structures and wrongly-typed params with -32602."""
    cases = [
        ('Wrong', 'Parameter structures are by-position (tuple, set, list) or by-name (dict): Wrong'),
        ([1], 'type of argument "name" must be str; got int instead'),
        ({'name': 2}, 'type of argument "name" must be str; got int instead'),
    ]
    for params, detail in cases:
        response = async_client.post(
            '/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': params}
        )
        assert response.json == {
            'id': 1,
            'jsonrpc': '2.0',
            'error': {
                'code': -32602,
                'data': {'message': detail},
                'message': 'Invalid params',
                'name': 'InvalidParamsError',
            },
        }
        assert response.status_code == 400
def test_app_greeting_raise_method_not_found_error(async_client):
    """Calling an unregistered method yields a -32601 Method not found error."""
    response = async_client.post('/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'method-not-found'})
    expected_error = {
        'code': -32601,
        'data': {'message': 'Method not found: method-not-found'},
        'message': 'Method not found',
        'name': 'MethodNotFoundError',
    }
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'error': expected_error}
    assert response.status_code == 400
def test_app_echo(async_client):
    """jsonrpc.echo returns its string argument, given positionally or by name."""
    for params, expected in ((['Python'], 'Python'), ({'string': 'Flask'}, 'Flask')):
        response = async_client.post(
            '/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.echo', 'params': params}
        )
        assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': expected}
        assert response.status_code == 200
def test_app_echo_raise_invalid_params_error(async_client):
    """jsonrpc.echo rejects bad structures, wrong types and missing arguments with -32602."""
    _omit = object()  # sentinel: request sent without any 'params' member
    cases = [
        ('Wrong', 'Parameter structures are by-position (tuple, set, list) or by-name (dict): Wrong'),
        ([1], 'type of argument "string" must be str; got int instead'),
        ({'name': 2}, "missing a required argument: 'string'"),
        (_omit, "missing a required argument: 'string'"),
    ]
    for params, detail in cases:
        payload = {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.echo'}
        if params is not _omit:
            payload['params'] = params
        response = async_client.post('/api', json=payload)
        assert response.json == {
            'id': 1,
            'jsonrpc': '2.0',
            'error': {
                'code': -32602,
                'data': {'message': detail},
                'message': 'Invalid params',
                'name': 'InvalidParamsError',
            },
        }
        assert response.status_code == 400
def test_app_notify(async_client):
    """Notifications (requests without an 'id') produce an empty 204 response."""
    for payload in (
        {'jsonrpc': '2.0', 'method': 'jsonrpc.notify'},
        {'jsonrpc': '2.0', 'method': 'jsonrpc.notify', 'params': ['Some string']},
    ):
        response = async_client.post('/api', json=payload)
        assert response.json is None
        assert response.status_code == 204
def test_app_fails(async_client):
    """jsonrpc.fails echoes even numbers and raises a -32000 ServerError for odd ones."""
    response = async_client.post('/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.fails', 'params': [2]})
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': 2}
    assert response.status_code == 200

    response = async_client.post('/api', json={'id': '1', 'jsonrpc': '2.0', 'method': 'jsonrpc.fails', 'params': [1]})
    expected_error = {
        'code': -32000,
        'data': {'message': 'number is odd'},
        'message': 'Server error',
        'name': 'ServerError',
    }
    assert response.json == {'id': '1', 'jsonrpc': '2.0', 'error': expected_error}
    assert response.status_code == 500
def test_app_strange_echo(async_client):
data = {
'id': 1,
'jsonrpc': '2.0',
'method': 'jsonrpc.strangeEcho',
'params': ['string', {'a': 1}, ['a', 'b', 'c'], 23, 'Flask'],
}
rv = async_client.post('/api', json=data)
assert rv.json == {'id': 1, 'jsonrpc': '2.0', 'result': ['string', {'a': 1}, ['a', 'b', 'c'], 23, 'Flask']}
assert rv.status_code == 200
data = {
'id': 1,
'jsonrpc': '2.0',
'method': 'jsonrpc.strangeEcho',
'params': ['string', {'a': 1}, ['a', 'b', 'c'], 23],
}
rv = async_client.post('/api', json=data)
assert rv.json == {'id': 1, 'jsonrpc': '2.0', 'result': ['string', {'a': 1}, ['a', 'b', 'c'], 23, 'Default']}
assert rv.status_code == 200
def test_app_sum(async_client):
    """jsonrpc.sum adds two numbers, for both int and float operands."""
    for operands, total in (([1, 3], 4), ([0.5, 1.5], 2.0)):
        response = async_client.post(
            '/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.sum', 'params': operands}
        )
        assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': total}
        assert response.status_code == 200
def test_app_decorators(async_client):
    """A decorated view still dispatches and may rewrite the result string."""
    response = async_client.post(
        '/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.decorators', 'params': ['Python']}
    )
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': 'Hello Python from decorator, ;)'}
    assert response.status_code == 200
def test_app_return_status_code(async_client):
    """A view may override the HTTP status code of a successful response."""
    payload = {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.returnStatusCode', 'params': ['OK']}
    response = async_client.post('/api', json=payload)
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': 'Status Code OK'}
    assert response.status_code == 201
def test_app_return_headers(async_client):
    """A view may attach extra HTTP headers to a successful response."""
    payload = {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.returnHeaders', 'params': ['OK']}
    response = async_client.post('/api', json=payload)
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': 'Headers OK'}
    assert response.status_code == 200
    assert ('X-JSONRPC', '1') in list(response.headers)
def test_app_return_status_code_and_headers(async_client):
    """A view may override both the status code and the headers at once."""
    payload = {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.returnStatusCodeAndHeaders', 'params': ['OK']}
    response = async_client.post('/api', json=payload)
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': 'Status Code and Headers OK'}
    assert response.status_code == 400
    assert ('X-JSONRPC', '1') in list(response.headers)
def test_app_with_rcp_batch(async_client):
    """Batch requests get one response per call; invalid entries yield per-entry errors."""
    response = async_client.post('/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting'})
    assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': 'Hello Flask JSON-RPC'}
    assert response.status_code == 200

    batch = [
        {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['Python']},
        {'id': 2, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['Flask']},
        {'id': 3, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['JSON-RCP']},
    ]
    response = async_client.post('/api', json=batch)
    assert response.json == [
        {'id': 1, 'jsonrpc': '2.0', 'result': 'Hello Python'},
        {'id': 2, 'jsonrpc': '2.0', 'result': 'Hello Flask'},
        {'id': 3, 'jsonrpc': '2.0', 'result': 'Hello JSON-RCP'},
    ]
    assert response.status_code == 200

    # A notification (no id) produces no entry; an entry without 'method' gets
    # an error object while the valid entries still succeed.
    mixed_batch = [
        {'id': 1, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['Python']},
        {'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['Flask']},
        {'id': 3, 'jsonrpc': '2.0', 'params': ['Flask']},
        {'id': 4, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting', 'params': ['JSON-RCP']},
    ]
    response = async_client.post('/api', json=mixed_batch)
    assert response.json == [
        {'id': 1, 'jsonrpc': '2.0', 'result': 'Hello Python'},
        {
            'id': 3,
            'jsonrpc': '2.0',
            'error': {
                'code': -32600,
                'data': {'message': "Invalid JSON: {'id': 3, 'jsonrpc': '2.0', 'params': ['Flask']}"},
                'message': 'Invalid Request',
                'name': 'InvalidRequestError',
            },
        },
        {'id': 4, 'jsonrpc': '2.0', 'result': 'Hello JSON-RCP'},
    ]
    assert response.status_code == 200

    response = async_client.post('/api', json={'id': 2, 'jsonrpc': '2.0', 'method': 'jsonrpc.greeting'})
    assert response.json == {'id': 2, 'jsonrpc': '2.0', 'result': 'Hello Flask JSON-RPC'}
    assert response.status_code == 200
def test_app_class(async_client):
    """Methods registered from a class are dispatched like plain functions."""
    success_cases = [
        ({'id': 1, 'jsonrpc': '2.0', 'method': 'classapp.index'}, 'Hello Flask JSON-RPC'),
        ({'id': 1, 'jsonrpc': '2.0', 'method': 'greeting', 'params': ['Python']}, 'Hello Python'),
        ({'id': 1, 'jsonrpc': '2.0', 'method': 'hello', 'params': {'name': 'Flask'}}, 'Hello Flask'),
        ({'id': 1, 'jsonrpc': '2.0', 'method': 'echo', 'params': ['Python', 1]}, 'Python'),
    ]
    for payload, expected in success_cases:
        response = async_client.post('/api', json=payload)
        assert response.json == {'id': 1, 'jsonrpc': '2.0', 'result': expected}
        assert response.status_code == 200

    response = async_client.post('/api', json={'jsonrpc': '2.0', 'method': 'notify', 'params': ['Python']})
    assert response.status_code == 204

    response = async_client.post('/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'fails', 'params': [13]})
    assert response.json == {
        'id': 1,
        'jsonrpc': '2.0',
        'error': {
            'code': -32000,
            'data': {'message': 'number is odd'},
            'message': 'Server error',
            'name': 'ServerError',
        },
    }
    assert response.status_code == 500
def test_app_system_describe(async_client):
    """system.describe lists every registered method with its typed signature.

    The procs list is asserted in full, pinning both the registration order and
    the parameter/return type names (presumably derived from the Python type
    annotations of the registered views — confirm against the app fixture).
    """
    rv = async_client.post('/api', json={'id': 1, 'jsonrpc': '2.0', 'method': 'system.describe'})
    assert rv.json['id'] == 1
    assert rv.json['jsonrpc'] == '2.0'
    assert rv.json['result']['name'] == 'Flask-JSONRPC'
    assert rv.json['result']['sdversion'] == '1.0'
    assert rv.json['result']['summary'] is None
    assert rv.json['result']['version'] == '2.0'
    assert rv.json['result']['procs'] == [
        {
            'name': 'jsonrpc.greeting',
            'params': [{'name': 'name', 'type': 'String'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.echo',
            'params': [{'name': 'string', 'type': 'String'}, {'name': '_some', 'type': 'Object'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.notify',
            'params': [{'name': '_string', 'type': 'String'}],
            'return': {'type': 'Null'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.fails',
            'params': [{'name': 'n', 'type': 'Number'}],
            'return': {'type': 'Number'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.strangeEcho',
            'params': [
                {'name': 'string', 'type': 'String'},
                {'name': 'omg', 'type': 'Object'},
                {'name': 'wtf', 'type': 'Array'},
                {'name': 'nowai', 'type': 'Number'},
                {'name': 'yeswai', 'type': 'String'},
            ],
            'return': {'type': 'Array'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.sum',
            'params': [{'name': 'a', 'type': 'Number'}, {'name': 'b', 'type': 'Number'}],
            'return': {'type': 'Number'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.decorators',
            'params': [{'name': 'string', 'type': 'String'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.returnStatusCode',
            'params': [{'name': 's', 'type': 'String'}],
            'return': {'type': 'Array'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.returnHeaders',
            'params': [{'name': 's', 'type': 'String'}],
            'return': {'type': 'Array'},
            'summary': None,
        },
        {
            'name': 'jsonrpc.returnStatusCodeAndHeaders',
            'params': [{'name': 's', 'type': 'String'}],
            'return': {'type': 'Array'},
            'summary': None,
        },
        {
            'name': 'classapp.index',
            'params': [{'name': 'name', 'type': 'String'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'greeting',
            'params': [{'name': 'name', 'type': 'String'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'hello',
            'params': [{'name': 'name', 'type': 'String'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'echo',
            'params': [{'name': 'string', 'type': 'String'}, {'name': '_some', 'type': 'Object'}],
            'return': {'type': 'String'},
            'summary': None,
        },
        {
            'name': 'notify',
            'params': [{'name': '_string', 'type': 'String'}],
            'return': {'type': 'Null'},
            'summary': None,
        },
        {'name': 'fails', 'params': [{'name': 'n', 'type': 'Number'}], 'return': {'type': 'Number'}, 'summary': None},
    ]
    assert rv.status_code == 200
| 37.164815
| 120
| 0.51298
| 2,287
| 20,069
| 4.427197
| 0.111937
| 0.018568
| 0.081778
| 0.076049
| 0.800889
| 0.77521
| 0.756346
| 0.750914
| 0.727704
| 0.707062
| 0
| 0.035176
| 0.276147
| 20,069
| 539
| 121
| 37.233766
| 0.661802
| 0.076287
| 0
| 0.539823
| 0
| 0.011062
| 0.331749
| 0.010372
| 0
| 0
| 0
| 0
| 0.192478
| 1
| 0.039823
| false
| 0
| 0.006637
| 0
| 0.04646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1bdf73a20cad4a1c3c3ca3de32ffb790e37b305d
| 155
|
py
|
Python
|
adaptivedistillation/apis/__init__.py
|
wyze-AI/AdaptiveDistillation
|
5f4575794101dbb1ed2f7e90a2be03856f76041c
|
[
"MIT"
] | 1
|
2022-01-12T22:24:07.000Z
|
2022-01-12T22:24:07.000Z
|
adaptivedistillation/apis/__init__.py
|
wyze-AI/AdaptiveDistillation
|
5f4575794101dbb1ed2f7e90a2be03856f76041c
|
[
"MIT"
] | null | null | null |
adaptivedistillation/apis/__init__.py
|
wyze-AI/AdaptiveDistillation
|
5f4575794101dbb1ed2f7e90a2be03856f76041c
|
[
"MIT"
] | 2
|
2022-01-12T22:24:17.000Z
|
2022-02-15T05:46:30.000Z
|
from .test import multi_gpu_test, single_gpu_test
from .train import train_model
# Public names re-exported by this package (its star-import surface).
__all__ = ['multi_gpu_test', 'single_gpu_test', 'train_model']
| 22.142857
| 54
| 0.748387
| 23
| 155
| 4.434783
| 0.391304
| 0.27451
| 0.235294
| 0.352941
| 0.490196
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 155
| 6
| 55
| 25.833333
| 0.784615
| 0
| 0
| 0
| 0
| 0
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1beaee3ce9b5e5df0d894f9d785ac2eba6197143
| 37,963
|
py
|
Python
|
templates/project/protos/python/wallet/resources_pb2.py
|
sadegh-moayedizadeh/Anilius
|
669cb9a7af72ae251a979a88f6556d14ccb63c2f
|
[
"MIT"
] | 3
|
2020-02-07T11:54:36.000Z
|
2020-02-09T11:53:26.000Z
|
templates/project/protos/python/wallet/resources_pb2.py
|
TomanPay/Anilius
|
24a7ff3ae8bd21901e46b475feadf72e46516481
|
[
"MIT"
] | null | null | null |
templates/project/protos/python/wallet/resources_pb2.py
|
TomanPay/Anilius
|
24a7ff3ae8bd21901e46b475feadf72e46516481
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: wallet/resources.proto
import sys
# Generated py2/py3 shim: on Python 3 the and/or chain picks the second lambda,
# which latin-1-encodes the serialized descriptor source string to bytes.
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Process-wide symbol database; the generated descriptors register into it.
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="wallet/resources.proto",
package="qbit.wallet",
syntax="proto3",
serialized_options=None,
serialized_pb=_b(
'\n\x16wallet/resources.proto\x12\x0bqbit.wallet"U\n\x06Wallet\x12\x11\n\twallet_id\x18\x01 \x01(\t\x12\x12\n\nis_blocked\x18\x02 \x01(\x08\x12\x13\n\x0bis_negative\x18\x03 \x01(\x08\x12\x0f\n\x07\x62\x61lance\x18\x04 \x01(\x05"\xe6\x01\n\x07Payment\x12\x13\n\x0bis_verified\x18\x01 \x01(\x08\x12\x12\n\nis_blocked\x18\x02 \x01(\x08\x12\x12\n\npartner_id\x18\x03 \x01(\t\x12\x12\n\npayment_id\x18\x04 \x01(\t\x12\x10\n\x08order_id\x18\x05 \x01(\t\x12\x11\n\twallet_id\x18\x06 \x01(\t\x12\x0e\n\x06\x61mount\x18\x07 \x01(\x05\x12\x17\n\x0fprevious_amount\x18\x08 \x01(\x05\x12\x15\n\rrefund_amount\x18\t \x01(\x05\x12\x0e\n\x06status\x18\n \x01(\t\x12\x15\n\rrefund_status\x18\x0b \x01(\t"\x9e\x01\n\x0bTransaction\x12\x12\n\npayment_id\x18\x01 \x01(\t\x12\x12\n\npartner_id\x18\x02 \x01(\t\x12\x11\n\twallet_id\x18\x03 \x01(\t\x12\x10\n\x08psp_name\x18\x04 \x01(\t\x12\x0c\n\x04type\x18\x05 \x01(\t\x12\x0e\n\x06\x61mount\x18\x06 \x01(\x05\x12\x14\n\x0ctrace_number\x18\x07 \x01(\x05\x12\x0e\n\x06status\x18\x08 \x01(\t"s\n\x04User\x12\x0f\n\x07user_id\x18\x02 \x01(\t\x12\x11\n\twallet_id\x18\x03 \x01(\t\x12\x0e\n\x06\x61vatar\x18\x04 \x01(\t\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x12\n\nfirst_name\x18\x06 \x01(\t\x12\x11\n\tlast_name\x18\x07 \x01(\t"\xaf\x01\n\nPermission\x12\x10\n\x08is_grant\x18\x01 \x01(\x08\x12\x15\n\rpermission_id\x18\x02 \x01(\t\x12?\n\x0fpermission_type\x18\x03 \x01(\x0e\x32&.qbit.wallet.Permission.PermissionType"7\n\x0ePermissionType\x12\x07\n\x03\x41LL\x10\x00\x12\x0b\n\x07GENERAL\x10\x01\x12\x0f\n\x0bGET_BALANCE\x10\x02"f\n\x11RequestPermission\x12\x10\n\x08is_grant\x18\x01 \x01(\x08\x12?\n\x0fpermission_type\x18\x02 \x01(\x0e\x32&.qbit.wallet.Permission.PermissionType"l\n\x07Partner\x12\x12\n\npartner_id\x18\x01 \x01(\t\x12\x1c\n\x14\x65xpire_client_secret\x18\x02 \x01(\t\x12\x17\n\x0fmax_debt_amount\x18\x06 \x01(\x05\x12\x16\n\x0emax_debt_count\x18\x07 \x01(\x05"D\n\x0bPartnerDebt\x12\x12\n\npartner_id\x18\x01 \x01(\t\x12\x11\n\twallet_id\x18\x02 
\x01(\t\x12\x0e\n\x06\x61mount\x18\x03 \x01(\x05"\xb3\x01\n\x07Service\x12\x12\n\nservice_id\x18\x01 \x01(\t\x12\x12\n\npartner_id\x18\x02 \x01(\t\x12\r\n\x05title\x18\x03 \x01(\t\x12\x12\n\nmax_amount\x18\x04 \x01(\x05\x12\x17\n\x0fmax_debt_amount\x18\x05 \x01(\x05\x12\x14\n\x0cmodify_count\x18\x06 \x01(\x05\x12\x18\n\x10modify_crm_count\x18\x07 \x01(\x05\x12\x14\n\x0c\x63\x61ncel_count\x18\x08 \x01(\x05\x62\x06proto3'
),
)
# Generated EnumDescriptor for qbit.wallet.Permission.PermissionType
# (ALL=0, GENERAL=1, GET_BALANCE=2). This module is protoc output — do not
# hand-edit; regenerate from wallet/resources.proto instead.
_PERMISSION_PERMISSIONTYPE = _descriptor.EnumDescriptor(
    name="PermissionType",
    full_name="qbit.wallet.Permission.PermissionType",
    filename=None,
    file=DESCRIPTOR,
    values=[
        _descriptor.EnumValueDescriptor(
            name="ALL", index=0, number=0, serialized_options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="GENERAL", index=1, number=1, serialized_options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="GET_BALANCE", index=2, number=2, serialized_options=None, type=None
        ),
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=758,
    serialized_end=813,
)
_sym_db.RegisterEnumDescriptor(_PERMISSION_PERMISSIONTYPE)
# Generated Descriptor for the qbit.wallet.Wallet message (protoc output —
# do not hand-edit; regenerate from wallet/resources.proto). Field type codes
# follow descriptor.proto: 9 = string, 8 = bool, 5 = int32.
_WALLET = _descriptor.Descriptor(
    name="Wallet",
    full_name="qbit.wallet.Wallet",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="wallet_id",
            full_name="qbit.wallet.Wallet.wallet_id",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="is_blocked",
            full_name="qbit.wallet.Wallet.is_blocked",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="is_negative",
            full_name="qbit.wallet.Wallet.is_negative",
            index=2,
            number=3,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="balance",
            full_name="qbit.wallet.Wallet.balance",
            index=3,
            number=4,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=39,
    serialized_end=124,
)
# Generated Descriptor for the qbit.wallet.Payment message (protoc output —
# do not hand-edit; regenerate from wallet/resources.proto). Field type codes
# follow descriptor.proto: 9 = string, 8 = bool, 5 = int32.
_PAYMENT = _descriptor.Descriptor(
    name="Payment",
    full_name="qbit.wallet.Payment",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="is_verified",
            full_name="qbit.wallet.Payment.is_verified",
            index=0,
            number=1,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="is_blocked",
            full_name="qbit.wallet.Payment.is_blocked",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="partner_id",
            full_name="qbit.wallet.Payment.partner_id",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="payment_id",
            full_name="qbit.wallet.Payment.payment_id",
            index=3,
            number=4,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="order_id",
            full_name="qbit.wallet.Payment.order_id",
            index=4,
            number=5,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="wallet_id",
            full_name="qbit.wallet.Payment.wallet_id",
            index=5,
            number=6,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="amount",
            full_name="qbit.wallet.Payment.amount",
            index=6,
            number=7,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="previous_amount",
            full_name="qbit.wallet.Payment.previous_amount",
            index=7,
            number=8,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="refund_amount",
            full_name="qbit.wallet.Payment.refund_amount",
            index=8,
            number=9,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="status",
            full_name="qbit.wallet.Payment.status",
            index=9,
            number=10,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="refund_status",
            full_name="qbit.wallet.Payment.refund_status",
            index=10,
            number=11,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=127,
    serialized_end=357,
)
# Generated Descriptor for the qbit.wallet.Transaction message (protoc output —
# do not hand-edit; regenerate from wallet/resources.proto). Field type codes
# follow descriptor.proto: 9 = string, 5 = int32.
_TRANSACTION = _descriptor.Descriptor(
    name="Transaction",
    full_name="qbit.wallet.Transaction",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="payment_id",
            full_name="qbit.wallet.Transaction.payment_id",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="partner_id",
            full_name="qbit.wallet.Transaction.partner_id",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="wallet_id",
            full_name="qbit.wallet.Transaction.wallet_id",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="psp_name",
            full_name="qbit.wallet.Transaction.psp_name",
            index=3,
            number=4,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="type",
            full_name="qbit.wallet.Transaction.type",
            index=4,
            number=5,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="amount",
            full_name="qbit.wallet.Transaction.amount",
            index=5,
            number=6,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="trace_number",
            full_name="qbit.wallet.Transaction.trace_number",
            index=6,
            number=7,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="status",
            full_name="qbit.wallet.Transaction.status",
            index=7,
            number=8,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=360,
    serialized_end=518,
)
# Generated Descriptor for the qbit.wallet.User message (protoc output —
# do not hand-edit; regenerate from wallet/resources.proto). All fields are
# strings (type code 9); note field numbering starts at 2 per the .proto.
_USER = _descriptor.Descriptor(
    name="User",
    full_name="qbit.wallet.User",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="user_id",
            full_name="qbit.wallet.User.user_id",
            index=0,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="wallet_id",
            full_name="qbit.wallet.User.wallet_id",
            index=1,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="avatar",
            full_name="qbit.wallet.User.avatar",
            index=2,
            number=4,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="username",
            full_name="qbit.wallet.User.username",
            index=3,
            number=5,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="first_name",
            full_name="qbit.wallet.User.first_name",
            index=4,
            number=6,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="last_name",
            full_name="qbit.wallet.User.last_name",
            index=5,
            number=7,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=520,
    serialized_end=635,
)
_PERMISSION = _descriptor.Descriptor(
name="Permission",
full_name="qbit.wallet.Permission",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="is_grant",
full_name="qbit.wallet.Permission.is_grant",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="permission_id",
full_name="qbit.wallet.Permission.permission_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="permission_type",
full_name="qbit.wallet.Permission.permission_type",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_PERMISSION_PERMISSIONTYPE],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=638,
serialized_end=813,
)
_REQUESTPERMISSION = _descriptor.Descriptor(
name="RequestPermission",
full_name="qbit.wallet.RequestPermission",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="is_grant",
full_name="qbit.wallet.RequestPermission.is_grant",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="permission_type",
full_name="qbit.wallet.RequestPermission.permission_type",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=815,
serialized_end=917,
)
_PARTNER = _descriptor.Descriptor(
name="Partner",
full_name="qbit.wallet.Partner",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partner_id",
full_name="qbit.wallet.Partner.partner_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="expire_client_secret",
full_name="qbit.wallet.Partner.expire_client_secret",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_debt_amount",
full_name="qbit.wallet.Partner.max_debt_amount",
index=2,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_debt_count",
full_name="qbit.wallet.Partner.max_debt_count",
index=3,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=919,
serialized_end=1027,
)
_PARTNERDEBT = _descriptor.Descriptor(
name="PartnerDebt",
full_name="qbit.wallet.PartnerDebt",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partner_id",
full_name="qbit.wallet.PartnerDebt.partner_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="wallet_id",
full_name="qbit.wallet.PartnerDebt.wallet_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="amount",
full_name="qbit.wallet.PartnerDebt.amount",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1029,
serialized_end=1097,
)
_SERVICE = _descriptor.Descriptor(
name="Service",
full_name="qbit.wallet.Service",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="service_id",
full_name="qbit.wallet.Service.service_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="partner_id",
full_name="qbit.wallet.Service.partner_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="title",
full_name="qbit.wallet.Service.title",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_amount",
full_name="qbit.wallet.Service.max_amount",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_debt_amount",
full_name="qbit.wallet.Service.max_debt_amount",
index=4,
number=5,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="modify_count",
full_name="qbit.wallet.Service.modify_count",
index=5,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="modify_crm_count",
full_name="qbit.wallet.Service.modify_crm_count",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cancel_count",
full_name="qbit.wallet.Service.cancel_count",
index=7,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1100,
serialized_end=1279,
)
_PERMISSION.fields_by_name["permission_type"].enum_type = _PERMISSION_PERMISSIONTYPE
_PERMISSION_PERMISSIONTYPE.containing_type = _PERMISSION
_REQUESTPERMISSION.fields_by_name[
"permission_type"
].enum_type = _PERMISSION_PERMISSIONTYPE
DESCRIPTOR.message_types_by_name["Wallet"] = _WALLET
DESCRIPTOR.message_types_by_name["Payment"] = _PAYMENT
DESCRIPTOR.message_types_by_name["Transaction"] = _TRANSACTION
DESCRIPTOR.message_types_by_name["User"] = _USER
DESCRIPTOR.message_types_by_name["Permission"] = _PERMISSION
DESCRIPTOR.message_types_by_name["RequestPermission"] = _REQUESTPERMISSION
DESCRIPTOR.message_types_by_name["Partner"] = _PARTNER
DESCRIPTOR.message_types_by_name["PartnerDebt"] = _PARTNERDEBT
DESCRIPTOR.message_types_by_name["Service"] = _SERVICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Wallet = _reflection.GeneratedProtocolMessageType(
"Wallet",
(_message.Message,),
{
"DESCRIPTOR": _WALLET,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.Wallet)
},
)
_sym_db.RegisterMessage(Wallet)
Payment = _reflection.GeneratedProtocolMessageType(
"Payment",
(_message.Message,),
{
"DESCRIPTOR": _PAYMENT,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.Payment)
},
)
_sym_db.RegisterMessage(Payment)
Transaction = _reflection.GeneratedProtocolMessageType(
"Transaction",
(_message.Message,),
{
"DESCRIPTOR": _TRANSACTION,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.Transaction)
},
)
_sym_db.RegisterMessage(Transaction)
User = _reflection.GeneratedProtocolMessageType(
"User",
(_message.Message,),
{
"DESCRIPTOR": _USER,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.User)
},
)
_sym_db.RegisterMessage(User)
Permission = _reflection.GeneratedProtocolMessageType(
"Permission",
(_message.Message,),
{
"DESCRIPTOR": _PERMISSION,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.Permission)
},
)
_sym_db.RegisterMessage(Permission)
RequestPermission = _reflection.GeneratedProtocolMessageType(
"RequestPermission",
(_message.Message,),
{
"DESCRIPTOR": _REQUESTPERMISSION,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.RequestPermission)
},
)
_sym_db.RegisterMessage(RequestPermission)
Partner = _reflection.GeneratedProtocolMessageType(
"Partner",
(_message.Message,),
{
"DESCRIPTOR": _PARTNER,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.Partner)
},
)
_sym_db.RegisterMessage(Partner)
PartnerDebt = _reflection.GeneratedProtocolMessageType(
"PartnerDebt",
(_message.Message,),
{
"DESCRIPTOR": _PARTNERDEBT,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.PartnerDebt)
},
)
_sym_db.RegisterMessage(PartnerDebt)
Service = _reflection.GeneratedProtocolMessageType(
"Service",
(_message.Message,),
{
"DESCRIPTOR": _SERVICE,
"__module__": "wallet.resources_pb2"
# @@protoc_insertion_point(class_scope:qbit.wallet.Service)
},
)
_sym_db.RegisterMessage(Service)
# @@protoc_insertion_point(module_scope)
| 30.615323
| 2,429
| 0.559308
| 3,877
| 37,963
| 5.202734
| 0.063193
| 0.063457
| 0.065589
| 0.05265
| 0.798572
| 0.737197
| 0.709038
| 0.688315
| 0.675921
| 0.666204
| 0
| 0.040881
| 0.336327
| 37,963
| 1,239
| 2,430
| 30.640032
| 0.759714
| 0.018992
| 0
| 0.802864
| 1
| 0.000842
| 0.149461
| 0.108846
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004212
| 0
| 0.004212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
402652e6a10f39cf397f8ef1122a87e4384d1819
| 48
|
py
|
Python
|
django_xfields/forms/__init__.py
|
leviplj/django_xfields
|
6108173f1d24b71b69bd1634f411b0ad5383f948
|
[
"MIT"
] | null | null | null |
django_xfields/forms/__init__.py
|
leviplj/django_xfields
|
6108173f1d24b71b69bd1634f411b0ad5383f948
|
[
"MIT"
] | null | null | null |
django_xfields/forms/__init__.py
|
leviplj/django_xfields
|
6108173f1d24b71b69bd1634f411b0ad5383f948
|
[
"MIT"
] | null | null | null |
from django_xfields.forms.fields import * # NOQA
| 48
| 48
| 0.8125
| 7
| 48
| 5.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 1
| 48
| 48
| 0.883721
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4061e6d70b6045eb8869b39ee7640a8adb465979
| 10,880
|
py
|
Python
|
analyzeGrain.py
|
charpagne/slipAnalysis
|
2b619fa4f029250e669ef0b6107cb66c47cc9dfa
|
[
"Apache-2.0"
] | 2
|
2020-03-05T02:47:50.000Z
|
2022-02-19T00:10:51.000Z
|
analyzeGrain.py
|
charpagne/slipAnalysis
|
2b619fa4f029250e669ef0b6107cb66c47cc9dfa
|
[
"Apache-2.0"
] | null | null | null |
analyzeGrain.py
|
charpagne/slipAnalysis
|
2b619fa4f029250e669ef0b6107cb66c47cc9dfa
|
[
"Apache-2.0"
] | 2
|
2021-02-27T02:46:20.000Z
|
2021-06-10T06:06:46.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 18:17:36 2020
@author: Arsenic
"""
import numpy as np
from matplotlib import pyplot as plt, colors, colorbar, cm
import crystallography
import seaborn as sns
sns.set(style="whitegrid")
lattice = 'FCC'
# load grain euler angles from TSL OIM
grain = 48
phi1= np.genfromtxt("grainfile2.txt",usecols=[1]).astype(float)
phi= np.genfromtxt("grainfile2.txt",usecols=[2]).astype(float)
phi2= np.genfromtxt("grainfile2.txt",usecols=[3]).astype(float)
phi1 = np.insert(phi1, 0, 0)
phi = np.insert(phi, 0, 0)
phi2 = np.insert(phi2, 0, 0)
eulers = np.column_stack((phi1,phi,phi2)) # Euler angles are in degrees
numGrains = eulers.shape[0]
# plot schmid factors
eulers_grain = eulers[grain]
if lattice == 'BCC':
for plane_type in ('110','112','123','134'):
schmidFactors = np.array(crystallography.calc_sfs(eulers_grain,plane_type,lattice))
if plane_type == '110':
legend_x =np.array(['(110)\n[-111]','(110)\n[1-11]',
'(101)\n[-111]','(101)\n[11-1]',
'(011)\n[1-11]','(011)\n[11-1]',
'(10-1)\n[111]','(10-1)\n[1-11]',
'(1-10)\n[111]','(1-10)\n[11-1]',
'(01-1)\n[111]','(01-1)\n[-111]'])
fig1 = plt.figure(figsize=(9,4))
sfbars = plt.bar(x=np.arange(1,schmidFactors.shape[0]+1), height=schmidFactors,
width=0.8, align='center', data=None,tick_label = legend_x,color='gold')
plt.grid(False)
fig1.savefig('grain_%s_sf_%s.png' %(grain,int(plane_type)), format='png', dpi=1000 )
if plane_type == '112':
legend_x =np.array(['(112)\n[11-1]','(121)\n[1-11]','(211)\n[-111]',
'(11-2)\n[111]','(1-21)\n[111]','(-211)\n[111]',
'(-112)\n[1-11]','(2-11)\n[11-1]','(12-1)\n[-111]',
'(1-12)\n[-111]','(21-1)\n[1-11]','(-121)\n[11-1]'])
fig2 = plt.figure(figsize=(9,4))
sfbars = plt.bar(x=np.arange(1,schmidFactors.shape[0]+1), height=schmidFactors,
width=0.8, align='center', data=None,tick_label = legend_x,color='navy')
plt.grid(False)
fig2.savefig('grain_%s_sf_%s.png' %(grain,int(plane_type)), format='png', dpi=1000 )
if plane_type == '123':
legend_x =np.array(['(123)\n[11-1]','(312)\n[-111]','(231)\n[1-11]',
'(-123)\n[1-11]','(3-12)\n[11-1]','(23-1)\n[-111]',
'(1-23)\n[-111]','(31-2)\n[1-11]','(2-31)\n[11-1]',
'(12-3)\n[111]','(-312)\n[111]','(2-31)\n[111]',
'(321)\n[-111]','(213)\n[11-1]','(132)\n[1-11]',
'(-321)\n[111]','(21-3)\n[111]','(1-32)\n[111]',
'(3-21)\n[11-1]','(-213)\n[1-11]','(13-2)\n[-111]',
'(32-1)\n[1-11]','(2-13)\n[-111]','(-132)\n[11-1]'])
fig3 = plt.figure(figsize=(18,4))
sfbars = plt.bar(x=np.arange(1,schmidFactors.shape[0]+1), height=schmidFactors,
width=0.8, align='center', data=None,tick_label = legend_x,color='mediumvioletred')
plt.grid(False)
fig3.savefig('grain_%s_sf_%s.png' %(grain,int(plane_type)), format='png', dpi=1000 )
if plane_type == '134':
legend_x =np.array(['(134)\n[11-1]','(413)\n[-111]','(341)\n[1-11]',
'(-134)\n[1-11]','(4-13)\n[11-1]','(34-1)\n[-111]',
'(1-34)\n[-111]','(41-3)\n[1-11]','(3-41)\n[11-1]',
'(13-4)\n[111]','(-413)\n[111]','(3-41)\n[111]',
'(431)\n[111]','(314)\n[111]','(143)\n[111]',
'(-431)\n[111]','(31-4)\n[111]','(1-43)\n[111]',
'(4-31)\n[111]','(-314)\n[111]','(14-3)\n[111]',
'(43-1)\n[111]','(3-14)\n[111]','(-143)\n[111]'])
fig4 = plt.figure(figsize=(18,4))
sfbars = plt.bar(x=np.arange(1,schmidFactors.shape[0]+1), height=schmidFactors,
width=0.8, align='center', data=None,tick_label = legend_x,color='darkgreen')
plt.grid(False)
fig4.savefig('grain_%s_sf_%s.png' %(grain,int(plane_type)), format='png', dpi=1000 )
# plot plane traces
file = open(('grain_%s_plane-traces.txt' %grain),'w')
file.write("grain:%s" %grain + "\n")
for plane_type in ('110','112','123','134'):
planes = crystallography.gen_planes(plane_type)
traces = []
fig5 = plt.figure(figsize=(5,5))
color=iter(cm.nipy_spectral(np.linspace(0,1,planes.shape[0])))
ax = plt.subplot(111)
ax.set_ylim([-1.2, 1.2]) # set the bounds to be 10, 10
ax.set_xlim([-1.2, 1.2])
plt.grid(False)
for plane in range(planes.shape[0]):
c=next(color)
file.write("plane:%s%s%s" %(planes[plane,0],planes[plane,1],planes[plane,2]) + "\t")
traces.append(crystallography.plane_trace_components(planes[plane],eulers_grain))
file.write(str(crystallography.plane_trace_components(planes[plane],eulers_grain)) + "\n")
x_values = (traces[plane][0], -traces[plane][0])
y_values = (traces[plane][1],-traces[plane][1])
plt.plot(x_values, y_values, '-',c=c, label=(str(planes[plane,0]) + str(planes[plane,1]) + str(planes[plane,2])))
plt.axis('off')
plt.gca().legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig5.savefig('grain%s_traces_%s.png' %(grain,int(plane_type)),format='png', dpi=200, bbox_inches='tight')
file.close()
#
file = open(('grain_%s_gamma-angles.txt' %grain),'w')
file.write("grain:%s" %grain + "\n")
for plane_type in ('110','112','123','134'):
planes = crystallography.gen_planes(plane_type)
directions = crystallography.gen_directions()
for plane in range(planes.shape[0]):
file.write("plane:%s%s%s" %(planes[plane,0],planes[plane,1],planes[plane,2]) + "\n")
for direction in range(directions.shape[0]):
if np.dot(planes[plane],directions[direction]) == 0:
file.write("direction: %s%s%s" %(directions[direction,0],directions[direction,1],directions[direction,2]) + "\t")
gamma_angle = crystallography.get_gamma_angle(planes[plane],directions[direction],eulers_grain)
file.write(str(gamma_angle) + "\n")
file.close()
if lattice == 'FCC':
schmidFactors = np.array(crystallography.calc_sfs(eulers_grain,'111',lattice))
legend_x = np.array(['(-111)\n[0-11]','(-111)\n[101]','(-111)\n[110]',
'(111)\n[0-11]','(111)\n[-101]','(111)\n[-110]',
'(-1-11)\n[011]','(-1-11)\n[101]','(-1-11)\n[-110]',
'(1-11)\n[011]','(1-11)\n[-101]','(1-11)\n[110]'])
fig1 = plt.figure(figsize=(9,4))
sfbars = plt.bar(x=np.arange(1,schmidFactors.shape[0]+1), height=schmidFactors,
width=0.8, align='center', data=None,tick_label = legend_x,color='navy')
plt.grid(False)
fig1.savefig('grain_%s_sf.png' %(grain), format='png', dpi=1000)
# calculate traces through their angle
file = open(('grain_%s_plane-traces.txt' %grain),'w')
file.write("grain:%s" %grain + "\n")
planes = crystallography.gen_planes('111')
traces = []
for plane in range(planes.shape[0]):
file.write("plane:%s%s%s" %(planes[plane,0],planes[plane,1],planes[plane,2]) + "\t")
traces.append(crystallography.plane_trace_components(planes[plane],eulers_grain))
file.write(str(crystallography.plane_trace_components(planes[plane],eulers_grain)) + "\n")
file.close()
#
file = open(('grain_%s_gamma-angles.txt' %grain),'w')
file.write("grain:%s" %grain + "\n")
planes = crystallography.gen_planes('111')
directions = crystallography.gen_directions(lattice)
for plane in range(planes.shape[0]):
file.write("plane:%s%s%s" %(planes[plane,0],planes[plane,1],planes[plane,2]) + "\n")
for direction in range(directions.shape[0]):
if np.dot(planes[plane],directions[direction]) == 0:
file.write("direction: %s%s%s" %(directions[direction,0],directions[direction,1],directions[direction,2]) + "\t")
gamma_angle = crystallography.get_gamma_angle(planes[plane],directions[direction],eulers_grain)
file.write(str(gamma_angle) + "\n")
file.close()
# plot plane traces
file = open(('grain_%s_plane-traces.txt' %grain),'w')
file.write("grain:%s" %grain + "\n")
planes = crystallography.gen_planes('111')
traces = []
fig5 = plt.figure(figsize=(5,5))
color=iter(cm.nipy_spectral(np.linspace(0,1,planes.shape[0])))
ax = plt.subplot(111)
ax.set_ylim([-1.2, 1.2]) # set the bounds to be 10, 10
ax.set_xlim([-1.2, 1.2])
plt.grid(False)
for plane in range(planes.shape[0]):
c=next(color)
file.write("plane:%s%s%s" %(planes[plane,0],planes[plane,1],planes[plane,2]) + "\t")
traces.append(crystallography.plane_trace_components(planes[plane],eulers_grain))
file.write(str(crystallography.plane_trace_components(planes[plane],eulers_grain)) + "\n")
x_values = (traces[plane][0], -traces[plane][0])
y_values = (traces[plane][1],-traces[plane][1])
plt.plot(x_values, y_values, '-',c=c, label=(str(planes[plane,0]) + str(planes[plane,1]) + str(planes[plane,2])))
plt.axis('off')
plt.gca().legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig5.savefig('grain%s_traces.png' %(grain),format='png', dpi=200, bbox_inches='tight')
file.close()
#
file = open(('grain_%s_gamma-angles.txt' %grain),'w')
file.write("grain:%s" %grain + "\n")
planes = crystallography.gen_planes('111')
directions = crystallography.gen_directions()
for plane in range(planes.shape[0]):
file.write("plane:%s%s%s" %(planes[plane,0],planes[plane,1],planes[plane,2]) + "\n")
for direction in range(directions.shape[0]):
if np.dot(planes[plane],directions[direction]) == 0:
file.write("direction: %s%s%s" %(directions[direction,0],directions[direction,1],directions[direction,2]) + "\t")
gamma_angle = crystallography.get_gamma_angle(planes[plane],directions[direction],eulers_grain)
file.write(str(gamma_angle) + "\n")
file.close()
| 56.082474
| 135
| 0.541728
| 1,523
| 10,880
| 3.786605
| 0.126724
| 0.029131
| 0.010404
| 0.014566
| 0.801977
| 0.771112
| 0.771112
| 0.771112
| 0.742154
| 0.738512
| 0
| 0.094977
| 0.24614
| 10,880
| 194
| 136
| 56.082474
| 0.608144
| 0.026654
| 0
| 0.602339
| 0
| 0
| 0.179882
| 0.016476
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023392
| 0
| 0.023392
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
40706b38f05e00da9725053ac7eca2cdeaeb2354
| 132
|
py
|
Python
|
sheets/python/log-format-right-just.py
|
zgmarx/cheatsheet
|
b29e43a55c5c0fae8763a855025d77a8f46e1208
|
[
"MIT"
] | 1
|
2020-03-31T11:26:05.000Z
|
2020-03-31T11:26:05.000Z
|
sheets/python/log-format-right-just.py
|
zgmarx/cheatsheet
|
b29e43a55c5c0fae8763a855025d77a8f46e1208
|
[
"MIT"
] | null | null | null |
sheets/python/log-format-right-just.py
|
zgmarx/cheatsheet
|
b29e43a55c5c0fae8763a855025d77a8f46e1208
|
[
"MIT"
] | null | null | null |
# %(levelname)8s
logging.Formatter("[%(asctime)s] [%(levelname)8s] --- %(message)s (%(filename)s:%(lineno)s)", "%Y-%m-%d %H:%M:%S")
| 44
| 114
| 0.575758
| 20
| 132
| 3.8
| 0.65
| 0.289474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0.068182
| 132
| 2
| 115
| 66
| 0.601626
| 0.106061
| 0
| 0
| 0
| 1
| 0.767241
| 0.215517
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
407526762fd2160cacb1b14fa146458cf50318bc
| 26,686
|
py
|
Python
|
experiments/tpcds/groupby/groupby501.py
|
qingzma/DBEstClient
|
d2cdf51bc3c69e50bcf4d1d516673b7d20843c16
|
[
"BSD-2-Clause"
] | 11
|
2019-12-24T02:39:35.000Z
|
2022-03-21T22:39:41.000Z
|
experiments/tpcds/groupby/groupby501.py
|
Forever-MrX/DBEstClient
|
d2cdf51bc3c69e50bcf4d1d516673b7d20843c16
|
[
"BSD-2-Clause"
] | 4
|
2019-12-09T09:48:17.000Z
|
2021-07-07T02:58:26.000Z
|
experiments/tpcds/groupby/groupby501.py
|
qingzma/DBEstClient
|
d2cdf51bc3c69e50bcf4d1d516673b7d20843c16
|
[
"BSD-2-Clause"
] | 8
|
2019-11-08T02:10:37.000Z
|
2022-03-21T22:42:46.000Z
|
# Created by Qingzhi Ma at 10/02/2020
# All right reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
#
#
# hive -e "select ss_store_sk, count(*) from store_sales_1t where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk;" > ~/group501counts.csv
#
#
from dbestclient.executor.executor import SqlExecutor
def run():
sqlExecutor = SqlExecutor()
# build_501_groups(sqlExecutor)
# build_501_groups_grid_search(sqlExecutor)
# run_501_gogs(sqlExecutor)
# build_501_groups2(sqlExecutor)
build_501_groups2_stratified(sqlExecutor)
def build_models(sqlExecutor):
# sqlExecutor.execute(
# "create table ss1t_gg4_no_data(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='cpu', b_use_gg=True, n_per_gg=127)
# sqlExecutor.execute(
# "create table ss1t_gg8(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='cpu', b_use_gg=True, n_per_gg=64)
# sqlExecutor.execute(
# "create table ss1t_gg16(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='cpu', b_use_gg=True, n_per_gg=32)
# sqlExecutor.execute(
# "create table ss1t_gg1_gpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='gpu', b_use_gg=True, n_per_gg=512)
# sqlExecutor.execute(
# "create table ss1t_gg1_cpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='cpu', b_use_gg=True, n_per_gg=512)
sqlExecutor.execute(
"create table ss1t_gg1_cpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="cpu",
b_use_gg=True,
n_per_gg=512,
)
sqlExecutor.execute(
"create table ss1t_gg2_cpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="cpu",
b_use_gg=True,
n_per_gg=255,
)
sqlExecutor.execute(
"create table ss1t_gg4_cpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="cpu",
b_use_gg=True,
n_per_gg=127,
)
sqlExecutor.execute(
"create table ss1t_gg4_gpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="gpu",
b_use_gg=True,
n_per_gg=127,
)
# sqlExecutor.execute(
# "create table ss1t_gg2(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='cpu', b_use_gg=True, n_per_gg=254)
# sqlExecutor.execute(
# "create table ss1t_no_gg(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000", n_mdn_layer_node=8, b_one_hot_encoding=True, b_grid_search=True, device='cpu', b_use_gg=False, n_per_gg=254)
sqlExecutor.execute(
"create table ss1t_gg32_cpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="cpu",
b_use_gg=True,
n_per_gg=16,
)
sqlExecutor.execute(
"create table ss1t_gg64_cpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="cpu",
b_use_gg=True,
n_per_gg=8,
)
sqlExecutor.execute(
"create table ss1t_gg32_gpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="gpu",
b_use_gg=True,
n_per_gg=16,
)
sqlExecutor.execute(
"create table ss1t_gg64_gpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 5000000",
n_mdn_layer_node=8,
b_one_hot_encoding=True,
b_grid_search=True,
device="gpu",
b_use_gg=True,
n_per_gg=8,
)
def query(sqlExecutor):
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_cpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=1,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_cpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=2,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_cpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=4,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_cpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=8,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_cpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=16,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_gpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=1,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_gpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=2,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_gpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=4,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_gpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=8,
)
sqlExecutor.execute(
"select sum(ss_sales_price) from ss1t_gg32_gpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk",
n_jobs=16,
)
# sqlExecutor.execute('select sum(ss_sales_price) from ss1t_gg32_cpu where ss_sold_date_sk between 2451119 and 2451483 group by ss_store_sk',
# result2file="/home/u1796377/Projects/DBEstClient/experiments/results/mdn501/ss1t_gg32_cpu.txt", n_jobs=1)
def query_workload(sqlExecutor, model_name, n_jobs):
sqlExecutor.execute("set n_jobs=" + str(n_jobs) + '"')
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum1.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum2.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451300 <=ss_sold_date_sk<= 2451665 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum3.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451392 <=ss_sold_date_sk<= 2451757 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum4.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451484 <=ss_sold_date_sk<= 2451849 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum5.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451545 <=ss_sold_date_sk<= 2451910 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum6.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451636 <=ss_sold_date_sk<= 2452000 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum7.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451727 <=ss_sold_date_sk<= 2452091 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum8.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451850 <=ss_sold_date_sk<= 2452214 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum9.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2451911 <=ss_sold_date_sk<= 2452275 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/sum10.txt'"
)
sqlExecutor.execute(
"select sum(ss_sales_price) from "
+ model_name
+ " where 2452031 <=ss_sold_date_sk<= 2452395 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count1.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count2.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451300 <=ss_sold_date_sk<= 2451665 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count3.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451392 <=ss_sold_date_sk<= 2451757 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count4.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451484 <=ss_sold_date_sk<= 2451849 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count5.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451545 <=ss_sold_date_sk<= 2451910 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count6.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451636 <=ss_sold_date_sk<= 2452000 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count7.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451727 <=ss_sold_date_sk<= 2452091 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count8.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451850 <=ss_sold_date_sk<= 2452214 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count9.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2451911 <=ss_sold_date_sk<= 2452275 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/count10.txt'"
)
sqlExecutor.execute(
"select count(ss_sales_price) from "
+ model_name
+ " where 2452031 <=ss_sold_date_sk<= 2452395 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg1.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg2.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451300 <=ss_sold_date_sk<= 2451665 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg3.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451392 <=ss_sold_date_sk<= 2451757 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg4.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451484 <=ss_sold_date_sk<= 2451849 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg5.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451545 <=ss_sold_date_sk<= 2451910 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg6.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451636 <=ss_sold_date_sk<= 2452000 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg7.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451727 <=ss_sold_date_sk<= 2452091 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg8.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451850 <=ss_sold_date_sk<= 2452214 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg9.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2451911 <=ss_sold_date_sk<= 2452275 group by ss_store_sk",
)
sqlExecutor.execute(
"set result2file='/home/u1796377/Documents/workspace/DBEstClient/experiments/results/mdn501/avg10.txt'"
)
sqlExecutor.execute(
"select avg(ss_sales_price) from "
+ model_name
+ " where 2452031 <=ss_sold_date_sk<= 2452395 group by ss_store_sk",
)
def build_501_groups(sqlExecutor):
sqlExecutor.execute("set v='True'")
sqlExecutor.execute("set device='gpu'")
sqlExecutor.execute("set encoder='binary'")
sqlExecutor.execute("set b_grid_search='false'")
sqlExecutor.execute("set b_print_to_screen='false'")
sqlExecutor.execute("set csv_split_char='|'")
sqlExecutor.execute("set batch_size=1000")
# sqlExecutor.execute("set table_header=" +
# "'ss_sold_date_sk|ss_sold_time_sk|ss_item_sk|ss_customer_sk|ss_cdemo_sk|ss_hdemo_sk|" +
# "ss_addr_sk|ss_store_sk|ss_promo_sk|ss_ticket_number|ss_quantity|ss_wholesale_cost|" +
# "ss_list_price|ss_sales_price|ss_ext_discount_amt|ss_ext_sales_price|" +
# "ss_ext_wholesale_cost|ss_ext_list_price|ss_ext_tax|ss_coupon_amt|ss_net_paid|" +
# "ss_net_paid_inc_tax|ss_net_profit|none'"
# )
sqlExecutor.execute(
"set table_header=" + "'ss_sold_date_sk|ss_store_sk|ss_sales_price'"
)
sqlExecutor.execute("set n_mdn_layer_node=20")
sqlExecutor.execute("set n_jobs=1")
sqlExecutor.execute("set n_hidden_layer=1")
sqlExecutor.execute("set n_epoch=20")
sqlExecutor.execute("set n_gaussians_reg=3")
sqlExecutor.execute("set n_gaussians_density=20")
# sqlExecutor.execute("set result2file='/home/u1796377/Desktop/hah.txt'")
sqlExecutor.execute(
"create table ss1t_10gpu(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_10m_reduced.csv' GROUP BY ss_store_sk method uniform size 'num_points/num_of_points501.csv' "
) # num_of_points57.csv
sqlExecutor.execute(
"select avg(ss_sales_price) from ss1t_10gpu where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk"
)
query_workload(sqlExecutor, "ss1t_10gpu", 1)
def build_501_groups2(sqlExecutor):
sqlExecutor.execute("set v='True'")
sqlExecutor.execute("set device='cpu'")
sqlExecutor.execute("set encoder='binary'")
sqlExecutor.execute("set b_grid_search='false'")
sqlExecutor.execute("set b_print_to_screen='false'")
sqlExecutor.execute("set csv_split_char='|'")
sqlExecutor.execute("set batch_size=1000")
sqlExecutor.execute(
"set table_header="
+ "'ss_sold_date_sk|ss_sold_time_sk|ss_item_sk|ss_customer_sk|ss_cdemo_sk|ss_hdemo_sk|"
+ "ss_addr_sk|ss_store_sk|ss_promo_sk|ss_ticket_number|ss_quantity|ss_wholesale_cost|"
+ "ss_list_price|ss_sales_price|ss_ext_discount_amt|ss_ext_sales_price|"
+ "ss_ext_wholesale_cost|ss_ext_list_price|ss_ext_tax|ss_coupon_amt|ss_net_paid|"
+ "ss_net_paid_inc_tax|ss_net_profit|none'"
)
# sqlExecutor.execute("set table_header=" +
# "'ss_sold_date_sk|ss_store_sk|ss_sales_price'")
sqlExecutor.execute("set n_mdn_layer_node=20")
sqlExecutor.execute("set n_jobs=2")
sqlExecutor.execute("set n_hidden_layer=1")
sqlExecutor.execute("set n_epoch=1")
sqlExecutor.execute("set n_gaussians_reg=3")
sqlExecutor.execute("set n_gaussians_density=20")
# sqlExecutor.execute("set result2file='/home/u1796377/Desktop/hah.txt'")
sqlExecutor.execute(
"create table ss1t_groups2(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk,ss_quantity method uniform size 0.1 "
) # num_of_points57.csv
sqlExecutor.execute("set result2file='/home/u1796377/Desktop/hah.txt'")
sqlExecutor.execute(
"select avg(ss_sales_price) from ss1t_groups2 where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk,ss_quantity"
)
# query_workload(sqlExecutor, "ss1t_groups2", 1)
def build_501_groups2_stratified(sqlExecutor):
# sqlExecutor.execute("set v='True'")
# sqlExecutor.execute("set device='cpu'")
# sqlExecutor.execute("set encoder='binary'")
# sqlExecutor.execute("set b_grid_search='false'")
sqlExecutor.execute("set b_print_to_screen='False'")
sqlExecutor.execute("set csv_split_char='|'")
# sqlExecutor.execute("set batch_size=1000")
sqlExecutor.execute(
"set table_header="
+ "'ss_sold_date_sk|ss_sold_time_sk|ss_item_sk|ss_customer_sk|ss_cdemo_sk|ss_hdemo_sk|"
+ "ss_addr_sk|ss_store_sk|ss_promo_sk|ss_ticket_number|ss_quantity|ss_wholesale_cost|"
+ "ss_list_price|ss_sales_price|ss_ext_discount_amt|ss_ext_sales_price|"
+ "ss_ext_wholesale_cost|ss_ext_list_price|ss_ext_tax|ss_coupon_amt|ss_net_paid|"
+ "ss_net_paid_inc_tax|ss_net_profit|none'"
)
sqlExecutor.execute("set n_mdn_layer_node=20")
sqlExecutor.execute("set n_jobs=1")
sqlExecutor.execute("set n_hidden_layer=1")
sqlExecutor.execute("set n_epoch=20")
sqlExecutor.execute("set n_gaussians_reg=3")
sqlExecutor.execute("set n_gaussians_density=20")
sqlExecutor.execute("drop table ss1t_groups2_stratified_20m_1")
sqlExecutor.execute(
"create table ss1t_groups2_stratified_20m_1(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/store_sales.dat' GROUP BY ss_store_sk,ss_quantity method stratified size 420" # data/tpcds/10g/ss_10g_100.csv /data/data/data/tpc/tpc-ds/1t/store_sales.dat
) # num_of_points57.csv /data/tpcds/1t/store_sales.dat
sqlExecutor.execute("set n_jobs=1")
# sqlExecutor.execute("set result2file='/home/u1796377/Desktop/hah.txt'")
sqlExecutor.execute(
"select avg(ss_sales_price) from ss1t_groups2_stratified_20m_1 where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk,ss_quantity"
)
# query_workload(sqlExecutor, "ss1t_groups2", 1)
def build_501_groups_grid_search(sqlExecutor):
sqlExecutor.execute("set v='True'")
sqlExecutor.execute("set device='gpu'")
sqlExecutor.execute("set encoder='binary'")
sqlExecutor.execute("set b_grid_search='true'")
sqlExecutor.execute("set b_print_to_screen='false'")
sqlExecutor.execute("set csv_split_char='|'")
sqlExecutor.execute("set batch_size=1000")
sqlExecutor.execute(
"set table_header="
+ "'ss_sold_date_sk|ss_sold_time_sk|ss_item_sk|ss_customer_sk|ss_cdemo_sk|ss_hdemo_sk|"
+ "ss_addr_sk|ss_store_sk|ss_promo_sk|ss_ticket_number|ss_quantity|ss_wholesale_cost|"
+ "ss_list_price|ss_sales_price|ss_ext_discount_amt|ss_ext_sales_price|"
+ "ss_ext_wholesale_cost|ss_ext_list_price|ss_ext_tax|ss_coupon_amt|ss_net_paid|"
+ "ss_net_paid_inc_tax|ss_net_profit|none'"
)
sqlExecutor.execute("set n_mdn_layer_node=20")
sqlExecutor.execute("set n_jobs=1")
sqlExecutor.execute("set n_hidden_layer=2")
sqlExecutor.execute("set n_epoch=20")
# sqlExecutor.execute("set n_division=50")
# sqlExecutor.execute("set result2file='/home/u1796377/Desktop/hah.txt'")
sqlExecutor.execute(
"create table ss1t_501_grid_search(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_5m.csv' GROUP BY ss_store_sk method uniform size 'num_points/num_of_points501.csv' "
) # num_of_points57.csv
sqlExecutor.execute(
"select avg(ss_sales_price) from ss1t_501_grid_search where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk"
)
query_workload(sqlExecutor, "ss1t_501_grid_search", 1)
def run_501_gogs(sqlExecutor):
# sqlExecutor.execute("set device='cpu'")
sqlExecutor.execute("set b_print_to_screen='False'")
sqlExecutor.execute("set device='cpu'")
sqlExecutor.execute("set n_jobs=1")
sqlExecutor.execute("set b_grid_search='false'")
sqlExecutor.execute("set csv_split_char='|'")
# sqlExecutor.execute("set table_header=" +
# "'ss_sold_date_sk|ss_sold_time_sk|ss_item_sk|ss_customer_sk|ss_cdemo_sk|ss_hdemo_sk|" +
# "ss_addr_sk|ss_store_sk|ss_promo_sk|ss_ticket_number|ss_quantity|ss_wholesale_cost|" +
# "ss_list_price|ss_sales_price|ss_ext_discount_amt|ss_ext_sales_price|" +
# "ss_ext_wholesale_cost|ss_ext_list_price|ss_ext_tax|ss_coupon_amt|ss_net_paid|" +
# "ss_net_paid_inc_tax|ss_net_profit|none'"
# )
sqlExecutor.execute(
"set table_header=" + "'ss_sold_date_sk|ss_store_sk|ss_sales_price'"
)
sqlExecutor.execute("set n_mdn_layer_node=10")
sqlExecutor.execute("set b_use_gg='true'")
sqlExecutor.execute("set n_per_gg=102")
sqlExecutor.execute("set n_hidden_layer=1")
sqlExecutor.execute("set n_epoch=20")
sqlExecutor.execute("set b_grid_search='false'")
sqlExecutor.execute("set n_gaussians_reg=3")
sqlExecutor.execute("set n_gaussians_density=15")
# sqlExecutor.execute("set result2file='/home/u1796377/Desktop/hah.txt'")
sqlExecutor.execute(
"create table ss1t_gogs10m_128(ss_sales_price real, ss_sold_date_sk real) from '/data/tpcds/1t/ss_10m_reduced.csv' GROUP BY ss_store_sk method uniform size 'num_points/num_of_points501.csv' "
) # num_of_points57.csv
# sqlExecutor.execute(
# "select avg(ss_sales_price) from ss1t_gogs10m_128 where 2451119 <=ss_sold_date_sk<= 2451483 group by ss_store_sk")
query_workload(sqlExecutor, "ss1t_gogs_128", 1)
if __name__ == "__main__":
run()
# run_dbest1()
| 44.925926
| 275
| 0.697145
| 3,700
| 26,686
| 4.686216
| 0.064595
| 0.184786
| 0.134437
| 0.051906
| 0.943019
| 0.933906
| 0.913547
| 0.913547
| 0.908415
| 0.900802
| 0
| 0.073509
| 0.199655
| 26,686
| 593
| 276
| 45.001686
| 0.738318
| 0.186577
| 0
| 0.653689
| 0
| 0.088115
| 0.574155
| 0.257984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018443
| false
| 0
| 0.002049
| 0
| 0.020492
| 0.010246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
40758fa0598fc68b8bb172fa598ca9024a5c4961
| 28
|
py
|
Python
|
pyotp/__init__.py
|
Elizafox/pyotp
|
78789b3514befcd9e2e7fb1f7ffe74249432240a
|
[
"WTFPL"
] | null | null | null |
pyotp/__init__.py
|
Elizafox/pyotp
|
78789b3514befcd9e2e7fb1f7ffe74249432240a
|
[
"WTFPL"
] | null | null | null |
pyotp/__init__.py
|
Elizafox/pyotp
|
78789b3514befcd9e2e7fb1f7ffe74249432240a
|
[
"WTFPL"
] | null | null | null |
# TODO FIXME add stuff here
| 14
| 27
| 0.75
| 5
| 28
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 28
| 1
| 28
| 28
| 0.954545
| 0.892857
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
40a7f207c75e1188c09d3d60cf7773c402bf3caa
| 149
|
py
|
Python
|
test/Bindings/import.py
|
yangkeping/scalehls
|
ad2bf8b7c6801a83042a3fdbe0fc3e39521c13ab
|
[
"Apache-2.0"
] | 60
|
2021-07-30T03:30:35.000Z
|
2022-03-27T20:00:41.000Z
|
test/Bindings/import.py
|
TimJZ/scalehls
|
ad2bf8b7c6801a83042a3fdbe0fc3e39521c13ab
|
[
"Apache-2.0"
] | 13
|
2021-08-02T16:13:04.000Z
|
2022-03-30T23:43:45.000Z
|
test/Bindings/import.py
|
TimJZ/scalehls
|
ad2bf8b7c6801a83042a3fdbe0fc3e39521c13ab
|
[
"Apache-2.0"
] | 14
|
2021-07-30T12:55:01.000Z
|
2022-03-04T14:29:39.000Z
|
# REQUIRES: bindings_python
# RUN: %PYTHON %s
import mlir.ir
from mlir.dialects import builtin
import scalehls
from scalehls.dialects import hlscpp
| 18.625
| 36
| 0.805369
| 21
| 149
| 5.666667
| 0.619048
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134228
| 149
| 7
| 37
| 21.285714
| 0.922481
| 0.275168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
40b4416a9354250df1fdb05349e15884cffa948c
| 32
|
py
|
Python
|
xxmaker/game/g1836jr/__init__.py
|
pijll/xxmaker
|
654d639e2f170b373a1a955b15cee07ed4cfa5ab
|
[
"MIT"
] | null | null | null |
xxmaker/game/g1836jr/__init__.py
|
pijll/xxmaker
|
654d639e2f170b373a1a955b15cee07ed4cfa5ab
|
[
"MIT"
] | null | null | null |
xxmaker/game/g1836jr/__init__.py
|
pijll/xxmaker
|
654d639e2f170b373a1a955b15cee07ed4cfa5ab
|
[
"MIT"
] | null | null | null |
from .main import create_1836jr
| 16
| 31
| 0.84375
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.125
| 32
| 1
| 32
| 32
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
40f807051a9d9fea77fee0507c721d70c7113db1
| 284
|
py
|
Python
|
modulo1/Tiempo.py
|
OmarGP/Python1
|
66df90aa7b293d42f1e738099582d81ba203d3f4
|
[
"Apache-2.0"
] | null | null | null |
modulo1/Tiempo.py
|
OmarGP/Python1
|
66df90aa7b293d42f1e738099582d81ba203d3f4
|
[
"Apache-2.0"
] | null | null | null |
modulo1/Tiempo.py
|
OmarGP/Python1
|
66df90aa7b293d42f1e738099582d81ba203d3f4
|
[
"Apache-2.0"
] | null | null | null |
import time
print("Time : ", time.time())
print(time.localtime(time.time()))
print("Año: ", time.localtime(time.time()).tm_year)
print("Minutos: ", time.localtime(time.time()).tm_min)
print("Milliseconds: ", int(time.time() * 1000.0))
print(time.asctime(time.localtime(time.time())))
| 35.5
| 54
| 0.697183
| 41
| 284
| 4.780488
| 0.341463
| 0.285714
| 0.346939
| 0.428571
| 0.234694
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0.066901
| 284
| 8
| 55
| 35.5
| 0.720755
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.142857
| 0.857143
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
dc04554417617ae3785ead802cb7acb263d4af3c
| 40
|
py
|
Python
|
atmos/__init__.py
|
cmflannery/atmos
|
be8261208527f46023ec25792e846f8798e76429
|
[
"MIT"
] | 1
|
2019-09-22T17:33:01.000Z
|
2019-09-22T17:33:01.000Z
|
atmos/__init__.py
|
cmflannery/atmos
|
be8261208527f46023ec25792e846f8798e76429
|
[
"MIT"
] | null | null | null |
atmos/__init__.py
|
cmflannery/atmos
|
be8261208527f46023ec25792e846f8798e76429
|
[
"MIT"
] | 1
|
2019-09-27T18:25:06.000Z
|
2019-09-27T18:25:06.000Z
|
from . import atmos
from .atmos import *
| 20
| 20
| 0.75
| 6
| 40
| 5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 2
| 20
| 20
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dc1610646c8d73c9e5be546ff9df3f3add1d8fe1
| 47
|
py
|
Python
|
wamr/ffi.py
|
isabella232/wamr-python
|
44a296304c6649f03a22ff275a812db43efd068b
|
[
"Apache-2.0"
] | null | null | null |
wamr/ffi.py
|
isabella232/wamr-python
|
44a296304c6649f03a22ff275a812db43efd068b
|
[
"Apache-2.0"
] | 1
|
2022-03-21T06:27:49.000Z
|
2022-03-21T06:27:49.000Z
|
wamr/ffi.py
|
isabella232/wamr-python
|
44a296304c6649f03a22ff275a812db43efd068b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from .binding import *
| 15.666667
| 23
| 0.574468
| 6
| 47
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.191489
| 47
| 2
| 24
| 23.5
| 0.684211
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
905de1b077459c4da9131c8d2fcbb19b10d06363
| 180
|
py
|
Python
|
image-editor/data_editor/model/model_files.py
|
flegac/deep-experiments
|
e1b12e724f2c8340cbe9c51396cf3f42e3b4e934
|
[
"MIT"
] | null | null | null |
image-editor/data_editor/model/model_files.py
|
flegac/deep-experiments
|
e1b12e724f2c8340cbe9c51396cf3f42e3b4e934
|
[
"MIT"
] | null | null | null |
image-editor/data_editor/model/model_files.py
|
flegac/deep-experiments
|
e1b12e724f2c8340cbe9c51396cf3f42e3b4e934
|
[
"MIT"
] | null | null | null |
from data_editor.utils.file_select import ask_open_file
def ask_open_model():
return ask_open_file('Open model', [
('all files', '.*'),
('h5', '.h5'),
])
| 20
| 55
| 0.594444
| 24
| 180
| 4.125
| 0.625
| 0.212121
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.233333
| 180
| 8
| 56
| 22.5
| 0.702899
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.166667
| 0.166667
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
906b013bfdfd7d817fbc43580842772964463df5
| 13,970
|
py
|
Python
|
tests/lazy/test_op_gradients.py
|
fbartolic/starry
|
d50576caf964ad925c490c9f3ffe1273ab155397
|
[
"MIT"
] | null | null | null |
tests/lazy/test_op_gradients.py
|
fbartolic/starry
|
d50576caf964ad925c490c9f3ffe1273ab155397
|
[
"MIT"
] | null | null | null |
tests/lazy/test_op_gradients.py
|
fbartolic/starry
|
d50576caf964ad925c490c9f3ffe1273ab155397
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test the various Theano Ops and their gradients.
"""
import theano
from theano.tests.unittest_tools import verify_grad
from theano.configparser import change_flags
import theano.tensor as tt
import numpy as np
import pytest
import starry
def test_sT(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2)
verify_grad(
map.ops.sT,
(np.linspace(0.01, 1.09, 30), 0.1),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_intensity(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2, udeg=2)
np.random.seed(11)
lat = 180 * (np.random.random(10) - 0.5)
lon = 360 * (np.random.random(10) - 0.5)
y = [1.0] + list(np.random.randn(8))
u = [-1.0] + list(np.random.randn(2))
f = [np.pi]
theta = 0.0
alpha = 0.1
tau = 0.5
delta = 0.0
def intensity(lat, lon, y, u, f, theta, alpha, tau, delta):
return map.ops.intensity(
lat, lon, y, u, f, theta, alpha, tau, delta, np.array(True)
)
verify_grad(
intensity,
(lat, lon, y, u, f, theta, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_tensordotRz(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2)
theta = (
np.array([0.0, 15.0, 30.0, 45.0, 60.0, 75.0, 90.0]) * np.pi / 180.0
)
# Matrix M
M = np.ones((7, 9))
verify_grad(
map.ops.tensordotRz,
(M, theta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
# Vector M
M = np.ones((1, 9))
verify_grad(
map.ops.tensordotRz,
(M, theta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_dotR(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2)
x = 1.0 / np.sqrt(3)
y = 1.0 / np.sqrt(3)
z = 1.0 / np.sqrt(3)
theta = np.pi / 5
# Matrix M
M = np.ones((7, 9))
verify_grad(
map.ops.dotR,
(M, x, y, z, theta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
# Vector M
M = np.ones((1, 9))
verify_grad(
map.ops.dotR,
(M, x, y, z, theta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_F(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2, udeg=2, rv=True)
np.random.seed(11)
u = np.random.randn(3)
u[0] = -1
f = np.random.randn(16)
verify_grad(
map.ops.F,
(u, f),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_pT(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2)
map[1:, :] = 1
x = np.array([0.13])
y = np.array([0.25])
z = np.sqrt(1 - x ** 2 - y ** 2)
verify_grad(
map.ops.pT,
(x, y, z),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_flux(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2)
theta = np.linspace(0, 30, 10)
xo = np.linspace(-1.5, 1.5, len(theta))
yo = np.ones_like(xo) * 0.3
zo = 1.0 * np.ones_like(xo)
ro = 0.1
inc = 85.0 * np.pi / 180.0
obl = 30.0 * np.pi / 180.0
y = np.ones(9)
u = [-1.0]
f = [np.pi]
alpha = 0.1
tau = 0.5
delta = 0.0
func = lambda *args: tt.dot(map.ops.X(*args), y)
# Just rotation
verify_grad(
func,
(theta, xo, yo, zo, 0.0, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
# Just occultation
verify_grad(
func,
(theta, xo / 3, yo, zo, ro, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
# Rotation + occultation
verify_grad(
func,
(theta, xo, yo, zo, ro, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_rT_reflected(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2, reflected=True)
bterm = np.linspace(-1, 1, 10)[1:-1]
sigr = 30 * np.pi / 180
verify_grad(
map.ops.rT,
(bterm, sigr),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
"""
# TODO: Implement the gradient of the OrenNayarOp.
def test_intensity_reflected(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2, udeg=2, reflected=True)
np.random.seed(11)
lat = 180 * (np.random.random(10) - 0.5)
lon = 360 * (np.random.random(10) - 0.5)
y = [1.0] + list(np.random.randn(8))
u = [-1.0] + list(np.random.randn(2))
f = [np.pi]
source = np.random.randn(10, 3)
source /= np.sqrt(np.sum(source ** 2, axis=1)).reshape(-1, 1)
xs = source[:, 0]
ys = source[:, 1]
zs = source[:, 2]
Rs = 1.0
theta = 0.0
alpha = 0.1
tau = 0.5
delta = 0.0
sigr = 30 * np.pi / 180
def intensity(lat, lon, y, u, f, xs, ys, zs, Rs, theta, alpha, tau, delta, sigr):
return map.ops.intensity(
lat,
lon,
y,
u,
f,
xs,
ys,
zs,
Rs,
theta,
alpha,
tau,
delta,
np.array(False),
sigr,
np.array(False),
np.array(True),
)
verify_grad(
intensity,
(lat, lon, y, u, f, xs, ys, zs, Rs, theta, alpha, tau, delta, sigr),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
"""
def test_flux_reflected(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2, reflected=True)
theta = np.linspace(0, 30, 10)
xs = np.linspace(-1.5, 1.5, len(theta))
ys = np.ones_like(xs) * 0.3
zs = -1.0 * np.ones_like(xs)
ro = 0.0
inc = 85.0 * np.pi / 180.0
obl = 30.0 * np.pi / 180.0
y = np.ones(9)
u = [-1.0]
f = [np.pi]
alpha = 0.1
tau = 0.5
delta = 0.0
Rs = 1.0
sigr = 30 * np.pi / 180
def func(theta, xs, ys, zs, Rs, ro, inc, obl, u, f, alpha, tau, delta):
return tt.dot(
map.ops.X(
theta,
xs,
ys,
zs,
Rs,
xs,
ys,
zs,
ro,
inc,
obl,
u,
f,
alpha,
tau,
delta,
sigr,
),
y,
)
# Just rotation
verify_grad(
func,
(theta, xs, ys, zs, Rs, ro, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_flux_ylm_ld(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(ydeg=2, udeg=2)
theta = np.linspace(0, 30, 10)
xo = np.linspace(-1.5, 1.5, len(theta))
yo = np.ones_like(xo) * 0.3
zo = 1.0 * np.ones_like(xo)
ro = 0.1
inc = 85.0 * np.pi / 180.0
obl = 30.0 * np.pi / 180.0
y = np.ones(9)
np.random.seed(14)
u = [-1.0] + list(np.random.randn(2))
f = [np.pi]
alpha = 0.1
tau = 0.5
delta = 0.0
func = lambda *args: tt.dot(map.ops.X(*args), y)
# Just rotation
verify_grad(
func,
(theta, xo, yo, zo, 0.0, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
# Just occultation
verify_grad(
func,
(theta, xo / 3, yo, zo, ro, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
# Rotation + occultation
verify_grad(
func,
(theta, xo, yo, zo, ro, inc, obl, u, f, alpha, tau, delta),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_flux_quad_ld(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
with change_flags(compute_test_value="off"):
map = starry.Map(udeg=2)
xo = np.linspace(-1.5, 1.5, 10)
yo = np.ones_like(xo) * 0.3
zo = 1.0 * np.ones_like(xo)
ro = 0.1
np.random.seed(14)
u = np.array([-1.0] + list(np.random.randn(2)))
func = lambda *args: map.ops.flux(*args)
verify_grad(
func,
(xo, yo, zo, ro, u),
abs_tol=abs_tol,
rel_tol=rel_tol,
eps=eps,
n_tests=1,
)
def test_rv(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
    """Verify gradients of the radial-velocity model in three
    configurations: rotation only, occultation only, and both."""
    with change_flags(compute_test_value="off"):
        m = starry.Map(ydeg=2, rv=True)
        theta = np.linspace(0, 30, 10)
        xo = np.linspace(-1.5, 1.5, len(theta))
        yo = 0.3 * np.ones_like(xo)
        zo = np.ones_like(xo) * 1.0
        ro = 0.1
        inc = 85.0 * np.pi / 180.0
        obl = 30.0 * np.pi / 180.0
        veq = 0.5
        alpha = 0.3
        tau = 0.5
        delta = 0.0
        y = np.ones(9)
        u = [-1.0]
        tol = dict(abs_tol=abs_tol, rel_tol=rel_tol, eps=eps, n_tests=1)
        # Rotation only (zero occultor radius).
        verify_grad(
            m.ops.rv,
            (theta, xo, yo, zo, 0.0, inc, obl, y, u, veq, alpha, tau, delta),
            **tol
        )
        # Occultation only (xo shrunk so the occultor stays near the disk).
        verify_grad(
            m.ops.rv,
            (theta, xo / 3, yo, zo, ro, inc, obl, y, u, veq, alpha, tau, delta),
            **tol
        )
        # Rotation + occultation.
        verify_grad(
            m.ops.rv,
            (theta, xo, yo, zo, ro, inc, obl, y, u, veq, alpha, tau, delta),
            **tol
        )
def test_diffrot(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
    """Verify gradients of the differential-rotation operator
    `tensordotD` applied to a batch of Ylm coefficient vectors."""
    np.random.seed(0)
    with change_flags(compute_test_value="off"):
        m = starry.Map(ydeg=5)
        coeffs = np.random.randn(4, m.Ny)
        alpha = 1.0
        tau = 0.5
        delta = 0.0
        angles = [0.1, 0.5, 1.0, 2.0]  # rotation phases, in radians
        verify_grad(
            m.ops.tensordotD,
            (coeffs, angles, alpha, tau, delta),
            abs_tol=abs_tol,
            rel_tol=rel_tol,
            eps=eps,
            n_tests=1,
        )
def test_spot(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
    """Verify gradients of the Ylm spot expansion for a single-band map."""
    with change_flags(compute_test_value="off"):
        m = starry.Map(ydeg=5)
        # (amp, sigma, lat, lon) -- angles in radians.
        spot_args = ([-0.01], 0.1, 30 * np.pi / 180, 45 * np.pi / 180)
        verify_grad(
            m.ops.spotYlm,
            spot_args,
            abs_tol=abs_tol,
            rel_tol=rel_tol,
            eps=eps,
            n_tests=1,
        )
def test_spot_spectral(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
    """Verify gradients of the Ylm spot expansion for a spectral map
    (one amplitude per wavelength bin, nw=2)."""
    with change_flags(compute_test_value="off"):
        m = starry.Map(ydeg=5, nw=2)
        # (amp, sigma, lat, lon) -- angles in radians.
        spot_args = ([-0.01, -0.02], 0.1, 30 * np.pi / 180, 45 * np.pi / 180)
        verify_grad(
            m.ops.spotYlm,
            spot_args,
            abs_tol=abs_tol,
            rel_tol=rel_tol,
            eps=eps,
            n_tests=1,
        )
def test_sT_reflected(abs_tol=1e-5, rel_tol=1e-5, eps=1e-7):
    """Verify gradients of the reflected-light solution vector `sT`."""
    with change_flags(compute_test_value="off"):
        m = starry.Map(ydeg=2, reflected=True)
        # (b, theta, bo, ro, sigr)
        args = (
            np.array([0.5]),
            np.array([0.5]),
            np.array([0.75]),
            0.5,
            30 * np.pi / 180,
        )
        verify_grad(
            m.ops.sT, args, abs_tol=abs_tol, rel_tol=rel_tol, eps=eps, n_tests=1
        )
| 26.358491
| 89
| 0.445168
| 1,974
| 13,970
| 3.011651
| 0.075481
| 0.06762
| 0.075694
| 0.050463
| 0.84508
| 0.817998
| 0.812952
| 0.792935
| 0.78032
| 0.759125
| 0
| 0.069577
| 0.422835
| 13,970
| 529
| 90
| 26.408318
| 0.667742
| 0.02083
| 0
| 0.696517
| 0
| 0
| 0.003966
| 0
| 0
| 0
| 0
| 0.00189
| 0
| 1
| 0.044776
| false
| 0
| 0.017413
| 0.004975
| 0.067164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
907daf00644d95e2a234686021ba60047c21f677
| 251
|
py
|
Python
|
bnbapp/core/context_processors.py
|
Bionetbook/bionetbook
|
c92d5bbdc5c121631c2230cf93aa63977d381f30
|
[
"MIT"
] | null | null | null |
bnbapp/core/context_processors.py
|
Bionetbook/bionetbook
|
c92d5bbdc5c121631c2230cf93aa63977d381f30
|
[
"MIT"
] | null | null | null |
bnbapp/core/context_processors.py
|
Bionetbook/bionetbook
|
c92d5bbdc5c121631c2230cf93aa63977d381f30
|
[
"MIT"
] | null | null | null |
from django.conf import settings # import the settings file
def registration_enabled(context):
    """Template context processor exposing the ``REGISTRATION_ENABLED``
    setting to every template.

    Returns a dict merged into the template context; further values may
    be added here as needed.
    """
    return {'REGISTRATION_ENABLED': settings.REGISTRATION_ENABLED}
| 41.833333
| 87
| 0.788845
| 34
| 251
| 5.735294
| 0.705882
| 0.292308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155378
| 251
| 5
| 88
| 50.2
| 0.919811
| 0.422311
| 0
| 0
| 0
| 0
| 0.140845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
90b657959a92432da43d6638905ae3b3ebb0f772
| 23
|
py
|
Python
|
nevermore/lightning/__init__.py
|
shadowy000/nevermore
|
b46bc957dee283a02a19d1c6e6cb2d7b5eea86ca
|
[
"Apache-2.0"
] | null | null | null |
nevermore/lightning/__init__.py
|
shadowy000/nevermore
|
b46bc957dee283a02a19d1c6e6cb2d7b5eea86ca
|
[
"Apache-2.0"
] | null | null | null |
nevermore/lightning/__init__.py
|
shadowy000/nevermore
|
b46bc957dee283a02a19d1c6e6cb2d7b5eea86ca
|
[
"Apache-2.0"
] | null | null | null |
from .gradnorm import *
| 23
| 23
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90c92138e81e0bac49da667da1fd76297f5c752c
| 3,116
|
py
|
Python
|
tests/test_amin_amax.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | 37
|
2020-09-08T00:38:52.000Z
|
2022-03-18T01:44:10.000Z
|
tests/test_amin_amax.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | 432
|
2020-09-07T09:48:41.000Z
|
2022-03-25T17:50:55.000Z
|
tests/test_amin_amax.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | 17
|
2020-09-07T10:00:34.000Z
|
2022-03-25T13:53:43.000Z
|
import pytest
import dpnp
import numpy
@pytest.mark.parametrize("type",
                         [numpy.float64],
                         ids=['float64'])
def test_amax_float64(type):
    """dpnp.amax matches numpy.amax along every axis of a 3-D float array."""
    data = numpy.array([[[-2., 3.], [9.1, 0.2]],
                        [[-2., 5.0], [-2, -1.2]],
                        [[1.0, -2.], [5.0, -1.1]]])
    idata = dpnp.array(data)
    for axis in range(len(data)):
        numpy.testing.assert_array_equal(
            numpy.amax(data, axis=axis), dpnp.amax(idata, axis=axis))
@pytest.mark.parametrize("type",
                         [numpy.int64],
                         ids=['int64'])
def test_amax_int(type):
    """dpnp.amax matches numpy.amax on a 1-D integer array."""
    data = numpy.array([1, 0, 2, -3, -1, 2, 21, -9])
    numpy.testing.assert_array_equal(
        numpy.amax(data), dpnp.amax(dpnp.array(data)))
@pytest.mark.parametrize("type",
                         [numpy.float64],
                         ids=['float64'])
def test_amin_float64(type):
    """dpnp.amin matches numpy.amin along every axis of a 3-D float array."""
    data = numpy.array([[[-2., 3.], [9.1, 0.2]],
                        [[-2., 5.0], [-2, -1.2]],
                        [[1.0, -2.], [5.0, -1.1]]])
    idata = dpnp.array(data)
    for axis in range(len(data)):
        numpy.testing.assert_array_equal(
            numpy.amin(data, axis=axis), dpnp.amin(idata, axis=axis))
@pytest.mark.parametrize("type",
                         [numpy.int64],
                         ids=['int64'])
def test_amin_int(type):
    """dpnp.amin matches numpy.amin on a 1-D integer array."""
    data = numpy.array([1, 0, 2, -3, -1, 2, 21, -9])
    numpy.testing.assert_array_equal(
        numpy.amin(data), dpnp.amin(dpnp.array(data)))
def _get_min_max_input(type, shape):
size = 1
for i in range(len(shape)):
size *= shape[i]
a = numpy.arange(size, dtype=type)
a[int(size / 2)] = size * size
a[int(size / 3)] = -(size * size)
return a.reshape(shape)
@pytest.mark.parametrize("type",
                         [numpy.float64, numpy.float32, numpy.int64, numpy.int32],
                         ids=['float64', 'float32', 'int64', 'int32'])
@pytest.mark.parametrize("shape",
                         [(4,), (2, 3), (4, 5, 6)],
                         ids=['(4,)', '(2,3)', '(4,5,6)'])
def test_amax(type, shape):
    """dpnp matches numpy for both the amax function and the .max() method."""
    a = _get_min_max_input(type, shape)
    ia = dpnp.array(a)
    # Free-function form.
    numpy.testing.assert_array_equal(dpnp.amax(ia), numpy.amax(a))
    # Method form.
    numpy.testing.assert_array_equal(ia.max(), a.max())
@pytest.mark.parametrize("type",
                         [numpy.float64, numpy.float32, numpy.int64, numpy.int32],
                         ids=['float64', 'float32', 'int64', 'int32'])
@pytest.mark.parametrize("shape",
                         [(4,), (2, 3), (4, 5, 6)],
                         ids=['(4,)', '(2,3)', '(4,5,6)'])
def test_amin(type, shape):
    """dpnp matches numpy for both the amin function and the .min() method."""
    a = _get_min_max_input(type, shape)
    ia = dpnp.array(a)
    # Free-function form.
    numpy.testing.assert_array_equal(dpnp.amin(ia), numpy.amin(a))
    # Method form.
    numpy.testing.assert_array_equal(ia.min(), a.min())
| 28.851852
| 99
| 0.531772
| 432
| 3,116
| 3.710648
| 0.12037
| 0.049906
| 0.104803
| 0.114785
| 0.822832
| 0.781659
| 0.767311
| 0.767311
| 0.739863
| 0.689956
| 0
| 0.06062
| 0.285302
| 3,116
| 107
| 100
| 29.121495
| 0.659183
| 0
| 0
| 0.582278
| 0
| 0
| 0.044288
| 0
| 0
| 0
| 0
| 0
| 0.101266
| 1
| 0.088608
| false
| 0
| 0.037975
| 0
| 0.139241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
90d17a7ba88f1747d52989665437afc0ae4f3d27
| 3,134
|
py
|
Python
|
tests/test_api_keys.py
|
therefromhere/dj-stripe
|
8f1fa8b95229288caa0dc036038e015e84e52e85
|
[
"MIT"
] | null | null | null |
tests/test_api_keys.py
|
therefromhere/dj-stripe
|
8f1fa8b95229288caa0dc036038e015e84e52e85
|
[
"MIT"
] | null | null | null |
tests/test_api_keys.py
|
therefromhere/dj-stripe
|
8f1fa8b95229288caa0dc036038e015e84e52e85
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from djstripe import settings as djstripe_settings
from djstripe.models import StripeModel
try:
reload
except NameError:
from importlib import reload
class TestSubscriberModelRetrievalMethod(TestCase):
    """Check that dj-stripe resolves the right Stripe API key for live vs.
    test mode, both from the single global ``STRIPE_SECRET_KEY`` and from
    the mode-specific ``STRIPE_LIVE_SECRET_KEY`` / ``STRIPE_TEST_SECRET_KEY``
    settings.

    ``djstripe_settings`` is reloaded inside every test (and in
    ``tearDown``) so its module-level code re-reads the overridden Django
    settings.
    """

    def setUp(self):
        # One unsaved StripeModel per livemode flavour; only the flag matters.
        self.live_object = StripeModel(livemode=True)
        self.test_object = StripeModel(livemode=False)
        self.unk_object = StripeModel(livemode=None)

    @override_settings(
        STRIPE_SECRET_KEY="sk_live_foo",
        STRIPE_PUBLIC_KEY="pk_live_foo",
        STRIPE_LIVE_MODE=True
    )
    def test_global_api_keys_live_mode(self):
        # Re-evaluate module-level settings under the overridden values.
        reload(djstripe_settings)
        self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, True)
        self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_live_foo")
        # self.assertEqual(djstripe_settings.LIVE_API_KEY, "sk_live_foo")
        self.assertEqual(self.live_object.default_api_key, "sk_live_foo")

    @override_settings(
        STRIPE_SECRET_KEY="sk_test_foo",
        STRIPE_PUBLIC_KEY="pk_test_foo",
        STRIPE_LIVE_MODE=False
    )
    def test_global_api_keys_test_mode(self):
        # Re-evaluate module-level settings under the overridden values.
        reload(djstripe_settings)
        self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, False)
        self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_test_foo")
        # self.assertEqual(djstripe_settings.TEST_API_KEY, "sk_test_foo")
        self.assertEqual(self.test_object.default_api_key, "sk_test_foo")

    @override_settings(
        STRIPE_TEST_SECRET_KEY="sk_test_foo",
        STRIPE_LIVE_SECRET_KEY="sk_live_foo",
        STRIPE_TEST_PUBLIC_KEY="pk_test_foo",
        STRIPE_LIVE_PUBLIC_KEY="pk_live_foo",
        STRIPE_LIVE_MODE=True,
    )
    def test_api_key_live_mode(self):
        # Remove the global keys so the mode-specific LIVE ones must win.
        del settings.STRIPE_SECRET_KEY
        del settings.STRIPE_PUBLIC_KEY
        reload(djstripe_settings)
        self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, True)
        self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_live_foo")
        self.assertEqual(djstripe_settings.STRIPE_PUBLIC_KEY, "pk_live_foo")
        self.assertEqual(djstripe_settings.LIVE_API_KEY, "sk_live_foo")
        self.assertEqual(self.live_object.default_api_key, "sk_live_foo")

    @override_settings(
        STRIPE_TEST_SECRET_KEY="sk_test_foo",
        STRIPE_LIVE_SECRET_KEY="sk_live_foo",
        STRIPE_TEST_PUBLIC_KEY="pk_test_foo",
        STRIPE_LIVE_PUBLIC_KEY="pk_live_foo",
        STRIPE_LIVE_MODE=False,
    )
    def test_secret_key_test_mode(self):
        # Remove the global keys so the mode-specific TEST ones must win.
        del settings.STRIPE_SECRET_KEY
        del settings.STRIPE_PUBLIC_KEY
        reload(djstripe_settings)
        self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, False)
        self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_test_foo")
        self.assertEqual(djstripe_settings.STRIPE_PUBLIC_KEY, "pk_test_foo")
        self.assertEqual(djstripe_settings.TEST_API_KEY, "sk_test_foo")
        self.assertEqual(self.test_object.default_api_key, "sk_test_foo")

    def tearDown(self):
        # Restore djstripe_settings to the un-overridden settings.
        reload(djstripe_settings)
| 38.691358
| 76
| 0.739311
| 409
| 3,134
| 5.210269
| 0.110024
| 0.150164
| 0.151103
| 0.20366
| 0.782731
| 0.77053
| 0.761145
| 0.711403
| 0.711403
| 0.695448
| 0
| 0
| 0.181876
| 3,134
| 80
| 77
| 39.175
| 0.831123
| 0.040523
| 0
| 0.485294
| 0
| 0
| 0.087883
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0.088235
| false
| 0
| 0.088235
| 0
| 0.191176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
90ef05c04befab5d1af1bff364e66f8484ec4780
| 19,563
|
py
|
Python
|
plot_noise_parameters_v2.py
|
patternizer/AVHRR_NOISE
|
fe4127801393e92bd88e846d664bd1e2b4b76d78
|
[
"MIT"
] | null | null | null |
plot_noise_parameters_v2.py
|
patternizer/AVHRR_NOISE
|
fe4127801393e92bd88e846d664bd1e2b4b76d78
|
[
"MIT"
] | 5
|
2019-02-25T21:49:27.000Z
|
2019-02-27T23:32:37.000Z
|
plot_noise_parameters_v2.py
|
patternizer/AVHRR_NOISE
|
fe4127801393e92bd88e846d664bd1e2b4b76d78
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#ipdb> import os; os._exit(1)
# call as: python noise_parameters.py file_in
# =======================================
# Version 0.4
# 12 April, 2019
# michael.taylor AT reading DOT ac DOT uk
# =======================================
import os
import os.path
import glob
import optparse
from optparse import OptionParser
import sys
import numpy as np
import numpy.ma as ma
import xarray
import datetime
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# =======================================
def plot_variables(file_in):
    """Plot AVHRR noise-related variables from the netCDF file ``file_in``.

    Each figure is written as a .png in the current directory; the set of
    output files matches the original implementation. The heavily repeated
    figure boilerplate is factored into three local helpers.

    :param file_in: path to the netCDF dataset produced by the noise run.
    """
    ds = xarray.open_dataset(file_in)
    timestamp = ds['time']

    # Channel suffix -> plot title.
    channel_title = {
        'c3': r'$3.7\mu m$ channel',
        'c4': r'$11\mu m$ channel',
        'c5': r'$12\mu m$ channel',
    }
    channels = ('c3', 'c4', 'c5')

    def _finish(fig, ax, title, ylabel, outfile, legend=None, loc='best'):
        # Shared decoration: grid, labels, time axis formatting, save, close.
        ax.grid()
        if title is not None:
            ax.set_title(title)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('time')
        if legend == 'auto':
            plt.legend(loc=loc)  # use the per-line label= values
        elif legend is not None:
            plt.legend(legend, loc=loc)
        fig.autofmt_xdate()
        ax.fmt_xdata = mdates.DateFormatter('%H:%M')
        plt.savefig(outfile)
        plt.close()

    def _plot_series(values, title, ylabel, outfile, legend=None):
        # One figure with a single plot of `values` against time.
        fig, ax = plt.subplots()
        ax.plot(timestamp, values, '-', markersize=0.2)
        _finish(fig, ax, title, ylabel, outfile, legend=legend)

    def _plot_mean_std(mean, std, title, ylabel, outfile):
        # Mean (red) bracketed by mean +/- std (blue).
        # NOTE: raw strings fix the invalid '\p'/'\m' escape sequences
        # present in the original labels.
        fig, ax = plt.subplots()
        ax.plot(timestamp, mean + std, 'b-', markersize=0.2, label=r'$\pm\sigma$')
        ax.plot(timestamp, mean - std, 'b-', markersize=0.2, label=None)
        ax.plot(timestamp, mean, 'r-', markersize=0.2, label=r'$\mu$')
        _finish(fig, ax, title, ylabel, outfile, legend='auto')

    # --- black-body counts: mean +/- std, one figure per channel ---
    for ch in channels:
        _plot_mean_std(
            ds['bb_counts_%s_mean' % ch], ds['bb_counts_%s_std' % ch],
            channel_title[ch],
            r'bb_counts_%s: $\mu\pm\sigma$' % ch,
            'bb_counts_%s_mean_std.png' % ch)

    # --- calibration coefficients ---
    coef_legend = [r'a_{0}', 'a_{1}', 'a_{2}']
    _plot_series(ds['coef_calib_c3'], channel_title['c3'], r'coef_calib_c3',
                 'coef_calib_c3.png', legend=coef_legend)
    _plot_series(ds['coef_calib_c4'], channel_title['c4'], r'coef_calib_c4',
                 'coef_calib_c4.png', legend=coef_legend)
    # The c5 figure had its legend commented out in the original; kept so.
    _plot_series(ds['coef_calib_c5'], channel_title['c5'], r'coef_calib_c5',
                 'coef_calib_c5.png')

    # --- PRT counts and temperatures ---
    prt_legend = ['PRT-1', 'PRT-2', 'PRT-3', 'PRT-4']
    # BUGFIX: the original plotted prt_temp into the "PRT counts" figure
    # even though prt_counts was loaded; plot the counts variable here.
    _plot_series(ds['prt_counts'], r'PRT counts', r'counts',
                 'prt_counts.png', legend=prt_legend)
    _plot_series(ds['temp_prt'], r'PRT temperatures', r'temperature [K]',
                 'prt_temp_mean.png', legend=prt_legend)

    # --- per-channel single-series plots ---
    # (dataset variable template, output-name/ylabel template); the dataset
    # uses abbreviated names (e.g. *_counts_sp) for some variables.
    series = [
        ('radiance_%s_radiance_bb', 'radiance_%s_radiance_bb'),
        ('radiance_%s_gain', 'radiance_%s_gain'),
        ('radiance_%s_space', 'radiance_%s_radiance_space'),
        ('radiance_%s_counts_sp', 'radiance_%s_counts_space'),
        ('radiance_%s_counts_bb', 'radiance_%s_counts_bb'),
        ('ramp_%s', 'ramp_%s'),
    ]
    for var_tpl, out_tpl in series:
        for ch in channels:
            name = out_tpl % ch
            _plot_series(ds[var_tpl % ch], channel_title[ch], name, name + '.png')

    # --- space counts: raw positions, then mean +/- std ---
    position_legend = [str(i) for i in range(1, 11)]
    for ch in channels:
        chx = ch.replace('c', 'ch')  # output names use 'ch3'/'ch4'/'ch5'
        _plot_series(ds['space_counts_%s' % ch], channel_title[ch],
                     r'space_counts_%s: positions 1-10' % chx,
                     'space_counts_%s.png' % chx, legend=position_legend)
    for ch in channels:
        chx = ch.replace('c', 'ch')
        _plot_mean_std(
            ds['space_counts_%s_mean' % ch], ds['space_counts_%s_std' % ch],
            channel_title[ch],
            r'space_counts_%s: $\mu\pm\sigma$' % chx,
            'space_counts_%s_mean_std.png' % chx)

    # --- detector housekeeping temperatures, all on one figure ---
    fig, ax = plt.subplots()
    for part in ('radiator', 'electronics', 'cooler', 'baseplate',
                 'motor', 'adconv', 'patch', 'patch_extended'):
        # ('patch' was labelled 'path' in the original -- assumed a typo.)
        ax.plot(timestamp, ds['temp_detector_' + part], '-',
                markersize=0.2, label=part)
    _finish(fig, ax, None, r'detector temperature [$K$]',
            'temp_detector.png', legend='auto', loc='center right')
if __name__ == "__main__":
    parser = OptionParser("usage: %prog file_in")
    (options, args) = parser.parse_args()
    # Fail with the usage message instead of an IndexError when the
    # required positional argument is missing.
    if not args:
        parser.error("missing required argument: file_in")
    plot_variables(args[0])
| 35.895413
| 107
| 0.654041
| 3,018
| 19,563
| 3.96554
| 0.047382
| 0.044703
| 0.070187
| 0.048128
| 0.811247
| 0.79629
| 0.787182
| 0.778409
| 0.730699
| 0.719836
| 0
| 0.030966
| 0.168021
| 19,563
| 544
| 108
| 35.961397
| 0.70435
| 0.106425
| 0
| 0.551487
| 0
| 0
| 0.205784
| 0.06123
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002288
| false
| 0
| 0.032037
| 0
| 0.034325
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
296131891e0d3732cbea1d4d4355d95fd060db6f
| 108
|
py
|
Python
|
config.py
|
ppiecuch/gd_chipmunk
|
3deb177c1cca0a2dc41c91aa5f174105a3564887
|
[
"MIT"
] | null | null | null |
config.py
|
ppiecuch/gd_chipmunk
|
3deb177c1cca0a2dc41c91aa5f174105a3564887
|
[
"MIT"
] | null | null | null |
config.py
|
ppiecuch/gd_chipmunk
|
3deb177c1cca0a2dc41c91aa5f174105a3564887
|
[
"MIT"
] | null | null | null |
# config.py
def can_build(env, platform):
    """Build this module only when the environment is not a production build."""
    is_production = env['production']
    return not is_production
def configure(env):
    # No module-specific build configuration is required.
    pass
| 10.8
| 32
| 0.666667
| 15
| 108
| 4.733333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212963
| 108
| 9
| 33
| 12
| 0.835294
| 0.083333
| 0
| 0
| 0
| 0
| 0.103093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.25
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
296563ea5a34dabeb87a196fea6868448d4c8459
| 54,004
|
py
|
Python
|
demisto_sdk/tests/integration_tests/format_integration_test.py
|
SergeBakharev/demisto-sdk
|
17d00942a1bd33039a8aba9ddffecfd81008d275
|
[
"MIT"
] | null | null | null |
demisto_sdk/tests/integration_tests/format_integration_test.py
|
SergeBakharev/demisto-sdk
|
17d00942a1bd33039a8aba9ddffecfd81008d275
|
[
"MIT"
] | null | null | null |
demisto_sdk/tests/integration_tests/format_integration_test.py
|
SergeBakharev/demisto-sdk
|
17d00942a1bd33039a8aba9ddffecfd81008d275
|
[
"MIT"
] | null | null | null |
import json
import os
import re
from pathlib import PosixPath
from typing import List
import pytest
from click.testing import CliRunner
from demisto_sdk.__main__ import main
from demisto_sdk.commands.common import tools
from demisto_sdk.commands.common.handlers import YAML_Handler
from demisto_sdk.commands.common.hook_validations.content_entity_validator import \
ContentEntityValidator
from demisto_sdk.commands.common.hook_validations.playbook import \
PlaybookValidator
from demisto_sdk.commands.common.tools import (get_dict_from_file,
is_test_config_match)
from demisto_sdk.commands.format import format_module, update_generic
from demisto_sdk.commands.format.update_generic_yml import BaseUpdateYML
from demisto_sdk.commands.format.update_integration import IntegrationYMLFormat
from demisto_sdk.commands.format.update_playbook import PlaybookYMLFormat
from demisto_sdk.commands.lint.commands_builder import excluded_files
from demisto_sdk.tests.constants_test import (
DESTINATION_FORMAT_INTEGRATION_COPY, DESTINATION_FORMAT_PLAYBOOK_COPY,
INTEGRATION_WITH_TEST_PLAYBOOKS, PLAYBOOK_WITH_TEST_PLAYBOOKS,
SOURCE_FORMAT_INTEGRATION_COPY, SOURCE_FORMAT_PLAYBOOK_COPY)
from demisto_sdk.tests.test_files.validate_integration_test_valid_types import (
GENERIC_DEFINITION, GENERIC_FIELD, GENERIC_MODULE, GENERIC_TYPE)
from TestSuite.test_tools import ChangeCWD
# Shared YAML handler used by every test in this module.
yaml = YAML_Handler()

# Snapshot the fixture files' contents once at import time so that `format`
# runs performed by one test cannot change what another test reads.
with open(SOURCE_FORMAT_INTEGRATION_COPY) as of:
    SOURCE_FORMAT_INTEGRATION_YML = of.read()  # prevents overriding by other `format` calls.
with open(SOURCE_FORMAT_PLAYBOOK_COPY) as of:
    SOURCE_FORMAT_PLAYBOOK_YML = of.read()  # prevents overriding by other `format` calls.

# Raw yml contents used to parametrize the 'No tests' prompt tests below.
BASIC_YML_CONTENTS = (SOURCE_FORMAT_INTEGRATION_YML, SOURCE_FORMAT_PLAYBOOK_YML)

# (source path, destination path, formatter class, yml title, file type) tuples.
# NOTE(review): not referenced in this part of the module — presumably used by
# tests elsewhere in the file; confirm before removing.
BASIC_YML_TEST_PACKS = [
    (SOURCE_FORMAT_INTEGRATION_COPY, DESTINATION_FORMAT_INTEGRATION_COPY, IntegrationYMLFormat, 'New Integration_copy',
     'integration'),
    (SOURCE_FORMAT_PLAYBOOK_COPY, DESTINATION_FORMAT_PLAYBOOK_COPY, PlaybookYMLFormat, 'File Enrichment-GenericV2_copy',
     'playbook')
]

# Same tuple shape, but the sources already have test playbooks configured;
# used by the conf.json configuration tests.
YML_FILES_WITH_TEST_PLAYBOOKS = [
    (
        INTEGRATION_WITH_TEST_PLAYBOOKS,
        DESTINATION_FORMAT_INTEGRATION_COPY,
        IntegrationYMLFormat,
        'New Integration',
        'integration'),
    (
        PLAYBOOK_WITH_TEST_PLAYBOOKS,
        DESTINATION_FORMAT_PLAYBOOK_COPY,
        PlaybookYMLFormat,
        'File Enrichment-GenericV2_copy',
        'playbook'
    )
]

# Name of the demisto-sdk CLI sub-command exercised throughout this module.
FORMAT_CMD = "format"

# Baseline conf.json content; tests seed a temporary conf.json with this and
# then check whether `format` added or preserved entries as expected.
CONF_JSON_ORIGINAL_CONTENT = {
    "tests": [
        {
            "integrations": "PagerDuty v2",
            "playbookID": "PagerDuty Test"
        },
        {
            "integrations": "Account Enrichment",
            "playbookID": "PagerDuty Test"
        },
        {
            "integrations": "TestCreateDuplicates",
            "playbookID": "PagerDuty Test"
        }
    ]
}
@pytest.mark.parametrize('source_yml', BASIC_YML_CONTENTS)
def test_integration_format_yml_with_no_test_positive(tmp_path: PosixPath, source_yml: str):
    """
    Given
    - A yml file (integration or playbook) with no 'tests' key configured.
    When
    - Running format with '-at' and answering 'Y' to the 'No tests' prompt.
    Then
    - No exception is raised.
    - 'No tests' is written to the file on the first run.
    - The prompt does not appear on a second run over the formatted file.
    """
    src = tmp_path / 'source.yml'
    dst = tmp_path / 'output.yml'
    source_path = str(src)
    output_path = str(dst)
    src.write_text(source_yml)
    cli = CliRunner()
    # First run: '-at' forces the 'No tests' prompt, answered with 'Y'.
    first_run = cli.invoke(main, [FORMAT_CMD, '-i', source_path, '-o', output_path, '-at'], input='Y')
    prompt = (f'The file {source_path} has no test playbooks configured. '
              f'Do you want to configure it with "No tests"')
    assert not first_run.exception
    assert prompt in first_run.output
    formatted = get_dict_from_file(output_path)
    assert formatted[0].get('tests') == ['No tests (auto formatted)']
    # Second run over the already-formatted file: no prompt, no exception.
    second_run = cli.invoke(main, [FORMAT_CMD, '-i', output_path], input='Y')
    assert not second_run.exception
    assert prompt not in second_run.output
@pytest.mark.parametrize('source_yml', BASIC_YML_CONTENTS)
def test_integration_format_yml_with_no_test_negative(tmp_path: PosixPath, source_yml: str):
    """
    Given
    - A yml file (integration or playbook) with no 'tests' key configured.
    When
    - Running format with '-at' and answering 'N' to the 'No tests' prompt.
    Then
    - No exception is raised and 'No tests' is NOT added to the file.
    """
    src = tmp_path / 'source.yml'
    dst = tmp_path / 'output.yml'
    source_path = str(src)
    output_path = str(dst)
    src.write_text(source_yml)
    cli = CliRunner()
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', source_path, '-o', output_path, '-at'], input='N')
    assert not outcome.exception
    prompt = f'The file {source_path} has no test playbooks configured. Do you want to configure it with "No tests"'
    assert prompt in outcome.output
    # Declining the prompt must leave the 'tests' key unset.
    formatted = get_dict_from_file(output_path)
    assert not formatted[0].get('tests')
@pytest.mark.parametrize('source_yml', BASIC_YML_CONTENTS)
def test_integration_format_yml_with_no_test_no_interactive_positive(tmp_path: PosixPath, source_yml: str):
    """
    Given
    - A yml file (integration or playbook) with no 'tests' key configured.
    When
    - Running format with the non-interactive '-y' option.
    Then
    - No exception is raised and 'No tests' is added without any prompt.
    """
    src = tmp_path / 'source.yml'
    dst = tmp_path / 'output.yml'
    src.write_text(source_yml)
    cli = CliRunner()
    # '-y' auto-accepts every prompt, so no input is supplied.
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(src), '-o', str(dst), '-y'])
    assert not outcome.exception
    formatted = get_dict_from_file(str(dst))
    assert formatted[0].get('tests') == ['No tests (auto formatted)']
@pytest.mark.parametrize('source_path,destination_path,formatter,yml_title,file_type', YML_FILES_WITH_TEST_PLAYBOOKS)
def test_integration_format_configuring_conf_json_no_interactive_positive(tmp_path: PosixPath,
                                                                          source_path: str,
                                                                          destination_path: str,
                                                                          formatter: BaseUpdateYML,
                                                                          yml_title: str,
                                                                          file_type: str):
    """
    Given
    - A yml file whose 'tests' playbooks are not registered in conf.json.
    When
    - Running format with the non-interactive '-y' option.
    Then
    - No exception is raised.
    - Every test playbook from the yml gets a conf.json entry: playbooks get
      {"playbookID": <id>}, integrations additionally get "integrations": yml_title.
    """
    # Seed a temporary conf.json with the baseline content and point the
    # formatter at it.
    conf_json_path = str(tmp_path / 'conf.json')
    with open(conf_json_path, 'w') as conf_file:
        json.dump(CONF_JSON_ORIGINAL_CONTENT, conf_file, indent=4)
    BaseUpdateYML.CONF_PATH = conf_json_path
    expected_playbooks = ['test1', 'test2']
    saved_file_path = str(tmp_path / os.path.basename(destination_path))
    cli = CliRunner()
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', source_path, '-o', saved_file_path, '-y'])
    assert not outcome.exception
    # Playbooks are registered without an integration id; integrations with one.
    integration_id = '' if file_type == 'playbook' else yml_title
    _verify_conf_json_modified(expected_playbooks, integration_id, conf_json_path)
@pytest.mark.parametrize('source_path,destination_path,formatter,yml_title,file_type', YML_FILES_WITH_TEST_PLAYBOOKS)
def test_integration_format_configuring_conf_json_positive(tmp_path: PosixPath,
                                                           source_path: str,
                                                           destination_path: str,
                                                           formatter: BaseUpdateYML,
                                                           yml_title: str,
                                                           file_type: str):
    """
    Given
    - A yml file whose 'tests' playbooks are not registered in conf.json.
    When
    - Answering 'Y' to the prompt asking whether to register them.
    Then
    - No exception is raised.
    - Every test playbook from the yml gets a conf.json entry (with the
      integration id for integrations, without it for playbooks).
    - A second run raises no prompt.
    """
    # Seed a temporary conf.json and point the formatter at it.
    conf_json_path = str(tmp_path / 'conf.json')
    with open(conf_json_path, 'w') as conf_file:
        json.dump(CONF_JSON_ORIGINAL_CONTENT, conf_file, indent=4)
    BaseUpdateYML.CONF_PATH = conf_json_path
    expected_playbooks = ['test1', 'test2']
    saved_file_path = str(tmp_path / os.path.basename(destination_path))
    cli = CliRunner()
    # First run: accept the conf.json configuration prompt.
    first_run = cli.invoke(main, [FORMAT_CMD, '-i', source_path, '-o', saved_file_path], input='Y')
    prompt = 'The following test playbooks are not configured in conf.json file'
    assert not first_run.exception
    assert prompt in first_run.output
    integration_id = '' if file_type == 'playbook' else yml_title
    _verify_conf_json_modified(expected_playbooks, integration_id, conf_json_path)
    # Second run over the saved file: no prompt, no exception.
    second_run = cli.invoke(main, [FORMAT_CMD, '-i', saved_file_path], input='Y')
    assert not second_run.exception
    assert prompt not in second_run.output
@pytest.mark.parametrize('source_path,destination_path,formatter,yml_title,file_type', YML_FILES_WITH_TEST_PLAYBOOKS)
def test_integration_format_configuring_conf_json_negative(tmp_path: PosixPath,
                                                           source_path: str,
                                                           destination_path: str,
                                                           formatter: BaseUpdateYML,
                                                           yml_title: str,
                                                           file_type: str):
    """
    Given
    - A yml file whose 'tests' playbooks are not registered in conf.json.
    When
    - Answering 'N' to the prompt asking whether to register them.
    Then
    - No exception is raised and conf.json is left untouched.
    """
    # Seed a temporary conf.json and point the formatter at it.
    conf_json_path = str(tmp_path / 'conf.json')
    with open(conf_json_path, 'w') as conf_file:
        json.dump(CONF_JSON_ORIGINAL_CONTENT, conf_file, indent=4)
    BaseUpdateYML.CONF_PATH = conf_json_path
    saved_file_path = str(tmp_path / os.path.basename(destination_path))
    cli = CliRunner()
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', source_path, '-o', saved_file_path], input='N')
    prompt = 'The following test playbooks are not configured in conf.json file'
    assert not outcome.exception
    assert prompt in outcome.output
    # Declining must leave conf.json byte-equivalent to the original content.
    with open(conf_json_path) as conf_file:
        assert json.load(conf_file) == CONF_JSON_ORIGINAL_CONTENT
    assert 'Skipping test playbooks configuration' in outcome.output
def _verify_conf_json_modified(test_playbooks: List, yml_title: str, conf_json_path: str):
    """
    Verify that every playbook in ``test_playbooks`` has a matching entry in
    the conf.json file at ``conf_json_path``.

    Args:
        test_playbooks: Test playbook IDs expected to be configured.
        yml_title: Integration ID the entries should reference ('' for playbooks).
        conf_json_path: Path to the conf.json file to inspect.

    Raises:
        AssertionError: If any playbook has no matching conf.json entry.
    """
    # Fix 1: removed the original `try: ... except Exception: raise` wrapper,
    # which re-raised everything unchanged and therefore did nothing.
    # Fix 2: `any()` now evaluates the match predicate itself; the original
    # yielded the config dict and relied on its truthiness, which would treat
    # a matching-but-empty config as "not found".
    with open(conf_json_path) as data_file:
        conf_json_content = json.load(data_file)
    for test_playbook in test_playbooks:
        assert any(
            is_test_config_match(test_config,
                                 test_playbook_id=test_playbook,
                                 integration_id=yml_title,
                                 )
            for test_config in conf_json_content['tests']
        )
def test_integration_format_remove_playbook_sourceplaybookid(tmp_path):
    """
    Given
    - A playbook containing the `sourceplaybookid` field and an output path.
    When
    - Running format with '-at' and declining the 'No tests' prompt.
    Then
    - `sourceplaybookid` is removed from the formatted yml.
    """
    source_playbook_path = SOURCE_FORMAT_PLAYBOOK_COPY
    playbook_path = str(tmp_path / 'format_new_playbook_copy.yml')
    cli = CliRunner()
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', source_playbook_path, '-o', playbook_path, '-at'], input='N')
    prompt = f'The file {source_playbook_path} has no test playbooks configured. Do you want to configure it with "No tests"'
    assert outcome.exit_code == 0
    assert prompt in outcome.output
    assert '======= Updating file ' in outcome.stdout
    assert f'Format Status on file: {source_playbook_path} - Success' in outcome.stdout
    with open(playbook_path) as formatted_file:
        formatted_yml = yaml.load(formatted_file)
    assert 'sourceplaybookid' not in formatted_yml
    assert not outcome.exception
def test_format_on_valid_py(mocker, repo):
    """
    Given
    - A valid python code file inside an integration.
    When
    - Running format on it.
    Then
    - Format succeeds and leaves the file unchanged.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    integration = pack.create_integration('integration')
    code_snippet = 'test\n'
    integration.code.write(code_snippet)
    with ChangeCWD(pack.repo_path):
        cli = CliRunner(mix_stderr=False)
        outcome = cli.invoke(main, [FORMAT_CMD, '-nv', '-i', integration.code.path, '-v'], catch_exceptions=True)
    for expected in ('======= Updating file', 'Running autopep8 on file', 'Success'):
        assert expected in outcome.stdout
    # Already-valid code must come back byte-identical.
    assert integration.code.read() == code_snippet
def test_format_on_invalid_py_empty_lines(mocker, repo):
    """
    Given
    - A python file with trailing empty lines (autopep8-fixable).
    When
    - Running format on it.
    Then
    - Format succeeds and rewrites the file.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    integration = pack.create_integration('integration')
    code_snippet = 'test\n\n\n\n'
    integration.code.write(code_snippet)
    with ChangeCWD(pack.repo_path):
        cli = CliRunner(mix_stderr=False)
        outcome = cli.invoke(main, [FORMAT_CMD, '-nv', '-i', integration.code.path, '-v'], catch_exceptions=False)
    for expected in ('======= Updating file', 'Running autopep8 on file', 'Success'):
        assert expected in outcome.stdout
    # autopep8 must have changed the file contents.
    assert integration.code.read() != code_snippet
def test_format_on_invalid_py_dict(mocker, repo):
    """
    Given
    - A python file with a dict literal missing spaces (autopep8-fixable).
    When
    - Running format on it.
    Then
    - Format succeeds and rewrites the file.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    integration = pack.create_integration('integration')
    code_snippet = "{'test':'testing','test1':'testing1'}"
    integration.code.write(code_snippet)
    with ChangeCWD(pack.repo_path):
        cli = CliRunner(mix_stderr=False)
        outcome = cli.invoke(main, [FORMAT_CMD, '-nv', '-i', integration.code.path, '-v'], catch_exceptions=False)
    for expected in ('======= Updating file', 'Running autopep8 on file', 'Success'):
        assert expected in outcome.stdout
    # autopep8 must have changed the file contents.
    assert integration.code.read() != code_snippet
def test_format_on_invalid_py_long_dict(mocker, repo):
    """
    Given
    - A python file with an over-long dict literal (autopep8-fixable).
    When
    - Running format on it.
    Then
    - Format succeeds and rewrites the file.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    integration = pack.create_integration('integration')
    code_snippet = ("{'test':'testing','test1':'testing1','test2':'testing2','test3':'testing3',"
                    "'test4':'testing4','test5':'testing5','test6':'testing6'}")
    integration.code.write(code_snippet)
    with ChangeCWD(pack.repo_path):
        cli = CliRunner(mix_stderr=False)
        outcome = cli.invoke(main, [FORMAT_CMD, '-nv', '-i', integration.code.path, '-v'], catch_exceptions=False)
    for expected in ('======= Updating file', 'Running autopep8 on file', 'Success'):
        assert expected in outcome.stdout
    # autopep8 must have changed the file contents.
    assert integration.code.read() != code_snippet
def test_format_on_invalid_py_long_dict_no_verbose(mocker, repo):
    """
    Same as the previous test, but without the '-v' flag.

    Given
    - A python file with an over-long dict literal (autopep8-fixable).
    When
    - Running format without '-v'.
    Then
    - Format succeeds, rewrites the file, and stays quiet about autopep8.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    integration = pack.create_integration('integration')
    code_snippet = ("{'test':'testing','test1':'testing1','test2':'testing2','test3':'testing3',"
                    "'test4':'testing4','test5':'testing5','test6':'testing6'}")
    integration.code.write(code_snippet)
    with ChangeCWD(pack.repo_path):
        cli = CliRunner(mix_stderr=False)
        outcome = cli.invoke(main, [FORMAT_CMD, '-nv', '-i', integration.code.path], catch_exceptions=False)
    assert '======= Updating file' in outcome.stdout
    # Without '-v' the autopep8 step must not be reported.
    assert 'Running autopep8 on file' not in outcome.stdout
    assert 'Success' in outcome.stdout
    assert integration.code.read() != code_snippet
def test_format_on_relative_path_playbook(mocker, repo, monkeypatch):
    """
    Given
    - playbook to validate on with a relative path
    When
    - Running format
    - Running validate
    Then
    - Ensure format passes.
    - Ensure validate passes.
    """
    pack = repo.create_pack('PackName')
    playbook = pack.create_playbook('playbook')
    playbook.create_default_playbook()
    # Pretend the playbook belongs to a content repo so format can resolve the
    # relative input path to a repo-relative output path.
    mocker.patch.object(update_generic, 'is_file_from_content_repo',
                        return_value=(True, f'{playbook.path}/playbook.yml'))
    # Stub out validations that need repo context this fixture doesn't provide.
    mocker.patch.object(PlaybookValidator, 'is_script_id_valid', return_value=True)
    mocker.patch.object(PlaybookValidator, 'name_not_contain_the_type', return_value=True)
    mocker.patch.object(ContentEntityValidator, 'validate_readme_exists', return_value=True)
    mocker.patch.object(tools, 'is_external_repository', return_value=True)
    # Decline any interactive prompt format may raise (e.g. 'No tests').
    monkeypatch.setattr('builtins.input', lambda _: 'N')
    success_reg = re.compile("Format Status .+?- Success\n")
    with ChangeCWD(playbook.path):
        runner = CliRunner(mix_stderr=False)
        # Format is invoked with a path relative to the playbook directory.
        result_format = runner.invoke(main, [FORMAT_CMD, '-i', 'playbook.yml', '-v'], catch_exceptions=False)
        with ChangeCWD(repo.path):
            # Validate is invoked from the repo root with a repo-relative path.
            result_validate = runner.invoke(main, ['validate', '-i', 'Packs/PackName/Playbooks/playbook.yml',
                                                   '--no-docker-checks', '--no-conf-json', '--allow-skipped'],
                                            catch_exceptions=False)
    assert '======= Updating file' in result_format.stdout
    assert success_reg.search(result_format.stdout)
    assert 'The files are valid' in result_validate.stdout
def test_format_integration_skipped_files(repo):
    """
    Given:
    - A pack containing an integration, doc files, and lint artifacts
      (e.g. conftest.py) that format should skip.
    When:
    - Running format on the whole pack.
    Then:
    - Format succeeds and never mentions any of the skipped files.
    """
    pack = repo.create_pack('PackName')
    pack.create_integration('integration')
    pack.create_doc_file()
    cli = CliRunner(mix_stderr=False)
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(pack.path)], catch_exceptions=False)
    assert '======= Updating file' in outcome.stdout
    assert 'Success' in outcome.stdout
    # pack_metadata.json is skipped in addition to the lint-excluded files.
    for skipped_name in list(excluded_files) + ['pack_metadata.json']:
        assert skipped_name not in outcome.stdout
def test_format_commonserver_skipped_files(repo):
    """
    Given:
    - The Base pack containing the CommonServerPython script.
    When:
    - Running format on the pack.
    Then:
    - Format succeeds, processes CommonServerPython.py, and skips the other
      excluded files.
    """
    pack = repo.create_pack('Base')
    pack.create_script('CommonServerPython')
    cli = CliRunner(mix_stderr=False)
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(pack.path), '-v'], catch_exceptions=False)
    assert 'Success' in outcome.stdout
    assert 'CommonServerPython.py' in outcome.stdout
    # Everything on the exclusion list except CommonServerPython.py itself
    # must stay untouched.
    remaining_excluded = list(excluded_files)
    remaining_excluded.remove('CommonServerPython.py')
    for skipped_name in remaining_excluded:
        assert skipped_name not in outcome.stdout
def test_format_playbook_without_fromversion_no_preset_flag(repo):
    """
    Given:
    - A playbook with no fromversion key.
    When:
    - Running format with --assume-yes and no --from-version flag.
    Then:
    - Format succeeds and sets fromversion to the oldest supported version.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    # Strip fromversion (if the default playbook had one) before formatting.
    content.pop('fromversion', None)
    assert 'fromversion' not in content
    playbook.yml.write_dict(content)
    cli = CliRunner(mix_stderr=False)
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(playbook.yml.path), '--assume-yes', '-v'])
    assert 'Success' in outcome.stdout
    assert playbook.yml.read_dict().get('fromversion') == '5.5.0'
def test_format_playbook_without_fromversion_with_preset_flag(repo):
    """
    Given:
    - A playbook with no fromversion key.
    When:
    - Running format with --assume-yes and an explicit --from-version.
    Then:
    - Format succeeds and sets fromversion to the requested version.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    # Strip fromversion (if the default playbook had one) before formatting.
    content.pop('fromversion', None)
    assert 'fromversion' not in content
    playbook.yml.write_dict(content)
    cli = CliRunner(mix_stderr=False)
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(playbook.yml.path), '--assume-yes', '--from-version',
                                '6.0.0', '-v'])
    assert 'Success' in outcome.stdout
    assert playbook.yml.read_dict().get('fromversion') == '6.0.0'
def test_format_playbook_without_fromversion_with_preset_flag_manual(repo):
    """
    Given:
    - A playbook with no fromversion key.
    When:
    - Running format interactively with an explicit --from-version, answering 'y'.
    Then:
    - Format succeeds and sets fromversion to the requested version.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    # Strip fromversion (if the default playbook had one) before formatting.
    content.pop('fromversion', None)
    assert 'fromversion' not in content
    playbook.yml.write_dict(content)
    cli = CliRunner(mix_stderr=False)
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(playbook.yml.path), '--from-version',
                                '6.0.0', '-v'], input='y')
    assert 'Success' in outcome.stdout
    assert playbook.yml.read_dict().get('fromversion') == '6.0.0'
def test_format_playbook_without_fromversion_without_preset_flag_manual(repo):
    """
    Given:
    - A playbook with no fromversion key.
    When:
    - Running format interactively and typing the version at the prompt.
    Then:
    - Format succeeds and sets fromversion to the typed version.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    # Strip fromversion (if the default playbook had one) before formatting.
    content.pop('fromversion', None)
    assert 'fromversion' not in content
    playbook.yml.write_dict(content)
    cli = CliRunner(mix_stderr=False)
    # 'y' accepts the prompt; '5.5.0' is the manually-entered version.
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(playbook.yml.path), '-v'], input='y\n5.5.0')
    assert 'Success' in outcome.stdout
    assert playbook.yml.read_dict().get('fromversion') == '5.5.0'
def test_format_playbook_without_fromversion_without_preset_flag_manual_two_tries(repo):
    """
    Given:
    - A playbook with no fromversion key.
    When:
    - Running format interactively, first typing a malformed version ('5.5'),
      then a valid one ('5.5.0').
    Then:
    - Format rejects the malformed version, then succeeds with the valid one.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    # Strip fromversion (if the default playbook had one) before formatting.
    content.pop('fromversion', None)
    assert 'fromversion' not in content
    playbook.yml.write_dict(content)
    cli = CliRunner(mix_stderr=False)
    # 'y' accepts the prompt; '5.5' is invalid and must be re-asked; '5.5.0' sticks.
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(playbook.yml.path), '-v'], input='y\n5.5\n5.5.0')
    assert 'Version format is not valid' in outcome.stdout
    assert 'Success' in outcome.stdout
    assert playbook.yml.read_dict().get('fromversion') == '5.5.0'
def test_format_playbook_copy_removed_from_name_and_id(repo):
    """
    Given:
    - A playbook whose name and id both end in `_copy`.
    When:
    - Running format on it.
    Then:
    - Format succeeds and strips `_copy` from both name and id.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    original_id = content['id']
    original_name = content['name']
    # Simulate a duplicated playbook by suffixing both identifiers.
    content['id'] = original_id + '_copy'
    content['name'] = original_name + '_copy'
    playbook.yml.write_dict(content)
    cli = CliRunner(mix_stderr=False)
    outcome = cli.invoke(main, [FORMAT_CMD, '-i', str(playbook.yml.path), '-v'], input='y\n5.5.0')
    assert 'Success' in outcome.stdout
    formatted = playbook.yml.read_dict()
    assert formatted.get('id') == original_id
    assert formatted.get('name') == original_name
def test_format_playbook_no_input_specified(mocker, repo):
    """
    Given:
    - A playbook with name and id ending in `_copy`
    When:
    - Running format on the pack
    - The path of the playbook was not provided
    Then:
    - The command will find the changed playbook
    - Ensure format runs successfully
    - Ensure format removes `_copy` from both name and id.
    """
    pack = repo.create_pack('Temp')
    playbook = pack.create_playbook('my_temp_playbook')
    playbook.create_default_playbook()
    playbook_content = playbook.yml.read_dict()
    playbook_id = playbook_content['id']
    playbook_name = playbook_content['name']
    playbook_content['id'] = playbook_id + '_copy'
    playbook_content['name'] = playbook_name + '_copy'
    playbook.yml.write_dict(playbook_content)
    # No '-i' argument: format must discover the changed playbook via git.
    mocker.patch.object(format_module, 'get_files_to_format_from_git', return_value=[str(playbook.yml.path)])
    runner = CliRunner(mix_stderr=False)
    format_result = runner.invoke(main, [FORMAT_CMD, '-v'], input='y\n5.5.0')
    # Fix: removed a leftover debug `print(format_result.stdout)` that spammed
    # the test output.
    assert 'Success' in format_result.stdout
    assert playbook.yml.read_dict().get('id') == playbook_id
    assert playbook.yml.read_dict().get('name') == playbook_name
def test_format_incident_type_layout_id(repo):
    """
    Given:
    - Content pack with incident type and layout
    - Layout with ID which is a UUID string
    - Incident type which is linked to the above layout
    When:
    - Running format on the content pack
    Then:
    - Verify layout ID is updated
    - Verify the updated layout ID is also updated in the incident type
    """
    pack = repo.create_pack('PackName')
    # Layout whose id is a UUID; format should replace the id with the name.
    layout = pack.create_layoutcontainer(
        name='layout',
        content={
            'id': '8f503eb3-883d-4626-8a45-16f56995bd43',
            'name': 'IncidentLayout',
            'group': 'incident',
            'detailsV2': {"tabs": []}
        }
    )
    # Incident type referencing the layout and a playbook by their UUIDs.
    incident_type = pack.create_incident_type(
        name='incidentype',
        content={
            'layout': '8f503eb3-883d-4626-8a45-16f56995bd43',
            'color': '',
            'playbookId': '9f503eb3-333d-2226-7b45-16f56885bd45'
        }
    )
    # Playbook whose id is the UUID the incident type points to.
    playbook = pack.create_playbook(
        name='playbook',
        yml={
            'id': '9f503eb3-333d-2226-7b45-16f56885bd45',
            'name': 'PlaybookName',
            'tasks': {},
            'fromversion': '5.0.0',
            'description': ''
        }
    )
    runner = CliRunner(mix_stderr=False)
    format_result = runner.invoke(main, [FORMAT_CMD, '-i', str(pack.path), '-v', '-y'], catch_exceptions=False)
    assert format_result.exit_code == 0
    assert 'Success' in format_result.stdout
    assert f'======= Updating file {pack.path}' in format_result.stdout
    assert f'======= Updating file {layout.path}' in format_result.stdout
    assert f'======= Updating file {incident_type.path}' in format_result.stdout
    assert f'======= Updating file {playbook.yml.path}' in format_result.stdout
    # Layout and playbook ids should now equal their names.
    with open(layout.path) as layout_file:
        layout_content = json.loads(layout_file.read())
        assert layout_content['name'] == layout_content['id']
    with open(playbook.yml.path) as playbook_file:
        playbook_content = yaml.load(playbook_file)
        assert playbook_content['name'] == playbook_content['id']
    # The incident type's references must follow the renamed ids.
    with open(incident_type.path) as incident_type_file:
        incident_type_content = json.loads(incident_type_file.read())
        assert incident_type_content['layout'] == 'IncidentLayout'
        assert incident_type_content['playbookId'] == 'PlaybookName'
@pytest.mark.parametrize('field_to_test, invalid_value, expected_value_after_format', [
    ('fromVersion', '6.0.0', '6.5.0'),
    ('group', 0, 4),
    ('id', 'asset_operatingsystem', 'generic_asset_operatingsystem')
])
def test_format_generic_field_wrong_values(mocker, repo, field_to_test, invalid_value,
                                           expected_value_after_format):
    """
    Given
    - Invalid generic field.
    When
    - Running format on it.
    Then
    - Ensure Format fixed the invalid value of the given generic field.
    - Ensure success message is printed.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    generic_field = GENERIC_FIELD.copy()
    # Corrupt the parametrized field before writing the fixture out.
    generic_field[field_to_test] = invalid_value
    pack.create_generic_field("generic-field", generic_field)
    generic_field_path = pack.generic_fields[0].path
    with ChangeCWD(pack.repo_path):
        runner = CliRunner(mix_stderr=False)
        result = runner.invoke(main, [FORMAT_CMD, '-i', generic_field_path, '-v', '-y'], catch_exceptions=False)
        assert 'Setting fromVersion field' in result.stdout
        assert 'Success' in result.stdout
        assert f'======= Updating file {generic_field_path}' in result.stdout
        assert result.exit_code == 0
        # check that format replaced the invalid value of the tested field
        # (fromVersion, group, or id — per the parametrization above):
        with open(generic_field_path) as f:
            updated_generic_field = json.load(f)
        assert updated_generic_field[field_to_test] == expected_value_after_format
def test_format_generic_field_missing_from_version_key(mocker, repo):
    """
    Given
    - Invalid generic field - fromVersion field is missing
    When
    - Running format on it.
    Then
    - Ensure Format fixed the given generic field - fromVersion field was added and it's value is 6.5.0
    - Ensure success message is printed.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    generic_field = GENERIC_FIELD.copy()
    # Fix: the original guard `if generic_field['fromVersion']:` raises
    # KeyError when the key is already absent; pop() with a default removes
    # the key unconditionally and safely.
    generic_field.pop('fromVersion', None)
    pack.create_generic_field("generic-field", generic_field)
    generic_field_path = pack.generic_fields[0].path
    with ChangeCWD(pack.repo_path):
        runner = CliRunner(mix_stderr=False)
        result = runner.invoke(main, [FORMAT_CMD, '-i', generic_field_path, '-v', '-y'], catch_exceptions=False)
        assert 'Setting fromVersion field' in result.stdout
        assert 'Success' in result.stdout
        assert f'======= Updating file {generic_field_path}' in result.stdout
        assert result.exit_code == 0
        # check that sdk format did add a fromVersion key with the fixture's
        # expected value ('6.5.0'):
        with open(generic_field_path) as f:
            updated_generic_field = json.load(f)
        assert updated_generic_field['fromVersion'] == GENERIC_FIELD['fromVersion']
def test_format_generic_type_wrong_from_version(mocker, repo):
    """
    Given
    - A generic type whose fromVersion is below 6.5.0.
    When
    - Running format on it.
    Then
    - Format fixes the fromVersion and prints a success message.
    """
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    broken_generic_type = GENERIC_TYPE.copy()
    broken_generic_type['fromVersion'] = '6.0.0'
    pack.create_generic_type("generic-type", broken_generic_type)
    generic_type_path = pack.generic_types[0].path
    with ChangeCWD(pack.repo_path):
        cli = CliRunner(mix_stderr=False)
        outcome = cli.invoke(main, [FORMAT_CMD, '-i', generic_type_path, '-v', '-y'], catch_exceptions=False)
        assert 'Setting fromVersion field' in outcome.stdout
        assert 'Success' in outcome.stdout
        assert f'======= Updating file {generic_type_path}' in outcome.stdout
        assert outcome.exit_code == 0
        # Format should restore the fixture's valid fromVersion (6.5.0).
        with open(generic_type_path) as type_file:
            assert json.load(type_file)['fromVersion'] == GENERIC_TYPE['fromVersion']
def test_format_generic_type_missing_from_version_key(mocker, repo):
    """
    Given
    - Invalid generic type - fromVersion field is missing
    When
    - Running format on it.
    Then
    - Ensure Format fixed the given generic type - fromVersion field was added and it's value is 6.5.0
    - Ensure success message is printed.
    """
    # Make format treat the file as living outside the content repo.
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    generic_type = GENERIC_TYPE.copy()
    # pop with a default removes the key when present and is a no-op otherwise,
    # unlike the previous `if generic_type['fromVersion']` guard which raised
    # KeyError whenever the fixture lacked the key (the exact case under test).
    generic_type.pop('fromVersion', None)
    pack.create_generic_type("generic-type", generic_type)
    generic_type_path = pack.generic_types[0].path
    with ChangeCWD(pack.repo_path):
        runner = CliRunner(mix_stderr=False)
        result = runner.invoke(main, [FORMAT_CMD, '-i', generic_type_path, '-v', '-y'], catch_exceptions=False)
        assert 'Setting fromVersion field' in result.stdout
        assert 'Success' in result.stdout
        assert f'======= Updating file {generic_type_path}' in result.stdout
        assert result.exit_code == 0
    # check that sdk format did add a fromVersion key with '6.5.0' as a value:
    with open(generic_type_path) as f:
        updated_generic_type = json.load(f)
    assert updated_generic_type['fromVersion'] == GENERIC_TYPE['fromVersion']
def test_format_generic_module_wrong_from_version(mocker, repo):
    """
    Given
    - Invalid generic module - fromVersion field is below 6.5.0
    When
    - Running format on it.
    Then
    - Ensure Format fixed the invalid value of the given generic module.
    - Ensure success message is printed.
    """
    # Make format treat the file as living outside the content repo.
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    content_pack = repo.create_pack('PackName')
    # Copy the fixture and plant an out-of-date fromVersion in one step.
    outdated_generic_module = dict(GENERIC_MODULE, fromVersion='6.0.0')
    content_pack.create_generic_module("generic-module", outdated_generic_module)
    target_path = content_pack.generic_modules[0].path
    with ChangeCWD(content_pack.repo_path):
        cli_result = CliRunner(mix_stderr=False).invoke(
            main, [FORMAT_CMD, '-i', target_path, '-v', '-y'], catch_exceptions=False
        )
        assert cli_result.exit_code == 0
        assert 'Setting fromVersion field' in cli_result.stdout
        assert 'Success' in cli_result.stdout
        assert f'======= Updating file {target_path}' in cli_result.stdout
    # format should have bumped the wrong fromVersion back to the fixture's valid value.
    with open(target_path) as module_file:
        assert json.load(module_file)['fromVersion'] == GENERIC_MODULE['fromVersion']
def test_format_generic_module_missing_from_version_key(mocker, repo):
    """
    Given
    - Invalid generic module - fromVersion field is missing
    When
    - Running format on it.
    Then
    - Ensure Format fixed the given generic module - fromVersion field was added and it's value is 6.5.0
    - Ensure success message is printed.
    """
    # Make format treat the file as living outside the content repo.
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    generic_module = GENERIC_MODULE.copy()
    # pop with a default removes the key when present and is a no-op otherwise,
    # unlike the previous `if generic_module['fromVersion']` guard which raised
    # KeyError whenever the fixture lacked the key (the exact case under test).
    generic_module.pop('fromVersion', None)
    pack.create_generic_module("generic-module", generic_module)
    generic_module_path = pack.generic_modules[0].path
    with ChangeCWD(pack.repo_path):
        runner = CliRunner(mix_stderr=False)
        result = runner.invoke(main, [FORMAT_CMD, '-i', generic_module_path, '-v', '-y'], catch_exceptions=False)
        assert 'Setting fromVersion field' in result.stdout
        assert 'Success' in result.stdout
        assert f'======= Updating file {generic_module_path}' in result.stdout
        assert result.exit_code == 0
    # check that sdk format did add a fromVersion key with '6.5.0' as a value:
    with open(generic_module_path) as f:
        updated_generic_module = json.load(f)
    assert updated_generic_module['fromVersion'] == GENERIC_MODULE['fromVersion']
def test_format_generic_definition_wrong_from_version(mocker, repo):
    """
    Given
    - Invalid generic definition - fromVersion field is below 6.5.0
    When
    - Running format on it.
    Then
    - Ensure Format fixed the invalid value of the given generic definition.
    - Ensure success message is printed.
    """
    # Make format treat the file as living outside the content repo.
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    content_pack = repo.create_pack('PackName')
    # Copy the fixture and plant an out-of-date fromVersion in one step.
    outdated_generic_definition = dict(GENERIC_DEFINITION, fromVersion='6.0.0')
    content_pack.create_generic_definition("generic-definition", outdated_generic_definition)
    target_path = content_pack.generic_definitions[0].path
    with ChangeCWD(content_pack.repo_path):
        cli_result = CliRunner(mix_stderr=False).invoke(
            main, [FORMAT_CMD, '-i', target_path, '-v', '-y'], catch_exceptions=False
        )
        assert cli_result.exit_code == 0
        assert 'Setting fromVersion field' in cli_result.stdout
        assert 'Success' in cli_result.stdout
        assert f'======= Updating file {target_path}' in cli_result.stdout
    # format should have bumped the wrong fromVersion back to the fixture's valid value.
    with open(target_path) as definition_file:
        assert json.load(definition_file)['fromVersion'] == GENERIC_DEFINITION['fromVersion']
def test_format_generic_definition_missing_from_version_key(mocker, repo):
    """
    Given
    - Invalid generic definition - fromVersion field is missing
    When
    - Running format on it.
    Then
    - Ensure Format fixed the given generic definition - fromVersion field was added and it's value is 6.5.0
    - Ensure success message is printed.
    """
    # Make format treat the file as living outside the content repo.
    mocker.patch.object(update_generic, 'is_file_from_content_repo', return_value=(False, ''))
    pack = repo.create_pack('PackName')
    generic_definition = GENERIC_DEFINITION.copy()
    # pop with a default removes the key when present and is a no-op otherwise,
    # unlike the previous `if generic_definition['fromVersion']` guard which raised
    # KeyError whenever the fixture lacked the key (the exact case under test).
    generic_definition.pop('fromVersion', None)
    pack.create_generic_definition("generic-definition", generic_definition)
    generic_definition_path = pack.generic_definitions[0].path
    with ChangeCWD(pack.repo_path):
        runner = CliRunner(mix_stderr=False)
        result = runner.invoke(main, [FORMAT_CMD, '-i', generic_definition_path, '-v', '-y'], catch_exceptions=False)
        assert 'Setting fromVersion field' in result.stdout
        assert 'Success' in result.stdout
        assert f'======= Updating file {generic_definition_path}' in result.stdout
        assert result.exit_code == 0
    # check that sdk format did add a fromVersion key with '6.5.0' as a value:
    with open(generic_definition_path) as f:
        updated_generic_definition = json.load(f)
    assert updated_generic_definition['fromVersion'] == GENERIC_DEFINITION['fromVersion']
class TestFormatWithoutAddTestsFlag:
    """Tests for the format command's test-configuration behavior with and
    without the `-at` (add tests) flag.

    Without `-at`, format configures YAML content with "No tests" automatically;
    with `-at`, it prompts the user instead. Test playbooks and layouts are
    never asked about or auto-configured.
    """

    def test_format_integrations_folder_with_add_tests(self, pack):
        """
        Given
        - An integration folder.
        When
        - Running format command on it with the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure the message asking whether to add tests is prompted.
        """
        runner = CliRunner()
        integration = pack.create_integration()
        integration.create_default_integration()
        integration.yml.update({'fromversion': '5.5.0'})
        integration_path = integration.yml.path
        # '-at' means format must ask before configuring "No tests".
        result = runner.invoke(main, [FORMAT_CMD, '-i', integration_path, '-at'])
        prompt = f'The file {integration_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests"?'
        message = f'Formatting {integration_path} with "No tests"'
        assert not result.exception
        assert prompt in result.output
        assert message not in result.output

    def test_format_integrations_folder(self, pack):
        """
        Given
        - An integration folder.
        When
        - Running format command on it without the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is added to the yaml file.
        - Ensure the message asking whether to add tests is not prompted.
        - Ensure a message about formatting the yaml file automatically is printed.
        """
        runner = CliRunner()
        integration = pack.create_integration()
        integration.create_default_integration()
        integration_path = integration.yml.path
        # No '-at' flag: format should auto-configure "No tests" without asking.
        result = runner.invoke(main, [FORMAT_CMD, '-i', integration_path], input='Y')
        prompt = f'The file {integration_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests"?'
        message = f'Formatting {integration_path} with "No tests"'
        assert not result.exception
        assert prompt not in result.output
        assert message in result.output

    def test_format_script_without_test_flag(self, pack):
        """
        Given
        - A script folder.
        When
        - Running format command on it without the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is added to the yaml file.
        - Ensure the message asking whether to add tests is not prompted.
        - Ensure a message about formatting the yaml file automatically is printed.
        """
        runner = CliRunner()
        script = pack.create_script()
        script.create_default_script()
        script.yml.update({'fromversion': '5.5.0'})
        script_path = script.yml.path
        # No '-at' flag: format should auto-configure "No tests" without asking.
        result = runner.invoke(main, [FORMAT_CMD, '-i', script_path])
        prompt = f'The file {script_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests"?'
        message = f'Formatting {script_path} with "No tests"'
        assert not result.exception
        assert prompt not in result.output
        assert message in result.output

    def test_format_playbooks_folder(self, pack):
        """
        Given
        - A playbooks folder.
        When
        - Running format command on it without the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is added to the yaml file.
        - Ensure the message asking whether to add tests is not prompted.
        - Ensure a message about formatting the yaml file automatically is printed.
        """
        runner = CliRunner()
        playbook = pack.create_playbook()
        playbook.create_default_playbook()
        playbook.yml.update({'fromversion': '5.5.0'})
        playbooks_path = playbook.yml.path
        playbook.yml.delete_key('tests')
        # input='N' would decline a prompt, but without '-at' no prompt appears
        # and "No tests" is configured automatically anyway.
        result = runner.invoke(main, [FORMAT_CMD, '-i', playbooks_path], input='N')
        prompt = f'The file {playbooks_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests"?'
        message = f'Formatting {playbooks_path} with "No tests"'
        assert not result.exception
        assert prompt not in result.output
        assert message in result.output
        assert playbook.yml.read_dict().get('tests')[0] == 'No tests (auto formatted)'

    def test_format_testplaybook_folder_without_add_tests_flag(self, pack):
        """
        Given
        - A TestPlaybook folder.
        When
        - Running format command on it without the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is NOT added to the yaml file.
        - Ensure NO message about formatting the yaml file automatically is printed.
        """
        runner = CliRunner()
        test_playbook = pack.create_test_playbook()
        test_playbook.create_default_test_playbook()
        test_playbook.yml.update({'fromversion': '5.5.0'})
        test_playbooks_path = test_playbook.yml.path
        test_playbook.yml.delete_key('tests')
        # Test playbooks are never configured with "No tests", flag or not.
        result = runner.invoke(main, [FORMAT_CMD, '-i', test_playbooks_path], input='N')
        prompt = f'The file {test_playbooks_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests"?'
        message = f'Formatting {test_playbooks_path} with "No tests"'
        assert not result.exception
        assert prompt not in result.output
        assert message not in result.output
        assert not test_playbook.yml.read_dict().get('tests')

    def test_format_test_playbook_folder_with_add_tests_flag(self, pack):
        """
        Given
        - A TestPlaybook folder.
        When
        - Running format command on it with the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is NOT added to the yaml file.
        - Ensure NO message about formatting the yaml file automatically is printed.
        """
        runner = CliRunner()
        test_playbook = pack.create_test_playbook()
        test_playbook.create_default_test_playbook()
        test_playbook.yml.update({'fromversion': '5.5.0'})
        test_playbooks_path = test_playbook.yml.path
        test_playbook.yml.delete_key('tests')
        # Even with '-at', test playbooks are not prompted about or modified.
        result = runner.invoke(main, [FORMAT_CMD, '-i', test_playbooks_path, '-at'], input='N')
        prompt = f'The file {test_playbooks_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests"?'
        message = f'Formatting {test_playbooks_path} with "No tests"'
        assert not result.exception
        assert prompt not in result.output
        assert message not in result.output
        assert not test_playbook.yml.read_dict().get('tests')

    def test_format_layouts_folder_without_add_tests_flag(self, repo):
        """
        Given
        - A Layouts folder.
        When
        - Running format command on it without the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is NOT added to the file.
        - Ensure NO message about formatting the file automatically is printed.
        - Ensure the format succeeds.
        """
        runner = CliRunner()
        pack = repo.create_pack('PackName')
        layout = pack.create_layoutcontainer(
            name='layout',
            content={
                'id': '8f503eb3-883d-4626-8a45-16f56995bd43',
                'name': 'IncidentLayout',
                'group': 'incident',
                'detailsV2': {"tabs": []}
            }
        )
        layouts_path = layout.path
        # Layouts are JSON content: no test configuration applies to them.
        result = runner.invoke(main, [FORMAT_CMD, '-i', layouts_path])
        prompt = f'The file {layouts_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests" '
        message = f'Formatting {layouts_path} with "No tests"'
        message1 = f'Format Status on file: {layouts_path} - Success'
        assert not result.exception
        assert prompt not in result.output
        assert message not in result.output
        assert message1 in result.output

    def test_format_layouts_folder_with_add_tests_flag(self, repo):
        """
        Given
        - A Layouts folder.
        When
        - Running format command on it with the `-at` flag.
        Then
        - Ensure no exception is raised.
        - Ensure 'No tests' is NOT added to the file.
        - Ensure NO message about formatting the file automatically is printed.
        - Ensure the format succeeds.
        """
        runner = CliRunner()
        pack = repo.create_pack('PackName')
        layout = pack.create_layoutcontainer(
            name='layout',
            content={
                'id': '8f503eb3-883d-4626-8a45-16f56995bd43',
                'name': 'IncidentLayout',
                'group': 'incident',
                'detailsV2': {"tabs": []}
            }
        )
        layouts_path = layout.path
        # Even with '-at', layouts (JSON content) get no test configuration.
        result = runner.invoke(main, [FORMAT_CMD, '-i', layouts_path, '-at'])
        prompt = f'The file {layouts_path} has no test playbooks configured.' \
                 f' Do you want to configure it with "No tests" '
        message = f'Formatting {layouts_path} with "No tests"'
        message1 = f'Format Status on file: {layouts_path} - Success'
        assert not result.exception
        assert prompt not in result.output
        assert message not in result.output
        assert message1 in result.output
| 40.002963
| 125
| 0.657822
| 6,653
| 54,004
| 5.133774
| 0.056666
| 0.016162
| 0.028986
| 0.026409
| 0.845879
| 0.811653
| 0.78782
| 0.769492
| 0.76229
| 0.736964
| 0
| 0.008091
| 0.247074
| 54,004
| 1,349
| 126
| 40.032617
| 0.831903
| 0.203429
| 0
| 0.629932
| 0
| 0.002721
| 0.172554
| 0.040187
| 0
| 0
| 0
| 0
| 0.212245
| 1
| 0.054422
| false
| 0
| 0.028571
| 0
| 0.084354
| 0.001361
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
46a375734f84fecd9cc7680ff045bf144de72a2f
| 26,964
|
py
|
Python
|
cse2rad.py
|
dataaa/UDFA
|
20f700efe0fe8ce969754bae1d7c09b1e4ff6fae
|
[
"MIT"
] | null | null | null |
cse2rad.py
|
dataaa/UDFA
|
20f700efe0fe8ce969754bae1d7c09b1e4ff6fae
|
[
"MIT"
] | null | null | null |
cse2rad.py
|
dataaa/UDFA
|
20f700efe0fe8ce969754bae1d7c09b1e4ff6fae
|
[
"MIT"
] | null | null | null |
from glob import glob
from os.path import join
import re
################################################################################
##### cse2rad.py, v1.0 #####
##### Originally written in 2015 by Paul M. Woods. #####
##### This script automatically generates input files for RADMC3D, from #####
##### the public circumstellar envelope code available at http://udfa.net #####
##### This code is distributed under the terms of the MIT License (MIT) #####
##### #
##### Copyright (c) 2015 Dr. Paul M. Woods #
##### #
#Permission is hereby granted, free of charge, to any person obtaining a copy #
#of this software and associated documentation files (the "Software"), to deal #
#in the Software without restriction, including without limitation the rights #
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
#copies of the Software, and to permit persons to whom the Software is #
#furnished to do so, subject to the following conditions: #
# #
#The above copyright notice and this permission notice shall be included in all#
#copies or substantial portions of the Software. #
# #
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
#SOFTWARE. #
##### #####
################################################################################
# ---- cse2rad user configuration ----
# NOTE(review): this script uses raw_input(), so it targets Python 2 — confirm
# before running under Python 3.

# Turn on (1) or off (0) user interaction; when off, the hard-coded defaults
# below are used as-is.
interaction = 0
# Molecule of interest or 'ALL' for all molecules for which there is collisional data
specString = 'SiS'
# Format of collision partners (names as they appear in the collisional data files)
elecString = 'e-'
h2String = 'H2'
hString = 'H'
heString = 'He'
hpString = 'H+'
tempString = 'TEMP.'
# Envelope expansion velocity, in km/s (see the interactive prompt below).
vexp = 14.5
# Doppler broadening parameter; an alternative expression tied to vexp is kept
# commented out for reference.
#db = 0.6*2.*vexp
db = 1.0
# Absolute paths to the RADMC3D directory and to the collisional data files;
# both are expected to end with a trailing slash (see the prompts below).
radmcPath = '/Users/abc/Work/radmc-3d/'
molecPath = '/Users/abc/Work/RATRAN/Ratran/molec/'
# Input file from CSE code (must contain number densities — see prompt text).
dataFile = open('csnum_rate13.out','rb')
# Get input parameters directly from user, overriding the defaults above.
if interaction ==1:
    specString = raw_input("Please enter species name, exactly as in Rate12: ")
    vexp = raw_input("Give the expansion velocity of the envelope, in km/s: ")
    vexp = float(vexp)
    radmcPath = raw_input("Provide the absolute path to the RADMC3D directory, including a trailing slash: ")
    molecPath = raw_input("Provide the absolute path to the collisional data files, including a trailing slash: ")
    dataFilename = raw_input("Please enter the CSE model datafile you would like to use. N.B. this must be a file containing number densities: ")
    dataFile = open(dataFilename,'rb')
# A dictionary to convert LAMDA filenames to UDFA species names;
# second element of the tuple is the transitions within ALMA windows. Use getTrans.py to generate these.
fileToMol = {
'13co.dat': ('','1,2,3,4,6,8'),
'29sio.dat': ('','2,3,5,6,7,8,9,10,11,15,16,19,20,21,22'),
'a-ch3oh.dat': ('CH3OH','49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,476,477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644,645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691'),
'c+.dat': ('C+',''),
'c+@uv.dat': ('','1,2'),
'c17o.dat': ('','1,2,3,4,6,8'),
'c18o.dat': ('','1,2,3,4,6,8'),
'c2h_h2_e.dat': ('C2H','1,2,3,4,5,6,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,95,96,97,98,99,100,101,102,103,104,105'),
'catom.dat': ('C','1,2'),
'ch3cn.dat': ('CH3CN',''),
'cn.dat': ('CN','1,2,3,4,5,6,7,8,9,10,11,15,16,17,18,19,20,21,22,23'),
'co.dat': ('CO','1,2,3,4,6,7,8'),
'co@neufeld.dat': ('','1,2,3,4,6,7,8'),
'co@old.dat': ('','1,2,3,4,6,7,8'),
'cs@lique.dat': ('CS','2,3,5,6,7,8,9,10,13,14,17,18,19'),
'dco+@xpol.dat': ('','2,3,4,5,6,9,11,12,13'),
'e-ch3oh.dat': ('CH3OH','79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,476,477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562,563,564,565,566,567,568,569,570,571,572,715,716,717,718,719,720,721,722,723,724,725,726,727,728,729,730,731,732,733,734,735,736,737,738,739,740,741,742,743,744,745,746,747,748,749,750,751,752,753,754,755,756,757,758,759,760,761,762,763,764,765,766,767,768,769,770,771,772,773,774,775,776,777,778,779,780,781,782,783,784,785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,
806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,923,924,925,926,927,928,929,930,931,932,933,934,935,936,937,938,939,940,941,942,943,944,945,946,947,948,949,950,951,952,953,954,955,956,957,958,959,960,961,962,963,964,965,966,967,968,969,970,971,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023,1024,1025,1026,1027,1028,1029,1030,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1057,1058,1059'),
'h13cn@xpol.dat': ('','1,3,4,5,7,8,10,11'),
'h13co+@xpol.dat': ('','1,3,4,5,7,8,10'),
'hc15n@xpol.dat': ('','1,3,4,5,7,8,10,11'),
'hc17o+@xpol.dat': ('','1,3,4,5,7,8,10'),
'hc18o+@xpol.dat': ('','1,3,4,5,8,10,11'),
'hc3n.dat': ('HC3N','10,11,12,14,15,16,17'),
'hcl.dat': ('HCl','1'),
'hcl@hfs.dat': ('','1,2,3'),
'hcn.dat': ('HCN','1,3,4,5,7,8,9,10'),
'hcn@hfs.dat': ('','1,2,3,7,8,9,11,12,13,14'),
'hcn@xpol.dat': ('','1,3,4,5,7,8,9,10'),
'hco+.dat': ('HCO+','1,3,4,5,7,8,9,10'),
'hco+@xpol.dat': ('','1,3,4,5,7,8,9,10'),
'hcs+@xpol.dat': ('HCS+','2,3,5,6,7,8,10,11,15,16,19,20,21,22'),
'hdo.dat': ('','1,2,5,6,7,9,20,25,26,44,45,52,63,72,97,104,105,113'),
'hf.dat': ('HF',''),
'hnc.dat': ('HNC','1,3,4,5,7,9,10'),
'hnco.dat': ('HNCO','34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,86,87,88,93,94,95,96,97,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123'),
'n2h+@xpol.dat': ('N2H+','1,3,4,5,7,9,10'),
'n2h+_hfs.dat': ('','1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129'),
'no.dat': ('NO','253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560,613,614,615,616,617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644,645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694'),
'o-c3h2.dat': ('C3H2','20,21,22,23,24,25,26,27,28,29,30,38,39,40,41,42,43,44,45,46,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,134,135,136,137,138,139,140,141,142,143,148,149,150,151,152,153,154'),
'o-h3o+.dat': ('H3O+','3'),
'o-nh2d.dat': ('',''),
'o-nh3.dat': ('NH3',''),
'o-sic2.dat': ('SiC2','17,18,19,20,25,26,27,28,29,30,31,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78'),
'o2.dat': ('O2','33,34,35,36,38'),
'oatom.dat': ('O',''),
'ocs@xpol.dat': ('OCS','7,8,9,11,12,13,18,19,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,38,39,40,41,50,51,52,53,54,55,56,57,58,59,65,66,67,68,69,70,71,72,73,74,75,76,77,78'),
'oh+.dat': ('OH+',''),
'oh.dat': ('OH',''),
'oh2co-h2.dat': ('H2CO','2,4,5,7,8,10,11,14,15,16,17,18,28,33,39,44,48,49,50,51,56,60,70,82,83,84,89,93,94,95'),
'oh2cs.dat': ('H2CS','4,6,7,9,16,18,19,20,21,23,26,28,31,32,33,35,36,38,40,43,45,48,50,51,54'),
'oh2o@daniel.dat': ('H2O','17,32,57,90,144'),
'oh2o@rovib.dat': ('','17,38,67,112,148,218,235,254,278,410,474,527,550,635,834,850,886,1066,1164,1206,1281,1324,1458,1575,1738,1761,1846,1904,2064,2262,2369,2477,2519,2566,2686,2707,2758,2836,2880,2957,3027,3053,3212,3214,3294,3409,3440,3514,3604,3643,3749,3762,3876,4220,4347,4415,4966,4998,5031,5082,5083,5183,5224,5341,5342,5359,5401,5443,5615,5822,6193,6396,6435,6436,6481,6586,6587,6699,6857,6858,6908,7188,7317,7503'),
'oh2s.dat': ('H2S','7,9,14,17,21,35,37,42,63,66,89,95,98,103,118'),
'oh@hfs.dat': ('','51,60'),
'p-c3h2.dat': ('','19,20,21,22,23,24,25,26,27,34,35,36,37,38,39,40,41,42,43,44,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,134,135,136,137,138,139,140,148,149,150,151,152'),
'p-h3o+.dat': ('','1,5,12'),
'p-nh2d.dat': ('',''),
'p-nh3.dat': ('',''),
'ph2co-h2.dat': ('','2,3,4,5,10,13,14,16,17,20,21,24,27,30,33,34,40,43,47,50,51,54,55,67,70,75,79,84,90,91,94,95'),
'ph2cs.dat': ('','3,4,6,7,9,10,11,18,19,21,22,24,25,27,28,30,31,34,37,40,46,49,52,53'),
'ph2o@daniel.dat': ('','17,29,34,58,70,94,113'),
'ph2o@rovib.dat': ('','19,21,38,45,71,115,118,192,227,231,282,350,449,502,545,558,760,899,928,990,1060,1208,1471,1688,1705,1773,1971,1989,1990,2121,2186,2258,2386,2455,2509,2526,2551,2653,2668,2755,2756,2775,2802,2860,3120,3121,3196,3382,3453,3508,3532,3587,3752,3964,3965,4189,4224,4349,4655,4775,4805,4838,4893,5045,5257,5352,5458,5684,5901,6162,6244,6288,6289,6427,6628,6710,6848,7176,7223,7295'),
'ph2s.dat': ('','1,2,4,5,7,19,24,26,27,30,42,45,60,65,67,93,102,115'),
'sio.dat': ('SiO','2,3,5,6,7,8,9,10,11,14,15,16,19,20,21'),
'sis.dat': ('SiS','5,6,7,8,12,13,14,15,16,17,18,19,20,22,23,24,25,26,27,34,35,36,37,38,39'),
'so2.dat': ('SO2','48,49,50,51,52,53,54,55,56,57,58,59,60,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562'),
'so@lique.dat': ('SO','3,4,5,6,7,8,10,11,12,14,15,16,17,18,21,22,24,26,27,28,34,35,36,37,39,41,42,44,47,48,49,50,51,52,55,56,57,58,59,62,63,65,66,68,70,71,72,73,75,77,78,82,84,85,87,89,90,92,93,94,95,96,98,99,101,102,104,107,108,110,111,113,117,122,125,129,132,137,138,142,144,147,149,150,154,155,158,159,161,162,166,167,170,171,173,182,185,190,192,194,197,198,202,203,204,207,209,210,213,214,215,218,220,221,224,225,229,231,232,236,237,239,241,246,250,255,259,264')
}
# Set up list to hold the required species
requiredSpec = []
if specString == 'ALL':
    # Collect every LAMDA .dat file in molecPath and map each filename to
    # its UDFA species name via the fileToMol table. Entries with an empty
    # name are filtered out before the main loop.
    availableSpec = []
    molFilePaths = glob(join(molecPath,'*.dat'))
    for i, x in enumerate(molFilePaths):
        # keep only the basename (everything after the final '/')
        availableSpec.append(re.search(r'([^/]+$)',x).group(1))
    for x in availableSpec:
        requiredSpec.append(fileToMol[x][0])
else:
    requiredSpec.append(specString)
# lines.inp for RADMC-3D: format number '2', then the number of species
linesFile = open(radmcPath+'lines.inp','w')
linesFile.write('2\n'+str(len(requiredSpec))+'\n')
# Generate a list of non-empty entries (generator object)
gen = (x for x in requiredSpec if x)
# Main loop, one pass per species: scan the CSE output for abundance
# columns, write numberdens_<species>.inp for RADMC-3D, then read the
# species' LAMDA file and append its collision-partner data to lines.inp.
for x in gen:
    specString = x
    specLoc = []
    specAbund = []
    # Collision partners
    elecLoc = []
    elecAbund = []
    h2Loc = []
    h2Abund = []
    hLoc = []
    hAbund = []
    heLoc = []
    heAbund = []
    hpLoc = []
    hpAbund = []
    collParts = []
    # Physical parameters
    tempLoc = None
    radius = []
    density = []
    temperature = []
    # Molecular output file (for RADMC-3D)
    outFile = open(radmcPath+'numberdens_'+specString+'.inp','w')
    # Get the location of key lines in the data file.
    # Pattern used for every tracked species below: the first line that
    # contains the space-padded name is treated as a table header; the
    # column index is read from it, then one value per line is collected.
    # The header line itself contributes the bare name as element 0 of the
    # abundance list — which is why every output loop later starts at
    # index 1. Collection stops once a second header line appears
    # (len(...Loc) != 1).
    for num, line in enumerate(dataFile, 0):
        # Find our species of interest, and store its abundance
        if ' '+specString+' ' in line:
            specLoc.append(num)
        if len(specLoc) == 1:
            if num == specLoc[0]:
                specIndex = line.split().index(specString)
            specAbund.append(line.split()[specIndex])
        # Find the electron abundance, store it
        if ' '+elecString+' ' in line:
            elecLoc.append(num)
        if len(elecLoc) == 1:
            if num == elecLoc[0]:
                elecIndex = line.split().index(elecString)
            elecAbund.append(line.split()[elecIndex])
        # Find the H2 number density, store it
        if ' '+h2String+' ' in line:
            h2Loc.append(num)
        if len(h2Loc) == 1:
            if num == h2Loc[0]:
                h2Index = line.split().index(h2String)
            h2Abund.append(line.split()[h2Index])
        # Find the H abundance, store it
        if ' '+hString+' ' in line:
            hLoc.append(num)
        if len(hLoc) == 1:
            if num == hLoc[0]:
                hIndex = line.split().index(hString)
            hAbund.append(line.split()[hIndex])
        # Find the He abundance, store it
        if ' '+heString+' ' in line:
            heLoc.append(num)
        if len(heLoc) == 1:
            if num == heLoc[0]:
                heIndex = line.split().index(heString)
            heAbund.append(line.split()[heIndex])
        # Find the H+ abundance, store it
        if ' '+hpString+' ' in line:
            hpLoc.append(num)
        if len(hpLoc) == 1:
            if num == hpLoc[0]:
                hpIndex = line.split().index(hpString)
            hpAbund.append(line.split()[hpIndex])
        # Assumption: radius, density and temperature are the first three columns in the row
        # Find the line of the file with the physical parameters
        if ' '+tempString+' ' in line:
            tempLoc = num
        if tempLoc is not None:
            # collect as many physical-parameter rows as abundance entries
            if (num - tempLoc) <= len(specAbund) - 1:
                radius.append(line.split()[0])
                density.append(line.split()[1])
                temperature.append(line.split()[2])
    # Rewind so the next species' pass rescans the file from the start
    dataFile.seek(0)
    if len(specAbund) == 0:
        print "Species",specString,"not found."
    if len(elecAbund) == 0:
        print "Electrons",elecString,"not found."
    if len(h2Abund) == 0:
        print "Molecular hydrogen (",h2String,") not found."
    if len(radius) == 0:
        print "Temperature (",tempString,") not found."
    # Print the output .inp file in RADMC-3D format
    # (index 0 is the header-row sentinel, hence range starts at 1)
    outFile.write('1\n'+str(len(radius)-1)+' \n')
    for i in range(1,len(radius)):
        outFile.write(' {0:13.7E} \n'.format(float(specAbund[i])))
    outFile.close()
    # Get name of LAMDA molecular data file
    for k, v in fileToMol.iteritems():
        if v[0] == specString:
            molFile = k
            break
    # Species then format. Note no EOL.
    linesFile.write(specString+' leiden 0 0 ')
    # Open LAMDA molecular data file, find all collision partners
    with open(molecPath+molFile,'rb') as molecFile:
        for line in molecFile:
            if ('NUMBER OF COLL' in line.upper()) and ('PARTNERS' in line.upper()):
                # Strip carriage return
                nCollPart = molecFile.next()[:-1]
            # Unfortunately LAMDA files are not standardised, and have different wording. The last 'Partner' is solely for the hf.dat file.
            if ('COLLISIONS BETWEEN' in line.upper()) or (len(re.findall(r'(COLLISION PARTNER\s+$)',line.upper())) > 0) or ('Partner' in line): #('COLLISION PARTNER' in line.upper()):
                # Get number at start of line
                collParts.append(molecFile.next().split()[0])
    linesFile.write(nCollPart+'\n')
    # For each LAMDA partner code, name the partner in lines.inp and write
    # its radial number-density file for RADMC-3D.
    for i in range(0,len(collParts)):
        if collParts[i] == '1':
            linesFile.write('H2\n')
            h2densFile = open(radmcPath+'numberdens_h2.inp','w')
            h2densFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                h2densFile.write(' {0:12.2f} \n'.format(float(h2Abund[j])))
            h2densFile.close()
        if collParts[i] == '2':
            linesFile.write('p-H2\n')
            ph2densFile = open(radmcPath+'numberdens_p-h2.inp','w')
            ph2densFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                # 25% of total H2 (presumably a 3:1 ortho:para split — confirm)
                ph2densFile.write(' {0:12.2f} \n'.format(0.25*float(h2Abund[j])))
            ph2densFile.close()
        if collParts[i] == '3':
            linesFile.write('o-H2\n')
            oh2densFile = open(radmcPath+'numberdens_o-h2.inp','w')
            oh2densFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                # remaining 75% of total H2
                oh2densFile.write(' {0:12.2f} \n'.format(0.75*float(h2Abund[j])))
            oh2densFile.close()
        if collParts[i] == '4':
            linesFile.write('e\n')
            edensFile = open(radmcPath+'numberdens_e.inp','w')
            edensFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                edensFile.write(' {0:12.2f} \n'.format(float(elecAbund[j])))
            edensFile.close()
        if collParts[i] == '5':
            linesFile.write('H\n')
            hdensFile = open(radmcPath+'numberdens_h.inp','w')
            hdensFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                hdensFile.write(' {0:12.2f} \n'.format(float(hAbund[j])))
            hdensFile.close()
        if collParts[i] == '6':
            linesFile.write('He\n')
            hedensFile = open(radmcPath+'numberdens_he.inp','w')
            hedensFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                hedensFile.write(' {0:12.2f} \n'.format(float(heAbund[j])))
            hedensFile.close()
        if collParts[i] == '7':
            linesFile.write('H+\n')
            hpdensFile = open(radmcPath+'numberdens_h+.inp','w')
            hpdensFile.write('1\n'+str(len(radius)-1)+'\n')
            for j in range(1,len(radius)):
                hpdensFile.write(' {0:12.2f} \n'.format(float(hpAbund[j])))
            hpdensFile.close()
linesFile.close()
# Huge number of input files for RADMC-3D:
# amr_grid.inp, dust_density.inp, dust_temperature.dat, dustopac.inp, dustkappa_xxx.inp, gas_temperature.inp,
# gas_velocity.inp, lines.inp, (microturbulence.inp), numberdens_xx.inp, numberdens_collpart1.inp, radmc3d.inp,
# wavelength_micron.inp, (external_source.inp), molecule_co.inp, etc.
# Write the 1-D spherical grid: len(radius)-1 cells require len(radius)
# cell walls. Interior walls sit midway between successive radial grid
# points; the inner and outer walls are placed ad hoc (0.5e15 cm, and
# twice the outermost radius). radius[0] is the header-row sentinel and
# is skipped, as elsewhere in this script.
amrFile = open(radmcPath+'amr_grid.inp','w')
amrFile.write('1\n0\n100\n0\n1 1 1\n'+str(len(radius)-1)+' 1 1\n')
amrFile.write(' {0:13.7e} \n'.format(1.0e15/2.))
for i in range(1,len(radius)-1):
    # BUG FIX: the original wrote float(radius[i])+float(radius[i+1])/2.
    # which, by operator precedence, evaluates to r[i] + r[i+1]/2 rather
    # than the midpoint (r[i] + r[i+1])/2 between neighbouring points.
    amrFile.write(' {0:13.7e} \n'.format((float(radius[i])+float(radius[i+1]))/2.))
amrFile.write(' {0:13.7e} \n'.format(float(radius[len(radius)-1])*2.))
# Theta and phi extents (0..1.5 rad in each coordinate)
amrFile.write(' {0:12.2f} \n'.format(0))
amrFile.write(' {0:12.2f} \n'.format(1.5))
amrFile.write(' {0:12.2f} \n'.format(0))
amrFile.write(' {0:12.2f} \n'.format(1.5))
amrFile.close()
# dustopac.inp: declare a single thermal dust species whose opacity table
# is read from dustkappa_silicate.inp.
dopacLines = (
    '2 Format number of this file\n',
    '1 Nr of dust species\n',
    '============================================================================\n',
    '1 Way in which this dust species is read\n',
    '0 0=Thermal grain\n',
    'silicate Extension of name of dustkappa_***.inp file\n',
    '----------------------------------------------------------------------------\n',
)
with open(radmcPath+'dustopac.inp','w') as dopacFile:
    dopacFile.write(''.join(dopacLines))
# wavelength_micron.inp: build a piecewise-logarithmic wavelength grid
# (in microns) from three segments spanning the four anchor wavelengths.
wavelFile = open(radmcPath+'wavelength_micron.inp','w')
lambda1 = 0.1e0
lambda2 = 7.0e0
lambda3 = 25.e0
lambda4 = 1.0e4
n12 = 20
n23 = 100
n34 = 30
# The first two segments exclude their upper endpoint; the last divides
# by (n34 - 1) so that lambda4 itself is the final grid point.
lambdaAll = [lambda1 * (lambda2 / lambda1) ** (k / (1.e0 * n12)) for k in range(n12)]
lambdaAll += [lambda2 * (lambda3 / lambda2) ** (k / (1.e0 * n23)) for k in range(n23)]
lambdaAll += [lambda3 * (lambda4 / lambda3) ** (k / (1.e0 * (n34 - 1.e0))) for k in range(n34)]
nlam = len(lambdaAll)
wavelFile.write(str(nlam) + '\n')
for wl in lambdaAll:
    wavelFile.write(' {0:13.7e} \n'.format(wl))
wavelFile.close()
# stars.inp: one star at the origin, with the spectrum given on the same
# wavelength grid as wavelength_micron.inp.
starsFile = open(radmcPath+'stars.inp','w')
starsFile.write('2\n1 '+str(nlam)+'\n')
# The following are rstar,mstar,pstar[0],pstar[1],pstar[2]
# (values presumably in cgs: 1e13 cm radius, 6e33 g mass — confirm)
starsFile.write(' {0:13.7e} {1:13.7e} {2:12.6f} {3:12.6f} {4:12.6f} \n'.format(1.0e13,6.0e33,0.0,0.0,0.0))
for k in range(nlam):
    starsFile.write(' {0:13.7e} \n'.format(lambdaAll[k]))
# For blackbody, enter (-)temperature here. Otherwise provide wavelengths and fluxes.
starsFile.write(' -3000.0000\n')
starsFile.close()
# Minimal radmc3d.inp: set lines_mode = 3 and model the star as a finite
# sphere (istar_sphere = 1) rather than a point source.
with open(radmcPath+'radmc3d.inp','w') as radmcFile:
    radmcFile.write('lines_mode = 3\n'
                    'istar_sphere = 1\n')
# Radial profile files, one value per grid cell (index 0 of each list is
# the header-row sentinel, hence the loops start at 1). The radius/
# density/temperature arrays left over from the last species pass are
# used here.
ddensFile = open(radmcPath+'dust_density.inp','w')
ddensFile.write('1\n'+str(len(radius)-1)+'\n1\n')
dtempFile = open(radmcPath+'dust_temperature.dat','w')
dtempFile.write('1\n'+str(len(radius)-1)+'\n1\n')
gtempFile = open(radmcPath+'gas_temperature.inp','w')
gtempFile.write('1\n'+str(len(radius)-1)+'\n')
gveloFile = open(radmcPath+'gas_velocity.inp','w')
gveloFile.write('1\n'+str(len(radius)-1)+'\n')
# NOTE(review): this re-opens (and so overwrites) numberdens_h2.inp,
# which the collision-partner loop above may already have written with
# the H2 abundance column; here it is filled with density[] instead —
# confirm which content is intended.
gdensFile = open(radmcPath+'numberdens_h2.inp','w')
gdensFile.write('1\n'+str(len(radius)-1)+'\n')
turbuFile = open(radmcPath+'microturbulence.inp','w')
turbuFile.write('1\n'+str(len(radius)-1)+'\n')
for i in range(1,len(radius)):
    # 1.5E-12 is the standard fractional abundance of grains from the UDfA CSE model, see csmain.f
    ddensFile.write(' {0:13.7e} \n'.format(float(density[i])*1.5E-12*5.E-12))
    dtempFile.write(' {0:12.6f} \n'.format(float(temperature[i])))
    gtempFile.write(' {0:12.6f} \n'.format(float(temperature[i])))
    # constant radial outflow at vexp (km/s converted to cm/s)
    gveloFile.write(' {0:12.2f} {1:12.2f} {2:12.2f} \n'.format(vexp*1.e5,0.0,0.0))
    gdensFile.write(' {0:12.2f} \n'.format(float(density[i])))
    # constant microturbulent width db (km/s converted to cm/s)
    turbuFile.write(' {0:12.2f} \n'.format(db*1.e5))
ddensFile.close()
dtempFile.close()
gtempFile.close()
gveloFile.close()
gdensFile.close()
turbuFile.close()
dataFile.close()
print "DONE!"
| 63.895735
| 2,694
| 0.652425
| 5,254
| 26,964
| 3.341073
| 0.289113
| 0.003304
| 0.010254
| 0.006836
| 0.435513
| 0.42093
| 0.406859
| 0.383104
| 0.366982
| 0.35086
| 0
| 0.361817
| 0.108552
| 26,964
| 421
| 2,695
| 64.047506
| 0.368474
| 0.145231
| 0
| 0.046296
| 0
| 0.092593
| 0.619887
| 0.489973
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.009259
| null | null | 0.015432
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3b2b632a7a3533ea8e7f11d810aeca42cc5f36b
| 9,199
|
py
|
Python
|
cumtd/cumtd_client.py
|
rthotakura97/cumtd-python
|
5073d79c0e2429457be7ff386bde2a25194e3c08
|
[
"MIT"
] | null | null | null |
cumtd/cumtd_client.py
|
rthotakura97/cumtd-python
|
5073d79c0e2429457be7ff386bde2a25194e3c08
|
[
"MIT"
] | null | null | null |
cumtd/cumtd_client.py
|
rthotakura97/cumtd-python
|
5073d79c0e2429457be7ff386bde2a25194e3c08
|
[
"MIT"
] | null | null | null |
import requests
import json
class ApiClient:
def __init__(self, api_key):
self.base_url = 'https://developer.cumtd.com/api/v2.2/json/'
self.api_key = api_key
self.api_key_url = '?key=' + self.api_key
def get_calendar_dates_by_date(self, date):
data = requests.get(self.base_url+'getcalendardatesbydate'+self.api_key_url+'&date='+date)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_calendar_dates_by_service(self, service_id):
data = requests.get(self.base_url+'getcalendardatesbyservice'+self.api_key_url+"&service_id="+service_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_departures_by_stop(self, stop_id):
data = requests.get(self.base_url+'getdeparturesbystop'+self.api_key_url+'&stop_id='+stop_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_reroutes(self):
data = requests.get(self.base_url+'getreroutes'+self.api_key_url)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_reroutes_by_route(self, route_id):
data = requests.get(self.base_url+'getreroutesbyroute'+self.api_key_url+'&route_id='+route_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_route(self, route_id):
data = requests.get(self.base_url+'getroute'+self.api_key_url+'&route_id='+route_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_routes(self):
data = requests.get(self.base_url+'getroutes'+self.api_key_url)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_routes_by_stop(self, stop_id):
data = requests.get(self.base_url+'getroutesbystop'+self.api_key_url+'&stop_id='+stop_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_shape(self, shape_id):
data = requests.get(self.base_url+'getshape'+self.api_key_url+'&shape_id='+shape_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_shape_between_stops(self, begin_stop_id, end_stop_id, shape_id):
data = requests.get(self.base_url+'getshapebetweenstops'+self.api_key_url+'&begin_stop_id='+begin_stop_id+'&end_stop_id='+end_stop_id+'&shape_id='+shape_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_stop(self, stop_id):
data = requests.get(self.base_url+'getstop'+self.api_key_url+'&stop_id='+stop_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_stops(self):
data = requests.get(self.base_url+'getstops'+self.api_key_url)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_stops_by_lat_lon(self, lat, lon, count):
data = requests.get(self.base_url+'getstopsbylatlon'+self.api_key_url+'&lat='+lat+'&lon='+lon+'&count='+count)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_stops_by_search(self, query):
data = requests.get(self.base_url+'getstopsbysearch'+self.api_key_url+'&query='+query)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_stop_times_by_trip(self, trip_id):
data = requests.get(self.base_url+'getstoptimesbytrip'+self.api_key_url+'&trip_id='+trip_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_stop_times_by_stop(self, stop_id):
data = requests.get(self.base_url+'getstoptimesbystop'+self.api_key_url+'&stop_id='+stop_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_planned_trips_by_lat_lon(self, origin_lat, origin_lon, destination_lat, destination_lon):
data = requests.get(self.base_url+'getplannedtripsbylatlon'+self.api_key_url+'&origin_lat='+origin_lat+'&origin_lon='+origin_lon+'&destination_lat='+destination_lat+"&destination_lon="+destination_lon)
if(data.ok):
jsonData = json.loads(data.content)
print(jsonData)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_planned_trips_by_stops(self, origin_stop_id, destination_stop_id):
data = requests.get(self.base_url+'getplannedtripsbystops'+self.api_key_url+'&origin_stop_id='+origin_stop_id+'&destination_stop_id='+destination_stop_id)
if(data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_trip(self, trip_id):
data = requests.get(self.base_url+'gettrip'+self.api_key_url+'&trip_id='+trip_id)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_trips_by_block(self, block_id):
data = requests.get(self.base_url+'gettripsbyblock'+self.api_key_url+'&block_id='+block_id)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_trips_by_route(self, route_id):
data = requests.get(self.base_url+'gettripsbyroute'+self.api_key_url+'&route_id='+route_id)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_vehicle(self, vehicle_id):
data = requests.get(self.base_url+'getvehicle'+self.api_key_url+'&vehicle_id='+vehicle_id)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_vehicles(self):
data = requests.get(self.base_url+'getvehicles'+self.api_key_url)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_vehicles_by_route(self, route_id):
data = requests.get(self.base_url+'getvehiclesbyroute'+self.api_key_url+'&route_id='+route_id)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_news(self):
data = requests.get(self.base_url+'getnews'+self.api_key_url)
if (data.ok):
jsonData = json.loads(data.content)
return jsonData
else:
print("Unsuccesful: " + str(data.status_code))
return None
def get_api_usage(self):
    """Fetch API-usage statistics; return decoded JSON, or None on HTTP error."""
    data = requests.get(self.base_url + 'getapiusage' + self.api_key_url)
    if data.ok:
        return json.loads(data.content)
    # Surface the failing status code (typo fixed: was "Unsuccesful").
    print("Unsuccessful: " + str(data.status_code))
    return None
| 33.819853
| 209
| 0.602239
| 1,125
| 9,199
| 4.692444
| 0.080889
| 0.035234
| 0.056829
| 0.06649
| 0.818526
| 0.789543
| 0.756393
| 0.700701
| 0.675317
| 0.675317
| 0
| 0.000303
| 0.282639
| 9,199
| 271
| 210
| 33.944649
| 0.799667
| 0
| 0
| 0.722222
| 0
| 0
| 0.115569
| 0.012285
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.009259
| 0
| 0.37963
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
31a2ba43e3c4076eaf6bf797fcbd5e05c8f24053
| 207
|
py
|
Python
|
noteapp/members/urls.py
|
Sergiogd112/notesapp
|
63cc35f085c07ba411b4875cc10b689100a25c4e
|
[
"MIT"
] | null | null | null |
noteapp/members/urls.py
|
Sergiogd112/notesapp
|
63cc35f085c07ba411b4875cc10b689100a25c4e
|
[
"MIT"
] | null | null | null |
noteapp/members/urls.py
|
Sergiogd112/notesapp
|
63cc35f085c07ba411b4875cc10b689100a25c4e
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import *

# URL routes for the members app.
# NOTE(review): the 'login' route reuses UserRegistrationView — presumably a
# copy-paste slip; confirm whether a dedicated login view exists in .views.
urlpatterns = [
    path('register/', UserRegistrationView.as_view(),name='register'),
    path('login/', UserRegistrationView.as_view(),name='login'),
]
| 20.7
| 70
| 0.705314
| 23
| 207
| 6.26087
| 0.565217
| 0.305556
| 0.361111
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 207
| 9
| 71
| 23
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.135266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
732bfcabd7dbd2f7b1d8b1628890f7aba728bfb7
| 9,369
|
py
|
Python
|
tests/apps/slack_integration/test_slash_command.py
|
alysivji/github-adapter
|
5e3543f41f189fbe4a50d64e3d6734dc765579b4
|
[
"MIT"
] | 55
|
2019-05-05T01:20:58.000Z
|
2022-01-10T18:03:05.000Z
|
tests/apps/slack_integration/test_slash_command.py
|
alysivji/github-adapter
|
5e3543f41f189fbe4a50d64e3d6734dc765579b4
|
[
"MIT"
] | 222
|
2019-05-03T16:31:26.000Z
|
2021-08-28T23:49:03.000Z
|
tests/apps/slack_integration/test_slash_command.py
|
busy-beaver-dev/busy-beaver
|
5e3543f41f189fbe4a50d64e3d6734dc765579b4
|
[
"MIT"
] | 19
|
2019-04-27T19:49:32.000Z
|
2020-06-30T19:52:09.000Z
|
import pytest
from busy_beaver.apps.slack_integration.slash_command import (
command_not_found,
disconnect_github,
display_help_text,
link_github,
next_event,
relink_github,
upcoming_events,
)
from busy_beaver.models import GitHubSummaryUser, SlackUser
from tests._utilities import FakeSlackClient
pytest_plugins = ("tests._utilities.fixtures.slack",)
MODULE_TO_TEST = "busy_beaver.apps.slack_integration.interactors"
@pytest.fixture
def patch_slack(patcher):
    """Fixture factory: swap SlackClient in MODULE_TO_TEST for a FakeSlackClient.

    Returns a callable; calling it installs the fake and hands it back so the
    test can inspect recorded calls.
    """
    def _patch_slack(*, is_admin=None):
        obj = FakeSlackClient(is_admin=is_admin)
        patcher(MODULE_TO_TEST, namespace="SlackClient", replacement=obj)
        return obj
    return _patch_slack
###################
# Integration Tests
###################
@pytest.mark.integration
def test_slack_command_valid_command(
    client,
    session,
    factory,
    create_slack_headers,
    generate_slash_command_request,
    patch_slack,
):
    """A valid 'help' slash command returns HTTP 200 with the help text."""
    installation = factory.SlackInstallation()
    data = generate_slash_command_request("help", team_id=installation.workspace_id)
    headers = create_slack_headers(100_000_000, data, is_json_data=False)
    patch_slack(is_admin=False)
    response = client.post("/slack/slash-command", headers=headers, data=data)
    assert response.status_code == 200
    assert "/busybeaver help" in response.json["text"].lower()
@pytest.mark.integration
def test_slack_command_invalid_command(
    client, session, factory, create_slack_headers, generate_slash_command_request
):
    """An unknown command still returns HTTP 200 but with 'command not found'."""
    installation = factory.SlackInstallation()
    data = generate_slash_command_request(
        "non-existent", team_id=installation.workspace_id
    )
    headers = create_slack_headers(100_000_000, data, is_json_data=False)
    response = client.post("/slack/slash-command", headers=headers, data=data)
    assert response.status_code == 200
    assert "command not found" in response.json["text"].lower()
@pytest.mark.integration
def test_slack_command_empty_command(
    client, session, factory, create_slack_headers, generate_slash_command_request
):
    """An empty command falls back to the help text."""
    installation = factory.SlackInstallation()
    data = generate_slash_command_request(command="", team_id=installation.workspace_id)
    headers = create_slack_headers(100_000_000, data, is_json_data=False)
    response = client.post("/slack/slash-command", headers=headers, data=data)
    assert response.status_code == 200
    assert "/busybeaver help" in response.json["text"].lower()
@pytest.mark.integration
def test_slack_command_creates_user_record_in_database(
    client,
    session,
    factory,
    create_slack_headers,
    generate_slash_command_request,
    patch_slack,
):
    """Posting a slash command creates exactly one SlackUser row linked to the installation."""
    # Arrange
    installation = factory.SlackInstallation()
    data = generate_slash_command_request("help", team_id=installation.workspace_id)
    headers = create_slack_headers(100_000_000, data, is_json_data=False)
    patch_slack(is_admin=False)
    # Act
    client.post("/slack/slash-command", headers=headers, data=data)
    # Assert
    users = SlackUser.query.all()
    assert len(users) == 1
    user = users[0]
    assert user.slack_id == data["user_id"]
    assert user.installation.workspace_id == data["team_id"]
########################
# Miscellaneous Commands
########################
@pytest.mark.unit
def test_command_help(session, factory, generate_slash_command_request, patch_slack):
    """display_help_text includes the '/busybeaver help' usage line."""
    github_summary_config = factory.GitHubSummaryConfiguration()
    install = github_summary_config.slack_installation
    user = "user"
    slack_user = factory.SlackUser(slack_id=user, installation=install)
    data = generate_slash_command_request(
        "help", user_id=user, team_id=install.workspace_id
    )
    data["user"] = slack_user
    data["installation"] = install
    patch_slack(is_admin=False)
    result = display_help_text(**data)
    assert "/busybeaver help" in result["text"]
@pytest.mark.unit
def test_command_not_found(generate_slash_command_request):
    """command_not_found mentions both the failure and the help command."""
    data = generate_slash_command_request(command="blah")
    result = command_not_found(**data)
    assert "not found" in result["text"]
    assert "/busybeaver help" in result["text"]
#################
# Upcoming Events
#################
@pytest.mark.unit
def test_connect_command_new_user(session, factory, generate_slash_command_request):
    """'connect' for a user with no GitHub link offers the associate-profile button."""
    github_summary_config = factory.GitHubSummaryConfiguration()
    install = github_summary_config.slack_installation
    new_user = "new_user"
    slack_user = factory.SlackUser(slack_id=new_user, installation=install)
    data = generate_slash_command_request(
        "connect", user_id=new_user, team_id=install.workspace_id
    )
    data["user"] = slack_user
    data["installation"] = install
    result = link_github(**data)
    slack_response = result["attachments"][0]
    assert "Associate GitHub Profile" in slack_response["actions"][0]["text"]
@pytest.mark.unit
def test_connect_command_existing_user(
    session, factory, generate_slash_command_request
):
    """'connect' for an already-linked user points at '/busybeaver reconnect' instead."""
    existing_user = "existing_user"
    github_user = factory.GitHubSummaryUser(slack_id=existing_user)
    install = github_user.configuration.slack_installation
    slack_user = factory.SlackUser(slack_id=existing_user, installation=install)
    data = generate_slash_command_request(
        "connect", user_id=github_user.slack_id, team_id=install.workspace_id
    )
    data["user"] = slack_user
    data["installation"] = install
    result = link_github(**data)
    assert "/busybeaver reconnect" in result["text"]
@pytest.mark.unit
def test_reconnect_command_new_user(session, factory, generate_slash_command_request):
    """'reconnect' for an unlinked user still offers the associate-profile button."""
    github_summary_config = factory.GitHubSummaryConfiguration()
    install = github_summary_config.slack_installation
    new_user = "new_user"
    slack_user = factory.SlackUser(slack_id=new_user, installation=install)
    data = generate_slash_command_request(
        "reconnect", user_id=new_user, team_id=install.workspace_id
    )
    data["user"] = slack_user
    data["installation"] = install
    result = relink_github(**data)
    slack_response = result["attachments"][0]
    assert "Associate GitHub Profile" in slack_response["actions"][0]["text"]
@pytest.mark.unit
def test_reconnect_command_existing_user(
    session, factory, generate_slash_command_request
):
    """'reconnect' for a linked user re-offers the associate-profile button."""
    existing_user = "existing_user"
    github_user = factory.GitHubSummaryUser(slack_id=existing_user)
    install = github_user.configuration.slack_installation
    slack_user = factory.SlackUser(slack_id=existing_user, installation=install)
    data = generate_slash_command_request(
        "reconnect", user_id=existing_user, team_id=install.workspace_id
    )
    data["user"] = slack_user
    data["installation"] = install
    result = relink_github(**data)
    slack_response = result["attachments"][0]
    assert "Associate GitHub Profile" in slack_response["actions"][0]["text"]
@pytest.mark.unit
def test_disconnect_command_unregistered_user(
    session, factory, generate_slash_command_request
):
    """'disconnect' without a linked GitHub account reports there is nothing to remove."""
    github_summary_config = factory.GitHubSummaryConfiguration()
    install = github_summary_config.slack_installation
    slack_user = factory.SlackUser(slack_id="new_user", installation=install)
    data = generate_slash_command_request("disconnect", team_id=install.workspace_id)
    data["user"] = slack_user
    data["installation"] = install
    result = disconnect_github(**data)
    assert "No GitHub account associated with profile" in result["text"]
@pytest.mark.unit
def test_disconnect_command_registered_user(
    session, factory, generate_slash_command_request
):
    """'disconnect' for a linked user confirms deletion and removes the DB row."""
    existing_user = "existing_user"
    github_user = factory.GitHubSummaryUser(slack_id=existing_user)
    install = github_user.configuration.slack_installation
    slack_user = factory.SlackUser(slack_id=existing_user, installation=install)
    data = generate_slash_command_request(
        "disconnect", user_id=github_user.slack_id, team_id=install.workspace_id
    )
    data["user"] = slack_user
    data["installation"] = install
    result = disconnect_github(**data)
    assert "Account has been deleted" in result["text"]
    assert not GitHubSummaryUser.query.get(github_user.id)
#########################
# Upcoming Event Schedule
#########################
@pytest.mark.end2end
def test_command_next(session, factory, generate_slash_command_request):
    """'next' replies ephemerally using attachments only (no blocks, no text)."""
    group = factory.UpcomingEventsGroup()
    install = group.configuration.slack_installation
    factory.Event.create_batch(size=10, group=group)
    data = generate_slash_command_request("next", team_id=install.workspace_id)
    data["installation"] = install
    result = next_event(**data)
    assert result["response_type"] == "ephemeral"
    assert result["attachments"]
    assert not result["blocks"]
    assert not result["text"]
@pytest.mark.end2end
def test_command_events(session, factory, generate_slash_command_request):
    """'events' replies ephemerally using blocks only (no attachments, no text)."""
    group = factory.UpcomingEventsGroup()
    install = group.configuration.slack_installation
    factory.Event.create_batch(size=10, group=group)
    data = generate_slash_command_request("events", team_id=install.workspace_id)
    data["installation"] = install
    result = upcoming_events(**data)
    assert result["response_type"] == "ephemeral"
    assert result["blocks"]
    assert not result["attachments"]
    assert not result["text"]
| 32.644599
| 88
| 0.738926
| 1,114
| 9,369
| 5.900359
| 0.115799
| 0.060246
| 0.085197
| 0.115016
| 0.837974
| 0.811197
| 0.773315
| 0.756428
| 0.71033
| 0.691161
| 0
| 0.007388
| 0.147614
| 9,369
| 286
| 89
| 32.758741
| 0.815677
| 0.010567
| 0
| 0.631579
| 0
| 0
| 0.100198
| 0.008469
| 0
| 0
| 0
| 0
| 0.129187
| 1
| 0.076555
| false
| 0
| 0.019139
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
735039babdde9d8be02059260f2716a3de66a7fd
| 961
|
py
|
Python
|
multiagent/observation.py
|
SakuraiSatoru/multiagent-particle-envs
|
151e66f37e373107b98e166a24304b64ae752f60
|
[
"MIT"
] | null | null | null |
multiagent/observation.py
|
SakuraiSatoru/multiagent-particle-envs
|
151e66f37e373107b98e166a24304b64ae752f60
|
[
"MIT"
] | null | null | null |
multiagent/observation.py
|
SakuraiSatoru/multiagent-particle-envs
|
151e66f37e373107b98e166a24304b64ae752f60
|
[
"MIT"
] | null | null | null |
import numpy as np
class Conv1dObservation(object):
    """Observation split into a flat (non-lidar) vector and a 2-D lidar grid."""

    def __init__(self, non_lidar_obs, lidar_obs):
        """
        :type non_lidar_obs: np.ndarray  (rank 1)
        :type lidar_obs: np.ndarray      (rank 2)
        """
        # Guard the expected ranks up front; ndim == len(shape) for ndarrays.
        assert non_lidar_obs.ndim == 1
        assert lidar_obs.ndim == 2
        self._non_lidar_obs = non_lidar_obs
        self._lidar_obs = lidar_obs

    def __len__(self):
        # Total scalar count across both parts.
        rows, cols = self._lidar_obs.shape
        return self._non_lidar_obs.shape[0] + rows * cols

    @property
    def non_lidar_obs(self):
        return self._non_lidar_obs

    @property
    def lidar_obs(self):
        return self._lidar_obs

    @property
    def flatten(self):
        # One 1-D vector: non-lidar values followed by the flattened lidar grid.
        return np.concatenate(
            (self._non_lidar_obs, self._lidar_obs.reshape(-1)), axis=None
        )

    @property
    def shape(self):
        return self._non_lidar_obs.shape, self._lidar_obs.shape
| 25.972973
| 95
| 0.651405
| 134
| 961
| 4.261194
| 0.201493
| 0.322242
| 0.211909
| 0.183888
| 0.474606
| 0.374781
| 0.334501
| 0.085814
| 0
| 0
| 0
| 0.008299
| 0.247659
| 961
| 37
| 96
| 25.972973
| 0.781466
| 0.138398
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.285714
| false
| 0
| 0.047619
| 0.238095
| 0.619048
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7352bb3ce8cd9ffd7788e96dcc681288581fc5d8
| 39
|
py
|
Python
|
devip/__init__.py
|
jorgebg/devip
|
b4fe953efe513cdd88db99dee1d961a9c8be924e
|
[
"MIT"
] | null | null | null |
devip/__init__.py
|
jorgebg/devip
|
b4fe953efe513cdd88db99dee1d961a9c8be924e
|
[
"MIT"
] | null | null | null |
devip/__init__.py
|
jorgebg/devip
|
b4fe953efe513cdd88db99dee1d961a9c8be924e
|
[
"MIT"
] | null | null | null |
from devip.lib import get_ip_addresses
| 19.5
| 38
| 0.871795
| 7
| 39
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7dfbde09e42950bbd35d7f575da35cede36fd141
| 29
|
py
|
Python
|
plugins/i18n_subsites/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 13
|
2020-01-27T09:02:25.000Z
|
2022-01-20T07:45:26.000Z
|
plugins/i18n_subsites/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 29
|
2020-03-22T06:57:57.000Z
|
2022-01-24T22:46:42.000Z
|
plugins/i18n_subsites/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 6
|
2020-07-10T00:13:30.000Z
|
2022-01-26T08:22:33.000Z
|
from .i18n_subsites import *
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.137931
| 29
| 1
| 29
| 29
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b4302cccbef6d36cb12392d13e264501e86e71aa
| 10,817
|
py
|
Python
|
mayan/apps/mailer/tests/test_workflow_actions.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 4
|
2021-09-02T00:16:30.000Z
|
2021-09-09T22:25:15.000Z
|
mayan/apps/mailer/tests/test_workflow_actions.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 86
|
2021-09-01T23:53:02.000Z
|
2021-09-20T02:25:10.000Z
|
mayan/apps/mailer/tests/test_workflow_actions.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 70
|
2021-09-01T12:54:51.000Z
|
2022-02-16T00:53:18.000Z
|
import json
from django.core import mail
from mayan.apps.documents.tests.base import GenericDocumentTestCase
from mayan.apps.documents.tests.mixins.document_mixins import DocumentTestMixin
from mayan.apps.document_states.literals import WORKFLOW_ACTION_ON_ENTRY
from mayan.apps.document_states.permissions import permission_workflow_template_edit
from mayan.apps.document_states.tests.base import ActionTestCase
from mayan.apps.document_states.tests.mixins.workflow_template_mixins import WorkflowTemplateTestMixin
from mayan.apps.document_states.tests.mixins.workflow_template_state_mixins import WorkflowTemplateStateActionViewTestMixin
from mayan.apps.metadata.tests.mixins import MetadataTypeTestMixin
from mayan.apps.testing.tests.base import GenericViewTestCase
from ..permissions import permission_user_mailer_use
from ..workflow_actions import EmailAction
from .literals import (
TEST_EMAIL_ADDRESS, TEST_EMAIL_BODY, TEST_EMAIL_FROM_ADDRESS,
TEST_EMAIL_SUBJECT
)
from .mixins import MailerTestMixin
class EmailActionTestCase(
    MailerTestMixin, WorkflowTemplateTestMixin, GenericDocumentTestCase
):
    """EmailAction with literal recipient text: to / cc / bcc fields and
    execution through a workflow template state action."""

    # Tests create their own document stubs; skip the automatic upload.
    auto_upload_test_document = False

    def test_email_action_literal_text(self):
        """A literal recipient produces one outgoing message to that address."""
        self._create_test_document_stub()
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'subject': TEST_EMAIL_SUBJECT,
                'body': TEST_EMAIL_BODY,
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])

    def test_email_action_literal_text_cc_field(self):
        """A literal 'cc' value is carried through to the message."""
        self._create_test_document_stub()
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'cc': TEST_EMAIL_ADDRESS,
                'subject': TEST_EMAIL_SUBJECT,
                'body': TEST_EMAIL_BODY,
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
        self.assertEqual(mail.outbox[0].cc, [TEST_EMAIL_ADDRESS])

    def test_email_action_literal_text_bcc_field(self):
        """A literal 'bcc' value is carried through to the message."""
        self._create_test_document_stub()
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'bcc': TEST_EMAIL_ADDRESS,
                'subject': TEST_EMAIL_SUBJECT,
                'body': TEST_EMAIL_BODY,
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
        self.assertEqual(mail.outbox[0].bcc, [TEST_EMAIL_ADDRESS])

    def test_email_action_workflow_execute(self):
        """Attaching the action to an initial workflow state sends mail when a
        document of the bound type is created."""
        self._create_test_workflow_template()
        self._create_test_workflow_template_state()
        self._create_test_user_mailer()
        self.test_workflow_template_state.actions.create(
            action_data=json.dumps(
                obj={
                    'mailing_profile': self.test_user_mailer.pk,
                    'recipient': TEST_EMAIL_ADDRESS,
                    'subject': TEST_EMAIL_SUBJECT,
                    'body': TEST_EMAIL_BODY,
                }
            ),
            action_path='mayan.apps.mailer.workflow_actions.EmailAction',
            label='test email action', when=WORKFLOW_ACTION_ON_ENTRY,
        )
        self.test_workflow_template_state.initial = True
        self.test_workflow_template_state.save()
        self.test_workflow_template.document_types.add(self.test_document_type)
        # Creating the stub enters the initial state, firing the ON_ENTRY action.
        self._create_test_document_stub()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
class EmailActionTemplateTestCase(
    MetadataTypeTestMixin, MailerTestMixin, WorkflowTemplateTestMixin, ActionTestCase
):
    """EmailAction with templated fields: recipient, subject, body, and the
    document-attachment flag, each resolved from document metadata.

    The '{{{{ ... }}}}' literals are doubled so that str.format emits a
    '{{ ... }}' template for the downstream template engine to render.
    """

    def test_email_action_recipient_template(self):
        """Recipient template resolves to the document's metadata value."""
        self._create_test_metadata_type()
        self.test_document_type.metadata.create(
            metadata_type=self.test_metadata_type
        )
        self.test_document.metadata.create(
            metadata_type=self.test_metadata_type, value=TEST_EMAIL_ADDRESS
        )
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': '{{{{ document.metadata_value_of.{} }}}}'.format(
                    self.test_metadata_type.name
                ),
                'subject': TEST_EMAIL_SUBJECT,
                'body': '',
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(
            mail.outbox[0].to, [self.test_document.metadata.first().value]
        )

    def test_email_action_subject_template(self):
        """Subject template resolves to the document's metadata value."""
        self._create_test_metadata_type()
        self.test_document_type.metadata.create(
            metadata_type=self.test_metadata_type
        )
        self.test_document.metadata.create(
            metadata_type=self.test_metadata_type, value=TEST_EMAIL_SUBJECT
        )
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'subject': '{{{{ document.metadata_value_of.{} }}}}'.format(
                    self.test_metadata_type.name
                ),
                'body': '',
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
        self.assertEqual(
            mail.outbox[0].subject, self.test_document.metadata.first().value
        )

    def test_email_action_body_template(self):
        """Body template resolves to the document's metadata value."""
        self._create_test_metadata_type()
        self.test_document_type.metadata.create(
            metadata_type=self.test_metadata_type
        )
        self.test_document.metadata.create(
            metadata_type=self.test_metadata_type, value=TEST_EMAIL_BODY
        )
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'subject': TEST_EMAIL_SUBJECT,
                'body': '{{{{ document.metadata_value_of.{} }}}}'.format(
                    self.test_metadata_type.name
                ),
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
        self.assertEqual(mail.outbox[0].body, TEST_EMAIL_BODY)

    def test_email_action_attachment(self):
        """attachment=True adds exactly one attachment to the message."""
        self._create_test_metadata_type()
        self.test_document_type.metadata.create(
            metadata_type=self.test_metadata_type
        )
        self.test_document.metadata.create(
            metadata_type=self.test_metadata_type, value=TEST_EMAIL_SUBJECT
        )
        self._create_test_user_mailer()
        action = EmailAction(
            form_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'subject': '{{{{ document.metadata_value_of.{} }}}}'.format(
                    self.test_metadata_type.name
                ),
                'body': '',
                'attachment': True
            }
        )
        action.execute(context={'document': self.test_document})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, TEST_EMAIL_FROM_ADDRESS)
        self.assertEqual(mail.outbox[0].to, [TEST_EMAIL_ADDRESS])
        self.assertEqual(
            mail.outbox[0].subject, self.test_document.metadata.first().value
        )
        self.assertEqual(len(mail.outbox[0].attachments), 1)
class EmailActionViewTestCase(
    DocumentTestMixin, MailerTestMixin, WorkflowTemplateStateActionViewTestMixin,
    WorkflowTemplateTestMixin, GenericViewTestCase
):
    """Create-view tests for attaching an EmailAction to a workflow state,
    exercising the permission checks on the workflow template and mailer."""

    # Tests do not need an uploaded document.
    auto_upload_test_document = False

    def test_email_action_create_get_view(self):
        """GET renders the form (200) and creates no action yet."""
        self._create_test_workflow_template()
        self._create_test_workflow_template_state()
        self._create_test_user_mailer()
        self.grant_access(
            obj=self.test_workflow_template, permission=permission_workflow_template_edit
        )
        response = self._request_test_workflow_template_state_action_create_get_view(
            class_path='mayan.apps.mailer.workflow_actions.EmailAction'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.test_workflow_template_state.actions.count(), 0)

    def test_email_action_create_post_view(self):
        """POST with both edit and mailer-use permissions redirects (302) and
        persists one state action."""
        self._create_test_workflow_template()
        self._create_test_workflow_template_state()
        self._create_test_user_mailer()
        self.grant_access(
            obj=self.test_user_mailer, permission=permission_user_mailer_use
        )
        self.grant_access(
            obj=self.test_workflow_template, permission=permission_workflow_template_edit
        )
        response = self._request_test_workflow_template_state_action_create_post_view(
            class_path='mayan.apps.mailer.workflow_actions.EmailAction',
            extra_data={
                'mailing_profile': self.test_user_mailer.pk,
                'recipient': TEST_EMAIL_ADDRESS,
                'subject': TEST_EMAIL_SUBJECT,
                'body': TEST_EMAIL_BODY,
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.test_workflow_template_state.actions.count(), 1)
| 39.050542
| 123
| 0.660072
| 1,174
| 10,817
| 5.699319
| 0.089438
| 0.07936
| 0.050217
| 0.078464
| 0.802122
| 0.755343
| 0.740248
| 0.728441
| 0.728441
| 0.65491
| 0
| 0.004798
| 0.248498
| 10,817
| 276
| 124
| 39.192029
| 0.818305
| 0
| 0
| 0.588235
| 0
| 0
| 0.064436
| 0.023482
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.042017
| false
| 0
| 0.063025
| 0
| 0.12605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b437fd7c37adbdcc45ba1b5e16007f27b077f6f0
| 2,879
|
py
|
Python
|
tests/jobs/test_build.py
|
jlakin2/python-jenkins
|
2b69951603d1f57b3fe82e119bba62a19156a629
|
[
"BSD-3-Clause"
] | 7
|
2020-01-08T19:34:50.000Z
|
2022-03-07T15:54:43.000Z
|
tests/jobs/test_build.py
|
jlakin2/python-jenkins
|
2b69951603d1f57b3fe82e119bba62a19156a629
|
[
"BSD-3-Clause"
] | 2
|
2021-03-31T12:35:39.000Z
|
2021-08-17T16:30:34.000Z
|
tests/jobs/test_build.py
|
jlakin2/python-jenkins
|
2b69951603d1f57b3fe82e119bba62a19156a629
|
[
"BSD-3-Clause"
] | 6
|
2017-12-02T23:31:32.000Z
|
2021-12-13T02:26:05.000Z
|
from mock import patch
import jenkins
from tests.jobs.base import JenkinsJobsTestBase
class JenkinsBuildJobTest(JenkinsJobsTestBase):
    """build_job URL construction: plain jobs, folder-nested jobs, build
    tokens, and build parameters (jenkins_open is mocked throughout)."""

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_simple(self, jenkins_mock):
        """A plain job name maps to job/<name>/build (URL-encoded)."""
        jenkins_mock.side_effect = [
            {'foo': 'bar'},
        ]
        build_info = self.j.build_job(u'Test Job')
        self.assertEqual(jenkins_mock.call_args[0][0].get_full_url(),
                         self.make_url('job/Test%20Job/build'))
        self.assertEqual(build_info, {'foo': 'bar'})
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_in_folder(self, jenkins_mock):
        """A 'folder/name' job expands to nested job/ segments."""
        jenkins_mock.side_effect = [
            {'foo': 'bar'},
        ]
        build_info = self.j.build_job(u'a Folder/Test Job')
        self.assertEqual(jenkins_mock.call_args[0][0].get_full_url(),
                         self.make_url('job/a%20Folder/job/Test%20Job/build'))
        self.assertEqual(build_info, {'foo': 'bar'})
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_with_token(self, jenkins_mock):
        """A build token is appended as a query parameter."""
        jenkins_mock.side_effect = [
            {'foo': 'bar'},
        ]
        build_info = self.j.build_job(u'TestJob', token='some_token')
        self.assertEqual(jenkins_mock.call_args[0][0].get_full_url(),
                         self.make_url('job/TestJob/build?token=some_token'))
        self.assertEqual(build_info, {'foo': 'bar'})
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_in_folder_with_token(self, jenkins_mock):
        """Folder nesting and token combine in the same URL."""
        jenkins_mock.side_effect = [
            {'foo': 'bar'},
        ]
        build_info = self.j.build_job(u'a Folder/TestJob', token='some_token')
        self.assertEqual(jenkins_mock.call_args[0][0].get_full_url(),
                         self.make_url('job/a%20Folder/job/TestJob/build?token=some_token'))
        self.assertEqual(build_info, {'foo': 'bar'})
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_with_parameters_and_token(self, jenkins_mock):
        """Parameters are URL-encoded (spaces as '+') alongside the token;
        query ordering is not asserted, only membership."""
        jenkins_mock.side_effect = [
            {'foo': 'bar'},
        ]
        build_info = self.j.build_job(
            u'TestJob',
            parameters={'when': 'now', 'why': 'because I felt like it'},
            token='some_token')
        self.assertTrue('token=some_token' in jenkins_mock.call_args[0][0].get_full_url())
        self.assertTrue('when=now' in jenkins_mock.call_args[0][0].get_full_url())
        self.assertTrue('why=because+I+felt+like+it' in jenkins_mock.call_args[0][0].get_full_url())
        self.assertEqual(build_info, {'foo': 'bar'})
        self._check_requests(jenkins_mock.call_args_list)
| 37.38961
| 100
| 0.637721
| 376
| 2,879
| 4.587766
| 0.151596
| 0.14029
| 0.104348
| 0.132174
| 0.882899
| 0.882899
| 0.858551
| 0.858551
| 0.833623
| 0.833623
| 0
| 0.009848
| 0.224036
| 2,879
| 76
| 101
| 37.881579
| 0.76231
| 0
| 0
| 0.5
| 0
| 0
| 0.147621
| 0.050017
| 0
| 0
| 0
| 0
| 0.206897
| 1
| 0.086207
| false
| 0
| 0.051724
| 0
| 0.155172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b470dff06e03bbb5641987a9fd8d14291ab8c6f2
| 18,326
|
py
|
Python
|
backend/wellness/models.py
|
abayakturin/wellness
|
8cdc7c8f4a38f55fa4baf1511eb599eeefe1343d
|
[
"MIT"
] | null | null | null |
backend/wellness/models.py
|
abayakturin/wellness
|
8cdc7c8f4a38f55fa4baf1511eb599eeefe1343d
|
[
"MIT"
] | null | null | null |
backend/wellness/models.py
|
abayakturin/wellness
|
8cdc7c8f4a38f55fa4baf1511eb599eeefe1343d
|
[
"MIT"
] | null | null | null |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class AuthGroup(models.Model):
    # Unmanaged mirror of Django's auth_group table.
    name = models.CharField(unique=True, max_length=150)

    class Meta:
        managed = False
        db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
    # Unmanaged join table: group <-> permission (unique per pair).
    id = models.BigAutoField(primary_key=True)
    group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
    permission = models.ForeignKey('AuthPermission', models.DO_NOTHING)

    class Meta:
        managed = False
        db_table = 'auth_group_permissions'
        unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
    # Unmanaged mirror of auth_permission; codename unique per content type.
    name = models.CharField(max_length=255)
    content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
    codename = models.CharField(max_length=100)

    class Meta:
        managed = False
        db_table = 'auth_permission'
        unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
    # Unmanaged mirror of Django's auth_user table.
    password = models.CharField(max_length=128)
    last_login = models.DateTimeField(blank=True, null=True)
    is_superuser = models.BooleanField()
    username = models.CharField(unique=True, max_length=150)
    first_name = models.CharField(max_length=150)
    last_name = models.CharField(max_length=150)
    email = models.CharField(max_length=254)
    is_staff = models.BooleanField()
    is_active = models.BooleanField()
    date_joined = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'auth_user'
class AuthUserGroups(models.Model):
    # Unmanaged join table: user <-> group (unique per pair).
    id = models.BigAutoField(primary_key=True)
    user = models.ForeignKey(AuthUser, models.DO_NOTHING)
    group = models.ForeignKey(AuthGroup, models.DO_NOTHING)

    class Meta:
        managed = False
        db_table = 'auth_user_groups'
        unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
    # Unmanaged join table: user <-> permission (unique per pair).
    id = models.BigAutoField(primary_key=True)
    user = models.ForeignKey(AuthUser, models.DO_NOTHING)
    permission = models.ForeignKey(AuthPermission, models.DO_NOTHING)

    class Meta:
        managed = False
        db_table = 'auth_user_user_permissions'
        unique_together = (('user', 'permission'),)
class ConsultingDoctor(models.Model):
    # Unmanaged table: contract terms for a consulting doctor; shares its
    # primary key with Doctor via a one-to-one on 'id'.
    name = models.CharField(max_length=150)
    contract_date = models.DateField()
    contract_phone = models.CharField(max_length=150)
    weekly_pay = models.IntegerField()
    min_weekly_hours = models.IntegerField()
    overtime_hourly_pay = models.IntegerField()
    # tbl_last_date is set on row creation only (auto_now_add).
    tbl_last_date = models.DateTimeField(auto_now_add=True)
    id = models.OneToOneField('Doctor', models.DO_NOTHING, db_column='id', primary_key=True)

    class Meta:
        managed = False
        db_table = 'consulting_doctor'
class ConsultingDoctorHistory(models.Model):
name = models.CharField(max_length=150)
timestamp = models.DateTimeField(auto_now_add=True)
contract_date = models.DateField()
contract_phone = models.CharField(max_length=150)
weekly_pay = models.IntegerField()
min_weekly_hours = models.IntegerField()
overtime_hourly_pay = models.IntegerField()
id = models.IntegerField(primary_key=True)
class Meta:
managed = False
db_table = 'consulting_doctor_history'
class ConsultingDoctorHistory202110(models.Model):
name = models.CharField(max_length=150)
timestamp = models.DateTimeField(auto_now_add=True)
contract_date = models.DateField()
contract_phone = models.CharField(max_length=150)
weekly_pay = models.IntegerField()
min_weekly_hours = models.IntegerField()
overtime_hourly_pay = models.IntegerField()
id = models.IntegerField(primary_key=True)
class Meta:
managed = False
db_table = 'consulting_doctor_history_2021_10'
class Department(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=150)
phone = models.CharField(max_length=150)
location = models.CharField(max_length=150)
tbl_last_date = models.DateTimeField(auto_now_add=True)
class Meta:
managed = False
db_table = 'department'
class DepartmentDoctor(models.Model):
doctor = models.ForeignKey('Doctor', models.DO_NOTHING)
department = models.ForeignKey(Department, models.DO_NOTHING)
tbl_last_date = models.DateTimeField(auto_now_add=True)
class Meta:
managed = False
db_table = 'department_doctor'
class DepartmentPatient(models.Model):
department = models.ForeignKey(Department, models.DO_NOTHING)
patient = models.ForeignKey('Patient', models.DO_NOTHING)
tbl_last_date = models.DateTimeField(auto_now_add=True)
class Meta:
managed = False
db_table = 'department_patient'
class Disease(models.Model):
icd = models.IntegerField(primary_key=True)
description = models.CharField(max_length=150)
type = models.CharField(max_length=150)
tbl_last_date = models.DateTimeField(auto_now_add=True)
class Meta:
managed = False
db_table = 'disease'
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField(auto_now_add=True)
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.SmallIntegerField()
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
id = models.BigAutoField(primary_key=True)
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(auto_now_add=True)
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField(auto_now_add=True)
class Meta:
managed = False
db_table = 'django_session'
# Core doctor record; subtype tables (ConsultingDoctor, FulltimeDoctor) share its PK.
class Doctor(models.Model):
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=150)
    office_phone = models.CharField(max_length=150)
    personal_phone = models.CharField(max_length=150)
    specialty = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'doctor'


# Join table: doctor <-> disease (specialisation).
class DoctorDisease(models.Model):
    doctor = models.ForeignKey(Doctor, models.DO_NOTHING)
    disease = models.ForeignKey(Disease, models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'doctor_disease'


# Join table: doctor <-> patient.
class DoctorPatient(models.Model):
    doctor = models.ForeignKey(Doctor, models.DO_NOTHING)
    patient = models.ForeignKey('Patient', models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'doctor_patient'


# Subtype of Treatment: drug prescriptions; PK is also FK to Treatment.
class DrugTreatment(models.Model):
    id = models.OneToOneField('Treatment', models.DO_NOTHING, db_column='id', primary_key=True)
    drug = models.CharField(max_length=150)
    dose = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'drug_treatment'


class EmergencyContact(models.Model):
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=150)
    address = models.CharField(max_length=150)
    phone = models.CharField(max_length=150)
    relationship = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'emergency_contact'


# Subtype of Doctor: salaried staff; PK is also FK to Doctor.
class FulltimeDoctor(models.Model):
    name = models.CharField(max_length=150)
    hire_date = models.DateField()
    annual_pay = models.IntegerField()
    tbl_last_date = models.DateTimeField(auto_now_add=True)
    id = models.OneToOneField(Doctor, models.DO_NOTHING, db_column='id', primary_key=True)

    class Meta:
        managed = False
        db_table = 'fulltime_doctor'


# Audit/history copy of fulltime_doctor rows.
class FulltimeDoctorHistory(models.Model):
    name = models.CharField(max_length=150)
    timestamp = models.DateTimeField(auto_now_add=True)
    hire_date = models.DateField()
    annual_pay = models.IntegerField()
    id = models.IntegerField(primary_key=True)

    class Meta:
        managed = False
        db_table = 'fulltime_doctor_history'


# Month-partitioned history table (2021-10).
class FulltimeDoctorHistory202110(models.Model):
    name = models.CharField(max_length=150)
    timestamp = models.DateTimeField(auto_now_add=True)
    hire_date = models.DateField()
    annual_pay = models.IntegerField()
    id = models.IntegerField(primary_key=True)

    class Meta:
        managed = False
        db_table = 'fulltime_doctor_history_2021_10'


class Hospital(models.Model):
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=150)
    address = models.CharField(max_length=150)
    specialty = models.CharField(max_length=150)
    emergency_phone = models.CharField(max_length=150)
    general_phone = models.CharField(max_length=150)
    registration_phone = models.CharField(max_length=150)
    admin_phone = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'hospital'


# Join table: hospital <-> department.
class HospitalDepartment(models.Model):
    hospital = models.ForeignKey(Hospital, models.DO_NOTHING)
    department = models.ForeignKey(Department, models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'hospital_department'


# Subtype of Patient: admitted patients; PK is also FK to Patient.
class InPatient(models.Model):
    id = models.OneToOneField('Patient', models.DO_NOTHING, db_column='id', primary_key=True)
    bed_number = models.IntegerField()
    floor = models.IntegerField()
    discharge_date = models.DateField()
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'in_patient'


# Audit/history copy of in_patient rows.
class InPatientHistory(models.Model):
    id = models.IntegerField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    bed_number = models.IntegerField()
    floor = models.IntegerField()
    discharge_date = models.DateField()

    class Meta:
        managed = False
        db_table = 'in_patient_history'


# Month-partitioned history table (2021-10).
class InPatientHistory202110(models.Model):
    id = models.IntegerField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    bed_number = models.IntegerField()
    floor = models.IntegerField()
    discharge_date = models.DateField()

    class Meta:
        managed = False
        db_table = 'in_patient_history_2021_10'


class Insurance(models.Model):
    id = models.IntegerField(primary_key=True)
    insurance_company = models.CharField(max_length=50)
    insurance_number = models.IntegerField()
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'insurance'


# Join table: insurance <-> invoice.
class InsuranceInvoice(models.Model):
    insurance = models.ForeignKey(Insurance, models.DO_NOTHING)
    invoice = models.ForeignKey('Invoice', models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'insurance_invoice'


class Invoice(models.Model):
    id = models.IntegerField(primary_key=True)
    date = models.DateField()
    amount = models.FloatField()
    payment_type = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'invoice'


# Subtype of Treatment: lab tests; PK is also FK to Treatment.
class LaboratoryTreatment(models.Model):
    laboratory = models.CharField(max_length=150)
    test_type = models.CharField(max_length=150)
    test_date = models.DateField()
    test_result = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)
    id = models.OneToOneField('Treatment', models.DO_NOTHING, db_column='id', primary_key=True)

    class Meta:
        managed = False
        db_table = 'laboratory_treatment'


# Subtype of Patient: outpatients; PK is also FK to Patient.
class OutPatient(models.Model):
    id = models.OneToOneField('Patient', models.DO_NOTHING, db_column='id', primary_key=True)
    follow_up_date = models.DateField()
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'out_patient'


# Audit/history copy of out_patient rows.
class OutPatientHistory(models.Model):
    id = models.IntegerField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    follow_up_date = models.DateField()

    class Meta:
        managed = False
        db_table = 'out_patient_history'


# Month-partitioned history table (2021-10).
class OutPatientHistory202110(models.Model):
    id = models.IntegerField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    follow_up_date = models.DateField()

    class Meta:
        managed = False
        db_table = 'out_patient_history_2021_10'


class Patient(models.Model):
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=150)
    address = models.CharField(max_length=150)
    phone = models.CharField(max_length=150)
    birth_date = models.DateField()
    race = models.CharField(max_length=150)
    # NOTE(review): DB column is misspelled ('martial' vs 'marital'); the field
    # name must keep the typo to match the existing table.
    martial_status = models.CharField(max_length=150)
    gender = models.CharField(max_length=150)
    blood_group = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'patient'
# Join table: patient <-> emergency contact.
class PatientEmergencyContact(models.Model):
    patient = models.ForeignKey(Patient, models.DO_NOTHING)
    emergency_contact = models.ForeignKey(EmergencyContact, models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'patient_emergency_contact'


# Join table: patient <-> insurance policy.
class PatientInsurance(models.Model):
    patient = models.ForeignKey(Patient, models.DO_NOTHING)
    insurance = models.ForeignKey(Insurance, models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'patient_insurance'


# Join table: patient <-> invoice.
class PatientInvoice(models.Model):
    patient = models.ForeignKey(Patient, models.DO_NOTHING)
    invoice = models.ForeignKey(Invoice, models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'patient_invoice'


# Join table: patient <-> registration event.
class PatientRegistration(models.Model):
    patient = models.ForeignKey(Patient, models.DO_NOTHING)
    registration = models.ForeignKey('Registration', models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'patient_registration'


# Join table: patient <-> treatment.
class PatientTreatment(models.Model):
    patient = models.ForeignKey(Patient, models.DO_NOTHING)
    treatment = models.ForeignKey('Treatment', models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'patient_treatment'


class Registration(models.Model):
    id = models.IntegerField(primary_key=True)
    registration_date = models.DateField()
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'registration'


# Join table: registration <-> treatment.
class RegistrationTreatment(models.Model):
    registration = models.ForeignKey(Registration, models.DO_NOTHING)
    treatment = models.ForeignKey('Treatment', models.DO_NOTHING)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'registration_treatment'


# Subtype of Treatment: surgeries; PK is also FK to Treatment.
class SurgeryTreatment(models.Model):
    id = models.OneToOneField('Treatment', models.DO_NOTHING, db_column='id', primary_key=True)
    surgery = models.CharField(max_length=150)
    description = models.CharField(max_length=150)
    date = models.DateField()
    result = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'surgery_treatment'


# Base treatment record; subtype tables share its PK.
class Treatment(models.Model):
    id = models.IntegerField(primary_key=True)
    date = models.DateField()
    # NOTE: 'type' shadows the builtin, but renaming would break the column mapping.
    type = models.CharField(max_length=150)
    result_status = models.CharField(max_length=150)
    description = models.CharField(max_length=150)
    tbl_last_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        managed = False
        db_table = 'treatment'


# Audit/history copy of treatment rows.
class TreatmentHistory(models.Model):
    id = models.IntegerField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    date = models.DateField()
    type = models.CharField(max_length=150)
    result_status = models.CharField(max_length=150)
    description = models.CharField(max_length=150)

    class Meta:
        managed = False
        db_table = 'treatment_history'


# Month-partitioned history table (2021-10).
class TreatmentHistory202110(models.Model):
    id = models.IntegerField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    date = models.DateField()
    type = models.CharField(max_length=150)
    result_status = models.CharField(max_length=150)
    description = models.CharField(max_length=150)

    class Meta:
        managed = False
        db_table = 'treatment_history_2021_10'
| 31.542169
| 104
| 0.72007
| 2,191
| 18,326
| 5.797809
| 0.103606
| 0.081477
| 0.093521
| 0.124695
| 0.781863
| 0.764071
| 0.731638
| 0.702511
| 0.66441
| 0.627804
| 0
| 0.017759
| 0.185747
| 18,326
| 580
| 105
| 31.596552
| 0.833534
| 0.025647
| 0
| 0.603286
| 1
| 0
| 0.059222
| 0.015968
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002347
| 0.002347
| 0
| 0.758216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
b485007b056c930c2e38a42c0a483fe964f3ed0c
| 21
|
py
|
Python
|
Xana/SaxsAna/__init__.py
|
ClLov/Xana
|
83d880432a457cff0f1fab2801e2530ddecb4019
|
[
"MIT"
] | 1
|
2021-01-25T08:57:57.000Z
|
2021-01-25T08:57:57.000Z
|
Xana/SaxsAna/__init__.py
|
ClLov/Xana
|
83d880432a457cff0f1fab2801e2530ddecb4019
|
[
"MIT"
] | 21
|
2020-03-23T12:50:32.000Z
|
2021-05-07T07:54:38.000Z
|
Xana/SaxsAna/__init__.py
|
ClLov/Xana
|
83d880432a457cff0f1fab2801e2530ddecb4019
|
[
"MIT"
] | 2
|
2020-03-22T10:31:09.000Z
|
2020-07-01T14:00:28.000Z
|
from .Soq import Soq
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
81e8e46ad0f32377e77f7c139d78adec7877862d
| 16,316
|
py
|
Python
|
tests/test_rds/test_filters.py
|
andormarkus/moto
|
67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rds/test_filters.py
|
andormarkus/moto
|
67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rds/test_filters.py
|
andormarkus/moto
|
67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from moto import mock_rds
class TestDBInstanceFilters(object):
    """Exercise the Filters parameter of describe_db_instances against a
    mocked RDS backend (10 instances; engine alternates so i % 3 == 0 rows
    are mysql and the rest postgres)."""

    mock = mock_rds()

    @classmethod
    def setup_class(cls):
        # Start the moto mock once for the whole class and seed it with
        # ten instances, each attached to its own cluster.
        cls.mock.start()
        client = boto3.client("rds", region_name="us-west-2")
        for i in range(10):
            instance_identifier = "db-instance-{}".format(i)
            cluster_identifier = "db-cluster-{}".format(i)
            # mysql for i in {0, 3, 6, 9}; postgres otherwise.
            engine = "postgres" if (i % 3) else "mysql"
            client.create_db_instance(
                DBInstanceIdentifier=instance_identifier,
                DBClusterIdentifier=cluster_identifier,
                Engine=engine,
                DBInstanceClass="db.m1.small",
            )
        cls.client = client

    @classmethod
    def teardown_class(cls):
        try:
            cls.mock.stop()
        except RuntimeError:
            # The mock may already be stopped; tearing down twice is harmless.
            pass

    def test_invalid_filter_name_raises_error(self):
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_instances(
                Filters=[{"Name": "invalid-filter-name", "Values": []}]
            )
        ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
        ex.value.response["Error"]["Message"].should.equal(
            "Unrecognized filter name: invalid-filter-name"
        )

    def test_empty_filter_values_raises_error(self):
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_instances(
                Filters=[{"Name": "db-instance-id", "Values": []}]
            )
        ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination")
        ex.value.response["Error"]["Message"].should.contain("must not be empty")

    def test_db_cluster_id_filter(self):
        # Clusters are 1:1 with instances in setup, so exactly one match.
        resp = self.client.describe_db_instances()
        db_cluster_identifier = resp["DBInstances"][0]["DBClusterIdentifier"]
        db_instances = self.client.describe_db_instances(
            Filters=[{"Name": "db-cluster-id", "Values": [db_cluster_identifier]}]
        ).get("DBInstances")
        db_instances.should.have.length_of(1)
        db_instances[0]["DBClusterIdentifier"].should.equal(db_cluster_identifier)

    def test_db_instance_id_filter(self):
        resp = self.client.describe_db_instances()
        db_instance_identifier = resp["DBInstances"][0]["DBInstanceIdentifier"]
        db_instances = self.client.describe_db_instances(
            Filters=[{"Name": "db-instance-id", "Values": [db_instance_identifier]}]
        ).get("DBInstances")
        db_instances.should.have.length_of(1)
        db_instances[0]["DBInstanceIdentifier"].should.equal(db_instance_identifier)

    def test_db_instance_id_filter_works_with_arns(self):
        # The db-instance-id filter must accept full ARNs as well as names.
        resp = self.client.describe_db_instances()
        db_instance_arn = resp["DBInstances"][0]["DBInstanceArn"]
        db_instances = self.client.describe_db_instances(
            Filters=[{"Name": "db-instance-id", "Values": [db_instance_arn]}]
        ).get("DBInstances")
        db_instances.should.have.length_of(1)
        db_instances[0]["DBInstanceArn"].should.equal(db_instance_arn)

    def test_dbi_resource_id_filter(self):
        resp = self.client.describe_db_instances()
        dbi_resource_identifier = resp["DBInstances"][0]["DbiResourceId"]
        db_instances = self.client.describe_db_instances(
            Filters=[{"Name": "dbi-resource-id", "Values": [dbi_resource_identifier]}]
        ).get("DBInstances")
        for db_instance in db_instances:
            db_instance["DbiResourceId"].should.equal(dbi_resource_identifier)

    def test_engine_filter(self):
        db_instances = self.client.describe_db_instances(
            Filters=[{"Name": "engine", "Values": ["postgres"]}]
        ).get("DBInstances")
        for db_instance in db_instances:
            db_instance["Engine"].should.equal("postgres")

        # No oracle instances were created, so the filter matches nothing.
        db_instances = self.client.describe_db_instances(
            Filters=[{"Name": "engine", "Values": ["oracle"]}]
        ).get("DBInstances")
        db_instances.should.have.length_of(0)

    def test_multiple_filters(self):
        # Filters are ANDed: of instances 0, 1, 3 only 0 and 3 are mysql.
        resp = self.client.describe_db_instances(
            Filters=[
                {
                    "Name": "db-instance-id",
                    "Values": ["db-instance-0", "db-instance-1", "db-instance-3"],
                },
                {"Name": "engine", "Values": ["mysql", "oracle"]},
            ]
        )
        returned_identifiers = [
            db["DBInstanceIdentifier"] for db in resp["DBInstances"]
        ]
        returned_identifiers.should.have.length_of(2)
        "db-instance-0".should.be.within(returned_identifiers)
        "db-instance-3".should.be.within(returned_identifiers)

    def test_invalid_db_instance_identifier_with_exclusive_filter(self):
        # Passing a non-existent DBInstanceIdentifier will not raise an error
        # if the resulting filter matches other resources.
        resp = self.client.describe_db_instances(
            DBInstanceIdentifier="non-existent",
            Filters=[{"Name": "db-instance-id", "Values": ["db-instance-1"]}],
        )
        resp["DBInstances"].should.have.length_of(1)
        resp["DBInstances"][0]["DBInstanceIdentifier"].should.equal("db-instance-1")

    def test_invalid_db_instance_identifier_with_non_matching_filter(self):
        # Passing a non-existent DBInstanceIdentifier will raise an error if
        # the resulting filter does not match any resources.
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_instances(
                DBInstanceIdentifier="non-existent",
                Filters=[{"Name": "engine", "Values": ["mysql"]}],
            )
        ex.value.response["Error"]["Code"].should.equal("DBInstanceNotFound")
        ex.value.response["Error"]["Message"].should.equal(
            "DBInstance non-existent not found."
        )

    def test_valid_db_instance_identifier_with_exclusive_filter(self):
        # Passing a valid DBInstanceIdentifier with a filter it does not match
        # but does match other resources will return those other resources.
        resp = self.client.describe_db_instances(
            DBInstanceIdentifier="db-instance-0",
            Filters=[
                {"Name": "db-instance-id", "Values": ["db-instance-1"]},
                {"Name": "engine", "Values": ["postgres"]},
            ],
        )
        returned_identifiers = [
            db["DBInstanceIdentifier"] for db in resp["DBInstances"]
        ]
        "db-instance-0".should_not.be.within(returned_identifiers)
        "db-instance-1".should.be.within(returned_identifiers)

    def test_valid_db_instance_identifier_with_inclusive_filter(self):
        # Passing a valid DBInstanceIdentifier with a filter it matches but also
        # matches other resources will return all matching resources.
        resp = self.client.describe_db_instances(
            DBInstanceIdentifier="db-instance-0",
            Filters=[
                {"Name": "db-instance-id", "Values": ["db-instance-1"]},
                {"Name": "engine", "Values": ["mysql", "postgres"]},
            ],
        )
        returned_identifiers = [
            db["DBInstanceIdentifier"] for db in resp["DBInstances"]
        ]
        "db-instance-0".should.be.within(returned_identifiers)
        "db-instance-1".should.be.within(returned_identifiers)

    def test_valid_db_instance_identifier_with_non_matching_filter(self):
        # Passing a valid DBInstanceIdentifier will raise an error if the
        # resulting filter does not match any resources.
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_instances(
                DBInstanceIdentifier="db-instance-0",
                Filters=[{"Name": "engine", "Values": ["postgres"]}],
            )
        ex.value.response["Error"]["Code"].should.equal("DBInstanceNotFound")
        ex.value.response["Error"]["Message"].should.equal(
            "DBInstance db-instance-0 not found."
        )
class TestDBSnapshotFilters(object):
    """Exercise the Filters parameter of describe_db_snapshots against a
    mocked RDS backend (two instances, two snapshots each)."""

    mock = mock_rds()

    @classmethod
    def setup_class(cls):
        cls.mock.start()
        client = boto3.client("rds", region_name="us-west-2")
        # We'll set up two instances (one postgres, one mysql)
        # with two snapshots each.
        for i in range(2):
            identifier = "db-instance-{}".format(i)
            engine = "postgres" if i else "mysql"
            client.create_db_instance(
                DBInstanceIdentifier=identifier,
                Engine=engine,
                DBInstanceClass="db.m1.small",
            )
            for j in range(2):
                client.create_db_snapshot(
                    DBInstanceIdentifier=identifier,
                    DBSnapshotIdentifier="{}-snapshot-{}".format(identifier, j),
                )
        cls.client = client

    @classmethod
    def teardown_class(cls):
        try:
            cls.mock.stop()
        except RuntimeError:
            # The mock may already be stopped; tearing down twice is harmless.
            pass

    def test_invalid_filter_name_raises_error(self):
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_snapshots(
                Filters=[{"Name": "invalid-filter-name", "Values": []}]
            )
        ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
        ex.value.response["Error"]["Message"].should.equal(
            "Unrecognized filter name: invalid-filter-name"
        )

    def test_empty_filter_values_raises_error(self):
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_snapshots(
                Filters=[{"Name": "db-snapshot-id", "Values": []}]
            )
        ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination")
        ex.value.response["Error"]["Message"].should.contain("must not be empty")

    def test_db_snapshot_id_filter(self):
        snapshots = self.client.describe_db_snapshots(
            Filters=[{"Name": "db-snapshot-id", "Values": ["db-instance-1-snapshot-0"]}]
        ).get("DBSnapshots")
        snapshots.should.have.length_of(1)
        snapshots[0]["DBSnapshotIdentifier"].should.equal("db-instance-1-snapshot-0")

    def test_db_instance_id_filter(self):
        resp = self.client.describe_db_instances()
        db_instance_identifier = resp["DBInstances"][0]["DBInstanceIdentifier"]
        snapshots = self.client.describe_db_snapshots(
            Filters=[{"Name": "db-instance-id", "Values": [db_instance_identifier]}]
        ).get("DBSnapshots")
        for snapshot in snapshots:
            snapshot["DBInstanceIdentifier"].should.equal(db_instance_identifier)

    def test_db_instance_id_filter_works_with_arns(self):
        # The db-instance-id filter must accept instance ARNs as well as names.
        resp = self.client.describe_db_instances()
        db_instance_identifier = resp["DBInstances"][0]["DBInstanceIdentifier"]
        db_instance_arn = resp["DBInstances"][0]["DBInstanceArn"]
        snapshots = self.client.describe_db_snapshots(
            Filters=[{"Name": "db-instance-id", "Values": [db_instance_arn]}]
        ).get("DBSnapshots")
        for snapshot in snapshots:
            snapshot["DBInstanceIdentifier"].should.equal(db_instance_identifier)

    def test_dbi_resource_id_filter(self):
        resp = self.client.describe_db_instances()
        dbi_resource_identifier = resp["DBInstances"][0]["DbiResourceId"]
        snapshots = self.client.describe_db_snapshots(
            Filters=[{"Name": "dbi-resource-id", "Values": [dbi_resource_identifier]}]
        ).get("DBSnapshots")
        for snapshot in snapshots:
            snapshot["DbiResourceId"].should.equal(dbi_resource_identifier)

    def test_engine_filter(self):
        snapshots = self.client.describe_db_snapshots(
            Filters=[{"Name": "engine", "Values": ["postgres"]}]
        ).get("DBSnapshots")
        for snapshot in snapshots:
            snapshot["Engine"].should.equal("postgres")

        # No oracle instances exist, so their snapshots cannot either.
        snapshots = self.client.describe_db_snapshots(
            Filters=[{"Name": "engine", "Values": ["oracle"]}]
        ).get("DBSnapshots")
        snapshots.should.have.length_of(0)

    def test_multiple_filters(self):
        # Filters are ANDed down to the single mysql snapshot named below.
        snapshots = self.client.describe_db_snapshots(
            Filters=[
                {"Name": "db-snapshot-id", "Values": ["db-instance-0-snapshot-1"]},
                {
                    "Name": "db-instance-id",
                    "Values": ["db-instance-1", "db-instance-0"],
                },
                {"Name": "engine", "Values": ["mysql"]},
            ]
        ).get("DBSnapshots")
        snapshots.should.have.length_of(1)
        snapshots[0]["DBSnapshotIdentifier"].should.equal("db-instance-0-snapshot-1")

    def test_invalid_snapshot_id_with_db_instance_id_and_filter(self):
        # Passing a non-existent DBSnapshotIdentifier will return an empty list
        # if DBInstanceIdentifier is also passed in.
        resp = self.client.describe_db_snapshots(
            DBSnapshotIdentifier="non-existent",
            DBInstanceIdentifier="a-db-instance-identifier",
            Filters=[{"Name": "db-instance-id", "Values": ["db-instance-1"]}],
        )
        resp["DBSnapshots"].should.have.length_of(0)

    def test_invalid_snapshot_id_with_non_matching_filter(self):
        # Passing a non-existent DBSnapshotIdentifier will raise an error if
        # the resulting filter does not match any resources.
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_snapshots(
                DBSnapshotIdentifier="non-existent",
                Filters=[{"Name": "engine", "Values": ["oracle"]}],
            )
        ex.value.response["Error"]["Code"].should.equal("DBSnapshotNotFound")
        ex.value.response["Error"]["Message"].should.equal(
            "DBSnapshot non-existent not found."
        )

    def test_valid_snapshot_id_with_exclusive_filter(self):
        # Passing a valid DBSnapshotIdentifier with a filter it does not match
        # but does match other resources will return those other resources.
        resp = self.client.describe_db_snapshots(
            DBSnapshotIdentifier="db-instance-0-snapshot-0",
            Filters=[
                {"Name": "db-snapshot-id", "Values": ["db-instance-1-snapshot-1"]},
                {"Name": "db-instance-id", "Values": ["db-instance-1"]},
                {"Name": "engine", "Values": ["postgres"]},
            ],
        )
        resp["DBSnapshots"].should.have.length_of(1)
        resp["DBSnapshots"][0]["DBSnapshotIdentifier"].should.equal(
            "db-instance-1-snapshot-1"
        )

    def test_valid_snapshot_id_with_inclusive_filter(self):
        # Passing a valid DBSnapshotIdentifier with a filter it matches but also
        # matches other resources will return all matching resources.
        snapshots = self.client.describe_db_snapshots(
            DBSnapshotIdentifier="db-instance-0-snapshot-0",
            Filters=[
                {"Name": "db-snapshot-id", "Values": ["db-instance-1-snapshot-1"]},
                {
                    "Name": "db-instance-id",
                    "Values": ["db-instance-1", "db-instance-0"],
                },
                {"Name": "engine", "Values": ["mysql", "postgres"]},
            ],
        ).get("DBSnapshots")
        returned_identifiers = [ss["DBSnapshotIdentifier"] for ss in snapshots]
        returned_identifiers.should.have.length_of(2)
        "db-instance-0-snapshot-0".should.be.within(returned_identifiers)
        "db-instance-1-snapshot-1".should.be.within(returned_identifiers)

    def test_valid_snapshot_id_with_non_matching_filter(self):
        # Passing a valid DBSnapshotIdentifier will raise an error if the
        # resulting filter does not match any resources.
        with pytest.raises(ClientError) as ex:
            self.client.describe_db_snapshots(
                DBSnapshotIdentifier="db-instance-0-snapshot-0",
                Filters=[{"Name": "engine", "Values": ["postgres"]}],
            )
        ex.value.response["Error"]["Code"].should.equal("DBSnapshotNotFound")
        ex.value.response["Error"]["Message"].should.equal(
            "DBSnapshot db-instance-0-snapshot-0 not found."
        )
| 43.393617
| 88
| 0.620679
| 1,744
| 16,316
| 5.625
| 0.087729
| 0.082569
| 0.06422
| 0.071356
| 0.902854
| 0.888685
| 0.858002
| 0.800204
| 0.766259
| 0.725076
| 0
| 0.007198
| 0.250735
| 16,316
| 375
| 89
| 43.509333
| 0.795256
| 0.081699
| 0
| 0.661238
| 0
| 0
| 0.203837
| 0.028881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09772
| false
| 0.006515
| 0.016287
| 0
| 0.127036
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c33ae08834f0370d4050a01ddcf2dce529617ce9
| 164
|
py
|
Python
|
quadtree/point.py
|
josehuillca/GeoComp_TrabalhoFinal
|
3af183f940b624234d741aa3fd6789faec236aa1
|
[
"MIT"
] | null | null | null |
quadtree/point.py
|
josehuillca/GeoComp_TrabalhoFinal
|
3af183f940b624234d741aa3fd6789faec236aa1
|
[
"MIT"
] | null | null | null |
quadtree/point.py
|
josehuillca/GeoComp_TrabalhoFinal
|
3af183f940b624234d741aa3fd6789faec236aa1
|
[
"MIT"
] | null | null | null |
class Point:
    """A 2-D point used by the quadtree.

    Coordinates are stored exactly as given (no numeric coercion).
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Fixed two-decimal display, e.g. 'P(1.00, 2.50)'.
        return 'P({:.2f}, {:.2f})'.format(self.x, self.y)

    def __repr__(self):
        # Debug-friendly representation; __str__ keeps its original format.
        return 'Point({!r}, {!r})'.format(self.x, self.y)
| 23.428571
| 57
| 0.487805
| 25
| 164
| 2.88
| 0.48
| 0.208333
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.304878
| 164
| 7
| 57
| 23.428571
| 0.614035
| 0
| 0
| 0
| 0
| 0
| 0.10303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c3595a9b8c4aaf78741498dca9d243d2c9e9f1c4
| 41
|
py
|
Python
|
sensors/cpu.py
|
singh87/rtop
|
153f1fa130fa71b57e3225a01ff220900ecb9f3a
|
[
"MIT"
] | null | null | null |
sensors/cpu.py
|
singh87/rtop
|
153f1fa130fa71b57e3225a01ff220900ecb9f3a
|
[
"MIT"
] | null | null | null |
sensors/cpu.py
|
singh87/rtop
|
153f1fa130fa71b57e3225a01ff220900ecb9f3a
|
[
"MIT"
] | null | null | null |
import psutil
class CpuSensor:
    """Stub sensor for CPU metrics; collection logic is not implemented yet."""
| 8.2
| 16
| 0.731707
| 5
| 41
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243902
| 41
| 5
| 17
| 8.2
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
5ee419291c80caf452e84b1d87b5bdb5f26a71a2
| 112
|
py
|
Python
|
src/biopy/__init__.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | null | null | null |
src/biopy/__init__.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | null | null | null |
src/biopy/__init__.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | 2
|
2021-07-23T09:30:58.000Z
|
2021-07-23T09:33:25.000Z
|
from . import datasets
from . import metrics
from . import models
from . import statistic
from . import training
| 22.4
| 23
| 0.785714
| 15
| 112
| 5.866667
| 0.466667
| 0.568182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169643
| 112
| 5
| 24
| 22.4
| 0.946237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5eeeeac16b8fb941c05fc12ae687d27b922ec2c0
| 4,957
|
py
|
Python
|
paper/oopsla17-cameraready/plots.py
|
ucsd-progsys/nate
|
8b1267cd8b10283d8bc239d16a28c654a4cb8942
|
[
"BSD-3-Clause"
] | 9
|
2017-08-30T23:00:52.000Z
|
2021-02-25T23:08:55.000Z
|
paper/oopsla17-cameraready/plots.py
|
ucsd-progsys/ml2
|
8b1267cd8b10283d8bc239d16a28c654a4cb8942
|
[
"BSD-3-Clause"
] | null | null | null |
paper/oopsla17-cameraready/plots.py
|
ucsd-progsys/ml2
|
8b1267cd8b10283d8bc239d16a28c654a4cb8942
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T19:50:33.000Z
|
2022-03-31T19:50:33.000Z
|
import csv
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Label used to tag the UCSD data set.
UCSD = 'UCSD'
# Bucket boundaries (unit not evident from this file — presumably seconds);
# the commented-out range() is an earlier alternative kept for reference.
BUCKETS = [0.1, 0.2, 1.0, 10.0, 60.0 ] # range(500, 3001, 500)
#COLORS=['#90B0D4', '#90D492', '#D4B490', '#D490D2']
# Qualitative hex-RGB palettes for the bar charts (COLORS_E reorders COLORS).
COLORS=['#8dd3c7','#bebada','#ffffb3','#fb8072','#80b1d3','#fdb462']
COLORS_E=['#8dd3c7','#bebada','#80b1d3','#ffffb3','#fdb462','#fb8072']
def read_csv(f):
    """Read the CSV file at path ``f`` into a list of rows.

    :param f: path of the CSV file to read
    :return: list of rows, each a list of column strings
    """
    # Use a distinct name for the handle so it does not shadow the path arg.
    with open(f) as handle:
        return list(csv.reader(handle))
def read_csv_dict(f):
    """Read the CSV file at path ``f`` into a list of dicts keyed by header.

    :param f: path of the CSV file to read
    :return: list of row dicts produced by :class:`csv.DictReader`
    """
    # Use a distinct name for the handle so it does not shadow the path arg.
    with open(f) as handle:
        return list(csv.DictReader(handle))
def plot_user_study():
    """Render the user-study 'Explanation' and 'Fix' grouped bar charts.

    Reads the two scored-survey CSV exports, averages each question's scores
    per tool (SHErrLoc vs Nate), prints the summary numbers to stdout, and
    writes 'user-study-reason.png' and 'user-study-fix.png'.

    Changes vs the original: the Python-2-only ``print`` statements (syntax
    errors under Python 3) are replaced with ``print()`` calls, and the
    copy-pasted EXPLAIN/FIX sections are folded into shared helpers.
    """
    a = read_csv_dict('study/study-data/Types_Study_A_scores.csv')
    b = read_csv_dict('study/study-data/Types_Study_B_scores.csv')

    def scores(rows, col):
        # Drop negative entries (presumably sentinels for missing answers —
        # TODO confirm with the study data).
        return [float(r[col]) for r in rows if float(r[col]) >= 0]

    def err(xs):
        # Standard error of the mean, scaled to percentage points.
        return 100 * (np.std(xs) / np.sqrt(len(xs)))

    def render(title, fname, labels, sherrloc, nate):
        # Draw one grouped bar chart (SHErrLoc vs Nate) and save it to fname.
        ind = np.arange(3)
        width = 0.35
        fig = plt.figure()
        for name, cols, offset, color in (('sherrloc', sherrloc, 0.0, COLORS[0]),
                                          ('nate', nate, width, COLORS[1])):
            means = [100 * np.average(c) for c in cols]
            errors = [err(c) for c in cols]
            print(name)
            print(means)
            print(errors)
            plt.bar(ind + offset, means, width,
                    color=color,
                    yerr=errors,
                    error_kw={'linewidth': 3, 'ecolor': 'gray', 'capsize': 6, 'capthick': 3})
        plt.title(title, fontsize=30)
        plt.ylabel('% Correct', fontsize=24)
        plt.xticks(ind + width, labels, fontsize=20)
        plt.legend(('SHErrLoc', 'Nate'), loc='lower right', fontsize=20)
        fig.savefig(fname)
        plt.close()

    sections = (
        ('EXPLAIN', 'Explanation', 'user-study-reason.png',
         ('1: sepConcat explain (1.0 pts)',
          '3: padZero explain (1.0 pts)',
          '5: mulByDigit explain (1.0 pts)'),
         ['sepConcat\n(p = 0.48)', 'padZero\n(p = 0.097)', 'mulByDigit\n(p = 0.083)']),
        ('FIX', 'Fix', 'user-study-fix.png',
         ('2: sepConcat fix (1.0 pts)',
          '4: padZero fix (1.0 pts)',
          '6: mulByDigit fix (1.0 pts)'),
         ['sepConcat\n(p = 0.57)', 'padZero\n(p = 0.33)', 'mulByDigit\n(p = 0.31)']),
    )
    for heading, title, fname, cols, labels in sections:
        sep_a, pad_a, mul_a = [scores(a, c) for c in cols]
        sep_b, pad_b, mul_b = [scores(b, c) for c in cols]
        print(heading)
        # Each tool's three bars mix columns from both participant groups —
        # presumably a crossover design; the pairing is kept exactly as in
        # the data loading above (sherrloc: A, B, A / nate: B, A, B).
        render(title, fname, labels,
               sherrloc=[sep_a, pad_b, mul_a],
               nate=[sep_b, pad_a, mul_b])
if __name__ == '__main__':
    # Generate both user-study bar charts when invoked as a script.
    plot_user_study()
| 38.130769
| 118
| 0.587452
| 754
| 4,957
| 3.732095
| 0.16313
| 0.079957
| 0.102345
| 0.034115
| 0.793888
| 0.793888
| 0.793888
| 0.793888
| 0.769012
| 0.750533
| 0
| 0.065717
| 0.235626
| 4,957
| 129
| 119
| 38.426357
| 0.67696
| 0.0581
| 0
| 0.515464
| 0
| 0
| 0.198495
| 0.022151
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041237
| null | null | 0.14433
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f26d849b15598e6e852d39e4c42d5a09474f4db
| 104
|
py
|
Python
|
test/__init__.py
|
synx-ai/single-page-https
|
6dc63cdfd18ca8d155cf6eb8a9f8143e683a256f
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
synx-ai/single-page-https
|
6dc63cdfd18ca8d155cf6eb8a9f8143e683a256f
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
synx-ai/single-page-https
|
6dc63cdfd18ca8d155cf6eb8a9f8143e683a256f
|
[
"MIT"
] | null | null | null |
import os
import sys
from os import path

# Make the package root (the parent of this test directory) importable when
# the tests are run directly.  ``sys`` is imported directly rather than via
# ``from os import sys``: that form only works because os happens to import
# sys internally, and it is not part of the os module's public API.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
| 20.8
| 67
| 0.778846
| 17
| 104
| 4.529412
| 0.529412
| 0.181818
| 0.38961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086538
| 104
| 4
| 68
| 26
| 0.810526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6f2e09147840846a01aa9242b31aadbcee3c47f0
| 111
|
py
|
Python
|
app/views.py
|
silence114/DataGO
|
bd087a235ccdc901033139d41e51aca6de0950a4
|
[
"MIT"
] | null | null | null |
app/views.py
|
silence114/DataGO
|
bd087a235ccdc901033139d41e51aca6de0950a4
|
[
"MIT"
] | null | null | null |
app/views.py
|
silence114/DataGO
|
bd087a235ccdc901033139d41e51aca6de0950a4
|
[
"MIT"
] | null | null | null |
from app import app
@app.route('/index', methods=['GET', 'POST'])
def get_index():
    """Handle GET and POST requests to /index with a static greeting."""
    return 'Hello World'
| 13.875
| 45
| 0.63964
| 16
| 111
| 4.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171171
| 111
| 7
| 46
| 15.857143
| 0.76087
| 0
| 0
| 0
| 0
| 0
| 0.218182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
6f7297b179413f290e7eaaa2ecaf2ba1cbaed213
| 268
|
py
|
Python
|
pvae/distributions/__init__.py
|
macio232/pvae
|
391a4c634d565d6a7dc60915fabf02ab77d3cc68
|
[
"MIT"
] | 95
|
2019-04-30T12:36:00.000Z
|
2022-03-14T13:59:52.000Z
|
pvae/distributions/__init__.py
|
thanosvlo/Causal-Future-Prediction-in-a-Minkowski-Space-Time
|
0e0539a122484ce9869aca9acd436a24c2597908
|
[
"MIT"
] | 8
|
2020-06-18T12:15:44.000Z
|
2022-03-27T00:04:02.000Z
|
pvae/distributions/__init__.py
|
thanosvlo/Causal-Future-Prediction-in-a-Minkowski-Space-Time
|
0e0539a122484ce9869aca9acd436a24c2597908
|
[
"MIT"
] | 29
|
2019-05-02T09:12:35.000Z
|
2022-01-24T11:31:45.000Z
|
from pvae.distributions.riemannian_normal import RiemannianNormal
from pvae.distributions.hyperbolic_radius import HyperbolicRadius
from pvae.distributions.wrapped_normal import WrappedNormal
from pvae.distributions.hyperspherical_uniform import HypersphericalUniform
| 53.6
| 75
| 0.910448
| 28
| 268
| 8.571429
| 0.535714
| 0.133333
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 268
| 4
| 76
| 67
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
489361ee6d5bbfc8e2d253acbbb689d5e6644bc7
| 149
|
py
|
Python
|
hci/command/commands/le_apcf_commands/__init__.py
|
cc4728/python-hci
|
d988f69c55972af445ec3ba04fd4cd1199593d10
|
[
"MIT"
] | 3
|
2021-12-16T14:32:45.000Z
|
2022-01-25T03:10:48.000Z
|
hci/command/commands/le_apcf_commands/__init__.py
|
cc4728/python-hci
|
d988f69c55972af445ec3ba04fd4cd1199593d10
|
[
"MIT"
] | null | null | null |
hci/command/commands/le_apcf_commands/__init__.py
|
cc4728/python-hci
|
d988f69c55972af445ec3ba04fd4cd1199593d10
|
[
"MIT"
] | 1
|
2022-01-25T03:10:50.000Z
|
2022-01-25T03:10:50.000Z
|
from .apcf_enable import APCF_Enable
from .apcf_local_name import APCF_Local_Name
from .apcf_set_filter_parameters import APCF_Set_Filter_parameters
| 37.25
| 66
| 0.899329
| 24
| 149
| 5.083333
| 0.375
| 0.196721
| 0.213115
| 0.377049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080537
| 149
| 3
| 67
| 49.666667
| 0.890511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
48dbe79b7fea81eff5f549f797ffc02353c385f4
| 26,594
|
py
|
Python
|
venv/Lib/site-packages/introcs/strings.py
|
frankappolonia/SmartPrice
|
7b6e1116b3f388623648db43390c7cf3492e8271
|
[
"MIT"
] | 1
|
2021-12-26T03:15:59.000Z
|
2021-12-26T03:15:59.000Z
|
venv/Lib/site-packages/introcs/strings.py
|
frankappolonia/SmartPrice
|
7b6e1116b3f388623648db43390c7cf3492e8271
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/introcs/strings.py
|
frankappolonia/SmartPrice
|
7b6e1116b3f388623648db43390c7cf3492e8271
|
[
"MIT"
] | 2
|
2021-01-20T01:38:08.000Z
|
2021-07-17T02:15:37.000Z
|
"""
Functions for popular string operations.
The purpose of this module is to allow students to work with strings without having to
understand method calls. We do not provide all string methods as functions -- just the
most popular ones.
The functions that would normally return lists return tuples. That is because, by the
time students understand lists, they can understand method calls. However, tuples (since
they are immutable) can be introduced earlier.
:author: Walker M. White (wmw2)
:version: July 20, 2018
"""
#mark Test Functions
def isalnum(text):
    """Return True when ``text`` is nonempty and every character is alphanumeric.

    A character is alphanumeric if any of :func:`isalpha`, :func:`isdecimal`,
    :func:`isdigit`, or :func:`isnumeric` holds for it.

    :param text: The string to check
    :type text: ``str``
    :return: True if ``text`` has at least one character and all are alphanumeric.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isalnum(text)
def isalpha(text):
    """Return True when ``text`` is nonempty and purely alphabetic.

    Alphabetic characters are those the Unicode database classifies as a
    "Letter" (distinct from the Unicode "Alphabetic" property).

    :param text: The string to check
    :type text: ``str``
    :return: True if ``text`` has at least one character and all are alphabetic.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isalpha(text)
def isdecimal(text):
    """Return True when ``text`` is nonempty and all characters are decimal.

    Decimal characters form base-10 integers (Unicode category "Nd"); for
    example '10' qualifies but '1.0' does not, since '.' is not a decimal.

    :param text: The string to check
    :type text: ``str``
    :return: True if ``text`` has at least one character and all are decimal.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isdecimal(text)
def isdigit(text):
    """Return True when ``text`` is nonempty and every character is a digit.

    Digits include decimal characters plus digits needing special handling
    (e.g. compatibility superscripts).  :func:`isdecimal` is usually what
    you want instead.

    :param text: The string to check
    :type text: ``str``
    :return: True if ``text`` has at least one character and all are digits.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isdigit(text)
def islower(text):
    """Return True when every cased character in ``text`` is lowercase.

    Requires at least one cased character (as defined by Unicode; all ASCII
    letters are cased).

    :param text: The string to check
    :type text: ``str``
    :return: True if all cased characters are lowercase and at least one exists.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.islower(text)
def isnumeric(text):
    """Return True when ``text`` is nonempty and all characters are numeric.

    Numeric characters are digits plus anything with the Unicode numeric
    value property (vulgar fractions, Roman numeral characters, ...).

    :param text: The string to check
    :type text: ``str``
    :return: True if ``text`` has at least one character and all are numeric.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isnumeric(text)
def isprintable(text):
    """Return True when all characters in ``text`` are printable, or it is empty.

    Nonprintable means Unicode "Other" or "Separator", except the ASCII
    space (0x20).  Printable here means repr() would not escape the
    character; it says nothing about sys.stdout/sys.stderr handling.

    :param text: The string to check
    :type text: ``str``
    :return: True if every character is printable or the string is empty.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isprintable(text)
def isspace(text):
    """Return True when ``text`` is nonempty and contains only whitespace.

    Whitespace characters are those Unicode classifies as "Other" or
    "Separator".

    :param text: The string to check
    :type text: ``str``
    :return: True if ``text`` has at least one character and all are whitespace.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isspace(text)
def isupper(text):
    """Return True when every cased character in ``text`` is uppercase.

    Requires at least one cased character (as defined by Unicode; all ASCII
    letters are cased).

    :param text: The string to check
    :type text: ``str``
    :return: True if all cased characters are uppercase and at least one exists.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.isupper(text)
pass
#mark -
#mark Conversion Functions
def capitalize(text):
    """Return a copy of ``text`` with only its first character capitalized.

    For 8-bit strings this is locale-dependent.

    :param text: The string to capitalize
    :type text: ``str``
    :return: A copy of ``text`` with only its first character capitalized.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.capitalize(text)
def swapcase(text):
    """Return a copy of ``text`` with the case of every cased character flipped.

    Note ``swapcase(swapcase(s)) == s`` is not guaranteed, because of how
    the Unicode Standard defines cases.

    :param text: The string to convert
    :type text: ``str``
    :return: A copy of ``text`` with uppercase and lowercase swapped.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.swapcase(text)
def lower(text):
    """Return a copy of ``text`` with all cased characters lowercased.

    Uses the lowercasing algorithm of section 3.13 of the Unicode Standard.

    :param text: The string to convert
    :type text: ``str``
    :return: A copy of ``text`` with all cased characters converted to lowercase.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.lower(text)
def upper(text):
    """Return a copy of ``text`` with all cased characters uppercased.

    ``isupper(upper(s))`` may be False if ``s`` contains uncased characters
    or a result falls outside Unicode category "Lu".  Uses the uppercasing
    algorithm of section 3.13 of the Unicode Standard.

    :param text: The string to convert
    :type text: ``str``
    :return: A copy of ``text`` with all cased characters converted to uppercase.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.upper(text)
def center(text, width, fillchar= ' '):
    """Return a copy of ``text`` centered in a string of length ``width``.

    Padding uses ``fillchar`` (default ASCII space); the original string is
    returned unchanged when ``width`` <= len(text).

    :param text: The string to center
    :type text: ``str``
    :param width: The length of the string to produce
    :type width: ``int``
    :param fillchar: The single-character padding
    :type fillchar: ``str``
    :return: A copy of ``text`` centered in a string of length ``width``.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.center(text, width, fillchar)
def ljust(text, width, fillchar=' '):
    """Return a copy of ``text`` left-justified in a string of length ``width``.

    Padding uses ``fillchar`` (default ASCII space); the original string is
    returned unchanged when ``width`` <= len(text).

    :param text: The string to justify
    :type text: ``str``
    :return: A copy of ``text`` left justified in a string of length ``width``.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.ljust(text, width, fillchar)
def rjust(text, width, fillchar=' '):
    """Return a copy of ``text`` right-justified in a string of length ``width``.

    Padding uses ``fillchar`` (default ASCII space); the original string is
    returned unchanged when ``width`` <= len(text).

    :param text: The string to justify
    :type text: ``str``
    :return: A copy of ``text`` right justified in a string of length ``width``.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.rjust(text, width, fillchar)
def strip(text, chars=None):
    """Return a copy of ``text`` with leading and trailing characters removed.

    ``chars`` names the *set* of characters to remove from both ends (it is
    not a prefix or suffix); when omitted or None, whitespace is removed.
    Stripping stops at the first character not in the set.  Examples::

        >>> strip('   spacious   ')
        'spacious'
        >>> strip('www.example.com','cmowz.')
        'example'

    :param text: The string to copy
    :type text: ``str``
    :param chars: The characters to remove from the ends
    :type chars: ``str``
    :return: A copy of ``text`` with the leading and trailing characters removed.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.strip(text, chars)
def lstrip(text, chars=None):
    """Return a copy of ``text`` with leading characters removed.

    ``chars`` names the *set* of characters to remove (not a prefix); when
    omitted or None, whitespace is removed.  Examples::

        >>> lstrip('   spacious   ')
        'spacious   '
        >>> lstrip('www.example.com','cmowz.')
        'example.com'

    :param text: The string to copy
    :type text: ``str``
    :param chars: The leading characters to remove
    :type chars: ``str``
    :return: A copy of ``text`` with the leading characters removed.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.lstrip(text, chars)
def rstrip(text, chars=None):
    """Return a copy of ``text`` with trailing characters removed.

    ``chars`` names the *set* of characters to remove (not a suffix); when
    omitted or None, whitespace is removed.  Examples::

        >>> rstrip('   spacious   ')
        '   spacious'
        >>> rstrip('mississippi','ipz')
        'mississ'

    :param text: The string to copy
    :type text: ``str``
    :param chars: The trailing characters to remove
    :type chars: ``str``
    :return: A copy of ``text`` with the trailing characters removed.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.rstrip(text, chars)
pass
#mark -
#mark Search Functions
def count_str(text, sub, start=None, end=None):
    """Count non-overlapping occurrences of ``sub`` in ``text[start:end]``.

    ``start`` and ``end`` are interpreted as in slice notation.

    :param text: The string to search
    :type text: ``str``
    :param sub: The substring to count
    :type sub: ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: The number of non-overlapping occurrences of ``sub``.
    :rtype: ``int``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.count(text, sub, start, end)
def endswith_str(text, suffix, start=None, end=None):
    """Return True when ``text`` ends with ``suffix``.

    ``suffix`` may be a single string or a tuple of candidates; ``start``
    and ``end`` bound the region tested.

    :param text: The string to search
    :type text: ``str``
    :param suffix: The suffix to search for
    :type suffix: ``str`` or ``tuple`` of ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: True if ``text`` ends with the specified suffix, else False.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.endswith(text, suffix, start, end)
def startswith_str(text, prefix, start=None, end=None):
    """Return True when ``text`` starts with ``prefix``.

    ``prefix`` may be a single string or a tuple of candidates; ``start``
    and ``end`` bound the region tested.

    :param text: The string to search
    :type text: ``str``
    :param prefix: The prefix to search for
    :type prefix: ``str`` or ``tuple`` of ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: True if ``text`` starts with the specified prefix, else False.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.startswith(text, prefix, start, end)
def find_str(text, sub, start=None, end=None):
    """Return the lowest index of ``sub`` in ``text`` within [``start``, ``end``].

    ``start`` and ``end`` follow slice notation, but the index returned is
    relative to the whole of ``text``, not the slice.  Returns -1 when
    ``sub`` is absent.  To merely test membership, prefer ``sub in text``.

    :param text: The string to search
    :type text: ``str``
    :param sub: The substring to search for
    :type sub: ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: The lowest index of ``sub``, or -1 if not found.
    :rtype: ``int``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.find(text, sub, start, end)
def index_str(text, sub, start=None, end=None):
    """Return the lowest index of ``sub`` in ``text`` within [``start``, ``end``].

    Like :func:`find_str`, except a ``ValueError`` is raised when ``sub``
    is not found.  The returned index is relative to ``text`` as a whole.

    :param text: The string to search
    :type text: ``str``
    :param sub: The substring to search for
    :type sub: ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: The lowest index of ``sub``.
    :rtype: ``int``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.index(text, sub, start, end)
def rfind_str(text, sub, start=None, end=None):
    """Return the highest index of ``sub`` in ``text`` within [``start``, ``end``].

    ``start`` and ``end`` follow slice notation, but the index returned is
    relative to the whole of ``text``.  Returns -1 when ``sub`` is absent.

    :param text: The string to search
    :type text: ``str``
    :param sub: The substring to search for
    :type sub: ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: The highest index of ``sub``, or -1 if not found.
    :rtype: ``int``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.rfind(text, sub, start, end)
def rindex_str(text, sub, start=None, end=None):
    """Return the highest index of ``sub`` in ``text`` within [``start``, ``end``].

    Like :func:`rfind_str`, except a ``ValueError`` is raised when ``sub``
    is not found.  The returned index is relative to ``text`` as a whole.

    :param text: The string to search
    :type text: ``str``
    :param sub: The substring to search for
    :type sub: ``str``
    :param start: The start of the search range
    :type start: ``int``
    :param end: The end of the search range
    :type end: ``int``
    :return: The highest index of ``sub``.
    :rtype: ``int``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.rindex(text, sub, start, end)
def replace_str(text, old, new, count=-1):
    """Return a copy of ``text`` with occurrences of ``old`` replaced by ``new``.

    When ``count`` is non-negative, only the first ``count`` occurrences
    are replaced; the default -1 replaces all of them.

    :param text: The string to copy
    :type text: ``str``
    :param old: The substring to replace
    :type old: ``str``
    :param new: The replacement substring
    :type new: ``str``
    :param count: The number of occurrences to replace
    :type count: ``int``
    :return: A copy of ``text`` with ``old`` replaced by ``new``.
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return str.replace(text, old, new, count)
pass
#mark -
#mark Split and Join Functions
def join(iterable,sep=''):
    """Return the concatenation of the strings in ``iterable``.

    ``sep`` is placed between elements (no separator by default).  A
    TypeError is raised for non-string values, including bytes.

    :param iterable: The iterable of strings to concatenate
    :type iterable: ``iterable``
    :param sep: The separating string
    :type sep: ``str``
    :return: The concatenation of the strings in ``iterable``.
    :rtype: ``str``
    """
    assert isinstance(sep, str), '%s is not a string' % sep
    return str.join(sep, iterable)
def split(text, sep=None, maxsplit=-1):
    """Return a tuple of the words in ``text``, delimited by ``sep``.

    At most ``maxsplit`` splits are performed (so at most maxsplit+1
    elements); -1 (the default) means unlimited.

    With an explicit ``sep``, consecutive delimiters produce empty strings
    (``split('1,,2',',')`` gives ``('1', '', '2')``), ``sep`` may be multiple
    characters, and splitting '' yields ``('',)``.  With ``sep`` None, runs
    of whitespace act as one separator and leading/trailing whitespace
    produces no empty strings.  Examples::

        >>> split('1,2,3',',')
        ('1', '2', '3')
        >>> split('1,2,3',',', maxsplit=1)
        ('1', '2,3')
        >>> split('   1   2   3   ')
        ('1', '2', '3')

    :param text: The string to split
    :type text: ``str``
    :param sep: The separator to split at
    :type sep: ``str``
    :param maxsplit: The maximum number of splits to perform
    :type maxsplit: ``int``
    :return: A tuple of the words in ``text``.
    :rtype: ``tuple`` of ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    pieces = str.split(text, sep, maxsplit)
    return tuple(pieces)
def rsplit(text, sep=None, maxsplit=-1):
    """Return a tuple of the words in ``text``, splitting from the right.

    Identical to :func:`split` except when ``maxsplit`` is given and is less
    than the possible number of splits: then splits are taken from the right
    and the unsplit remainder stays on the left.  With an explicit ``sep``,
    consecutive delimiters produce empty strings and splitting '' yields
    ``('',)``.

    :param text: The string to split
    :type text: ``str``
    :param sep: The separator to split at
    :type sep: ``str``
    :param maxsplit: The maximum number of splits to perform
    :type maxsplit: ``int``
    :return: A tuple of the words in ``text``.
    :rtype: ``tuple`` of ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    pieces = str.rsplit(text, sep, maxsplit)
    return tuple(pieces)
def partition(text, sep):
    """Split ``text`` around the first occurrence of ``sep``.

    Mirrors :meth:`str.partition`: when ``sep`` is present, the result is
    ``(head, sep, tail)``; when it is absent, the result is
    ``(text, '', '')``.

    :return: a 3-tuple containing the part before the separator, the separator itself, and the part after the separator.
    :rtype: ``tuple`` of ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    head, found, tail = text.partition(sep)
    return (head, found, tail)
def rpartition(text, sep):
    """Split ``text`` around the last occurrence of ``sep``.

    Mirrors :meth:`str.rpartition`: when ``sep`` is present, the result is
    ``(head, sep, tail)`` with the split at the rightmost match; when it is
    absent, the result is ``('', '', text)``.

    :return: a 3-tuple containing the part before the separator, the separator itself, and the part after the separator.
    :rtype: ``tuple`` of ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    head, found, tail = text.rpartition(sep)
    return (head, found, tail)
| 34.448187
| 127
| 0.633113
| 3,719
| 26,594
| 4.523797
| 0.111051
| 0.024964
| 0.012839
| 0.017118
| 0.724144
| 0.720756
| 0.714337
| 0.700487
| 0.686044
| 0.67267
| 0
| 0.005439
| 0.253365
| 26,594
| 771
| 128
| 34.492866
| 0.841861
| 0.748327
| 0
| 0.343434
| 0
| 0
| 0.136075
| 0
| 0
| 0
| 0
| 0
| 0.323232
| 1
| 0.323232
| false
| 0.030303
| 0
| 0
| 0.646465
| 0.020202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
48e7d341d8f003cb9f133e36811adcf1acdfa459
| 2,469
|
py
|
Python
|
Misc/Incomplete/Data_getter.py
|
BadenLab/2Panalysis
|
8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a
|
[
"MIT"
] | null | null | null |
Misc/Incomplete/Data_getter.py
|
BadenLab/2Panalysis
|
8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a
|
[
"MIT"
] | null | null | null |
Misc/Incomplete/Data_getter.py
|
BadenLab/2Panalysis
|
8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 13 17:52:00 2021
@author: SimenLab
"""
import pandas as pd
def Data_getter(file_location):
    """Load ROI trace data from a tab-separated text file.

    Each column of the file is treated as one ROI (region of interest).

    Parameters
    ----------
    file_location:
        Path to a tab-separated CSV ``.txt`` file containing the data, with
        each ROI represented as an individual column in the file.

    Returns
    -------
    tuple
        ``(averages_dataframe, averages_array, ROI_num)``: the data as a
        :class:`pandas.DataFrame`, the same data converted to a NumPy
        array, and the number of ROIs (columns).
    """
    avgs_data = pd.read_csv(file_location, sep="\t", header=None, engine="python")
    # One ROI per column, so the column count is the ROI count.
    ROI_num = avgs_data.shape[1]
    # Optional manipulations of the raw data can be inserted here before the
    # array conversion below.
    averages_dataframe = avgs_data
    averages_array = averages_dataframe.to_numpy()
    return averages_dataframe, averages_array, ROI_num
class Data:
    """Load ROI trace data from a tab-separated file and expose it as attributes.

    The original class-body statements referenced ``self`` at class scope,
    which raises ``NameError`` the moment the class statement executes; the
    loading logic now runs in ``__init__`` instead.

    Attributes:
        file_location: path of the source data file.
        data: :class:`pandas.DataFrame` of the traces, one column per ROI.
        ROI_num: number of ROIs (columns) in the file.
    """

    def __init__(self, file_location):
        self.file_location = file_location
        # Load eagerly so ``data`` and ``ROI_num`` are available right away.
        retrieved = self.Data_getter()
        self.data = retrieved[0]
        self.ROI_num = retrieved[2]

    def Data_getter(self):
        """Read ``self.file_location`` and prepare the ROI data.

        Returns
        -------
        tuple
            ``(averages_dataframe, averages_array, ROI_num)``: the data as a
            :class:`pandas.DataFrame`, the same data as a NumPy array, and
            the number of ROIs (columns).
        """
        avgs_data = pd.read_csv(self.file_location, sep="\t", header=None,
                                engine="python")
        # One ROI per column, so the column count is the ROI count.
        ROI_num = avgs_data.shape[1]
        averages_dataframe = avgs_data
        averages_array = averages_dataframe.to_numpy()
        return averages_dataframe, averages_array, ROI_num
| 32.92
| 83
| 0.611989
| 304
| 2,469
| 4.848684
| 0.319079
| 0.032564
| 0.032564
| 0.040706
| 0.860244
| 0.860244
| 0.860244
| 0.860244
| 0.860244
| 0.860244
| 0
| 0.009861
| 0.301742
| 2,469
| 75
| 84
| 32.92
| 0.845128
| 0.463751
| 0
| 0.571429
| 0
| 0
| 0.094532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2cea63bb88f4bd5d66b3b760df7373eae60582c
| 11,664
|
py
|
Python
|
tensorflow_graphics/math/tests/vector_test.py
|
BachiLi/graphics
|
0a9b20dbca6c1fc2f67bedfcdd100e732ede0362
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:16:42.000Z
|
2019-10-10T06:16:42.000Z
|
tensorflow_graphics/math/tests/vector_test.py
|
mwtarnowski/graphics
|
228472e31327635e80220a740f13d672252bec9f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/math/tests/vector_test.py
|
mwtarnowski/graphics
|
228472e31327635e80220a740f13d672252bec9f
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:16:30.000Z
|
2019-10-10T06:16:30.000Z
|
#Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.transformation.tests import test_data as td
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import test_case
class VectorTest(test_case.TestCase):
  """Shape, value and Jacobian tests for vector.cross/dot/reflect."""

  @parameterized.parameters(
      ((None, 3), (None, 3)),)
  def test_cross_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(vector.cross, shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions in axis", (1,), (3,)),
      ("must have exactly 3 dimensions in axis", (3,), (2,)),
      ("Not all batch dimensions are broadcast-compatible.", (2, 3), (3, 3)),
  )
  def test_cross_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(vector.cross, error_msg, shapes)

  @parameterized.parameters(
      (td.AXIS_3D_0, td.AXIS_3D_0),
      (td.AXIS_3D_0, td.AXIS_3D_X),
      (td.AXIS_3D_0, td.AXIS_3D_Y),
      (td.AXIS_3D_0, td.AXIS_3D_Z),
      (td.AXIS_3D_X, td.AXIS_3D_X),
      (td.AXIS_3D_X, td.AXIS_3D_Y),
      (td.AXIS_3D_X, td.AXIS_3D_Z),
      (td.AXIS_3D_Y, td.AXIS_3D_X),
      (td.AXIS_3D_Y, td.AXIS_3D_Y),
      (td.AXIS_3D_Y, td.AXIS_3D_Z),
      (td.AXIS_3D_Z, td.AXIS_3D_X),
      (td.AXIS_3D_Z, td.AXIS_3D_Y),
      (td.AXIS_3D_Z, td.AXIS_3D_Z),
  )
  def test_cross_jacobian_preset(self, u_init, v_init):
    """Tests the Jacobian of the cross product."""
    u_tensor = tf.convert_to_tensor(value=u_init)
    v_tensor = tf.convert_to_tensor(value=v_init)
    y = vector.cross(u_tensor, v_tensor)
    self.assert_jacobian_is_correct(u_tensor, u_init, y)
    self.assert_jacobian_is_correct(v_tensor, v_init, y)

  def test_cross_jacobian_random(self):
    """Tests the Jacobian of the cross product."""
    # Random batch shape of rank 0..2, with 3 components on the last axis.
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    u_init = np.random.random(size=tensor_shape + [3])
    v_init = np.random.random(size=tensor_shape + [3])
    u_tensor = tf.convert_to_tensor(value=u_init)
    v_tensor = tf.convert_to_tensor(value=v_init)
    y = vector.cross(u_tensor, v_tensor)
    self.assert_jacobian_is_correct(u_tensor, u_init, y)
    self.assert_jacobian_is_correct(v_tensor, v_init, y)

  @parameterized.parameters(
      ((td.AXIS_3D_0, td.AXIS_3D_0), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_0, td.AXIS_3D_X), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_0, td.AXIS_3D_Y), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_0, td.AXIS_3D_Z), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_X, td.AXIS_3D_X), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_X, td.AXIS_3D_Y), (td.AXIS_3D_Z,)),
      ((td.AXIS_3D_X, td.AXIS_3D_Z), (-td.AXIS_3D_Y,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_X), (-td.AXIS_3D_Z,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_Y), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_Z), (td.AXIS_3D_X,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_X), (td.AXIS_3D_Y,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_Y), (-td.AXIS_3D_X,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_Z), (td.AXIS_3D_0,)),
  )
  def test_cross_preset(self, test_inputs, test_outputs):
    """Tests the cross product of predefined axes."""
    self.assert_output_is_correct(vector.cross, test_inputs, test_outputs)

  def test_cross_random(self):
    """Tests the cross product function."""
    tensor_size = np.random.randint(1, 4)
    tensor_shape = np.random.randint(1, 10, size=tensor_size).tolist()
    # Pick a random axis and force it to hold the 3 vector components.
    axis = np.random.randint(tensor_size)
    tensor_shape[axis] = 3
    u = np.random.random(size=tensor_shape)
    v = np.random.random(size=tensor_shape)
    self.assertAllClose(
        vector.cross(u, v, axis=axis), np.cross(u, v, axis=axis))

  @parameterized.parameters(
      ((None,), (None,)),
      ((None, None), (None, None)),
  )
  def test_dot_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(vector.dot, shapes)

  @parameterized.parameters(
      ("must have the same number of dimensions", (None, 1), (None, 2)),
      ("Not all batch dimensions are broadcast-compatible.", (2, 3), (3, 3)),
  )
  def test_dot_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(vector.dot, error_msg, shapes)

  @parameterized.parameters(
      (td.AXIS_3D_0, td.AXIS_3D_0),
      (td.AXIS_3D_0, td.AXIS_3D_X),
      (td.AXIS_3D_0, td.AXIS_3D_Y),
      (td.AXIS_3D_0, td.AXIS_3D_Z),
      (td.AXIS_3D_X, td.AXIS_3D_X),
      (td.AXIS_3D_X, td.AXIS_3D_Y),
      (td.AXIS_3D_X, td.AXIS_3D_Z),
      (td.AXIS_3D_Y, td.AXIS_3D_X),
      (td.AXIS_3D_Y, td.AXIS_3D_Y),
      (td.AXIS_3D_Y, td.AXIS_3D_Z),
      (td.AXIS_3D_Z, td.AXIS_3D_X),
      (td.AXIS_3D_Z, td.AXIS_3D_Y),
      (td.AXIS_3D_Z, td.AXIS_3D_Z),
  )
  def test_dot_jacobian_preset(self, u_init, v_init):
    """Tests the Jacobian of the dot product."""
    u_tensor = tf.convert_to_tensor(value=u_init)
    v_tensor = tf.convert_to_tensor(value=v_init)
    y = vector.dot(u_tensor, v_tensor)
    self.assert_jacobian_is_correct(u_tensor, u_init, y)
    self.assert_jacobian_is_correct(v_tensor, v_init, y)

  def test_dot_jacobian_random(self):
    """Tests the Jacobian of the dot product."""
    # Random batch shape of rank 0..2, with 3 components on the last axis.
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    u_init = np.random.random(size=tensor_shape + [3])
    v_init = np.random.random(size=tensor_shape + [3])
    u_tensor = tf.convert_to_tensor(value=u_init)
    v_tensor = tf.convert_to_tensor(value=v_init)
    y = vector.dot(u_tensor, v_tensor)
    self.assert_jacobian_is_correct(u_tensor, u_init, y)
    self.assert_jacobian_is_correct(v_tensor, v_init, y)

  @parameterized.parameters(
      ((td.AXIS_3D_0, td.AXIS_3D_0), (0.,)),
      ((td.AXIS_3D_0, td.AXIS_3D_X), (0.,)),
      ((td.AXIS_3D_0, td.AXIS_3D_Y), (0.,)),
      ((td.AXIS_3D_0, td.AXIS_3D_Z), (0.,)),
      ((td.AXIS_3D_X, td.AXIS_3D_X), (1.,)),
      ((td.AXIS_3D_X, td.AXIS_3D_Y), (0.,)),
      ((td.AXIS_3D_X, td.AXIS_3D_Z), (0.,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_X), (0.,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_Y), (1.,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_Z), (0.,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_X), (0.,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_Y), (0.,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_Z), (1.,)),
  )
  def test_dot_preset(self, test_inputs, test_outputs):
    """Tests the dot product of predefined axes."""

    def func(u, v):
      # vector.dot keeps the reduced axis; squeeze it for scalar comparison.
      return tf.squeeze(vector.dot(u, v), axis=-1)

    self.assert_output_is_correct(func, test_inputs, test_outputs)

  def test_dot_random(self):
    """Tests the dot product function."""
    tensor_size = np.random.randint(2, 4)
    tensor_shape = np.random.randint(1, 10, size=tensor_size).tolist()
    axis = np.random.randint(tensor_size)
    u = np.random.random(size=tensor_shape)
    v = np.random.random(size=tensor_shape)
    # Reference value: batched dot along `axis` via tensordot's diagonal.
    dot = tf.linalg.tensor_diag_part(tf.tensordot(u, v, axes=[[axis], [axis]]))
    dot = tf.expand_dims(dot, axis=axis)
    self.assertAllClose(vector.dot(u, v, axis=axis), dot)

  @parameterized.parameters(
      ((None,), (None,)),
      ((None, None), (None, None)),
      ((1,), (1,)),
      ((1, 1), (1, 1)),
  )
  def test_reflect_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(vector.reflect, shapes)

  @parameterized.parameters(
      ("must have the same number of dimensions", (None, 1), (None, 2)),
      ("Not all batch dimensions are broadcast-compatible.", (2, 2), (3, 2)),
  )
  def test_reflect_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(vector.reflect, error_msg, shapes)

  @parameterized.parameters(
      (td.AXIS_3D_0, td.AXIS_3D_0),
      (td.AXIS_3D_0, td.AXIS_3D_X),
      (td.AXIS_3D_0, td.AXIS_3D_Y),
      (td.AXIS_3D_0, td.AXIS_3D_Z),
      (td.AXIS_3D_X, td.AXIS_3D_X),
      (td.AXIS_3D_X, td.AXIS_3D_Y),
      (td.AXIS_3D_X, td.AXIS_3D_Z),
      (td.AXIS_3D_Y, td.AXIS_3D_X),
      (td.AXIS_3D_Y, td.AXIS_3D_Y),
      (td.AXIS_3D_Y, td.AXIS_3D_Z),
      (td.AXIS_3D_Z, td.AXIS_3D_X),
      (td.AXIS_3D_Z, td.AXIS_3D_Y),
      (td.AXIS_3D_Z, td.AXIS_3D_Z),
  )
  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_reflect_jacobian_preset(self, u_init, v_init):
    """Tests the Jacobian of the reflect function."""
    u_tensor = tf.convert_to_tensor(value=u_init)
    v_tensor = tf.convert_to_tensor(value=v_init)
    y = vector.reflect(u_tensor, v_tensor)
    self.assert_jacobian_is_correct(u_tensor, u_init, y)
    self.assert_jacobian_is_correct(v_tensor, v_init, y)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_reflect_jacobian_random(self):
    """Tests the Jacobian of the reflect function."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    u_init = np.random.random(size=tensor_shape + [3])
    v_init = np.random.random(size=tensor_shape + [3])
    u_tensor = tf.convert_to_tensor(value=u_init)
    v_tensor = tf.convert_to_tensor(value=v_init)
    y = vector.reflect(u_tensor, v_tensor)
    self.assert_jacobian_is_correct(u_tensor, u_init, y)
    self.assert_jacobian_is_correct(v_tensor, v_init, y)

  @parameterized.parameters(
      ((td.AXIS_3D_0, td.AXIS_3D_X), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_0, td.AXIS_3D_Y), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_0, td.AXIS_3D_Z), (td.AXIS_3D_0,)),
      ((td.AXIS_3D_X, td.AXIS_3D_X), (-td.AXIS_3D_X,)),
      ((td.AXIS_3D_X, td.AXIS_3D_Y), (td.AXIS_3D_X,)),
      ((td.AXIS_3D_X, td.AXIS_3D_Z), (td.AXIS_3D_X,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_X), (td.AXIS_3D_Y,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_Y), (-td.AXIS_3D_Y,)),
      ((td.AXIS_3D_Y, td.AXIS_3D_Z), (td.AXIS_3D_Y,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_X), (td.AXIS_3D_Z,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_Y), (td.AXIS_3D_Z,)),
      ((td.AXIS_3D_Z, td.AXIS_3D_Z), (-td.AXIS_3D_Z,)),
  )
  def test_reflect_preset(self, test_inputs, test_outputs):
    """Tests the reflect function of predefined axes."""
    self.assert_output_is_correct(vector.reflect, test_inputs, test_outputs)

  def test_reflect_random(self):
    """Tests that calling reflect twice give an identity transform."""
    tensor_size = np.random.randint(2, 4)
    tensor_shape = np.random.randint(2, 3, size=tensor_size).tolist()
    axis = np.random.randint(tensor_size)
    u = np.random.random(size=tensor_shape)
    # The mirror normal must be unit length for reflect to be an involution.
    v = np.random.random(size=tensor_shape)
    v /= np.linalg.norm(v, axis=axis, keepdims=True)
    u_new = vector.reflect(u, v, axis=axis)
    u_new = vector.reflect(u_new, v, axis=axis)
    self.assertAllClose(u_new, u)
# Delegate to the shared test runner when this module is executed directly.
if __name__ == "__main__":
  test_case.main()
| 38.622517
| 79
| 0.680813
| 1,964
| 11,664
| 3.703666
| 0.089104
| 0.147649
| 0.196866
| 0.058152
| 0.801072
| 0.777152
| 0.765604
| 0.74512
| 0.690129
| 0.664146
| 0
| 0.031214
| 0.170525
| 11,664
| 301
| 80
| 38.750831
| 0.72062
| 0.117541
| 0
| 0.542222
| 0
| 0
| 0.030633
| 0.006186
| 0
| 0
| 0
| 0
| 0.115556
| 1
| 0.084444
| false
| 0
| 0.044444
| 0.004444
| 0.137778
| 0.004444
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8250a21c28442004b55f8b2b6a8fe0c12d9f3bb7
| 1,145
|
py
|
Python
|
src/nlpia/book/forum/example_sparse_matmul.py
|
byukan/nlpia
|
73c03f651e54e945f9a7eebe4714095dc3e5609a
|
[
"MIT"
] | 532
|
2016-11-30T03:51:13.000Z
|
2022-03-29T13:40:43.000Z
|
src/nlpia/book/forum/example_sparse_matmul.py
|
byukan/nlpia
|
73c03f651e54e945f9a7eebe4714095dc3e5609a
|
[
"MIT"
] | 30
|
2017-12-12T12:18:41.000Z
|
2022-03-23T14:44:45.000Z
|
src/nlpia/book/forum/example_sparse_matmul.py
|
byukan/nlpia
|
73c03f651e54e945f9a7eebe4714095dc3e5609a
|
[
"MIT"
] | 241
|
2017-06-20T11:51:31.000Z
|
2022-03-28T09:42:38.000Z
|
"""
>>> import numpy as np
>>> import scipy
>>> id(scipy.dot) == id(np.dot)
True
>>> A = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = scipy.sparse.csr_matrix([[1], [0], [-1]])
>>> A.dot(v)
<3x1 sparse matrix of type '<class 'numpy.int64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> scipy.dot(A, v)
<3x1 sparse matrix of type '<class 'numpy.int64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> np.dot(A, v)
<3x1 sparse matrix of type '<class 'numpy.int64'>'
with 3 stored elements in Compressed Sparse Row format>
"""
import numpy as np
import scipy
id(scipy.dot) == id(np.dot)
# True
A = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
v = scipy.sparse.csr_matrix([[1], [0], [-1]])
A.dot(v)
# <3x1 sparse matrix of type '<class 'numpy.int64'>'
# with 3 stored elements in Compressed Sparse Row format>
scipy.dot(A, v)
# <3x1 sparse matrix of type '<class 'numpy.int64'>'
# with 3 stored elements in Compressed Sparse Row format>
np.dot(A, v)
# <3x1 sparse matrix of type '<class 'numpy.int64'>'
# with 3 stored elements in Compressed Sparse Row format>
| 33.676471
| 66
| 0.648035
| 194
| 1,145
| 3.804124
| 0.159794
| 0.03252
| 0.081301
| 0.130081
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.057143
| 0.174672
| 1,145
| 33
| 67
| 34.69697
| 0.72381
| 0.799127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8288c3a3879468a2ceee0f8e37ad9fca46aac1c3
| 4,748
|
py
|
Python
|
assignement_eight_2021_09_28/test_assignment_ten.py
|
Soyvolon/CISS_380
|
81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08
|
[
"Unlicense"
] | null | null | null |
assignement_eight_2021_09_28/test_assignment_ten.py
|
Soyvolon/CISS_380
|
81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08
|
[
"Unlicense"
] | null | null | null |
assignement_eight_2021_09_28/test_assignment_ten.py
|
Soyvolon/CISS_380
|
81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08
|
[
"Unlicense"
] | null | null | null |
import unittest
from . import assignment_ten_2021_10_05 as a10
class TestIntArray(unittest.TestCase):
    """Unit tests for the arithmetic operators of ``a10.IntArray``.

    Each operator test builds IntArray operands from plain lists, applies
    the operator, and compares the result elementwise against the expected
    values. The previously duplicated build/apply/compare loops (and the
    unimplemented ``_build_int_array`` stub) are factored into helpers; the
    public test-method names are unchanged so unittest discovery is
    unaffected.
    """

    def _build_int_array(self, lyst):
        # Return an IntArray pre-loaded with the values of ``lyst``.
        arr = a10.IntArray(len(lyst))
        for index, value in enumerate(lyst):
            arr.insert(index, value)
        return arr

    def _check_op(self, op, cases, expected):
        # Apply binary ``op`` to each (lhs, rhs) pair in ``cases`` and compare
        # the result elementwise with the matching row of ``expected``. A list
        # rhs is converted to an IntArray; scalar rhs values pass through.
        for (lhs, rhs), want in zip(cases, expected):
            left = self._build_int_array(lhs)
            right = self._build_int_array(rhs) if isinstance(rhs, list) else rhs
            output = op(left, right)
            for x in range(len(output)):
                self.assertEqual(want[x], output[x])

    def _check_non_int_raises(self, op):
        # Every operator must reject a non-integer operand with ValueError.
        with self.assertRaises(ValueError):
            op(a10.IntArray(2), "hi")

    def test_addition(self):
        self._check_op(
            lambda a, b: a + b,
            [([1, 2, 3], [3, 2, 1]), ([0, 3], [1, 2, 3]), ([], [1, 2, 3])],
            [[4, 4, 4], [1, 5, 3], [1, 2, 3]])

    def test_non_int_fail_add(self):
        self._check_non_int_raises(lambda a, b: a + b)

    def test_subtraction(self):
        self._check_op(
            lambda a, b: a - b,
            [([1, 2, 3], [3, 2, 1]), ([0, 3], [1, 2, 3]), ([], [1, 2, 3])],
            [[-2, 0, 2], [-1, 1, -3], [-1, -2, -3]])

    def test_non_int_fail_sub(self):
        self._check_non_int_raises(lambda a, b: a - b)

    def test_mul(self):
        self._check_op(
            lambda a, b: a * b,
            [([1, 2, 3], 2), ([0, 3], -1), ([5, 25], 5)],
            [[2, 4, 6], [0, -3], [25, 125]])

    def test_non_int_fail_mul(self):
        self._check_non_int_raises(lambda a, b: a * b)

    def test_truediv(self):
        self._check_op(
            lambda a, b: a / b,
            [([1, 2, 3], 2), ([0, 3], -1), ([5, 25], 5)],
            [[0, 1, 1], [0, -3], [1, 5]])

    def test_non_int_fail_truediv(self):
        self._check_non_int_raises(lambda a, b: a / b)

    def test_floordiv(self):
        self._check_op(
            lambda a, b: a // b,
            [([1, 2, 3], 2), ([0, 3], -1), ([5, 25], 5)],
            [[0, 1, 1], [0, -3], [1, 5]])

    def test_non_int_fail_floordiv(self):
        self._check_non_int_raises(lambda a, b: a // b)

    def test_exp(self):
        self._check_op(
            lambda a, b: a ** b,
            [([1, 2, 3], 2), ([5, 25], 5)],
            [[1, 4, 9], [3125, 9765625]])

    def test_non_int_fail_exp(self):
        self._check_non_int_raises(lambda a, b: a ** b)

    def test_non_int_res_fail_exp(self):
        # A negative exponent would produce non-integer results, so the
        # operation must raise ValueError.
        with self.assertRaises(ValueError):
            self._build_int_array([1, 2]) ** -1

    def test_non_int_fail_init(self):
        # The constructor must reject a non-integer second argument.
        with self.assertRaises(ValueError):
            a10.IntArray(2, None)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 29.675
| 54
| 0.4246
| 651
| 4,748
| 3.013825
| 0.099846
| 0.071356
| 0.081549
| 0.11213
| 0.849643
| 0.823649
| 0.817533
| 0.817533
| 0.817533
| 0.817533
| 0
| 0.094599
| 0.403328
| 4,748
| 160
| 55
| 29.675
| 0.597953
| 0
| 0
| 0.632813
| 0
| 0
| 0.004211
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 1
| 0.117188
| false
| 0.007813
| 0.015625
| 0
| 0.140625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7d5cc769d6b1a65666220d8e2acbcc55b05d2b43
| 23
|
py
|
Python
|
ped/__init__.py
|
khalidm/ped
|
23ba61040fee17d39785aade6707490578d53dfd
|
[
"BSD-2-Clause"
] | null | null | null |
ped/__init__.py
|
khalidm/ped
|
23ba61040fee17d39785aade6707490578d53dfd
|
[
"BSD-2-Clause"
] | null | null | null |
ped/__init__.py
|
khalidm/ped
|
23ba61040fee17d39785aade6707490578d53dfd
|
[
"BSD-2-Clause"
] | null | null | null |
from .core import main
| 23
| 23
| 0.782609
| 4
| 23
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d6a73104f1052e4f823f69cb2b5761c2662531c
| 166
|
py
|
Python
|
pattern6/smart_home_v4/src/commnad/NoneCommand.py
|
icexmoon/design-pattern-with-python
|
bb897e886fe52bb620db0edc6ad9d2e5ecb067af
|
[
"MIT"
] | null | null | null |
pattern6/smart_home_v4/src/commnad/NoneCommand.py
|
icexmoon/design-pattern-with-python
|
bb897e886fe52bb620db0edc6ad9d2e5ecb067af
|
[
"MIT"
] | null | null | null |
pattern6/smart_home_v4/src/commnad/NoneCommand.py
|
icexmoon/design-pattern-with-python
|
bb897e886fe52bb620db0edc6ad9d2e5ecb067af
|
[
"MIT"
] | null | null | null |
from smart_home_v4.src.commnad.Command import Command
class NoneCommand(Command):
    """Null-object command: satisfies the ``Command`` interface while
    intentionally performing no action in either direction."""

    def execute(self) -> None:
        """Do nothing."""

    def undo(self) -> None:
        """Nothing to undo."""
| 27.666667
| 53
| 0.668675
| 22
| 166
| 4.954545
| 0.727273
| 0.146789
| 0.220183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.23494
| 166
| 6
| 54
| 27.666667
| 0.850394
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
7dc49f9c2eab13dfdb7f6e66c7709caae8133631
| 169
|
py
|
Python
|
apps/twee/models/__init__.py
|
adepeter/pythondailytip
|
8b114b68d417e7631d139f1ee2267f6f0e061cdf
|
[
"MIT"
] | null | null | null |
apps/twee/models/__init__.py
|
adepeter/pythondailytip
|
8b114b68d417e7631d139f1ee2267f6f0e061cdf
|
[
"MIT"
] | null | null | null |
apps/twee/models/__init__.py
|
adepeter/pythondailytip
|
8b114b68d417e7631d139f1ee2267f6f0e061cdf
|
[
"MIT"
] | null | null | null |
from .hash_tag import PythonTipHashTag
from .link import PythonTipLink
from .tip import PythonTip
from .twee_crypt import TweeCrypt
from .user import PythonTipTweetUser
| 169
| 169
| 0.852071
| 22
| 169
| 6.454545
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118343
| 169
| 1
| 169
| 169
| 0.95302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7dca7fa0b14aa339ae1608d9ee69ed724cab3418
| 43,312
|
py
|
Python
|
app/revisioner/tests/e2e/inspected.py
|
getmetamapper/metamapper
|
0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f
|
[
"BSD-2-Clause"
] | 53
|
2020-07-01T23:11:59.000Z
|
2022-03-31T19:10:28.000Z
|
app/revisioner/tests/e2e/inspected.py
|
metamapper-io/metamapper
|
376716e72bcaca62f1ec09ca9a13a0346e5502f9
|
[
"BSD-2-Clause"
] | 5
|
2020-11-25T19:48:57.000Z
|
2022-02-27T23:50:18.000Z
|
app/revisioner/tests/e2e/inspected.py
|
metamapper-io/metamapper
|
376716e72bcaca62f1ec09ca9a13a0346e5502f9
|
[
"BSD-2-Clause"
] | 5
|
2020-08-29T16:43:59.000Z
|
2022-01-17T19:05:30.000Z
|
# -*- coding: utf-8 -*-
tables_and_views = [
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16442,
"table_name": "customers",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16442/1",
"column_name": "customernumber",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16442/2",
"column_name": "customername",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/3",
"column_name": "contactlastname",
"column_description": None,
"ordinal_position": 3,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/4",
"column_name": "contactfirstname",
"column_description": None,
"ordinal_position": 4,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/5",
"column_name": "phone",
"column_description": None,
"ordinal_position": 5,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/6",
"column_name": "addressline1",
"column_description": None,
"ordinal_position": 6,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/7",
"column_name": "addressline2",
"column_description": None,
"ordinal_position": 7,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": "NULL::character varying"
},
{
"column_object_id": "16442/8",
"column_name": "city",
"column_description": None,
"ordinal_position": 8,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/9",
"column_name": "state",
"column_description": None,
"ordinal_position": 9,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": "NULL::character varying"
},
{
"column_object_id": "16442/10",
"column_name": "postalcode",
"column_description": None,
"ordinal_position": 10,
"data_type": "character varying",
"max_length": 15,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": "NULL::character varying"
},
{
"column_object_id": "16442/11",
"column_name": "country",
"column_description": None,
"ordinal_position": 11,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/12",
"column_name": "salesrepemployeenumber",
"column_description": None,
"ordinal_position": 12,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16442/13",
"column_name": "creditlimit",
"column_description": None,
"ordinal_position": 13,
"data_type": "numeric",
"max_length": 10,
"numeric_scale": 2,
"is_nullable": True,
"is_primary": False,
"default_value": "NULL::numeric"
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16522,
"table_name": "departments",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16522/1",
"column_name": "id",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": "nextval('app.departments_id_seq'::regclass)"
},
{
"column_object_id": "16522/2",
"column_name": "dept_name",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 40,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16501,
"table_name": "orderdetails",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16501/1",
"column_name": "ordernumber",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16501/2",
"column_name": "productcode",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 15,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16501/3",
"column_name": "quantityordered",
"column_description": None,
"ordinal_position": 3,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16501/4",
"column_name": "priceeach",
"column_description": None,
"ordinal_position": 4,
"data_type": "numeric",
"max_length": 10,
"numeric_scale": 2,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16501/5",
"column_name": "orderlinenumber",
"column_description": None,
"ordinal_position": 5,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16465,
"table_name": "orders",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16465/1",
"column_name": "ordernumber",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16465/2",
"column_name": "orderdate",
"column_description": None,
"ordinal_position": 2,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16465/3",
"column_name": "requireddate",
"column_description": None,
"ordinal_position": 3,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16465/4",
"column_name": "shippeddate",
"column_description": None,
"ordinal_position": 4,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16465/5",
"column_name": "status",
"column_description": None,
"ordinal_position": 5,
"data_type": "character varying",
"max_length": 15,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16465/6",
"column_name": "comments",
"column_description": None,
"ordinal_position": 6,
"data_type": "text",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16465/7",
"column_name": "customernumber",
"column_description": None,
"ordinal_position": 7,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16478,
"table_name": "payments",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16478/1",
"column_name": "customernumber",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16478/2",
"column_name": "checknumber",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16478/3",
"column_name": "paymentdate",
"column_description": None,
"ordinal_position": 3,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16478/4",
"column_name": "amount",
"column_description": None,
"ordinal_position": 4,
"data_type": "numeric",
"max_length": 10,
"numeric_scale": 2,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16456,
"table_name": "productlines",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16456/1",
"column_name": "productline",
"column_description": None,
"ordinal_position": 1,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16456/2",
"column_name": "textdescription",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 4000,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": "NULL::character varying"
},
{
"column_object_id": "16456/3",
"column_name": "htmldescription",
"column_description": None,
"ordinal_position": 3,
"data_type": "text",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16456/4",
"column_name": "image",
"column_description": None,
"ordinal_position": 4,
"data_type": "bytea",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16488,
"table_name": "products",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16488/1",
"column_name": "productcode",
"column_description": None,
"ordinal_position": 1,
"data_type": "character varying",
"max_length": 15,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16488/2",
"column_name": "productname",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 70,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/3",
"column_name": "productline",
"column_description": None,
"ordinal_position": 3,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/4",
"column_name": "productscale",
"column_description": None,
"ordinal_position": 4,
"data_type": "character varying",
"max_length": 10,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/5",
"column_name": "productvendor",
"column_description": None,
"ordinal_position": 5,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/6",
"column_name": "productdescription",
"column_description": None,
"ordinal_position": 6,
"data_type": "text",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/7",
"column_name": "quantityinstock",
"column_description": None,
"ordinal_position": 7,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/8",
"column_name": "buyprice",
"column_description": None,
"ordinal_position": 8,
"data_type": "numeric",
"max_length": 10,
"numeric_scale": 2,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16488/9",
"column_name": "msrp",
"column_description": None,
"ordinal_position": 9,
"data_type": "numeric",
"max_length": 10,
"numeric_scale": 2,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 16516,
"table_name": "sales_representatives",
"table_type": "view",
"properties": {},
"columns": [
{
"column_object_id": "16516/1",
"column_name": "customernumber",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16516/2",
"column_name": "customername",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 50,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16516/3",
"column_name": "salesrepemployeenumber",
"column_description": None,
"ordinal_position": 3,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16516/4",
"column_name": "emp_no",
"column_description": None,
"ordinal_position": 4,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16516/5",
"column_name": "name",
"column_description": None,
"ordinal_position": 5,
"data_type": "text",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16386,
"table_schema": "employees",
"table_object_id": 16437,
"table_name": "current_dept_emp",
"table_type": "view",
"properties": {},
"columns": [
{
"column_object_id": "16437/1",
"column_name": "emp_no",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16437/2",
"column_name": "dept_no",
"column_description": None,
"ordinal_position": 2,
"data_type": "character",
"max_length": 4,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16437/3",
"column_name": "from_date",
"column_description": None,
"ordinal_position": 3,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16437/4",
"column_name": "to_date",
"column_description": None,
"ordinal_position": 4,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16386,
"table_schema": "employees",
"table_object_id": 16392,
"table_name": "departments",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16392/1",
"column_name": "dept_no",
"column_description": None,
"ordinal_position": 1,
"data_type": "character",
"max_length": 4,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16392/2",
"column_name": "dept_name",
"column_description": None,
"ordinal_position": 2,
"data_type": "character varying",
"max_length": 40,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16392/3",
"column_name": "started_on",
"column_description": None,
"ordinal_position": 3,
"data_type": "timestamp without time zone",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16386,
"table_schema": "employees",
"table_object_id": 16418,
"table_name": "dept_emp",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16418/1",
"column_name": "emp_no",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16418/2",
"column_name": "dept_no",
"column_description": None,
"ordinal_position": 2,
"data_type": "character",
"max_length": 4,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16418/3",
"column_name": "from_date",
"column_description": None,
"ordinal_position": 3,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16418/4",
"column_name": "to_date",
"column_description": None,
"ordinal_position": 4,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16386,
"table_schema": "employees",
"table_object_id": 16433,
"table_name": "dept_emp_latest_date",
"table_type": "view",
"properties": {},
"columns": [
{
"column_object_id": "16433/1",
"column_name": "emp_no",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16433/2",
"column_name": "from_date",
"column_description": None,
"ordinal_position": 2,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16433/3",
"column_name": "to_date",
"column_description": None,
"ordinal_position": 3,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
}
]
},
{
"schema_object_id": 16386,
"table_schema": "employees",
"table_object_id": 16399,
"table_name": "dept_manager",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16399/1",
"column_name": "dept_no",
"column_description": None,
"ordinal_position": 1,
"data_type": "character",
"max_length": 4,
"numeric_scale": None,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16399/2",
"column_name": "emp_no",
"column_description": None,
"ordinal_position": 2,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16399/3",
"column_name": "from_date",
"column_description": None,
"ordinal_position": 3,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16399/4",
"column_name": "to_date",
"column_description": None,
"ordinal_position": 4,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16399/5",
"column_name": "extras",
"column_description": None,
"ordinal_position": 5,
"data_type": "text",
"max_length": None,
"numeric_scale": None,
"is_nullable": True,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16399/6",
"column_name": "rating",
"column_description": "The NPS score for this manager",
"ordinal_position": 6,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": True,
"is_primary": False,
"default_value": "5"
}
]
},
{
"schema_object_id": 16386,
"table_schema": "employees",
"table_object_id": 16387,
"table_name": "employees",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "16387/1",
"column_name": "emp_no",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "16387/2",
"column_name": "birth_date",
"column_description": None,
"ordinal_position": 2,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16387/3",
"column_name": "first_name",
"column_description": None,
"ordinal_position": 3,
"data_type": "character varying",
"max_length": 14,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16387/4",
"column_name": "last_name",
"column_description": None,
"ordinal_position": 4,
"data_type": "character varying",
"max_length": 16,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16387/5",
"column_name": "hire_date",
"column_description": None,
"ordinal_position": 5,
"data_type": "date",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
{
"column_object_id": "16387/6",
"column_name": "created_at",
"column_description": None,
"ordinal_position": 6,
"data_type": "timestamp without time zone",
"max_length": None,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
}
]
}
]
indexes = [
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "customers",
"table_object_id": 16442,
"index_name": "customers_pkey",
"index_object_id": 16449,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX customers_pkey ON app.customers USING btree (customernumber)",
"columns": [
{
"column_name": "customernumber",
"ordinal_position": 1
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "departments",
"table_object_id": 16522,
"index_name": "departments_dept_name_key",
"index_object_id": 16528,
"is_unique": True,
"is_primary": False,
"definition": "CREATE UNIQUE INDEX departments_dept_name_key ON app.departments USING btree (dept_name)",
"columns": [
{
"column_name": "dept_name",
"ordinal_position": 1
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "departments",
"table_object_id": 16522,
"index_name": "app_departments_pkey",
"index_object_id": 16526,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX app_departments_pkey ON app.departments USING btree (id)",
"columns": [
{
"column_name": "id",
"ordinal_position": 1
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "orderdetails",
"table_object_id": 16501,
"index_name": "orderdetails_pkey",
"index_object_id": 16504,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX orderdetails_pkey ON app.orderdetails USING btree (ordernumber, productcode)",
"columns": [
{
"column_name": "ordernumber",
"ordinal_position": 1
},
{
"column_name": "productcode",
"ordinal_position": 2
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "orders",
"table_object_id": 16465,
"index_name": "orders_pkey",
"index_object_id": 16471,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX orders_pkey ON app.orders USING btree (ordernumber)",
"columns": [
{
"column_name": "ordernumber",
"ordinal_position": 1
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "payments",
"table_object_id": 16478,
"index_name": "payments_pkey",
"index_object_id": 16481,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX payments_pkey ON app.payments USING btree (customernumber, checknumber)",
"columns": [
{
"column_name": "customernumber",
"ordinal_position": 1
},
{
"column_name": "checknumber",
"ordinal_position": 2
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "productlines",
"table_object_id": 16456,
"index_name": "productlines_pkey",
"index_object_id": 16463,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX productlines_pkey ON app.productlines USING btree (productline)",
"columns": [
{
"column_name": "productline",
"ordinal_position": 1
}
]
},
{
"schema_name": "app",
"schema_object_id": 16441,
"table_name": "products",
"table_object_id": 16488,
"index_name": "products_pkey",
"index_object_id": 16494,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX products_pkey ON app.products USING btree (productcode)",
"columns": [
{
"column_name": "productcode",
"ordinal_position": 1
}
]
},
{
"schema_name": "employees",
"schema_object_id": 16386,
"table_name": "departments",
"table_object_id": 16392,
"index_name": "departments_dept_name_key",
"index_object_id": 16397,
"is_unique": True,
"is_primary": False,
"definition": "CREATE UNIQUE INDEX departments_dept_name_key ON employees.departments USING btree (dept_name)",
"columns": [
{
"column_name": "dept_name",
"ordinal_position": 1
}
]
},
{
"schema_name": "employees",
"schema_object_id": 16386,
"table_name": "departments",
"table_object_id": 16392,
"index_name": "employees_departments_pkey",
"index_object_id": 16395,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX employees_departments_pkey ON employees.departments USING btree (dept_no)",
"columns": [
{
"column_name": "dept_no",
"ordinal_position": 1
}
]
},
{
"schema_name": "employees",
"schema_object_id": 16386,
"table_name": "dept_emp",
"table_object_id": 16418,
"index_name": "dept_emp_pkey",
"index_object_id": 16421,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX dept_emp_pkey ON employees.dept_emp USING btree (emp_no, dept_no)",
"columns": [
{
"column_name": "emp_no",
"ordinal_position": 1
},
{
"column_name": "dept_no",
"ordinal_position": 2
}
]
},
{
"schema_name": "employees",
"schema_object_id": 16386,
"table_name": "dept_manager",
"table_object_id": 16399,
"index_name": "dept_manager_pkey",
"index_object_id": 16406,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX dept_manager_pkey ON employees.dept_manager USING btree (emp_no, dept_no)",
"columns": [
{
"column_name": "emp_no",
"ordinal_position": 1
},
{
"column_name": "dept_no",
"ordinal_position": 2
}
]
},
{
"schema_name": "employees",
"schema_object_id": 16386,
"table_name": "employees",
"table_object_id": 16387,
"index_name": "employees_pkey",
"index_object_id": 16390,
"is_unique": True,
"is_primary": True,
"definition": "CREATE UNIQUE INDEX employees_pkey ON employees.employees USING btree (emp_no)",
"columns": [
{
"column_name": "emp_no",
"ordinal_position": 1
}
]
}
]
| 33.679627
| 121
| 0.427757
| 3,375
| 43,312
| 5.133926
| 0.052444
| 0.065562
| 0.060599
| 0.119582
| 0.859237
| 0.83846
| 0.831131
| 0.822474
| 0.776707
| 0.732556
| 0
| 0.042659
| 0.449575
| 43,312
| 1,285
| 122
| 33.705837
| 0.684144
| 0.000485
| 0
| 0.657299
| 0
| 0
| 0.388644
| 0.007485
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7dd4adc3f96a7e734caa68afda04ed96ad26f848
| 231
|
py
|
Python
|
great_expectations/datasource/__init__.py
|
williamjr/great_expectations
|
7e3af56476ea9966045172696af316b8537ff4c6
|
[
"Apache-2.0"
] | 2
|
2020-03-04T19:35:57.000Z
|
2020-04-13T21:06:02.000Z
|
great_expectations/datasource/__init__.py
|
noncomposmentis/great_expectations
|
8155b1f20a88aa186745698792856f84d82f33ef
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/datasource/__init__.py
|
noncomposmentis/great_expectations
|
8155b1f20a88aa186745698792856f84d82f33ef
|
[
"Apache-2.0"
] | null | null | null |
from .datasource import Datasource
from .pandas_datasource import PandasDatasource
from .sqlalchemy_datasource import SqlAlchemyDatasource
from .sparkdf_datasource import SparkDFDatasource
from .dbt_datasource import DBTDatasource
| 38.5
| 55
| 0.891775
| 24
| 231
| 8.416667
| 0.458333
| 0.39604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08658
| 231
| 5
| 56
| 46.2
| 0.957346
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7deca5d3892271826be8accdc79f61a8e9a65ac4
| 256
|
py
|
Python
|
snake_wars/commons/__init__.py
|
Joffreybvn/snake-wars
|
74732b29272c30af967d13e16d460b424a78cd98
|
[
"MIT"
] | null | null | null |
snake_wars/commons/__init__.py
|
Joffreybvn/snake-wars
|
74732b29272c30af967d13e16d460b424a78cd98
|
[
"MIT"
] | null | null | null |
snake_wars/commons/__init__.py
|
Joffreybvn/snake-wars
|
74732b29272c30af967d13e16d460b424a78cd98
|
[
"MIT"
] | null | null | null |
from snake_wars.commons.direction import Direction
from snake_wars.commons.location import Location
from snake_wars.commons.random_location import RandomLocation
from snake_wars.commons.size import Size
from snake_wars.commons.game_state import GameState
| 36.571429
| 61
| 0.878906
| 37
| 256
| 5.891892
| 0.351351
| 0.206422
| 0.298165
| 0.458716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082031
| 256
| 6
| 62
| 42.666667
| 0.92766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c4a4e28fe6b079084fedac6e2b04b9b8173b224f
| 87
|
py
|
Python
|
ruslanways/HW_5/cw5_5.py
|
kolyasalubov/Lv-639.pythonCore
|
06f10669a188318884adb00723127465ebdf2907
|
[
"MIT"
] | null | null | null |
ruslanways/HW_5/cw5_5.py
|
kolyasalubov/Lv-639.pythonCore
|
06f10669a188318884adb00723127465ebdf2907
|
[
"MIT"
] | null | null | null |
ruslanways/HW_5/cw5_5.py
|
kolyasalubov/Lv-639.pythonCore
|
06f10669a188318884adb00723127465ebdf2907
|
[
"MIT"
] | null | null | null |
def count_sheeps(sheep):
# TODO May the force be with you
return sheep.count(True)
| 21.75
| 34
| 0.735632
| 15
| 87
| 4.2
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 87
| 3
| 35
| 29
| 0.887324
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c4b5715c36f1a1c8ccec949d77aca91c7dd09766
| 41
|
py
|
Python
|
build/lib/fractalpaths/__init__.py
|
karangoe/fractalpaths
|
eeeec7c6d301c7561da62c2d3e3ec4160d5d9b98
|
[
"MIT"
] | 1
|
2021-01-25T01:22:12.000Z
|
2021-01-25T01:22:12.000Z
|
build/lib/fractalpaths/__init__.py
|
karangoe/fractalpaths
|
eeeec7c6d301c7561da62c2d3e3ec4160d5d9b98
|
[
"MIT"
] | null | null | null |
build/lib/fractalpaths/__init__.py
|
karangoe/fractalpaths
|
eeeec7c6d301c7561da62c2d3e3ec4160d5d9b98
|
[
"MIT"
] | null | null | null |
from fractalpaths.fractal import Fractal
| 20.5
| 40
| 0.878049
| 5
| 41
| 7.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4f40a0bcd8fce3b7ddcfba9cb2a91b13c1086ca
| 177
|
py
|
Python
|
imagedt/caffe/__init__.py
|
Eddy-zheng/ImageDT
|
78c9e671526422f28bd564cad9879ef95f12b454
|
[
"Apache-2.0"
] | 9
|
2018-06-06T02:37:50.000Z
|
2020-07-16T12:23:26.000Z
|
imagedt/caffe/__init__.py
|
Eddy-zheng/ImageDT
|
78c9e671526422f28bd564cad9879ef95f12b454
|
[
"Apache-2.0"
] | null | null | null |
imagedt/caffe/__init__.py
|
Eddy-zheng/ImageDT
|
78c9e671526422f28bd564cad9879ef95f12b454
|
[
"Apache-2.0"
] | 5
|
2018-06-03T11:04:11.000Z
|
2018-12-26T11:37:22.000Z
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from . import network
from . import optim
from . import tools
from . import trainer
| 22.125
| 38
| 0.80791
| 25
| 177
| 5.32
| 0.52
| 0.300752
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006667
| 0.152542
| 177
| 8
| 39
| 22.125
| 0.88
| 0.073446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
481dd6a90407e870fe54a9197f8db1e323c5f937
| 6,516
|
py
|
Python
|
API and AI Model/disease.py
|
themayankjha/MedX
|
cdf295910f6e0a30e6738fcabbf458b8f3e9f9ae
|
[
"MIT"
] | null | null | null |
API and AI Model/disease.py
|
themayankjha/MedX
|
cdf295910f6e0a30e6738fcabbf458b8f3e9f9ae
|
[
"MIT"
] | null | null | null |
API and AI Model/disease.py
|
themayankjha/MedX
|
cdf295910f6e0a30e6738fcabbf458b8f3e9f9ae
|
[
"MIT"
] | 1
|
2021-02-28T06:01:08.000Z
|
2021-02-28T06:01:08.000Z
|
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
import sklearn.metrics
from sklearn import datasets
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
import os
def disease(back_pain,constipation,abdominal_pain,diarrhoea,mild_fever,yellow_urine, yellowing_of_eyes,acute_liver_failure,fluid_overload,swelling_of_stomach,
            swelled_lymph_nodes,malaise,blurred_and_distorted_vision,phlegm,throat_irritation,
            redness_of_eyes,sinus_pressure,runny_nose,congestion,chest_pain,weakness_in_limbs,
            fast_heart_rate,pain_during_bowel_movements,pain_in_anal_region,bloody_stool,
            irritation_in_anus,neck_pain,dizziness,cramps,bruising,obesity,swollen_legs,
            swollen_blood_vessels,puffy_face_and_eyes,enlarged_thyroid,brittle_nails,
            swollen_extremeties,excessive_hunger,extra_marital_contacts,drying_and_tingling_lips,
            slurred_speech,knee_pain,hip_joint_pain,muscle_weakness,stiff_neck,swelling_joints,
            movement_stiffness,spinning_movements,loss_of_balance,unsteadiness,
            weakness_of_one_body_side,loss_of_smell,bladder_discomfort,foul_smell_of_urine,
            continuous_feel_of_urine,passage_of_gases,internal_itching,toxic_look_typhos,
            depression,irritability,muscle_pain,altered_sensorium,red_spots_over_body,belly_pain,
            abnormal_menstruation,dischromic_patches,watering_from_eyes,increased_appetite,polyuria,family_history,mucoid_sputum,
            rusty_sputum,lack_of_concentration,visual_disturbances,receiving_blood_transfusion,
            receiving_unsterile_injections,coma,stomach_bleeding,distention_of_abdomen,
            history_of_alcohol_consumption,blood_in_sputum,prominent_veins_on_calf,
            palpitations,painful_walking,pus_filled_pimples,blackheads,scurring,skin_peeling,
            silver_like_dusting,small_dents_in_nails,inflammatory_nails,blister,red_sore_around_nose,
            yellow_crust_ooze):
    """Train a RandomForest on the symptom CSV and predict a prognosis.

    Each parameter is one binary symptom indicator (0/1).  The model is
    retrained on every call from ``static/symptoms.csv``; the return value
    is ``classifier.predict`` on the single row built from the arguments
    (an array with one predicted prognosis label).
    """
    # Graphviz PATH tweak for Windows/conda environments only.  Guarded so the
    # function no longer crashes with KeyError when CONDA_PREFIX is not set.
    conda_prefix = os.environ.get('CONDA_PREFIX')
    if conda_prefix:
        os.environ['PATH'] = os.environ['PATH'] + ';' + conda_prefix + r"\Library\bin\graphviz"
    data = pd.read_csv('static/symptoms.csv')
    # NOTE: three names below intentionally keep the dataset's quirky
    # spellings ('foul_smell_of urine', 'toxic_look_(typhos)',
    # 'dischromic _patches') -- they are the actual CSV column headers and
    # differ from the corresponding argument names.
    feature_columns = ['back_pain','constipation','abdominal_pain','diarrhoea','mild_fever','yellow_urine',
        'yellowing_of_eyes','acute_liver_failure','fluid_overload','swelling_of_stomach',
        'swelled_lymph_nodes','malaise','blurred_and_distorted_vision','phlegm','throat_irritation',
        'redness_of_eyes','sinus_pressure','runny_nose','congestion','chest_pain','weakness_in_limbs',
        'fast_heart_rate','pain_during_bowel_movements','pain_in_anal_region','bloody_stool',
        'irritation_in_anus','neck_pain','dizziness','cramps','bruising','obesity','swollen_legs',
        'swollen_blood_vessels','puffy_face_and_eyes','enlarged_thyroid','brittle_nails',
        'swollen_extremeties','excessive_hunger','extra_marital_contacts','drying_and_tingling_lips',
        'slurred_speech','knee_pain','hip_joint_pain','muscle_weakness','stiff_neck','swelling_joints',
        'movement_stiffness','spinning_movements','loss_of_balance','unsteadiness',
        'weakness_of_one_body_side','loss_of_smell','bladder_discomfort','foul_smell_of urine',
        'continuous_feel_of_urine','passage_of_gases','internal_itching','toxic_look_(typhos)',
        'depression','irritability','muscle_pain','altered_sensorium','red_spots_over_body','belly_pain',
        'abnormal_menstruation','dischromic _patches','watering_from_eyes','increased_appetite','polyuria','family_history','mucoid_sputum',
        'rusty_sputum','lack_of_concentration','visual_disturbances','receiving_blood_transfusion',
        'receiving_unsterile_injections','coma','stomach_bleeding','distention_of_abdomen',
        'history_of_alcohol_consumption','blood_in_sputum','prominent_veins_on_calf',
        'palpitations','painful_walking','pus_filled_pimples','blackheads','scurring','skin_peeling',
        'silver_like_dusting','small_dents_in_nails','inflammatory_nails','blister','red_sore_around_nose',
        'yellow_crust_ooze']
    predictors = data[feature_columns]
    targets = data.prognosis
    pred_train, pred_test, tar_train, tar_test = train_test_split(
        predictors, targets, test_size=.4, random_state=2)
    print(pred_train.shape)
    print(pred_test.shape)
    print(tar_train.shape)
    print(tar_test.shape)
    classifier = RandomForestClassifier(n_estimators=4)
    classifier = classifier.fit(pred_train, tar_train)
    predictions = classifier.predict(pred_test)
    print(predictions[20])
    print(sklearn.metrics.confusion_matrix(tar_test, predictions))
    accuracy = sklearn.metrics.accuracy_score(tar_test, predictions)
    print(accuracy)
    # Diagnostic only: ExtraTrees is trained just to print feature importances.
    model = ExtraTreesClassifier()
    model.fit(pred_train, tar_train)
    print(model.feature_importances_)
    # Build the single prediction row in the same order as feature_columns.
    features = [back_pain,constipation,abdominal_pain,diarrhoea,mild_fever,yellow_urine,
        yellowing_of_eyes,acute_liver_failure,fluid_overload,swelling_of_stomach,
        swelled_lymph_nodes,malaise,blurred_and_distorted_vision,phlegm,throat_irritation,
        redness_of_eyes,sinus_pressure,runny_nose,congestion,chest_pain,weakness_in_limbs,
        fast_heart_rate,pain_during_bowel_movements,pain_in_anal_region,bloody_stool,
        irritation_in_anus,neck_pain,dizziness,cramps,bruising,obesity,swollen_legs,
        swollen_blood_vessels,puffy_face_and_eyes,enlarged_thyroid,brittle_nails,
        swollen_extremeties,excessive_hunger,extra_marital_contacts,drying_and_tingling_lips,
        slurred_speech,knee_pain,hip_joint_pain,muscle_weakness,stiff_neck,swelling_joints,
        movement_stiffness,spinning_movements,loss_of_balance,unsteadiness,
        weakness_of_one_body_side,loss_of_smell,bladder_discomfort,foul_smell_of_urine,
        continuous_feel_of_urine,passage_of_gases,internal_itching,toxic_look_typhos,
        depression,irritability,muscle_pain,altered_sensorium,red_spots_over_body,belly_pain,
        abnormal_menstruation,dischromic_patches,watering_from_eyes,increased_appetite,polyuria,family_history,mucoid_sputum,
        rusty_sputum,lack_of_concentration,visual_disturbances,receiving_blood_transfusion,
        receiving_unsterile_injections,coma,stomach_bleeding,distention_of_abdomen,
        history_of_alcohol_consumption,blood_in_sputum,prominent_veins_on_calf,
        palpitations,painful_walking,pus_filled_pimples,blackheads,scurring,skin_peeling,
        silver_like_dusting,small_dents_in_nails,inflammatory_nails,blister,red_sore_around_nose,
        yellow_crust_ooze]
    return classifier.predict([features])
| 71.604396
| 190
| 0.824586
| 830
| 6,516
| 5.979518
| 0.278313
| 0.013298
| 0.012089
| 0.01753
| 0.815434
| 0.800322
| 0.800322
| 0.800322
| 0.800322
| 0.800322
| 0
| 0.000838
| 0.084254
| 6,516
| 90
| 191
| 72.4
| 0.830903
| 0
| 0
| 0.395349
| 0
| 0
| 0.235871
| 0.056827
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011628
| false
| 0.034884
| 0.139535
| 0
| 0.162791
| 0.093023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48668178dff7e8d60ba7428f3498fc22130b3421
| 8,344
|
py
|
Python
|
app.py
|
JLabadorf/COVID_data_scraper
|
522cf04daf19d25718dc060be864d0fbaee72b78
|
[
"MIT"
] | null | null | null |
app.py
|
JLabadorf/COVID_data_scraper
|
522cf04daf19d25718dc060be864d0fbaee72b78
|
[
"MIT"
] | null | null | null |
app.py
|
JLabadorf/COVID_data_scraper
|
522cf04daf19d25718dc060be864d0fbaee72b78
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, render_template, jsonify
import pandas as pd
from flask_heroku import Heroku
#Code by James Labadorf. James@jameslabadorf.com
# Flask application object used by all route decorators below.
app = Flask(__name__)
# NOTE(review): flask_heroku wrapper -- presumably applies Heroku-friendly
# configuration to the app; confirm against the flask_heroku docs.
heroku = Heroku(app)
@app.route('/')
def index():
    """Serve the landing page."""
    page = 'index.html'
    return render_template(page)
@app.route('/sum/')
def summuraized():
    """Return statewide totals (cases, deaths, hospitalizations) as JSON.

    Fixes: operate on a ``.copy()`` of the column subset so we never mutate
    a view of the source frame, and convert counts with ``pd.to_numeric``
    so the endpoint works whether the CSV delivers comma-formatted strings
    or plain numbers (the previous ``.str.replace`` raised when a column
    was already numeric).
    """
    oh_url = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    df = pd.read_csv(oh_url, low_memory=False)
    count_cols = ['Case Count',
                  'Death Due To Illness Count - County Of Residence',
                  'Hospitalized Count']
    df_county = df[['County'] + count_cols].copy()
    for col in count_cols:
        # Strip thousands separators, then coerce to numbers (bad cells -> NaN).
        df_county[col] = pd.to_numeric(
            df_county[col].astype(str).str.replace(',', ''), errors='coerce')
    totals = df_county.set_index(['County']).sum()
    return jsonify(totals.to_dict())
@app.route('/all/')
def all_data():
    """Return the full Ohio Department of Health CSV, unmodified, as JSON."""
    source = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    frame = pd.read_csv(source, low_memory=False)
    return jsonify(frame.to_dict())
@app.route('/county/')
def county_data():
    """Return per-county totals; with ``?g=d`` they are broken down by onset date.

    Bug fix: the date-breakdown branch appended to ``d[county]`` -- a key
    that was never created -- instead of ``d[date][county]``, so every
    ``?g=d`` request raised KeyError.  The three separate ``iterrows``
    passes are also collapsed into one.
    """
    param = request.args.get("g")
    oh_url = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    df = pd.read_csv(oh_url, low_memory=False)
    base_cols = ['County', 'Case Count',
                 'Death Due To Illness Count - County Of Residence',
                 'Hospitalized Count']
    if param == "d":
        df_county = df[base_cols + ['Onset Date']].copy()
    else:
        df_county = df[base_cols].copy()
    # Normalize the comma-formatted count columns to integers.  The death
    # column is deliberately left as-is, matching the original behavior.
    df_county['Case Count'] = df_county['Case Count'].astype(str).str.replace(',', '').astype(int)
    df_county['Hospitalized Count'] = df_county['Hospitalized Count'].str.replace(',', '').astype(int)
    if param == "d":
        grouped = df_county.set_index(['Onset Date']).groupby(['County', 'Onset Date']).sum()
        d = {}
        # grouped has a (County, Onset Date) MultiIndex, so each row index
        # unpacks directly into the two keys we need.
        for (county, date), row in grouped.iterrows():
            d.setdefault(date, {}).setdefault(county, []).append({
                "Case Count": row['Case Count'],
                "Death Due To Illness Count - County Of Residence":
                    row['Death Due To Illness Count - County Of Residence'],
                "Hospitalized Count": row['Hospitalized Count'],
            })
    else:
        grouped = df_county.set_index(['County']).groupby(['County']).sum()
        d = grouped.to_dict()
    return jsonify(d)
@app.route('/county/<county>/')
def county_lookup(county):
    """Return aggregated totals for a single Ohio county."""
    source = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    frame = pd.read_csv(source, low_memory=False)
    subset = frame[['County', 'Case Count',
                    'Death Due To Illness Count - County Of Residence',
                    'Hospitalized Count']]
    totals = subset.set_index(['County']).groupby(['County']).sum()
    return jsonify(totals.loc[county].to_dict())
@app.route('/sex/')
def sex():
    """Return case/death/hospitalization totals grouped by patient sex.

    Bug fix: the original grouped by ``'County'``, a column not present in
    the selected subset (only ``Sex`` and the count columns), which raised
    KeyError on every request.  Group by ``Sex`` instead.
    """
    oh_url = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    df = pd.read_csv(oh_url, low_memory=False)
    df_sex = df[['Sex', 'Case Count',
                 'Death Due To Illness Count - County Of Residence',
                 'Hospitalized Count']]
    grouped = df_sex.set_index(['Sex']).groupby(['Sex']).sum()
    return jsonify(grouped.to_dict())
@app.route('/onset/date/')
def onset_date():
    """Return totals grouped by symptom-onset date."""
    source = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    frame = pd.read_csv(source, low_memory=False)
    subset = frame[['Onset Date', 'Case Count',
                    'Death Due To Illness Count - County Of Residence',
                    'Hospitalized Count']]
    grouped = subset.set_index(['Onset Date']).groupby(['Onset Date']).sum()
    return jsonify(grouped.to_dict())
@app.route('/onset/date/<county>/')
def onset_date_county(county):
    """Return totals by onset date for one county."""
    source = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    frame = pd.read_csv(source, low_memory=False)
    subset = frame[['County', 'Onset Date', 'Case Count',
                    'Death Due To Illness Count - County Of Residence',
                    'Hospitalized Count']]
    grouped = subset.set_index(['County', 'Onset Date']).groupby(['County', 'Onset Date']).sum()
    return jsonify(grouped.loc[county].to_dict())
@app.route('/death/')
def death_count():
    """Return totals grouped by date of death."""
    source = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    frame = pd.read_csv(source, low_memory=False)
    subset = frame[['Date Of Death', 'Case Count',
                    'Death Due To Illness Count - County Of Residence',
                    'Hospitalized Count']]
    grouped = subset.set_index(['Date Of Death']).groupby(['Date Of Death']).sum()
    return jsonify(grouped.to_dict())
@app.route('/age/')
def age_group():
    """Return totals grouped by age range."""
    source = r'https://coronavirus.ohio.gov/static/dashboards/COVIDDeathData_CountyOfResidence.csv'
    frame = pd.read_csv(source, low_memory=False)
    subset = frame[['Age Range', 'Case Count',
                    'Death Due To Illness Count - County Of Residence',
                    'Hospitalized Count']]
    grouped = subset.set_index(['Age Range']).groupby(['Age Range']).sum()
    return jsonify(grouped.to_dict())
# Start the Flask development server when this file is run directly.
if __name__ =='__main__':
    app.run()
| 51.826087
| 163
| 0.667665
| 1,158
| 8,344
| 4.65285
| 0.086356
| 0.1366
| 0.048255
| 0.082034
| 0.850223
| 0.844655
| 0.836674
| 0.807906
| 0.805494
| 0.789161
| 0
| 0.001858
| 0.161553
| 8,344
| 160
| 164
| 52.15
| 0.768296
| 0.214765
| 0
| 0.488189
| 0
| 0
| 0.371393
| 0.003223
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07874
| false
| 0
| 0.023622
| 0.007874
| 0.181102
| 0.015748
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
486b5e9c08040dc90412b0d639c2fe4e6a0c0a10
| 457
|
py
|
Python
|
api/server/codegen/server/controllers/default_controller.py
|
CivicKnowledge/health-reporter
|
ff352ef5b15a17bb849ace0af3f29dfc8f7f34f0
|
[
"BSD-2-Clause"
] | null | null | null |
api/server/codegen/server/controllers/default_controller.py
|
CivicKnowledge/health-reporter
|
ff352ef5b15a17bb849ace0af3f29dfc8f7f34f0
|
[
"BSD-2-Clause"
] | null | null | null |
api/server/codegen/server/controllers/default_controller.py
|
CivicKnowledge/health-reporter
|
ff352ef5b15a17bb849ace0af3f29dfc8f7f34f0
|
[
"BSD-2-Clause"
] | null | null | null |
def find_measure(name=None, tag=None, id=None, search=None):
    """Stub handler for measure search; returns a placeholder string."""
    return 'do some magic!'
def get_dimension(id):
    """Stub handler for a dimension lookup; returns a placeholder string."""
    return 'do some magic!'
def get_indicator(id):
    """Stub handler for an indicator lookup; returns a placeholder string."""
    return 'do some magic!'
def get_measure(id):
    """Stub handler for a measure lookup; returns a placeholder string."""
    return 'do some magic!'
def get_measure_root(id):
    """Stub handler for a measure-root lookup; returns a placeholder string."""
    return 'do some magic!'
def get_root():
    """Stub handler for the API root; returns a placeholder string."""
    return 'do some magic!'
def list_measures():
    """Stub handler for listing measures; returns a placeholder string."""
    return 'do some magic!'
def reduce_indicator(id):
    """Stub handler for indicator reduction; returns a placeholder string."""
    return 'do some magic!'
| 18.28
| 68
| 0.66302
| 70
| 457
| 4.2
| 0.271429
| 0.217687
| 0.326531
| 0.462585
| 0.727891
| 0.591837
| 0.387755
| 0.217687
| 0
| 0
| 0
| 0
| 0.21663
| 457
| 24
| 69
| 19.041667
| 0.821229
| 0
| 0
| 0.5
| 0
| 0
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
6f852fb6c87071c47eb8071af79907751480a08a
| 146
|
py
|
Python
|
src/currencycloud/resources/report.py
|
icebotariccl/currencycloud-python
|
03bb0df2743e6669790dee6f2367f9e0500a4610
|
[
"MIT"
] | 12
|
2015-07-31T10:28:55.000Z
|
2021-12-28T03:28:37.000Z
|
src/currencycloud/resources/report.py
|
icebotariccl/currencycloud-python
|
03bb0df2743e6669790dee6f2367f9e0500a4610
|
[
"MIT"
] | 26
|
2015-07-01T16:25:19.000Z
|
2022-02-25T14:42:18.000Z
|
src/currencycloud/resources/report.py
|
icebotariccl/currencycloud-python
|
03bb0df2743e6669790dee6f2367f9e0500a4610
|
[
"MIT"
] | 20
|
2015-10-14T18:21:04.000Z
|
2022-02-02T09:59:28.000Z
|
from currencycloud.resources.resource import Resource
class Report(Resource):
    """A CurrencyCloud Report resource; all behavior comes from Resource."""
| 18.25
| 54
| 0.753425
| 16
| 146
| 6.875
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171233
| 146
| 7
| 55
| 20.857143
| 0.909091
| 0.30137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6f891ba41df694e17963eae57cafd81f4470d782
| 56
|
py
|
Python
|
__init__.py
|
takkaO/Bezier_Clipping_Algorithm
|
5ce169c0c32ea66a42739759dbc067a1d52494ac
|
[
"MIT"
] | 2
|
2020-08-19T17:27:39.000Z
|
2022-01-20T18:14:24.000Z
|
__init__.py
|
takkaO/Bezier_Clipping_Algorithm
|
5ce169c0c32ea66a42739759dbc067a1d52494ac
|
[
"MIT"
] | null | null | null |
__init__.py
|
takkaO/Bezier_Clipping_Algorithm
|
5ce169c0c32ea66a42739759dbc067a1d52494ac
|
[
"MIT"
] | 1
|
2021-04-01T10:42:18.000Z
|
2021-04-01T10:42:18.000Z
|
from . import bezier_clipping
from . import line_module
| 28
| 30
| 0.821429
| 8
| 56
| 5.5
| 0.75
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 31
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6fbd3ad8cd950642fce8f521342d60ee1a5a41e2
| 2,107
|
py
|
Python
|
MyDIPUtils/hole.py
|
DaddyGang/Laser-Shadowgraph-Image-Processing
|
befbba60f88d6ec3b250458733b2d56a3712e970
|
[
"MIT"
] | 3
|
2018-10-21T05:16:29.000Z
|
2018-11-01T06:33:13.000Z
|
MyDIPUtils/hole.py
|
DaddyGang/Laser-Shadowgraph-Image-Processing
|
befbba60f88d6ec3b250458733b2d56a3712e970
|
[
"MIT"
] | null | null | null |
MyDIPUtils/hole.py
|
DaddyGang/Laser-Shadowgraph-Image-Processing
|
befbba60f88d6ec3b250458733b2d56a3712e970
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import cv2
import numpy as np
from MyDIPUtils.bitop import is_similar
def floodfill(image, FULL=False):
    """Fill interior holes in a binary image via flood fill from (0, 0).

    Parameters
    ----------
    image : binary image (numpy array); not modified in place.
    FULL : when True, repeat the fill-and-smooth pass until a flood fill
        from the corner leaves no unreached background pixels.

    Returns the hole-filled, morphologically smoothed image.

    Improvements over the original: the duplicated fill/smooth code shared
    by the first pass and the FULL loop is factored into helpers, trailing
    semicolons are removed, and ``FULL == True`` is replaced by a plain
    truth test.
    """
    def _invert_flood(im_in):
        # Flood-fill the background from the corner, then invert so only the
        # unreached interior holes remain white.
        im_ff = im_in.copy()
        h, w = im_in.shape[:2]
        # Mask must be 2 pixels larger than the image (cv2.floodFill contract).
        mask = np.zeros((h + 2, w + 2), np.uint8)
        cv2.floodFill(im_ff, mask, (0, 0), 255)
        return cv2.bitwise_not(im_ff)

    def _smooth(im):
        # Close small gaps: dilate then erode with the same elliptical kernel,
        # then median-filter away salt-and-pepper noise.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        im = cv2.dilate(im, kernel, iterations=4)
        im = cv2.erode(im, kernel, iterations=4)
        return cv2.medianBlur(im, 5)

    im_in = image.copy()
    im_out = _smooth(im_in | _invert_flood(im_in))
    if FULL:
        while True:
            holes = _invert_flood(im_out)
            # No remaining holes: the previous result is final.
            if len(np.nonzero(holes)[0]) == 0:
                return im_out
            im_out = _smooth(im_out | holes)
    return im_out
| 36.964912
| 115
| 0.589464
| 285
| 2,107
| 4.189474
| 0.259649
| 0.080402
| 0.073702
| 0.107203
| 0.834171
| 0.834171
| 0.834171
| 0.834171
| 0.834171
| 0.834171
| 0
| 0.041436
| 0.312767
| 2,107
| 57
| 116
| 36.964912
| 0.783149
| 0.213574
| 0
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6fcb36aceaef82a69b81adec07d38433fb2e10d9
| 93
|
py
|
Python
|
data/config_interface/__init__.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
|
eb8418d4d893fcb2beb929abb241281b7a9b6a95
|
[
"MIT"
] | 5
|
2021-05-05T06:08:52.000Z
|
2022-03-24T04:57:52.000Z
|
data/config_interface/__init__.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
|
eb8418d4d893fcb2beb929abb241281b7a9b6a95
|
[
"MIT"
] | null | null | null |
data/config_interface/__init__.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
|
eb8418d4d893fcb2beb929abb241281b7a9b6a95
|
[
"MIT"
] | 2
|
2021-08-07T05:18:05.000Z
|
2022-03-31T03:48:37.000Z
|
from .DataConfigurator import DataConfigurator
from .GridConfigurator import GridConfigurator
| 46.5
| 46
| 0.903226
| 8
| 93
| 10.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075269
| 93
| 2
| 47
| 46.5
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b505d1bb1ef4d42abd66e116dc34b05a56372e67
| 55
|
py
|
Python
|
task_in_steps/__init__.py
|
tom-010/task_in_steps
|
826e329b8815fe6cb3eb829100998315bd55f666
|
[
"Apache-2.0"
] | null | null | null |
task_in_steps/__init__.py
|
tom-010/task_in_steps
|
826e329b8815fe6cb3eb829100998315bd55f666
|
[
"Apache-2.0"
] | null | null | null |
task_in_steps/__init__.py
|
tom-010/task_in_steps
|
826e329b8815fe6cb3eb829100998315bd55f666
|
[
"Apache-2.0"
] | null | null | null |
from task_in_steps.task_in_steps import Step, run_steps
| 55
| 55
| 0.890909
| 11
| 55
| 4
| 0.636364
| 0.272727
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 55
| 1
| 55
| 55
| 0.862745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b510b91b99c1a03bb4d48207f9494837964973a4
| 166
|
py
|
Python
|
test/test_secure_import.py
|
rsimari/secure_import
|
7d70cb6984977d9abf4e2cca5ff95c5ded69b819
|
[
"MIT"
] | null | null | null |
test/test_secure_import.py
|
rsimari/secure_import
|
7d70cb6984977d9abf4e2cca5ff95c5ded69b819
|
[
"MIT"
] | 3
|
2019-10-21T17:14:02.000Z
|
2021-06-01T22:42:23.000Z
|
test/test_secure_import.py
|
rsimari/secure_import
|
7d70cb6984977d9abf4e2cca5ff95c5ded69b819
|
[
"MIT"
] | null | null | null |
import pytest
import os
from sys import path
path.append(os.path.join(os.getcwd(), 'src'))
from secure_import import secure_import
def test_secure_import():
    """Placeholder test: passes as long as ``secure_import`` imports cleanly."""
| 15.090909
| 45
| 0.777108
| 27
| 166
| 4.62963
| 0.518519
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126506
| 166
| 10
| 46
| 16.6
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0.142857
| 0.714286
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b514c2206eaacda3175b389bee9647a792348b6a
| 42
|
py
|
Python
|
src/manim_pptx/__init__.py
|
TheShadow29/manim-pptx
|
2a8fca7fcf205e07063b511146bd1c774ed645a5
|
[
"MIT"
] | 6
|
2021-11-30T03:09:13.000Z
|
2022-02-25T01:17:59.000Z
|
src/manim_pptx/__init__.py
|
TheShadow29/manim-pptx
|
2a8fca7fcf205e07063b511146bd1c774ed645a5
|
[
"MIT"
] | 2
|
2021-12-03T19:56:08.000Z
|
2022-03-03T08:37:50.000Z
|
src/manim_pptx/__init__.py
|
TheShadow29/manim-pptx
|
2a8fca7fcf205e07063b511146bd1c774ed645a5
|
[
"MIT"
] | 1
|
2021-11-30T05:33:23.000Z
|
2021-11-30T05:33:23.000Z
|
from manim_pptx.pptxscene import PPTXScene
| 42
| 42
| 0.904762
| 6
| 42
| 6.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d2027a953218722586c09f57259b0995c2b9930f
| 45
|
py
|
Python
|
my/bert_codertimo_pytorch/__init__.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
my/bert_codertimo_pytorch/__init__.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
my/bert_codertimo_pytorch/__init__.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
from .model import BERT, CustomBERT, GoodBad
| 22.5
| 44
| 0.8
| 6
| 45
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 1
| 45
| 45
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d269c462f3233e464f799f1ee22e2858335d5867
| 31
|
py
|
Python
|
streamz_ext/sources.py
|
xpdAcq/streamz_ext
|
9c3b41fdcca3dc7deea6d3f5523fee315af71211
|
[
"BSD-3-Clause"
] | 1
|
2018-10-02T02:37:19.000Z
|
2018-10-02T02:37:19.000Z
|
streamz_ext/sources.py
|
xpdAcq/streamz_ext
|
9c3b41fdcca3dc7deea6d3f5523fee315af71211
|
[
"BSD-3-Clause"
] | 31
|
2018-01-17T15:54:32.000Z
|
2018-10-24T17:11:28.000Z
|
streamz_ext/sources.py
|
xpdAcq/streamz_ext
|
9c3b41fdcca3dc7deea6d3f5523fee315af71211
|
[
"BSD-3-Clause"
] | 4
|
2018-01-16T19:27:49.000Z
|
2018-08-20T08:58:06.000Z
|
from zstreamz.sources import *
| 15.5
| 30
| 0.806452
| 4
| 31
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9677a9dcffcb3b06012debedbb5cde5144b60e57
| 38
|
py
|
Python
|
platipy/dicom/communication/__init__.py
|
RadiotherapyAI/platipy
|
53294789a3805ea088c9953027f4ab09a614f052
|
[
"Apache-2.0"
] | 26
|
2020-10-26T17:30:00.000Z
|
2022-03-07T01:21:37.000Z
|
platipy/dicom/communication/__init__.py
|
RadiotherapyAI/platipy
|
53294789a3805ea088c9953027f4ab09a614f052
|
[
"Apache-2.0"
] | 20
|
2020-10-01T04:05:37.000Z
|
2022-03-29T23:27:11.000Z
|
platipy/dicom/communication/__init__.py
|
RadiotherapyAI/platipy
|
53294789a3805ea088c9953027f4ab09a614f052
|
[
"Apache-2.0"
] | 5
|
2020-10-01T03:33:36.000Z
|
2021-02-20T09:58:30.000Z
|
from .connector import DicomConnector
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
967b681ab9942c359fbecb0863f6ae9bef66774f
| 1,031
|
py
|
Python
|
Day72/Functions.py
|
analuisadev/100-Days-Of-Code
|
b1dafabc335cd2c3c9b1cecd50597b42d8959d4a
|
[
"MIT"
] | 2
|
2021-01-08T22:13:21.000Z
|
2021-03-17T10:44:12.000Z
|
Day72/Functions.py
|
analuisadev/100-Days-Of-Code
|
b1dafabc335cd2c3c9b1cecd50597b42d8959d4a
|
[
"MIT"
] | null | null | null |
Day72/Functions.py
|
analuisadev/100-Days-Of-Code
|
b1dafabc335cd2c3c9b1cecd50597b42d8959d4a
|
[
"MIT"
] | null | null | null |
# Reads an integer from the user, re-prompting on invalid input.
def ReadInt(msg):
    """Prompt with *msg* until a valid integer is typed; Ctrl-C returns 0."""
    while True:
        try:
            return int(input(msg))
        except (ValueError, TypeError):
            # Invalid text: warn (in Portuguese, as in the rest of the app)
            # and prompt again.
            print('\033[1;31mERRO: Digite um número inteiro válido!\033[m')
        except KeyboardInterrupt:
            # User declined to answer: fall back to zero.
            print('\n\033[1;31mO usuário preferiu não informar um número\033[m')
            return 0
# Reads a real (floating-point) number from the user with error handling.
def ReadFloat(msg):
    """Prompt with *msg* until a valid float is typed; Ctrl-C returns 0.

    Fix: the Ctrl-C message contained a duplicated phrase
    ("não informar não informar"); the repetition is removed.
    """
    while True:
        try:
            number = float(input(msg))
        except (ValueError, TypeError):
            print('\033[1;31mERRO: Digite um número real válido!\033[m')
            continue
        except (KeyboardInterrupt):
            print('\n\033[1;31mO usuário preferiu não informar um número.\033[m')
            return 0
        else:
            return number
| 38.185185
| 102
| 0.594568
| 122
| 1,031
| 5.02459
| 0.385246
| 0.078303
| 0.042414
| 0.04894
| 0.924959
| 0.787928
| 0.787928
| 0.787928
| 0.787928
| 0.787928
| 0
| 0.054755
| 0.326867
| 1,031
| 27
| 103
| 38.185185
| 0.82853
| 0.183317
| 0
| 0.666667
| 0
| 0
| 0.284514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9693d0adc1ee8de233d3015aa07f6141ff5843be
| 28
|
py
|
Python
|
test/login.py
|
huang880/test
|
9c65ffd470dd52633ded4eb2b8e1cd89d3b7c5d0
|
[
"MIT"
] | null | null | null |
test/login.py
|
huang880/test
|
9c65ffd470dd52633ded4eb2b8e1cd89d3b7c5d0
|
[
"MIT"
] | null | null | null |
test/login.py
|
huang880/test
|
9c65ffd470dd52633ded4eb2b8e1cd89d3b7c5d0
|
[
"MIT"
] | null | null | null |
# Placeholder assignments used by the test module.
a = 1
b = 2
c = 3
d = 4
# Fix: the original had a bare undefined identifier here (a non-ASCII insult),
# which raised NameError as soon as the module was imported.  Removed.
f = 6
| 4
| 7
| 0.607143
| 11
| 28
| 1.545455
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 0.214286
| 28
| 6
| 8
| 4.666667
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96b6ffb35c3e506a946c466854e70ad7712a107c
| 34
|
py
|
Python
|
tests/connection/__init__.py
|
Amin-egn/Recipient
|
51e98ef9e679b3f32db7ab2eb07f0a905f0dac24
|
[
"Apache-2.0"
] | 1
|
2021-07-24T11:05:20.000Z
|
2021-07-24T11:05:20.000Z
|
tests/connection/__init__.py
|
Amin-egn/Recipient
|
51e98ef9e679b3f32db7ab2eb07f0a905f0dac24
|
[
"Apache-2.0"
] | null | null | null |
tests/connection/__init__.py
|
Amin-egn/Recipient
|
51e98ef9e679b3f32db7ab2eb07f0a905f0dac24
|
[
"Apache-2.0"
] | 1
|
2021-08-08T11:14:02.000Z
|
2021-08-08T11:14:02.000Z
|
from .tests import TestConnection
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96c13bd9d17b1f133114f423edc49b8147772953
| 83
|
py
|
Python
|
uq360/algorithms/ensemble_heteroscedastic_regression/__init__.py
|
Sclare87/UQ360
|
2378bfa4a8d61f813afbf6854341888434c9eb11
|
[
"Apache-2.0"
] | 148
|
2021-05-27T20:52:51.000Z
|
2022-03-16T22:49:48.000Z
|
uq360/algorithms/ensemble_heteroscedastic_regression/__init__.py
|
Sclare87/UQ360
|
2378bfa4a8d61f813afbf6854341888434c9eb11
|
[
"Apache-2.0"
] | 9
|
2021-06-21T18:45:07.000Z
|
2021-11-08T14:42:30.000Z
|
uq360/algorithms/ensemble_heteroscedastic_regression/__init__.py
|
Sclare87/UQ360
|
2378bfa4a8d61f813afbf6854341888434c9eb11
|
[
"Apache-2.0"
] | 27
|
2021-06-01T18:29:02.000Z
|
2022-03-02T06:56:03.000Z
|
from .ensemble_heteroscedastic_regression import EnsembleHeteroscedasticRegression
| 41.5
| 82
| 0.939759
| 6
| 83
| 12.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 83
| 1
| 83
| 83
| 0.962025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7393c4f4852ba5d21f5669a7fd019496e04c0545
| 8,434
|
py
|
Python
|
tests/test_auth.py
|
nhi-vanye/sanic-auth
|
5ea5eaad9a554f2e1358004dc23786b484169ded
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_auth.py
|
nhi-vanye/sanic-auth
|
5ea5eaad9a554f2e1358004dc23786b484169ded
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_auth.py
|
nhi-vanye/sanic-auth
|
5ea5eaad9a554f2e1358004dc23786b484169ded
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from sanic import response
from sanic_auth import Auth, User
def test_login(app):
auth = Auth(app)
@app.post('/login')
async def login(request):
name = request.form.get('name')
password = request.form.get('password')
if name == 'demo' and password == '1234':
auth.login_user(request, User(id=1, name=name))
return response.text('okay')
return response.text('failed')
@app.route('/user')
async def user(request):
user = auth.current_user(request)
if user is not None:
return response.text(user.name)
return response.text('')
payload = {}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'failed'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == ''
payload = {'user': 'demo'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'failed'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == ''
payload = {'name': 'demo', 'password': '4321'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'failed'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == ''
payload = {'name': 'demo', 'password': '1234'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'okay'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == 'demo'
def test_logout(app):
auth = Auth(app)
@app.post('/login')
async def login(request):
name = request.form.get('name')
password = request.form.get('password')
if name == 'demo' and password == '1234':
auth.login_user(request, User(id=1, name=name))
return response.text('okay')
return response.text('failed')
@app.route('/logout')
async def logout(request):
auth.logout_user(request)
return response.redirect('/user')
@app.route('/user')
async def user(request):
user = auth.current_user(request)
if user is not None:
return response.text(user.name)
return response.text('')
payload = {'name': 'demo', 'password': '1234'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'okay'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == 'demo'
req, reps = app.test_client.get('/logout')
assert resp.status == 200 and resp.text == 'demo'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == ''
req, reps = app.test_client.get('/logout')
assert resp.status == 200 and resp.text == ''
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == ''
def test_login_required(app):
# the default is 'auth.login', change to 'login' to avoid using blueprint
app.config.AUTH_LOGIN_ENDPOINT = 'login'
auth = Auth(app)
@app.post('/login')
async def login(request):
name = request.form.get('name')
password = request.form.get('password')
if name == 'demo' and password == '1234':
auth.login_user(request, User(id=1, name=name))
return response.text('okay')
return response.text('failed')
@app.route('/logout')
@auth.login_required
async def logout(request):
auth.logout_user(request)
return response.redirect('/user')
@app.route('/user')
@auth.login_required(user_keyword='user')
async def user(request, user):
return response.text(user.name)
payload = {'name': 'demo', 'password': '1234'}
req, resp = app.test_client.get('/user', allow_redirects=False)
assert resp.status == 302
assert resp.headers['Location'] == app.url_for('login')
payload = {'name': 'demo', 'password': '1234'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'okay'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == 'demo'
def test_user_keyword(app):
auth = Auth(app)
@app.post('/login')
async def login(request):
name = request.form.get('name')
password = request.form.get('password')
if name == 'demo' and password == '1234':
auth.login_user(request, User(id=1, name=name))
return response.text('okay')
return response.text('failed')
@app.route('/user')
@auth.login_required(user_keyword='user')
async def user(request, user):
return response.text(user.name)
@app.route('/<user>')
@auth.login_required(user_keyword='user')
async def user_id(request, user):
return response.text(user.id)
payload = {'name': 'demo', 'password': '1234'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'okay'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == 'demo'
req, resp = app.test_client.get(app.url_for('user_id', user=1))
# RuntimeError being raised because we try to overwrite user parameter
assert resp.status == 500
def test_decorators(app):
auth = Auth(app)
USER_DB = [
{'id': '1', 'name': 'demo', 'password': '1234'},
{'id': '2', 'name': 'admin', 'password': 'root'},
]
def find_user(name, password):
for user in USER_DB:
if user['name'] == name and user['password'] == password:
return user
@auth.serializer
def serialize(user):
return user['id']
@auth.user_loader
def load_user(token):
for user in USER_DB:
if user['id'] == token:
return user
@auth.no_auth_handler
def handle_unauthorized(request):
return response.text('unauthorized', status=401)
def handle_no_auth(request):
return response.text('no_auth', status=403)
@app.post('/login')
async def login(request):
name = request.form.get('name')
password = request.form.get('password')
user = find_user(name, password)
if user is not None:
auth.login_user(request, user)
return response.text('okay')
return response.text('failed')
@app.route('/user')
@auth.login_required(user_keyword='user')
async def user(request, user):
return response.text(user['name'])
@app.route('/user/data')
@auth.login_required(handle_no_auth=handle_no_auth)
async def user_data(request, user):
return response.text(user['name'])
req, resp = app.test_client.get('/user')
assert resp.status == 401 and resp.text == 'unauthorized'
req, resp = app.test_client.get('/user/data')
assert resp.status == 403 and resp.text == 'no_auth'
payload = {'name': 'noone', 'password': '1234'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'failed'
payload = {'name': 'demo', 'password': '1234'}
req, resp = app.test_client.post('/login', data=payload)
assert resp.status == 200 and resp.text == 'okay'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == 'demo'
def test_async_user_loader(app):
auth = Auth(app)
@auth.user_loader
async def load_user(token):
if token is 'root':
return 'pwned'
return None
@auth.serializer
def serialize(user):
return 'root'
@app.post('/login')
async def login(request):
auth.login_user(request, user)
return response.text('All your base are belong to us')
@app.route('/user')
@auth.login_required(user_keyword='user')
async def user(request, user):
return response.text(user)
req, resp = app.test_client.post('/login')
assert resp.status == 200 and resp.text == 'All your base are belong to us'
req, resp = app.test_client.get('/user')
assert resp.status == 200 and resp.text == 'pwned'
def test_setup_once(app):
auth = Auth(app)
with pytest.raises(RuntimeError):
auth.setup(app)
| 31.94697
| 79
| 0.612521
| 1,115
| 8,434
| 4.552466
| 0.095067
| 0.055162
| 0.069149
| 0.068952
| 0.783688
| 0.780733
| 0.759259
| 0.719858
| 0.696612
| 0.696612
| 0
| 0.022738
| 0.23346
| 8,434
| 263
| 80
| 32.068441
| 0.762413
| 0.019208
| 0
| 0.720588
| 0
| 0
| 0.104862
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 1
| 0.063725
| false
| 0.107843
| 0.014706
| 0.019608
| 0.230392
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
73d7984e928998618b05a99d7efe16d78d45d1cb
| 153
|
py
|
Python
|
config.py
|
valerybriz/api_tests
|
ba66df69fbd422d0c4c9e292350302ea442d2336
|
[
"MIT"
] | null | null | null |
config.py
|
valerybriz/api_tests
|
ba66df69fbd422d0c4c9e292350302ea442d2336
|
[
"MIT"
] | null | null | null |
config.py
|
valerybriz/api_tests
|
ba66df69fbd422d0c4c9e292350302ea442d2336
|
[
"MIT"
] | null | null | null |
import os
test_username= os.environ.get("TEST_USERNAME","")
test_password= os.environ.get("TEST_PASSWORD","")
test_params = [{"something": "something"}]
| 30.6
| 49
| 0.738562
| 20
| 153
| 5.4
| 0.45
| 0.222222
| 0.222222
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065359
| 153
| 5
| 50
| 30.6
| 0.755245
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
73dadc12194e37c4138ac0967e34e1e560a7901b
| 295
|
py
|
Python
|
plotly/graph_objs/contour/__init__.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/graph_objs/contour/__init__.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/graph_objs/contour/__init__.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
from ._stream import Stream
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.contour import hoverlabel
from ._contours import Contours
from plotly.graph_objs.contour import contours
from ._colorbar import ColorBar
from plotly.graph_objs.contour import colorbar
| 32.777778
| 48
| 0.854237
| 41
| 295
| 5.95122
| 0.268293
| 0.122951
| 0.184426
| 0.233607
| 0.393443
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108475
| 295
| 8
| 49
| 36.875
| 0.927757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
73dcc4968e972c3e15c37e4dae3f34088c5b7883
| 6,435
|
py
|
Python
|
pymtl3/passes/backends/verilog/import_/test/VImportSignalGen_test.py
|
mondO/pymtl3
|
9869dda28c01926cee6da94ebdeac2a210150c62
|
[
"BSD-3-Clause"
] | 1
|
2022-01-03T06:22:11.000Z
|
2022-01-03T06:22:11.000Z
|
pymtl3/passes/backends/verilog/import_/test/VImportSignalGen_test.py
|
mondO/pymtl3
|
9869dda28c01926cee6da94ebdeac2a210150c62
|
[
"BSD-3-Clause"
] | null | null | null |
pymtl3/passes/backends/verilog/import_/test/VImportSignalGen_test.py
|
mondO/pymtl3
|
9869dda28c01926cee6da94ebdeac2a210150c62
|
[
"BSD-3-Clause"
] | null | null | null |
#=========================================================================
# VImportSignalGen.py
#=========================================================================
# Author : Peitian Pan
# Date : June 1, 2019
"""Test the SystemVerilog signal generation of imported component."""
from pymtl3.datatypes import Bits1, Bits32, bitstruct, mk_bits
from pymtl3.dsl import Component, InPort, Interface, OutPort
from pymtl3.passes.rtlir import RTLIRDataType as rdt
from pymtl3.passes.rtlir import RTLIRType as rt
from pymtl3.passes.rtlir.util.test_utility import do_test
from ..VerilatorImportPass import VerilatorImportPass
def local_do_test( m ):
m.elaborate()
rtype = rt.get_component_ifc_rtlir( m )
ipass = VerilatorImportPass()
symbols, decls = ipass.gen_signal_decl_py( rtype )
assert symbols == m._ref_symbols
assert decls == m._ref_decls
def test_port_single( do_test ):
class A( Component ):
def construct( s ):
s.in_ = InPort( mk_bits( 322 ) )
a = A()
a._ref_symbols = { 'Bits322' : mk_bits(322) }
a._ref_decls = [
"s.in_ = InPort( Bits322 )",
]
do_test( a )
def test_port_array( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range( 3 ) ]
a = A()
a._ref_symbols = {}
a._ref_decls = [
"s.in_ = [ InPort( Bits32 ) for _ in range(3) ]",
]
do_test( a )
def test_port_2d_array( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ [ InPort( Bits32 ) for _ in range(2) ] for _ in range(3) ]
a = A()
a._ref_symbols = {}
a._ref_decls = [
"s.in_ = [ [ InPort( Bits32 ) for _ in range(2) ] for _ in range(3) ]",
]
do_test( a )
def test_struct_port_single( do_test ):
@bitstruct
class struct:
bar: Bits32
foo: Bits32
class A( Component ):
def construct( s ):
s.in_ = InPort( struct )
a = A()
a._ref_symbols = { 'struct' : struct }
a._ref_decls = [
"s.in_ = InPort( struct )",
]
do_test( a )
def test_struct_port_array( do_test ):
@bitstruct
class struct:
bar: Bits32
foo: Bits32
class A( Component ):
def construct( s ):
s.in_ = [ InPort( struct ) for _ in range(2) ]
a = A()
a._ref_symbols = { 'struct' : struct }
a._ref_decls = [
"s.in_ = [ InPort( struct ) for _ in range(2) ]",
]
do_test( a )
def test_packed_array_port_array( do_test ):
@bitstruct
class struct:
bar: Bits32
foo: [ [ Bits32 for _ in range(2) ] for _ in range(3) ]
class A( Component ):
def construct( s ):
s.in_ = [ InPort( struct ) for _ in range(2) ]
a = A()
a._ref_symbols = { 'struct' : struct }
a._ref_decls = [
"s.in_ = [ InPort( struct ) for _ in range(2) ]",
]
do_test( a )
def test_nested_struct( do_test ):
@bitstruct
class inner_struct:
foo: Bits32
@bitstruct
class struct:
bar: Bits32
inner: inner_struct
class A( Component ):
def construct( s ):
s.in_ = [ InPort( struct ) for _ in range(2) ]
a = A()
# Inner struct will not be added to `symbols` because struct
# refers to it!
a._ref_symbols = { 'struct' : struct }
a._ref_decls = [
"s.in_ = [ InPort( struct ) for _ in range(2) ]",
]
do_test( a )
def test_interface( do_test ):
class Ifc( Interface ):
def construct( s ):
s.msg = InPort( Bits32 )
s.val = InPort( Bits1 )
s.rdy = OutPort( Bits1 )
class A( Component ):
def construct( s ):
s.ifc = Ifc()
a = A()
a._ref_symbols = { 'Ifc' : Ifc }
a._ref_decls = [
"s.ifc = Ifc()"
]
do_test( a )
def test_interface_parameter( do_test ):
class Ifc( Interface ):
def construct( s, Type, nbits_val, nbits_rdy ):
s.msg = InPort( Type )
s.val = InPort( mk_bits(nbits_val) )
# Added support for BitsN values in case someone wants to do
# tricky things like this.
s.rdy = OutPort( mk_bits(nbits_rdy.nbits) )
class A( Component ):
def construct( s ):
s.ifc = Ifc( Bits32, 1, Bits1(1) )
a = A()
a._ref_symbols = { 'Ifc' : Ifc }
a._ref_decls = [
"s.ifc = Ifc( Bits32, 1, Bits1( 1 ) )"
]
do_test( a )
def test_interface_parameter_long_vector( do_test ):
class Ifc( Interface ):
def construct( s, Type, nbits_val, nbits_rdy ):
s.msg = InPort( Type )
s.val = InPort( mk_bits(nbits_val) )
# Added support for BitsN values in case someone wants to do
# tricky things like this.
s.rdy = OutPort( mk_bits(nbits_rdy.nbits) )
class A( Component ):
def construct( s ):
Bits322 = mk_bits(322)
s.ifc = Ifc( Bits322, 1, Bits322(1) )
a = A()
a._ref_symbols = { 'Ifc' : Ifc, 'Bits322' : mk_bits(322) }
a._ref_decls = [
"s.ifc = Ifc( Bits322, 1, Bits322( 1 ) )"
]
do_test( a )
def test_interface_array( do_test ):
class Ifc( Interface ):
def construct( s ):
s.msg = InPort( Bits32 )
s.val = InPort( Bits1 )
s.rdy = OutPort( Bits1 )
class A( Component ):
def construct( s ):
s.ifc = [ Ifc() for _ in range(2) ]
a = A()
a._ref_symbols = { 'Ifc' : Ifc }
a._ref_decls = [
"s.ifc = [ Ifc() for _ in range(2) ]"
]
do_test( a )
def test_nested_interface( do_test ):
class InnerIfc( Interface ):
def construct( s ):
s.msg = InPort( Bits32 )
s.val = InPort( Bits1 )
s.rdy = OutPort( Bits1 )
class Ifc( Interface ):
def construct( s ):
s.valrdy_ifc = InnerIfc()
s.ctrl_bar = InPort( Bits32 )
s.ctrl_foo = OutPort( Bits32 )
class A( Component ):
def construct( s ):
s.ifc = [ Ifc() for _ in range(2) ]
a = A()
# Inner interface will not be added to `symbols` because Ifc refers
# to it!
a._ref_symbols = { 'Ifc' : Ifc }
a._ref_decls = [
"s.ifc = [ Ifc() for _ in range(2) ]"
]
do_test( a )
def test_nested_interface_port_array( do_test ):
class InnerIfc( Interface ):
def construct( s ):
s.msg = [ InPort( Bits32 ) for _ in range(2) ]
s.val = InPort( Bits1 )
s.rdy = OutPort( Bits1 )
class Ifc( Interface ):
def construct( s ):
s.valrdy_ifc = InnerIfc()
s.ctrl_bar = InPort( Bits32 )
s.ctrl_foo = OutPort( Bits32 )
class A( Component ):
def construct( s ):
s.ifc = [ Ifc() for _ in range(2) ]
a = A()
# Inner interface will not be added to `symbols` because Ifc refers
# to it!
a._ref_symbols = { 'Ifc' : Ifc }
a._ref_decls = [
"s.ifc = [ Ifc() for _ in range(2) ]"
]
do_test( a )
| 27.151899
| 75
| 0.588967
| 907
| 6,435
| 3.973539
| 0.115766
| 0.046615
| 0.075749
| 0.069922
| 0.821032
| 0.791898
| 0.77525
| 0.713929
| 0.693674
| 0.67758
| 0
| 0.027924
| 0.259829
| 6,435
| 236
| 76
| 27.266949
| 0.728742
| 0.102564
| 0
| 0.704434
| 0
| 0.004926
| 0.095569
| 0
| 0
| 0
| 0
| 0
| 0.009852
| 1
| 0.172414
| false
| 0.029557
| 0.034483
| 0
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb45b80af373f1ca67f0d005ecddc7593f77eb92
| 35
|
py
|
Python
|
vnpy/gateway/mt5/__init__.py
|
funrunskypalace/vnpy
|
2d87aede685fa46278d8d3392432cc127b797926
|
[
"MIT"
] | 19,529
|
2015-03-02T12:17:35.000Z
|
2022-03-31T17:18:27.000Z
|
vnpy/gateway/mt5/__init__.py
|
funrunskypalace/vnpy
|
2d87aede685fa46278d8d3392432cc127b797926
|
[
"MIT"
] | 2,186
|
2015-03-04T23:16:33.000Z
|
2022-03-31T03:44:01.000Z
|
vnpy/gateway/mt5/__init__.py
|
funrunskypalace/vnpy
|
2d87aede685fa46278d8d3392432cc127b797926
|
[
"MIT"
] | 8,276
|
2015-03-02T05:21:04.000Z
|
2022-03-31T13:13:13.000Z
|
from .mt5_gateway import Mt5Gateway
| 35
| 35
| 0.885714
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.085714
| 35
| 1
| 35
| 35
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb49cd235f066ad95405a72b7af891476fa545df
| 48
|
py
|
Python
|
zimp/__init__.py
|
freecraver/zimp
|
13fda3c561643ce24a9f4358058d9af9f395283c
|
[
"Apache-2.0"
] | null | null | null |
zimp/__init__.py
|
freecraver/zimp
|
13fda3c561643ce24a9f4358058d9af9f395283c
|
[
"Apache-2.0"
] | null | null | null |
zimp/__init__.py
|
freecraver/zimp
|
13fda3c561643ce24a9f4358058d9af9f395283c
|
[
"Apache-2.0"
] | null | null | null |
from readability.metrics import get_dummy_score
| 24
| 47
| 0.895833
| 7
| 48
| 5.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb6e77a276e97736312f93e4bbb2ee98cb3928c6
| 2,152
|
py
|
Python
|
test05.py
|
OskarBreach/advent-of-code-2018
|
10887873d988df699e7ebe83d39511b3303ccf38
|
[
"MIT"
] | null | null | null |
test05.py
|
OskarBreach/advent-of-code-2018
|
10887873d988df699e7ebe83d39511b3303ccf38
|
[
"MIT"
] | null | null | null |
test05.py
|
OskarBreach/advent-of-code-2018
|
10887873d988df699e7ebe83d39511b3303ccf38
|
[
"MIT"
] | null | null | null |
import unittest
from day05 import resulting_polymer, reacted_polymer_after_unit_removed, shortest_reacted_polymer_after_unit_removed,\
shortest_reacted_polymer_after_any_unit_removed
class ResultingPolymerTests(unittest.TestCase):
def test_firstTestCase(self):
self.assertEqual(resulting_polymer("aA"), 0)
def test_secondTestCase(self):
self.assertEqual(resulting_polymer("abBA"), 0)
def test_thirdTestCase(self):
self.assertEqual(resulting_polymer("abAB"), 4)
def test_forthTestCase(self):
self.assertEqual(resulting_polymer("aabAAB"), 6)
def test_fifthTestCase(self):
self.assertEqual(resulting_polymer("dabAcCaCBAcCcaDA"), 10)
class RemoveUnitsTests(unittest.TestCase):
def test_firstTestCase(self):
self.assertEqual(reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "a"), 6)
def test_secondTestCase(self):
self.assertEqual(reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "b"), 8)
def test_thirdTestCase(self):
self.assertEqual(reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "C"), 4)
def test_forthTestCase(self):
self.assertEqual(reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "D"), 6)
class FindShortestRemovedUnitsLength(unittest.TestCase):
def test_firstTestCase(self):
self.assertEqual(shortest_reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "a"), 6)
def test_secondTestCase(self):
self.assertEqual(shortest_reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "B"), 8)
def test_thirdTestCase(self):
self.assertEqual(shortest_reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "aB"), 6)
def test_forthTestCase(self):
self.assertEqual(shortest_reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "aBc"), 4)
def test_fifthTestCase(self):
self.assertEqual(shortest_reacted_polymer_after_unit_removed("dabAcCaCBAcCcaDA", "aBcD"), 4)
def test_sixthTestCase(self):
self.assertEqual(shortest_reacted_polymer_after_any_unit_removed("dabAcCaCBAcCcaDA"), 4)
if __name__ == "__main__":
unittest.main()
| 36.474576
| 118
| 0.761617
| 238
| 2,152
| 6.504202
| 0.197479
| 0.067829
| 0.184109
| 0.163437
| 0.846253
| 0.794574
| 0.692506
| 0.605297
| 0.523256
| 0.377907
| 0
| 0.009704
| 0.138011
| 2,152
| 59
| 119
| 36.474576
| 0.824798
| 0
| 0
| 0.368421
| 0
| 0
| 0.099861
| 0
| 0
| 0
| 0
| 0
| 0.394737
| 1
| 0.394737
| false
| 0
| 0.052632
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
fb784bb9c52bbd4f10e8a63a3cd9d27ccd23c083
| 11,955
|
py
|
Python
|
tests/io/test_agent_exclusions.py
|
oklymenok/pyTenable
|
73475e37034608afa5e9c7b20c9cec33a2818622
|
[
"MIT"
] | 1
|
2020-05-22T12:08:52.000Z
|
2020-05-22T12:08:52.000Z
|
tests/io/test_agent_exclusions.py
|
oklymenok/pyTenable
|
73475e37034608afa5e9c7b20c9cec33a2818622
|
[
"MIT"
] | null | null | null |
tests/io/test_agent_exclusions.py
|
oklymenok/pyTenable
|
73475e37034608afa5e9c7b20c9cec33a2818622
|
[
"MIT"
] | null | null | null |
from datetime import datetime as dtime, timedelta
from .fixtures import *
from tenable.errors import *
import uuid
@pytest.fixture
def agentexclusion(request, api):
excl = api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def teardown():
try:
api.agent_exclusions.delete(excl['id'])
except NotFoundError:
pass
request.addfinalizer(teardown)
return excl
def test_create_no_times(api):
with pytest.raises(AttributeError):
api.agent_exclusions.create(str(uuid.uuid4()))
def test_create_scanner_id_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
scanner_id='nope',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_name_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(1.02,
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_starttime_typerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
start_time='fail',
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_endtime_typerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time='nope')
def test_create_timezone_typerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
timezone=1,
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_timezone_unexpectedvalue(api):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.create(str(uuid.uuid4()),
timezone='not a real timezone',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_description_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
description=True,
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_frequency_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
frequency=True,
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_frequency_unexpectedvalue(api):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.create(str(uuid.uuid4()),
frequency='nope',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_interval_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
interval='nope',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_weekdays_typerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
weekdays='nope',
frequency='weekly',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_weekdays_unexpectedvalue(api):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.create(str(uuid.uuid4()),
weekdays=['MO', 'TU', 'nope'],
frequency='weekly',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_dayofmonth_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.create(str(uuid.uuid4()),
day_of_month='nope',
frequency='monthly',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_dayofmonth_unexpectedvalue(api):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.create(str(uuid.uuid4()),
day_of_month=0,
frequency='monthly',
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_create_onetime_exclusion(api):
resp = api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
assert isinstance(resp, dict)
check(resp, 'id', int)
check(resp, 'name', str)
check(resp, 'description', str, allow_none=True)
check(resp, 'last_modification_date', int)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'timezone', str)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
api.agent_exclusions.delete(resp['id'])
def test_create_daily_exclusion(api):
resp = api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1),
frequency='daily')
assert isinstance(resp, dict)
check(resp, 'id', int)
check(resp, 'name', str)
check(resp, 'description', str, allow_none=True)
check(resp, 'last_modification_date', int)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'timezone', str)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
api.agent_exclusions.delete(resp['id'])
def test_create_weekly_exclusion(api):
resp = api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1),
frequency='weekly',
weekdays=['mo', 'we', 'fr'])
assert isinstance(resp, dict)
check(resp, 'id', int)
check(resp, 'name', str)
check(resp, 'description', str, allow_none=True)
check(resp, 'last_modification_date', int)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'timezone', str)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
check(resp['schedule']['rrules'], 'byweekday', str)
api.agent_exclusions.delete(resp['id'])
def test_create_monthly_exclusion(api):
resp = api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1),
frequency='monthly',
day_of_month=15)
assert isinstance(resp, dict)
check(resp, 'id', int)
check(resp, 'name', str)
check(resp, 'description', str, allow_none=True)
check(resp, 'last_modification_date', int)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'timezone', str)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
check(resp['schedule']['rrules'], 'bymonthday', int)
api.agent_exclusions.delete(resp['id'])
def test_create_yearly_exclusion(api):
resp = api.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1),
frequency='yearly')
assert isinstance(resp, dict)
check(resp, 'id', int)
check(resp, 'name', str)
check(resp, 'description', str, allow_none=True)
check(resp, 'last_modification_date', int)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'timezone', str)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
api.agent_exclusions.delete(resp['id'])
def test_create_standard_users_cant_create(stdapi):
with pytest.raises(PermissionError):
stdapi.agent_exclusions.create(str(uuid.uuid4()),
start_time=dtime.utcnow(),
end_time=dtime.utcnow() + timedelta(hours=1))
def test_delete_notfounderror(api):
with pytest.raises(NotFoundError):
api.agent_exclusions.delete(123)
def test_delete_exclusion(api, agentexclusion):
api.agent_exclusions.delete(agentexclusion['id'])
def test_delete_standard_user_fail(stdapi, agentexclusion):
with pytest.raises(PermissionError):
stdapi.agent_exclusions.delete(agentexclusion['id'])
def test_edit_no_exclusion_id_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.edit()
def test_edit_exclusion_id_typeerror(api):
with pytest.raises(TypeError):
api.agent_exclusions.edit('nope')
def test_edit_scanner_id_typeerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], scanner_id='nope')
def test_edit_name_typeerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], name=1.02)
def test_edit_starttime_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], start_time='nope')
def test_edit_timezone_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], timezone=1)
def test_edit_timezone_unexpectedvalue(api, agentexclusion):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.edit(agentexclusion['id'], timezone='nope')
def test_edit_description_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], description=1)
def test_edit_frequency_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], frequency=1)
def test_edit_frequency_unexpectedvalue(api, agentexclusion):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.edit(agentexclusion['id'], frequency='nope')
def test_edit_interval_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], interval='nope')
def test_edit_weekdays_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], weekdays='nope')
def test_edit_weekdays_unexpectedvalue(api, agentexclusion):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.edit(agentexclusion['id'], weekdays=['MO', 'WE', 'nope'])
def test_edit_dayofmonth_typerror(api, agentexclusion):
with pytest.raises(TypeError):
api.agent_exclusions.edit(agentexclusion['id'], day_of_month='nope')
def test_edit_dayofmonth_unexpectedvalue(api, agentexclusion):
with pytest.raises(UnexpectedValueError):
api.agent_exclusions.edit(agentexclusion['id'], day_of_month=0)
def test_edit_standard_user_permission_error(stdapi, agentexclusion):
with pytest.raises(PermissionError):
stdapi.agent_exclusions.edit(agentexclusion['id'], name=str(uuid.uuid4()))
def test_edit_success(api, agentexclusion):
api.agent_exclusions.edit(agentexclusion['id'], name=str(uuid.uuid4()))
def test_list_blackouts(api, agentexclusion):
items = api.agent_exclusions.list()
assert isinstance(items, list)
assert agentexclusion in items
| 38.941368
| 86
| 0.681221
| 1,411
| 11,955
| 5.593905
| 0.079376
| 0.064994
| 0.104903
| 0.063854
| 0.864944
| 0.841885
| 0.841885
| 0.816166
| 0.809832
| 0.785506
| 0
| 0.006088
| 0.175659
| 11,955
| 307
| 87
| 38.941368
| 0.794825
| 0
| 0
| 0.609848
| 0
| 0
| 0.089829
| 0.0092
| 0
| 0
| 0
| 0
| 0.026515
| 1
| 0.166667
| false
| 0.003788
| 0.015152
| 0
| 0.185606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb92113cf97b19cbfdf57bde5151c4d65c8cf615
| 66
|
py
|
Python
|
python/testData/testRunner/env/pytest/folder_no_init_py/test_test.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/testRunner/env/pytest/folder_no_init_py/test_test.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/testRunner/env/pytest/folder_no_init_py/test_test.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def test_test():
assert False
def test_2_test():
pass
| 7.333333
| 18
| 0.621212
| 10
| 66
| 3.8
| 0.6
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.287879
| 66
| 8
| 19
| 8.25
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.5
| true
| 0.25
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
fbbad49a5ba884a8303b023dfd8e4b9ed065bb4d
| 9,395
|
py
|
Python
|
tests/test_sirix_async.py
|
sirixdb/sirix-python-client
|
1e57aa7961ec7916e2e6fe227f8f9111b4d0633b
|
[
"Apache-2.0"
] | 7
|
2019-12-11T19:16:17.000Z
|
2021-04-09T14:39:05.000Z
|
tests/test_sirix_async.py
|
sirixdb/sirix-python-client
|
1e57aa7961ec7916e2e6fe227f8f9111b4d0633b
|
[
"Apache-2.0"
] | 1
|
2021-03-25T15:11:05.000Z
|
2021-03-25T15:11:05.000Z
|
tests/test_sirix_async.py
|
sirixdb/sirix-python-client
|
1e57aa7961ec7916e2e6fe227f8f9111b4d0633b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import httpx
import pysirix
from pysirix import DBType
from pysirix.errors import SirixServerError
from .data import data_for_query, post_query, resource_query
base_url = "http://localhost:9443"
pytestmark = pytest.mark.asyncio
async def test_sirix_async_init():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
data = sirix._auth._token_data
assert type(data["access_token"]) == str
assert type(data["refresh_token"]) == str
assert type(data["expires_in"]) == int
await client.aclose()
async def test_auth_refresh():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
data = sirix._auth._token_data
await sirix._auth._async_refresh()
new_data = sirix._auth._token_data
assert new_data != data
await client.aclose()
async def test_get_info():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
await sirix.delete_all()
data = await sirix.get_info()
assert data == []
await sirix.delete_all()
await client.aclose()
async def test_database_create():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
await db.create()
info = await db.get_database_info()
assert info["resources"] == []
await sirix.delete_all()
await client.aclose()
async def test_database_delete():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
await db.create()
await db.delete()
with pytest.raises(SirixServerError):
await db.get_database_info()
await sirix.delete_all()
await client.aclose()
async def test_resource_exists():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource6")
assert await resource.exists() is False
await sirix.delete_all()
await client.aclose()
async def test_create_resource():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource8")
assert await resource.create([]) == "[]"
assert await resource.exists() is True
await sirix.delete_all()
await client.aclose()
async def test_delete_resource():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource10")
await resource.create([])
await resource.delete(None, None)
assert await resource.exists() is False
await sirix.delete_all()
await client.aclose()
async def test_delete_nonexistent_resource():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("blah", DBType.JSON)
resource = db.resource("blah")
assert await resource.exists() is False
with pytest.raises(SirixServerError):
await resource.delete(None, None)
await client.aclose()
async def test_read_resource():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource13")
await resource.create([])
data = await resource.read(None)
assert data == []
await sirix.delete_all()
await client.aclose()
async def test_get_etag():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource15")
await resource.create([])
etag = await resource.get_etag(1)
assert type(etag) == str
await sirix.delete_all()
await client.aclose()
async def test_get_etag_nonexistent():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource17")
await resource.create([])
with pytest.raises(SirixServerError):
await resource.get_etag(2)
await sirix.delete_all()
await client.aclose()
async def test_delete_by_node_id():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource19")
await resource.create({})
await resource.delete(1, None)
with pytest.raises(SirixServerError):
await resource.delete(1, None)
await sirix.delete_all()
await client.aclose()
async def test_delete_by_etag():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource21")
await resource.create({})
etag = await resource.get_etag(1)
await resource.delete(1, etag)
with pytest.raises(SirixServerError):
await resource.delete(1, None)
await sirix.delete_all()
await client.aclose()
async def test_update_by_etag():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource23")
await resource.create([])
etag = await resource.get_etag(1)
await resource.update(1, {}, etag=etag)
assert await resource.read(None) == [{}]
await sirix.delete_all()
await client.aclose()
async def test_update_by_node_id():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource25")
await resource.create([])
await resource.update(1, {})
assert await resource.read(None) == [{}]
await sirix.delete_all()
await client.aclose()
async def test_update_nonexistent_node():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource27")
await resource.create([])
with pytest.raises(SirixServerError):
await resource.update(5, {})
await sirix.delete_all()
await client.aclose()
async def test_read_metadata():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource29")
await resource.create([])
resp = await resource.read_with_metadata(1, 1)
assert resp == {
"metadata": {
"nodeKey": 1,
"hash": "29359c75ea7bce76d9e352a23abf7c69",
"type": "ARRAY",
"descendantCount": 0,
"childCount": 0,
},
"value": [],
}
await sirix.delete_all()
await client.aclose()
async def test_history():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource31")
await resource.create([])
await resource.update(1, {})
await resource.delete(2, None)
assert len(await resource.history()) == 3
await sirix.delete_all()
await client.aclose()
async def test_diff():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("First", DBType.JSON)
resource = db.resource("test_resource33")
await resource.create([])
await resource.update(1, {})
assert await resource.diff(1, 2) == [
{
"insert": {
"data": "{}",
"depth": 2,
"deweyID": "1.17.17",
"insertPosition": "asFirstChild",
"insertPositionNodeKey": 1,
"nodeKey": 2,
"type": "jsonFragment",
}
}
]
await sirix.delete_all()
await client.aclose()
async def test_sirix_query():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("Query", DBType.JSON)
resource = db.resource("query_resource1")
await resource.create(data_for_query)
assert await sirix.query(post_query) == '{"rest":[6]}'
await sirix.delete_all()
await client.aclose()
async def test_resource_query():
client = httpx.AsyncClient(base_url=base_url)
sirix = await pysirix.sirix_async("admin", "admin", client)
db = sirix.database("Query", DBType.JSON)
resource = db.resource("query_resource2")
await resource.create(data_for_query)
assert await resource.query(resource_query) == {"rest": [6]}
await sirix.delete_all()
await client.aclose()
| 32.396552
| 64
| 0.675891
| 1,166
| 9,395
| 5.280446
| 0.09434
| 0.051161
| 0.042878
| 0.092902
| 0.818256
| 0.786422
| 0.758649
| 0.732824
| 0.719181
| 0.691246
| 0
| 0.010976
| 0.195104
| 9,395
| 289
| 65
| 32.508651
| 0.803227
| 0
| 0
| 0.605809
| 0
| 0
| 0.088345
| 0.005641
| 0
| 0
| 0
| 0
| 0.082988
| 1
| 0
| false
| 0
| 0.024896
| 0
| 0.024896
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fbd865570b892e0869af54f729c092c6a63174b9
| 57
|
py
|
Python
|
tests/frameworks/lightgbm/test_lightgbm.py
|
schroedk/sensAI
|
a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7
|
[
"MIT"
] | 10
|
2020-02-19T09:16:54.000Z
|
2022-02-04T16:19:33.000Z
|
tests/frameworks/lightgbm/test_lightgbm.py
|
schroedk/sensAI
|
a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7
|
[
"MIT"
] | 47
|
2020-03-11T16:26:51.000Z
|
2022-02-04T15:29:40.000Z
|
tests/frameworks/lightgbm/test_lightgbm.py
|
schroedk/sensAI
|
a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7
|
[
"MIT"
] | 5
|
2020-03-12T21:33:22.000Z
|
2020-12-21T14:43:04.000Z
|
def test_lightgbm():
import lightgbm
assert True
| 14.25
| 20
| 0.701754
| 7
| 57
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245614
| 57
| 3
| 21
| 19
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
838971380c6ca7300a0196431537795be3047818
| 25
|
py
|
Python
|
spikeforest/spikesorters/yass1/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:19.000Z
|
2021-09-23T01:07:19.000Z
|
spikeforest/spikesorters/yass1/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | null | null | null |
spikeforest/spikesorters/yass1/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:21.000Z
|
2021-09-23T01:07:21.000Z
|
from .yass1 import YASS1
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83dca0bc1a3af7cc4a0ee2a5df938dce2d2fcc57
| 157
|
py
|
Python
|
material/curso_em_video/ex030.py
|
sergiodealencar/courses
|
c9d86b27b0185cc82624b01ed76653dbc12554a3
|
[
"MIT"
] | null | null | null |
material/curso_em_video/ex030.py
|
sergiodealencar/courses
|
c9d86b27b0185cc82624b01ed76653dbc12554a3
|
[
"MIT"
] | null | null | null |
material/curso_em_video/ex030.py
|
sergiodealencar/courses
|
c9d86b27b0185cc82624b01ed76653dbc12554a3
|
[
"MIT"
] | null | null | null |
num = int(input('Diga um número inteiro: '))
if (num % 2) == 0:
print('Esse número é par!')
else:
print('Esse número é ímpar!')
print('--- FIM ---')
| 22.428571
| 44
| 0.566879
| 24
| 157
| 3.708333
| 0.708333
| 0.202247
| 0.337079
| 0.359551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.210191
| 157
| 6
| 45
| 26.166667
| 0.701613
| 0
| 0
| 0
| 0
| 0
| 0.464968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.