hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8098676a40412eb52c1fff71d39e75e131acacd9
| 117
|
py
|
Python
|
src/whylogs/src/whylabs/logs/core/statistics/datatypes/__init__.py
|
bernease/cli-demo-1
|
895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5
|
[
"Apache-2.0"
] | null | null | null |
src/whylogs/src/whylabs/logs/core/statistics/datatypes/__init__.py
|
bernease/cli-demo-1
|
895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5
|
[
"Apache-2.0"
] | null | null | null |
src/whylogs/src/whylabs/logs/core/statistics/datatypes/__init__.py
|
bernease/cli-demo-1
|
895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5
|
[
"Apache-2.0"
] | null | null | null |
from .variancetracker import *
from .integertracker import *
from .floattracker import *
from .stringtracker import *
| 29.25
| 30
| 0.803419
| 12
| 117
| 7.833333
| 0.5
| 0.319149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 117
| 4
| 31
| 29.25
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80f5e63a6f85eac6a66c77f8d70bb01c9fb8423d
| 3,934
|
py
|
Python
|
models/autoencoder.py
|
garedaba/BERMUDA
|
ad989cbdae03efafe81d1d0aa36673dd6ffa2e14
|
[
"MIT"
] | null | null | null |
models/autoencoder.py
|
garedaba/BERMUDA
|
ad989cbdae03efafe81d1d0aa36673dd6ffa2e14
|
[
"MIT"
] | null | null | null |
models/autoencoder.py
|
garedaba/BERMUDA
|
ad989cbdae03efafe81d1d0aa36673dd6ffa2e14
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import torch.nn as nn
def init_weights(m):
""" initialize weights of fully connected layer
"""
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
# autoencoder with hidden units 20, latent, 20
# Encoder
class Encoder_20(nn.Module):
def __init__(self, num_inputs, code_dim):
super(Encoder_20, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(num_inputs, 20),
nn.ReLU(),
nn.Linear(20, code_dim))
self.encoder.apply(init_weights)
def forward(self, x):
x = self.encoder(x)
return x
# Decoder
class Decoder_20(nn.Module):
def __init__(self, num_inputs, code_dim):
super(Decoder_20, self).__init__()
self.decoder = nn.Sequential(
nn.Linear(code_dim, 20),
nn.ReLU(),
nn.Linear(20, num_inputs),
nn.Sigmoid())
self.decoder.apply(init_weights)
def forward(self, x):
x = self.decoder(x)
return x
# Autoencoder
class autoencoder_20(nn.Module):
def __init__(self, num_inputs, code_dim):
super(autoencoder_20, self).__init__()
self.encoder = Encoder_20(num_inputs, code_dim)
self.decoder = Decoder_20(num_inputs, code_dim)
def forward(self, x):
code = self.encoder(x)
x = self.decoder(code)
return code, x
# autoencoder with hidden units 100, latent, 100
# Encoder
class Encoder_100(nn.Module):
def __init__(self, num_inputs, code_dim):
super(Encoder_100, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(num_inputs, 100),
nn.ReLU(),
nn.Linear(100, code_dim))
self.encoder.apply(init_weights)
def forward(self, x):
x = self.encoder(x)
return x
# Decoder
class Decoder_100(nn.Module):
def __init__(self, num_inputs, code_dim):
super(Decoder_100, self).__init__()
self.decoder = nn.Sequential(
nn.Linear(code_dim, 100),
nn.ReLU(),
nn.Linear(100, num_inputs),
nn.Sigmoid())
self.decoder.apply(init_weights)
def forward(self, x):
x = self.decoder(x)
return x
# Autoencoder
class autoencoder_100(nn.Module):
def __init__(self, num_inputs, code_dim):
super(autoencoder_100, self).__init__()
self.encoder = Encoder_100(num_inputs, code_dim)
self.decoder = Decoder_100(num_inputs, code_dim)
def forward(self, x):
code = self.encoder(x)
x = self.decoder(code)
return code, x
# autoencoder with hidden units 8, 4, latent, 4, 8
class Encoder_3(nn.Module):
def __init__(self, num_inputs, code_dim):
super(Encoder_3, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(num_inputs, 20),
nn.ReLU(),
nn.Linear(20, 10),
nn.ReLU(),
nn.Linear(10, code_dim))
self.encoder.apply(init_weights)
def forward(self, x):
x = self.encoder(x)
return x
# Decoder
class Decoder_3(nn.Module):
def __init__(self, num_inputs, code_dim):
super(Decoder_3, self).__init__()
self.decoder = nn.Sequential(
nn.Linear(code_dim, 10),
nn.ReLU(),
nn.Linear(10, 20),
nn.ReLU(),
nn.Linear(20, num_inputs),
nn.Sigmoid())
self.decoder.apply(init_weights)
def forward(self, x):
x = self.decoder(x)
return x
# Autoencoder
class autoencoder_3(nn.Module):
def __init__(self, num_inputs, code_dim):
super(autoencoder_3, self).__init__()
self.encoder = Encoder_3(num_inputs, code_dim)
self.decoder = Decoder_3(num_inputs, code_dim)
def forward(self, x):
code = self.encoder(x)
x = self.decoder(code)
return code, x
| 29.578947
| 56
| 0.599898
| 520
| 3,934
| 4.253846
| 0.111538
| 0.085443
| 0.088156
| 0.108499
| 0.879747
| 0.833635
| 0.799277
| 0.753165
| 0.753165
| 0.753165
| 0
| 0.036068
| 0.281139
| 3,934
| 132
| 57
| 29.80303
| 0.74611
| 0.076258
| 0
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.180952
| false
| 0
| 0.009524
| 0
| 0.361905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
03949e01b1fbab514806269a877f9e68b99adc80
| 17
|
py
|
Python
|
zfit_physics/unstable/__init__.py
|
chm-ipmu/zfit-physics
|
9e6b1f857fe993fadb888612822c394db4d0f5c5
|
[
"BSD-3-Clause"
] | null | null | null |
zfit_physics/unstable/__init__.py
|
chm-ipmu/zfit-physics
|
9e6b1f857fe993fadb888612822c394db4d0f5c5
|
[
"BSD-3-Clause"
] | null | null | null |
zfit_physics/unstable/__init__.py
|
chm-ipmu/zfit-physics
|
9e6b1f857fe993fadb888612822c394db4d0f5c5
|
[
"BSD-3-Clause"
] | null | null | null |
from . import pdf
| 17
| 17
| 0.764706
| 3
| 17
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
03d374576d2ec245ce83ddf40486d33b88e37cfb
| 41
|
py
|
Python
|
src/sas/sascalc/pr/fit/__init__.py
|
opendatafit/sasview
|
c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39
|
[
"BSD-3-Clause"
] | null | null | null |
src/sas/sascalc/pr/fit/__init__.py
|
opendatafit/sasview
|
c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39
|
[
"BSD-3-Clause"
] | 1
|
2021-09-20T13:20:35.000Z
|
2021-09-20T13:20:35.000Z
|
src/sas/sascalc/pr/fit/__init__.py
|
opendatafit/sasview
|
c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39
|
[
"BSD-3-Clause"
] | null | null | null |
from .AbstractFitEngine import FitHandler
| 41
| 41
| 0.902439
| 4
| 41
| 9.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
03d3ede9bf333e5ac91b4bfab7687b8821f0f6c6
| 66
|
py
|
Python
|
week10/CinemaReservation/hack_cinema/db_schema/__init__.py
|
HackBulgaria/Programming-101-Python-2020-Spring
|
443446028df7fe78fcdd6c37dada0b5cd8ed3c93
|
[
"MIT"
] | 30
|
2020-01-22T17:22:43.000Z
|
2022-01-26T08:28:57.000Z
|
week10/CinemaReservation/hack_cinema/db_schema/__init__.py
|
HackBulgaria/Programming-101-Python-2020-Spring
|
443446028df7fe78fcdd6c37dada0b5cd8ed3c93
|
[
"MIT"
] | 1
|
2020-01-21T19:50:47.000Z
|
2020-03-18T16:18:31.000Z
|
week10/CinemaReservation/hack_cinema/db_schema/__init__.py
|
HackBulgaria/Programming-101-Python-2020-Spring
|
443446028df7fe78fcdd6c37dada0b5cd8ed3c93
|
[
"MIT"
] | 7
|
2019-11-28T15:59:16.000Z
|
2020-12-05T08:39:02.000Z
|
from .users import CREATE_USERS
from .movies import CREATE_MOVIES
| 22
| 33
| 0.848485
| 10
| 66
| 5.4
| 0.5
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 34
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ff04416537a6ec02a7d39411cd2da79a2c52d956
| 843
|
py
|
Python
|
client_apis/python/test/test_conflict_error.py
|
alikins/galaxy-api-swaggerhub
|
5d6d4070cd6964c6d6217cad6743de89cf4eac24
|
[
"MIT"
] | null | null | null |
client_apis/python/test/test_conflict_error.py
|
alikins/galaxy-api-swaggerhub
|
5d6d4070cd6964c6d6217cad6743de89cf4eac24
|
[
"MIT"
] | 3
|
2020-07-17T10:18:45.000Z
|
2022-01-22T05:24:05.000Z
|
client_apis/python/test/test_conflict_error.py
|
alikins/galaxy-api-swaggerhub
|
5d6d4070cd6964c6d6217cad6743de89cf4eac24
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Galaxy 3.2 API (wip)
Galaxy 3.2 API (wip) # noqa: E501
The version of the OpenAPI document: 1.2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.conflict_error import ConflictError # noqa: E501
from openapi_client.rest import ApiException
class TestConflictError(unittest.TestCase):
"""ConflictError unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testConflictError(self):
"""Test ConflictError"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.conflict_error.ConflictError() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.075
| 84
| 0.691578
| 101
| 843
| 5.584158
| 0.564356
| 0.092199
| 0.028369
| 0.039007
| 0.163121
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025836
| 0.219454
| 843
| 39
| 85
| 21.615385
| 0.831307
| 0.431791
| 0
| 0.214286
| 1
| 0
| 0.018141
| 0
| 0
| 0
| 0
| 0.025641
| 0
| 1
| 0.214286
| false
| 0.214286
| 0.357143
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
206b7a3d5225ff49bcbb58405bc70465ecf3e182
| 69
|
py
|
Python
|
sklearnbot/__init__.py
|
openml/sklearn-bot
|
7476dd6e27f087166fc416974fc67a78dd4fa4d2
|
[
"BSD-3-Clause"
] | 1
|
2020-05-06T14:54:32.000Z
|
2020-05-06T14:54:32.000Z
|
sklearnbot/__init__.py
|
openml/sklearn-bot
|
7476dd6e27f087166fc416974fc67a78dd4fa4d2
|
[
"BSD-3-Clause"
] | 2
|
2018-10-07T17:30:03.000Z
|
2018-10-19T00:06:35.000Z
|
sklearnbot/__init__.py
|
openml/sklearn-bot
|
7476dd6e27f087166fc416974fc67a78dd4fa4d2
|
[
"BSD-3-Clause"
] | 1
|
2019-07-03T20:35:22.000Z
|
2019-07-03T20:35:22.000Z
|
from . import bot
from . import config_spaces
from . import sklearn
| 13.8
| 27
| 0.768116
| 10
| 69
| 5.2
| 0.6
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188406
| 69
| 4
| 28
| 17.25
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
209515769e0305875dba77990be0750bb976af34
| 9,391
|
py
|
Python
|
legacy/minimize.py
|
patrikkj/algorithms
|
25799fb57807eca1784202c499fda8a5a94acea3
|
[
"MIT"
] | null | null | null |
legacy/minimize.py
|
patrikkj/algorithms
|
25799fb57807eca1784202c499fda8a5a94acea3
|
[
"MIT"
] | null | null | null |
legacy/minimize.py
|
patrikkj/algorithms
|
25799fb57807eca1784202c499fda8a5a94acea3
|
[
"MIT"
] | null | null | null |
import numpy as np
from .helpers import (step_adam, step_gradient_descent, step_momentum,
step_rmsprop)
def mini_batch_gradient_descent(params, X, y, cost_func, grad_func,
alpha=0.01, epochs=100, k=64, l=0,
step_func=None, **hparams):
"""Minimizes the objective function using mini-batch gradient descent.
Args:
params (ndarray[1, n]): initial parameters
X (ndarray[m, ...]): input features
y (ndarray[m, 1]): output labels
cost_func (... -> float32): mapping of the form (params, X, Y) -> cost
grad_func (... -> ndarray[1, n]): gradients of cost function
alpha (float, optional): learning rate (defaults to 0.01)
epochs (int, optional): number of iterations (defaults to 100)
k (int, optional): mini-batch size (defaults to 64)
l (float, optional): regularization parameter (defaults to 0)
Returns:
params (ndarray[1, n]): updated parameters
costs (ndarray[epochs, 1]): costs for each batch iteration
grads (ndarray[n, epochs]): gradients for each batch iteration
"""
# Initialization
if step_func is None:
step_func = step_gradient_descent
costs, grads = [], []
for _ in range(epochs):
# Shuffle input and labels
m = X.shape[0]
p = np.random.permutation(m)
X, y = X[p], y[p]
# Partition input and labels
X_batches = np.split(X, range(k, m, k))
y_batches = np.split(y, range(k, m, k))
# Perform a single iteration of gradient descent for every mini-batch
for X_batch, y_batch in zip(X_batches, y_batches):
grad = grad_func(params, X_batch, y_batch, l)
cost = cost_func(params, X_batch, y_batch, l)
params = step_func(params, grad, alpha, **hparams)
costs.append(cost)
grads.append(grad)
return params, np.array(costs), np.array(grads)
def batch_gradient_descent(params, X, y, cost_func, grad_func,
alpha=0.01, epochs=100, l=0):
"""Minimizes the objective function using batch gradient descent.
Args:
params (ndarray[1, n]): initial parameters
X (ndarray[m, ...]): input features
y (ndarray[m, 1]): output labels
cost_func (... -> float32): mapping of the form (params, X, Y) -> cost
grad_func (... -> ndarray[1, n]): gradients of cost function
alpha (float, optional): learning rate (defaults to 0.01)
epochs (int, optional): number of iterations (defaults to 100)
l (float, optional): regularization parameter (defaults to 0)
Returns:
params (ndarray[1, n]): updated parameters
costs (ndarray[epochs, 1]): costs for each batch iteration
grads (ndarray[n, epochs]): gradients for each batch iteration
"""
return mini_batch_gradient_descent(params, X, y, cost_func, grad_func, alpha, epochs, X.shape[0], l)
def stochastic_gradient_descent(params, X, y, cost_func, grad_func, alpha=0.01, epochs=100, l=0):
"""Minimizes the objective function using stochastic gradient descent.
Args:
params (ndarray[1, n]): initial parameters
X (ndarray[m, ...]): input features
y (ndarray[m, 1]): output labels
cost_func (... -> float32): mapping of the form (params, X, Y) -> cost
grad_func (... -> ndarray[1, n]): gradients of cost function
alpha (float, optional): learning rate (defaults to 0.01)
epochs (int, optional): number of iterations (defaults to 100)
l (float, optional): regularization parameter (defaults to 0)
Returns:
params (ndarray[1, n]): updated parameters
costs (ndarray[epochs, 1]): costs for each batch iteration
grads (ndarray[n, epochs]): gradients for each batch iteration
"""
return mini_batch_gradient_descent(params, X, y, cost_func, grad_func, alpha, epochs, 1, l)
def momentum_gradient_descent(params, X, y, cost_func, grad_func,
alpha=0.01, epochs=100, k=64, l=0, beta=0.9):
"""Minimizes the objective function using mini-batch gradient descent w/ momentum.
Args:
params (ndarray[1, n]): initial parameters
X (ndarray[m, ...]): input features
y (ndarray[m, 1]): output labels
cost_func (... -> float32): mapping of the form (params, X, Y) -> cost
grad_func (... -> ndarray[1, n]): gradients of cost function
alpha (float, optional): learning rate (defaults to 0.01)
epochs (int, optional): number of iterations (defaults to 100)
k (int, optional): mini-batch size (defaults to 64)
l (float, optional): regularization parameter (defaults to 0)
beta (float, optional): moment decay rate (defaults to 0.9)
Returns:
params (ndarray[1, n]): updated parameters
costs (ndarray[epochs, 1]): costs for each batch iteration
grads (ndarray[n, epochs]): gradients for each batch iteration
"""
args = (params, X, y, cost_func, grad_func)
kwargs = {
'alpha': alpha,
'epochs': epochs,
'k': k,
'l': l,
'step_func': step_momentum
}
hparams = { # Hyperparameters passed to the step function
'v': np.zeros(params.shape),
'beta': beta
}
return mini_batch_gradient_descent(*args, **kwargs, **hparams)
def rmsprop(params, X, y, cost_func, grad_func,
alpha=0.01, epochs=100, k=64, l=0, beta=0.9):
"""Minimizes the objective function using RMSprop.
Args:
params (ndarray[1, n]): initial parameters
X (ndarray[m, ...]): input features
y (ndarray[m, 1]): output labels
cost_func (... -> float32): mapping of the form (params, X, Y) -> cost
grad_func (... -> ndarray[1, n]): gradients of cost function
alpha (float, optional): learning rate (defaults to 0.01)
epochs (int, optional): number of iterations (defaults to 100)
k (int, optional): mini-batch size (defaults to 64)
l (float, optional): regularization parameter (defaults to 0)
beta (float, optional): moment decay rate (defaults to 0.9)
Returns:
params (ndarray[1, n]): updated parameters
costs (ndarray[epochs, 1]): costs for each batch iteration
grads (ndarray[n, epochs]): gradients for each batch iteration
"""
args = (params, X, y, cost_func, grad_func)
kwargs = {
'alpha': alpha,
'epochs': epochs,
'k': k,
'l': l,
'step_func': step_rmsprop
}
hparams = { # Hyperparameters passed to the step function
's': np.zeros(params.shape),
'beta': beta
}
return mini_batch_gradient_descent(*args, **kwargs, **hparams)
def adam(params, X, y, cost_func, grad_func,
alpha=0.01, epochs=100, k=64, l=0,
beta1=0.9, beta2=0.999, epsilon=10e-8):
"""Minimizes the objective function using Adam.
Args:
params (ndarray[1, n]): initial parameters
X (ndarray[m, ...]): input features
y (ndarray[m, 1]): output labels
cost_func (... -> float32): mapping of the form (params, X, Y) -> cost
grad_func (... -> ndarray[1, n]): gradients of cost function
alpha (float, optional): learning rate (defaults to 0.01)
epochs (int, optional): number of iterations (defaults to 100)
k (int, optional): mini-batch size (defaults to 64)
l (float, optional): regularization parameter (defaults to 0)
beta1 (float, optional): 1st order moment decay rate (defaults to 0.9)
beta2 (float, optional): 2nd order moment decay rate (defaults to 0.999)
epsilon (float, optional): numerical stability constant (defaults to 10e-8)
Returns:
params (ndarray[1, n]): updated parameters
costs (ndarray[epochs, 1]): costs for each batch iteration
grads (ndarray[n, epochs]): gradients for each batch iteration
"""
args = (params, X, y, cost_func, grad_func)
kwargs = {
'alpha': alpha,
'epochs': epochs,
'k': k,
'l': l,
'step_func': step_adam
}
hparams = { # Hyperparameters passed to the step function
'v': np.zeros(params.shape),
's': np.zeros(params.shape),
't': np.array([0]),
'beta1': beta1,
'beta2': beta2,
'epsilon': epsilon
}
return mini_batch_gradient_descent(*args, **kwargs, **hparams)
| 44.933014
| 104
| 0.554041
| 1,115
| 9,391
| 4.587444
| 0.10583
| 0.052786
| 0.031672
| 0.039883
| 0.844184
| 0.833627
| 0.833627
| 0.803519
| 0.79433
| 0.782209
| 0
| 0.029185
| 0.33596
| 9,391
| 208
| 105
| 45.149038
| 0.791052
| 0.637845
| 0
| 0.441558
| 0
| 0
| 0.03215
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077922
| false
| 0
| 0.025974
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
20c0a0ab9c26529031cddba7ba1cc8566c11bc95
| 403
|
py
|
Python
|
tests/integration/__init__.py
|
wlan0/docker-py
|
ae11d81b183db2f6641c6e64329820ac10597a13
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/__init__.py
|
wlan0/docker-py
|
ae11d81b183db2f6641c6e64329820ac10597a13
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/__init__.py
|
wlan0/docker-py
|
ae11d81b183db2f6641c6e64329820ac10597a13
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# FIXME: crutch while we transition to the new folder architecture
# Remove imports when merged in master and Jenkins is updated to find the
# tests in the new location.
from .api_test import *
from .build_test import *
from .container_test import *
from .exec_test import *
from .image_test import *
from .network_test import *
from .regression_test import *
from .volume_test import *
| 28.785714
| 73
| 0.776675
| 62
| 403
| 4.919355
| 0.580645
| 0.262295
| 0.321311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002976
| 0.166253
| 403
| 13
| 74
| 31
| 0.904762
| 0.436725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
20ce9f12a4dad87f7dc18a01dcf1c6dd8b13d8fa
| 26
|
py
|
Python
|
exercises/transpose/transpose.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | 1
|
2021-05-15T19:59:04.000Z
|
2021-05-15T19:59:04.000Z
|
exercises/transpose/transpose.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | null | null | null |
exercises/transpose/transpose.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | 2
|
2018-03-03T08:32:12.000Z
|
2019-08-22T11:55:53.000Z
|
def transpose():
pass
| 8.666667
| 16
| 0.615385
| 3
| 26
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 26
| 2
| 17
| 13
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
20e671a114b0ec867421494a9b1d41a15f80e320
| 3,777
|
py
|
Python
|
FastlyPythonCLI/scripts/WAF/emergency.py
|
hummelm10/FastlyPythonAPI
|
cf2d880649319c56f38a74bc76ea50c642ad2f11
|
[
"MIT"
] | 1
|
2020-05-06T17:07:52.000Z
|
2020-05-06T17:07:52.000Z
|
FastlyPythonCLI/scripts/WAF/emergency.py
|
hummelm10/FastlyPythonAPI
|
cf2d880649319c56f38a74bc76ea50c642ad2f11
|
[
"MIT"
] | null | null | null |
FastlyPythonCLI/scripts/WAF/emergency.py
|
hummelm10/FastlyPythonAPI
|
cf2d880649319c56f38a74bc76ea50c642ad2f11
|
[
"MIT"
] | null | null | null |
import requests
import scripts
import pprint
import pandas
from .listWAFIDs import listWAFIDsNoPrompt
def disableWAF():
print(scripts.bcolors.FAIL + scripts.bcolors.UNDERLINE + "EMERGENCY DISABLE: THIS IS TO BE USED IN AN EMERGENCY ONLY\n(Requires Superuser permissions)" + scripts.bcolors.ENDC + scripts.bcolors.ENDC)
if scripts.checkAPINoPrint():
dfObj = listWAFIDsNoPrompt()
try:
inVar = int(input("\n\nEnter index of WAF to display [Enter to exit]: "))
print(str(dfObj['WAF ID'].iloc[inVar]))
except:
e = input("Not a valid number. Press enter to continue or E to exit...")
if e.strip(' ').lower() == 'e':
scripts.clear()
scripts.WAFMenu()
scripts.clear()
disableWAF()
print(scripts.bcolors.WARNING + scripts.bcolors.UNDERLINE + "EMERGENCY DISABLE: THIS IS TO BE USED IN AN EMERGENCY ONLY" + scripts.bcolors.ENDC + scripts.bcolors.ENDC)
while "Not a valid response.":
reply = str(input("Request: https://api.fastly.com/wafs/" + str(dfObj['WAF ID'].iloc[inVar]) + "/disable\nCorrect service " + str(dfObj['Name'].iloc[inVar]) + " [Y/n]: ")).lower().strip()
if reply[0] == 'y':
break
if reply[0] == 'n':
scripts.clear()
disableWAF()
break
header={"Accept":"application/vnd.api+json"}
header.update({"Content-Type":"application/vnd.api+json"})
header.update({"Fastly-Key":scripts.getKeyFromConfig()})
r=requests.patch("https://api.fastly.com/wafs/" + str(dfObj['WAF ID'].iloc[inVar]) + "/disable",headers=header)
if r.status_code == 202:
print(scripts.bcolors.OKGREEN + "Disabled WAF" + scripts.bcolors.ENDC)
pprint.pprint(r.json()['data'])
input("Press ENTER to return to menu...")
else:
input(scripts.bcolors.WARNING + "Error with request.\nStatus: " + str(r.status_code) + "\nPress ENTER to continue..." + scripts.bcolors.ENDC)
else:
input(scripts.bcolors.WARNING + "Error with API Key, generate a new one. Press ENTER to continue..." + scripts.bcolors.ENDC)
def enableWAF():
    """Re-enable an emergency-disabled Fastly WAF (requires Superuser permissions).

    Only works on a WAF that was disabled through the emergency endpoint.
    Lists the available WAFs, asks the operator to pick one by index, then
    PATCHes ``/wafs/<id>/enable``; the API response body is pretty-printed
    on success (Fastly answers 202 Accepted).
    """
    print(scripts.bcolors.WARNING + scripts.bcolors.UNDERLINE + "EMERGENCY ENABLE: THIS IS TO BE USED IN AN EMERGENCY ONLY (only works on emergency disabled WAF)\n(Requires Superuser permissions)" + scripts.bcolors.ENDC + scripts.bcolors.ENDC)
    if not scripts.checkAPINoPrint():
        input(scripts.bcolors.WARNING + "Error with API Key, generate a new one. Press ENTER to continue..." + scripts.bcolors.ENDC)
        return
    dfObj = listWAFIDsNoPrompt()
    try:
        inVar = int(input("\n\nEnter index of WAF to display: "))
        # Probe the row so an out-of-range index is caught here, not later.
        str(dfObj['WAF ID'].iloc[inVar])
    # Was a bare `except:`; catch only what the index prompt can raise.
    except (ValueError, IndexError, KeyError):
        e = input("Not a valid number. Press enter to continue or E to exit...")
        if e.strip(' ').lower() == 'e':
            scripts.clear()
            scripts.WAFMenu()
            # Bug fix: original fell through here and crashed later on the
            # undefined `inVar`.
            return
        scripts.clear()
        enableWAF()
        # Bug fix: after re-prompting via recursion, stop this frame instead
        # of sending a request with `inVar` unbound.
        return
    header = {
        "Accept": "application/vnd.api+json",
        "Content-Type": "application/vnd.api+json",
        "Fastly-Key": scripts.getKeyFromConfig(),
    }
    r = requests.patch("https://api.fastly.com/wafs/" + str(dfObj['WAF ID'].iloc[inVar]) + "/enable", headers=header)
    if r.status_code == 202:  # Fastly returns 202 Accepted for this endpoint
        print(scripts.bcolors.OKGREEN + "Enabled WAF" + scripts.bcolors.ENDC)
        pprint.pprint(r.json()['data'])
        input("Press ENTER to return to menu...")
    else:
        input(scripts.bcolors.WARNING + "Error with request.\nStatus: " + str(r.status_code) + "\nPress ENTER to continue..." + scripts.bcolors.ENDC)
| 55.544118
| 243
| 0.610537
| 455
| 3,777
| 5.059341
| 0.23956
| 0.14596
| 0.093831
| 0.028236
| 0.83927
| 0.83927
| 0.823632
| 0.823632
| 0.787142
| 0.773675
| 0
| 0.002807
| 0.245433
| 3,777
| 68
| 244
| 55.544118
| 0.804912
| 0
| 0
| 0.651515
| 0
| 0.015152
| 0.312335
| 0.02541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.075758
| 0
| 0.106061
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4561c551d821590b6ee24ff780ae4785a6cc7028
| 38,036
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int18e/47.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int18e/47.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int18e/47.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 4249
passenger_arriving = (
(2, 15, 5, 3, 2, 0, 9, 9, 8, 5, 1, 0), # 0
(4, 9, 10, 3, 3, 0, 12, 10, 7, 10, 3, 0), # 1
(2, 6, 7, 2, 2, 0, 3, 17, 5, 7, 3, 0), # 2
(6, 13, 7, 7, 4, 0, 7, 14, 5, 7, 2, 0), # 3
(5, 12, 9, 3, 3, 0, 12, 10, 9, 9, 3, 0), # 4
(4, 13, 14, 5, 0, 0, 4, 7, 7, 4, 0, 0), # 5
(9, 12, 8, 4, 2, 0, 10, 8, 8, 5, 6, 0), # 6
(5, 8, 6, 5, 2, 0, 5, 11, 10, 5, 2, 0), # 7
(7, 10, 7, 11, 2, 0, 7, 4, 9, 11, 6, 0), # 8
(9, 14, 2, 3, 2, 0, 7, 16, 7, 8, 3, 0), # 9
(6, 7, 16, 5, 2, 0, 3, 8, 9, 10, 2, 0), # 10
(7, 14, 7, 2, 5, 0, 16, 10, 6, 3, 5, 0), # 11
(7, 9, 9, 9, 5, 0, 6, 14, 5, 2, 1, 0), # 12
(10, 13, 7, 2, 4, 0, 8, 9, 5, 8, 3, 0), # 13
(5, 16, 11, 5, 1, 0, 10, 11, 11, 9, 1, 0), # 14
(3, 10, 7, 6, 2, 0, 7, 7, 7, 12, 3, 0), # 15
(4, 14, 9, 6, 5, 0, 15, 16, 8, 5, 3, 0), # 16
(3, 13, 8, 7, 4, 0, 9, 7, 9, 6, 5, 0), # 17
(5, 8, 11, 3, 8, 0, 6, 15, 8, 10, 1, 0), # 18
(8, 14, 8, 3, 3, 0, 6, 19, 7, 5, 2, 0), # 19
(7, 14, 12, 9, 2, 0, 7, 10, 8, 6, 3, 0), # 20
(4, 13, 13, 2, 3, 0, 5, 10, 13, 4, 3, 0), # 21
(10, 11, 10, 5, 4, 0, 8, 8, 6, 8, 2, 0), # 22
(9, 11, 8, 5, 4, 0, 11, 17, 5, 7, 3, 0), # 23
(7, 21, 8, 5, 5, 0, 3, 15, 11, 9, 3, 0), # 24
(5, 11, 12, 5, 1, 0, 11, 15, 6, 4, 3, 0), # 25
(4, 8, 12, 4, 1, 0, 11, 18, 10, 5, 2, 0), # 26
(6, 11, 9, 10, 4, 0, 6, 21, 6, 5, 2, 0), # 27
(7, 15, 10, 1, 1, 0, 7, 11, 10, 5, 4, 0), # 28
(5, 13, 9, 3, 4, 0, 6, 11, 10, 7, 3, 0), # 29
(5, 18, 10, 5, 1, 0, 8, 8, 6, 5, 3, 0), # 30
(10, 10, 4, 5, 0, 0, 5, 12, 9, 5, 4, 0), # 31
(4, 13, 16, 1, 2, 0, 19, 11, 7, 5, 5, 0), # 32
(5, 13, 11, 5, 1, 0, 11, 7, 10, 4, 3, 0), # 33
(2, 17, 6, 3, 2, 0, 9, 4, 11, 4, 3, 0), # 34
(11, 9, 12, 5, 6, 0, 3, 7, 9, 4, 0, 0), # 35
(6, 10, 10, 4, 2, 0, 8, 16, 9, 5, 6, 0), # 36
(9, 9, 11, 4, 5, 0, 13, 13, 0, 4, 1, 0), # 37
(3, 9, 9, 3, 5, 0, 10, 10, 4, 4, 2, 0), # 38
(1, 16, 17, 8, 2, 0, 5, 12, 5, 5, 1, 0), # 39
(6, 10, 8, 4, 2, 0, 3, 15, 3, 5, 2, 0), # 40
(7, 7, 12, 3, 4, 0, 10, 13, 9, 9, 4, 0), # 41
(6, 10, 7, 6, 3, 0, 8, 9, 11, 7, 3, 0), # 42
(5, 21, 9, 5, 8, 0, 4, 10, 7, 5, 1, 0), # 43
(6, 11, 10, 6, 2, 0, 10, 8, 5, 3, 5, 0), # 44
(8, 10, 4, 3, 2, 0, 7, 10, 8, 5, 4, 0), # 45
(5, 14, 11, 5, 3, 0, 9, 15, 18, 8, 3, 0), # 46
(8, 9, 15, 5, 1, 0, 2, 6, 7, 2, 1, 0), # 47
(7, 17, 10, 9, 3, 0, 9, 21, 10, 7, 5, 0), # 48
(9, 11, 11, 4, 4, 0, 10, 10, 11, 9, 3, 0), # 49
(3, 13, 7, 7, 8, 0, 4, 10, 8, 3, 4, 0), # 50
(5, 12, 10, 4, 3, 0, 12, 15, 8, 9, 4, 0), # 51
(6, 16, 14, 5, 6, 0, 8, 11, 7, 7, 4, 0), # 52
(11, 11, 8, 8, 2, 0, 6, 13, 8, 8, 5, 0), # 53
(12, 15, 6, 5, 3, 0, 8, 11, 3, 3, 6, 0), # 54
(7, 12, 14, 3, 2, 0, 3, 13, 4, 8, 5, 0), # 55
(8, 13, 8, 5, 4, 0, 4, 13, 4, 10, 1, 0), # 56
(7, 15, 6, 3, 3, 0, 8, 9, 11, 7, 2, 0), # 57
(3, 19, 11, 5, 4, 0, 5, 13, 8, 4, 5, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(4.769372805092186, 12.233629261363635, 14.389624839331619, 11.405298913043477, 12.857451923076923, 8.562228260869567), # 0
(4.81413961808604, 12.369674877683082, 14.46734796754499, 11.46881589673913, 12.953819711538461, 8.559309850543478), # 1
(4.8583952589991215, 12.503702525252525, 14.54322622107969, 11.530934782608696, 13.048153846153847, 8.556302173913043), # 2
(4.902102161984196, 12.635567578125, 14.617204169344474, 11.591602581521737, 13.14036778846154, 8.553205638586958), # 3
(4.94522276119403, 12.765125410353535, 14.689226381748071, 11.650766304347826, 13.230375, 8.550020652173911), # 4
(4.987719490781387, 12.892231395991162, 14.759237427699228, 11.708372961956522, 13.318088942307691, 8.546747622282608), # 5
(5.029554784899035, 13.01674090909091, 14.827181876606687, 11.764369565217393, 13.403423076923078, 8.54338695652174), # 6
(5.0706910776997365, 13.138509323705808, 14.893004297879177, 11.818703125, 13.486290865384618, 8.5399390625), # 7
(5.1110908033362605, 13.257392013888888, 14.956649260925452, 11.871320652173912, 13.56660576923077, 8.536404347826087), # 8
(5.1507163959613695, 13.373244353693181, 15.018061335154243, 11.922169157608696, 13.644281249999999, 8.532783220108696), # 9
(5.1895302897278315, 13.485921717171717, 15.077185089974291, 11.971195652173915, 13.719230769230771, 8.529076086956522), # 10
(5.227494918788412, 13.595279478377526, 15.133965094794343, 12.018347146739131, 13.791367788461539, 8.525283355978262), # 11
(5.2645727172958745, 13.701173011363636, 15.188345919023137, 12.063570652173912, 13.860605769230768, 8.521405434782608), # 12
(5.3007261194029835, 13.803457690183082, 15.240272132069407, 12.106813179347826, 13.926858173076925, 8.51744273097826), # 13
(5.335917559262511, 13.90198888888889, 15.289688303341899, 12.148021739130433, 13.99003846153846, 8.513395652173912), # 14
(5.370109471027217, 13.996621981534089, 15.336539002249355, 12.187143342391304, 14.050060096153846, 8.509264605978261), # 15
(5.403264288849868, 14.087212342171718, 15.380768798200515, 12.224124999999999, 14.10683653846154, 8.50505), # 16
(5.4353444468832315, 14.173615344854797, 15.422322260604112, 12.258913722826087, 14.16028125, 8.500752241847827), # 17
(5.46631237928007, 14.255686363636363, 15.461143958868895, 12.291456521739132, 14.210307692307696, 8.496371739130435), # 18
(5.496130520193152, 14.333280772569443, 15.4971784624036, 12.321700407608695, 14.256829326923079, 8.491908899456522), # 19
(5.524761303775241, 14.40625394570707, 15.530370340616965, 12.349592391304348, 14.299759615384616, 8.487364130434782), # 20
(5.552167164179106, 14.47446125710227, 15.56066416291774, 12.375079483695652, 14.339012019230768, 8.482737839673913), # 21
(5.578310535557506, 14.537758080808082, 15.588004498714653, 12.398108695652175, 14.374499999999998, 8.47803043478261), # 22
(5.603153852063214, 14.595999790877526, 15.612335917416454, 12.418627038043478, 14.40613701923077, 8.473242323369567), # 23
(5.62665954784899, 14.649041761363636, 15.633602988431875, 12.43658152173913, 14.433836538461538, 8.468373913043479), # 24
(5.648790057067603, 14.696739366319445, 15.651750281169667, 12.451919157608696, 14.457512019230768, 8.463425611413044), # 25
(5.669507813871817, 14.738947979797977, 15.66672236503856, 12.464586956521739, 14.477076923076922, 8.458397826086957), # 26
(5.688775252414398, 14.77552297585227, 15.6784638094473, 12.474531929347828, 14.492444711538463, 8.453290964673915), # 27
(5.7065548068481124, 14.806319728535353, 15.68691918380463, 12.481701086956523, 14.503528846153845, 8.448105434782608), # 28
(5.722808911325724, 14.831193611900254, 15.69203305751928, 12.486041440217392, 14.510242788461538, 8.44284164402174), # 29
(5.7375, 14.85, 15.69375, 12.4875, 14.512500000000001, 8.4375), # 30
(5.751246651214834, 14.865621839488634, 15.692462907608693, 12.487236580882353, 14.511678590425532, 8.430077267616193), # 31
(5.7646965153452685, 14.881037215909092, 15.68863804347826, 12.486451470588234, 14.509231914893617, 8.418644565217393), # 32
(5.777855634590792, 14.896244211647728, 15.682330027173915, 12.485152389705883, 14.50518630319149, 8.403313830584706), # 33
(5.790730051150895, 14.91124090909091, 15.67359347826087, 12.483347058823531, 14.499568085106382, 8.38419700149925), # 34
(5.803325807225064, 14.926025390624996, 15.662483016304348, 12.481043198529411, 14.492403590425532, 8.361406015742128), # 35
(5.815648945012788, 14.940595738636366, 15.649053260869564, 12.478248529411767, 14.48371914893617, 8.335052811094453), # 36
(5.8277055067135555, 14.954950035511365, 15.63335883152174, 12.474970772058823, 14.47354109042553, 8.305249325337332), # 37
(5.839501534526853, 14.969086363636364, 15.615454347826088, 12.471217647058824, 14.461895744680852, 8.272107496251873), # 38
(5.851043070652174, 14.983002805397728, 15.595394429347825, 12.466996875000001, 14.44880944148936, 8.23573926161919), # 39
(5.862336157289003, 14.99669744318182, 15.573233695652176, 12.462316176470589, 14.434308510638296, 8.196256559220389), # 40
(5.873386836636828, 15.010168359374997, 15.549026766304348, 12.457183272058824, 14.418419281914893, 8.153771326836583), # 41
(5.88420115089514, 15.023413636363639, 15.522828260869566, 12.451605882352942, 14.401168085106384, 8.108395502248875), # 42
(5.894785142263428, 15.03643135653409, 15.494692798913043, 12.445591727941178, 14.38258125, 8.060241023238381), # 43
(5.905144852941176, 15.049219602272727, 15.464675, 12.439148529411764, 14.36268510638298, 8.009419827586207), # 44
(5.915286325127877, 15.061776455965909, 15.432829483695656, 12.43228400735294, 14.341505984042554, 7.956043853073464), # 45
(5.925215601023019, 15.074100000000003, 15.39921086956522, 12.425005882352941, 14.319070212765958, 7.90022503748126), # 46
(5.934938722826087, 15.086188316761364, 15.363873777173913, 12.417321874999999, 14.295404122340427, 7.842075318590705), # 47
(5.944461732736574, 15.098039488636365, 15.326872826086957, 12.409239705882353, 14.27053404255319, 7.7817066341829095), # 48
(5.953790672953963, 15.10965159801136, 15.288262635869566, 12.400767095588236, 14.24448630319149, 7.71923092203898), # 49
(5.96293158567775, 15.121022727272724, 15.248097826086958, 12.391911764705883, 14.217287234042553, 7.65476011994003), # 50
(5.971890513107417, 15.132150958806818, 15.206433016304347, 12.38268143382353, 14.188963164893616, 7.588406165667167), # 51
(5.980673497442456, 15.143034375, 15.163322826086954, 12.373083823529411, 14.159540425531915, 7.5202809970015), # 52
(5.989286580882353, 15.153671058238638, 15.118821875, 12.363126654411765, 14.129045345744682, 7.450496551724138), # 53
(5.9977358056266, 15.164059090909088, 15.072984782608694, 12.352817647058824, 14.09750425531915, 7.379164767616192), # 54
(6.00602721387468, 15.174196555397728, 15.02586616847826, 12.342164522058825, 14.064943484042553, 7.306397582458771), # 55
(6.014166847826087, 15.184081534090907, 14.977520652173913, 12.331175, 14.031389361702129, 7.232306934032984), # 56
(6.022160749680308, 15.193712109375003, 14.92800285326087, 12.319856801470587, 13.996868218085105, 7.15700476011994), # 57
(6.030014961636829, 15.203086363636363, 14.877367391304347, 12.308217647058825, 13.961406382978723, 7.0806029985007495), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(2, 15, 5, 3, 2, 0, 9, 9, 8, 5, 1, 0), # 0
(6, 24, 15, 6, 5, 0, 21, 19, 15, 15, 4, 0), # 1
(8, 30, 22, 8, 7, 0, 24, 36, 20, 22, 7, 0), # 2
(14, 43, 29, 15, 11, 0, 31, 50, 25, 29, 9, 0), # 3
(19, 55, 38, 18, 14, 0, 43, 60, 34, 38, 12, 0), # 4
(23, 68, 52, 23, 14, 0, 47, 67, 41, 42, 12, 0), # 5
(32, 80, 60, 27, 16, 0, 57, 75, 49, 47, 18, 0), # 6
(37, 88, 66, 32, 18, 0, 62, 86, 59, 52, 20, 0), # 7
(44, 98, 73, 43, 20, 0, 69, 90, 68, 63, 26, 0), # 8
(53, 112, 75, 46, 22, 0, 76, 106, 75, 71, 29, 0), # 9
(59, 119, 91, 51, 24, 0, 79, 114, 84, 81, 31, 0), # 10
(66, 133, 98, 53, 29, 0, 95, 124, 90, 84, 36, 0), # 11
(73, 142, 107, 62, 34, 0, 101, 138, 95, 86, 37, 0), # 12
(83, 155, 114, 64, 38, 0, 109, 147, 100, 94, 40, 0), # 13
(88, 171, 125, 69, 39, 0, 119, 158, 111, 103, 41, 0), # 14
(91, 181, 132, 75, 41, 0, 126, 165, 118, 115, 44, 0), # 15
(95, 195, 141, 81, 46, 0, 141, 181, 126, 120, 47, 0), # 16
(98, 208, 149, 88, 50, 0, 150, 188, 135, 126, 52, 0), # 17
(103, 216, 160, 91, 58, 0, 156, 203, 143, 136, 53, 0), # 18
(111, 230, 168, 94, 61, 0, 162, 222, 150, 141, 55, 0), # 19
(118, 244, 180, 103, 63, 0, 169, 232, 158, 147, 58, 0), # 20
(122, 257, 193, 105, 66, 0, 174, 242, 171, 151, 61, 0), # 21
(132, 268, 203, 110, 70, 0, 182, 250, 177, 159, 63, 0), # 22
(141, 279, 211, 115, 74, 0, 193, 267, 182, 166, 66, 0), # 23
(148, 300, 219, 120, 79, 0, 196, 282, 193, 175, 69, 0), # 24
(153, 311, 231, 125, 80, 0, 207, 297, 199, 179, 72, 0), # 25
(157, 319, 243, 129, 81, 0, 218, 315, 209, 184, 74, 0), # 26
(163, 330, 252, 139, 85, 0, 224, 336, 215, 189, 76, 0), # 27
(170, 345, 262, 140, 86, 0, 231, 347, 225, 194, 80, 0), # 28
(175, 358, 271, 143, 90, 0, 237, 358, 235, 201, 83, 0), # 29
(180, 376, 281, 148, 91, 0, 245, 366, 241, 206, 86, 0), # 30
(190, 386, 285, 153, 91, 0, 250, 378, 250, 211, 90, 0), # 31
(194, 399, 301, 154, 93, 0, 269, 389, 257, 216, 95, 0), # 32
(199, 412, 312, 159, 94, 0, 280, 396, 267, 220, 98, 0), # 33
(201, 429, 318, 162, 96, 0, 289, 400, 278, 224, 101, 0), # 34
(212, 438, 330, 167, 102, 0, 292, 407, 287, 228, 101, 0), # 35
(218, 448, 340, 171, 104, 0, 300, 423, 296, 233, 107, 0), # 36
(227, 457, 351, 175, 109, 0, 313, 436, 296, 237, 108, 0), # 37
(230, 466, 360, 178, 114, 0, 323, 446, 300, 241, 110, 0), # 38
(231, 482, 377, 186, 116, 0, 328, 458, 305, 246, 111, 0), # 39
(237, 492, 385, 190, 118, 0, 331, 473, 308, 251, 113, 0), # 40
(244, 499, 397, 193, 122, 0, 341, 486, 317, 260, 117, 0), # 41
(250, 509, 404, 199, 125, 0, 349, 495, 328, 267, 120, 0), # 42
(255, 530, 413, 204, 133, 0, 353, 505, 335, 272, 121, 0), # 43
(261, 541, 423, 210, 135, 0, 363, 513, 340, 275, 126, 0), # 44
(269, 551, 427, 213, 137, 0, 370, 523, 348, 280, 130, 0), # 45
(274, 565, 438, 218, 140, 0, 379, 538, 366, 288, 133, 0), # 46
(282, 574, 453, 223, 141, 0, 381, 544, 373, 290, 134, 0), # 47
(289, 591, 463, 232, 144, 0, 390, 565, 383, 297, 139, 0), # 48
(298, 602, 474, 236, 148, 0, 400, 575, 394, 306, 142, 0), # 49
(301, 615, 481, 243, 156, 0, 404, 585, 402, 309, 146, 0), # 50
(306, 627, 491, 247, 159, 0, 416, 600, 410, 318, 150, 0), # 51
(312, 643, 505, 252, 165, 0, 424, 611, 417, 325, 154, 0), # 52
(323, 654, 513, 260, 167, 0, 430, 624, 425, 333, 159, 0), # 53
(335, 669, 519, 265, 170, 0, 438, 635, 428, 336, 165, 0), # 54
(342, 681, 533, 268, 172, 0, 441, 648, 432, 344, 170, 0), # 55
(350, 694, 541, 273, 176, 0, 445, 661, 436, 354, 171, 0), # 56
(357, 709, 547, 276, 179, 0, 453, 670, 447, 361, 173, 0), # 57
(360, 728, 558, 281, 183, 0, 458, 683, 455, 365, 178, 0), # 58
(360, 728, 558, 281, 183, 0, 458, 683, 455, 365, 178, 0), # 59
)
passenger_arriving_rate = (
(4.769372805092186, 9.786903409090908, 8.63377490359897, 4.56211956521739, 2.5714903846153843, 0.0, 8.562228260869567, 10.285961538461537, 6.843179347826086, 5.755849935732647, 2.446725852272727, 0.0), # 0
(4.81413961808604, 9.895739902146465, 8.680408780526994, 4.587526358695651, 2.5907639423076922, 0.0, 8.559309850543478, 10.363055769230769, 6.881289538043478, 5.786939187017995, 2.4739349755366162, 0.0), # 1
(4.8583952589991215, 10.00296202020202, 8.725935732647814, 4.612373913043478, 2.609630769230769, 0.0, 8.556302173913043, 10.438523076923076, 6.918560869565217, 5.817290488431875, 2.500740505050505, 0.0), # 2
(4.902102161984196, 10.1084540625, 8.770322501606683, 4.636641032608694, 2.628073557692308, 0.0, 8.553205638586958, 10.512294230769232, 6.954961548913042, 5.846881667737789, 2.527113515625, 0.0), # 3
(4.94522276119403, 10.212100328282828, 8.813535829048842, 4.66030652173913, 2.6460749999999997, 0.0, 8.550020652173911, 10.584299999999999, 6.990459782608696, 5.875690552699228, 2.553025082070707, 0.0), # 4
(4.987719490781387, 10.313785116792928, 8.855542456619537, 4.6833491847826085, 2.663617788461538, 0.0, 8.546747622282608, 10.654471153846153, 7.025023777173913, 5.90369497107969, 2.578446279198232, 0.0), # 5
(5.029554784899035, 10.413392727272727, 8.896309125964011, 4.705747826086957, 2.680684615384615, 0.0, 8.54338695652174, 10.72273846153846, 7.058621739130436, 5.930872750642674, 2.603348181818182, 0.0), # 6
(5.0706910776997365, 10.510807458964646, 8.935802578727506, 4.72748125, 2.697258173076923, 0.0, 8.5399390625, 10.789032692307693, 7.0912218750000005, 5.95720171915167, 2.6277018647411614, 0.0), # 7
(5.1110908033362605, 10.60591361111111, 8.97398955655527, 4.7485282608695645, 2.7133211538461537, 0.0, 8.536404347826087, 10.853284615384615, 7.122792391304347, 5.982659704370181, 2.6514784027777774, 0.0), # 8
(5.1507163959613695, 10.698595482954543, 9.010836801092546, 4.768867663043478, 2.7288562499999993, 0.0, 8.532783220108696, 10.915424999999997, 7.153301494565217, 6.007224534061697, 2.6746488707386358, 0.0), # 9
(5.1895302897278315, 10.788737373737373, 9.046311053984574, 4.7884782608695655, 2.743846153846154, 0.0, 8.529076086956522, 10.975384615384616, 7.182717391304348, 6.030874035989716, 2.697184343434343, 0.0), # 10
(5.227494918788412, 10.87622358270202, 9.080379056876605, 4.807338858695652, 2.7582735576923074, 0.0, 8.525283355978262, 11.03309423076923, 7.2110082880434785, 6.053586037917737, 2.719055895675505, 0.0), # 11
(5.2645727172958745, 10.960938409090907, 9.113007551413881, 4.825428260869565, 2.7721211538461534, 0.0, 8.521405434782608, 11.088484615384614, 7.238142391304347, 6.0753383676092545, 2.740234602272727, 0.0), # 12
(5.3007261194029835, 11.042766152146465, 9.144163279241644, 4.8427252717391305, 2.7853716346153847, 0.0, 8.51744273097826, 11.141486538461539, 7.264087907608696, 6.096108852827762, 2.760691538036616, 0.0), # 13
(5.335917559262511, 11.121591111111112, 9.173812982005138, 4.859208695652173, 2.7980076923076918, 0.0, 8.513395652173912, 11.192030769230767, 7.288813043478259, 6.115875321336759, 2.780397777777778, 0.0), # 14
(5.370109471027217, 11.19729758522727, 9.201923401349612, 4.874857336956521, 2.810012019230769, 0.0, 8.509264605978261, 11.240048076923076, 7.312286005434782, 6.134615600899742, 2.7993243963068175, 0.0), # 15
(5.403264288849868, 11.269769873737372, 9.228461278920308, 4.88965, 2.8213673076923076, 0.0, 8.50505, 11.28546923076923, 7.334474999999999, 6.152307519280206, 2.817442468434343, 0.0), # 16
(5.4353444468832315, 11.338892275883836, 9.253393356362468, 4.903565489130434, 2.83205625, 0.0, 8.500752241847827, 11.328225, 7.3553482336956515, 6.168928904241644, 2.834723068970959, 0.0), # 17
(5.46631237928007, 11.40454909090909, 9.276686375321336, 4.916582608695652, 2.842061538461539, 0.0, 8.496371739130435, 11.368246153846156, 7.374873913043479, 6.184457583547558, 2.8511372727272724, 0.0), # 18
(5.496130520193152, 11.466624618055553, 9.298307077442159, 4.928680163043477, 2.8513658653846155, 0.0, 8.491908899456522, 11.405463461538462, 7.393020244565217, 6.198871384961439, 2.866656154513888, 0.0), # 19
(5.524761303775241, 11.525003156565655, 9.318222204370178, 4.939836956521739, 2.859951923076923, 0.0, 8.487364130434782, 11.439807692307692, 7.409755434782609, 6.212148136246785, 2.8812507891414136, 0.0), # 20
(5.552167164179106, 11.579569005681815, 9.336398497750643, 4.95003179347826, 2.8678024038461536, 0.0, 8.482737839673913, 11.471209615384614, 7.425047690217391, 6.224265665167096, 2.894892251420454, 0.0), # 21
(5.578310535557506, 11.630206464646465, 9.352802699228791, 4.95924347826087, 2.8748999999999993, 0.0, 8.47803043478261, 11.499599999999997, 7.438865217391305, 6.235201799485861, 2.907551616161616, 0.0), # 22
(5.603153852063214, 11.67679983270202, 9.367401550449872, 4.967450815217391, 2.8812274038461534, 0.0, 8.473242323369567, 11.524909615384614, 7.451176222826087, 6.244934366966581, 2.919199958175505, 0.0), # 23
(5.62665954784899, 11.719233409090908, 9.380161793059125, 4.974632608695652, 2.8867673076923075, 0.0, 8.468373913043479, 11.54706923076923, 7.461948913043478, 6.25344119537275, 2.929808352272727, 0.0), # 24
(5.648790057067603, 11.757391493055556, 9.391050168701799, 4.980767663043478, 2.8915024038461534, 0.0, 8.463425611413044, 11.566009615384614, 7.471151494565217, 6.260700112467866, 2.939347873263889, 0.0), # 25
(5.669507813871817, 11.79115838383838, 9.400033419023135, 4.985834782608695, 2.8954153846153843, 0.0, 8.458397826086957, 11.581661538461537, 7.478752173913043, 6.266688946015424, 2.947789595959595, 0.0), # 26
(5.688775252414398, 11.820418380681815, 9.40707828566838, 4.989812771739131, 2.8984889423076923, 0.0, 8.453290964673915, 11.593955769230769, 7.484719157608696, 6.271385523778919, 2.9551045951704538, 0.0), # 27
(5.7065548068481124, 11.84505578282828, 9.412151510282778, 4.992680434782609, 2.9007057692307687, 0.0, 8.448105434782608, 11.602823076923075, 7.489020652173913, 6.274767673521851, 2.96126394570707, 0.0), # 28
(5.722808911325724, 11.864954889520202, 9.415219834511568, 4.994416576086956, 2.902048557692307, 0.0, 8.44284164402174, 11.608194230769229, 7.491624864130435, 6.276813223007712, 2.9662387223800506, 0.0), # 29
(5.7375, 11.879999999999999, 9.41625, 4.995, 2.9025, 0.0, 8.4375, 11.61, 7.4925, 6.277499999999999, 2.9699999999999998, 0.0), # 30
(5.751246651214834, 11.892497471590906, 9.415477744565216, 4.994894632352941, 2.9023357180851064, 0.0, 8.430077267616193, 11.609342872340426, 7.492341948529411, 6.276985163043476, 2.9731243678977264, 0.0), # 31
(5.7646965153452685, 11.904829772727274, 9.413182826086956, 4.994580588235293, 2.901846382978723, 0.0, 8.418644565217393, 11.607385531914892, 7.49187088235294, 6.275455217391303, 2.9762074431818184, 0.0), # 32
(5.777855634590792, 11.916995369318181, 9.40939801630435, 4.994060955882353, 2.9010372606382977, 0.0, 8.403313830584706, 11.60414904255319, 7.491091433823529, 6.272932010869566, 2.9792488423295453, 0.0), # 33
(5.790730051150895, 11.928992727272727, 9.40415608695652, 4.993338823529412, 2.899913617021276, 0.0, 8.38419700149925, 11.599654468085104, 7.490008235294118, 6.269437391304347, 2.9822481818181816, 0.0), # 34
(5.803325807225064, 11.940820312499996, 9.39748980978261, 4.9924172794117645, 2.898480718085106, 0.0, 8.361406015742128, 11.593922872340425, 7.488625919117647, 6.264993206521739, 2.985205078124999, 0.0), # 35
(5.815648945012788, 11.952476590909091, 9.389431956521738, 4.9912994117647065, 2.896743829787234, 0.0, 8.335052811094453, 11.586975319148936, 7.486949117647059, 6.259621304347825, 2.988119147727273, 0.0), # 36
(5.8277055067135555, 11.96396002840909, 9.380015298913044, 4.989988308823529, 2.8947082180851056, 0.0, 8.305249325337332, 11.578832872340422, 7.484982463235293, 6.253343532608695, 2.9909900071022726, 0.0), # 37
(5.839501534526853, 11.97526909090909, 9.369272608695653, 4.988487058823529, 2.89237914893617, 0.0, 8.272107496251873, 11.56951659574468, 7.4827305882352935, 6.246181739130434, 2.9938172727272727, 0.0), # 38
(5.851043070652174, 11.986402244318182, 9.357236657608695, 4.98679875, 2.8897618882978717, 0.0, 8.23573926161919, 11.559047553191487, 7.480198125, 6.23815777173913, 2.9966005610795454, 0.0), # 39
(5.862336157289003, 11.997357954545455, 9.343940217391305, 4.984926470588235, 2.886861702127659, 0.0, 8.196256559220389, 11.547446808510635, 7.477389705882353, 6.22929347826087, 2.999339488636364, 0.0), # 40
(5.873386836636828, 12.008134687499997, 9.329416059782607, 4.982873308823529, 2.8836838563829783, 0.0, 8.153771326836583, 11.534735425531913, 7.474309963235294, 6.219610706521738, 3.002033671874999, 0.0), # 41
(5.88420115089514, 12.01873090909091, 9.31369695652174, 4.980642352941176, 2.880233617021277, 0.0, 8.108395502248875, 11.520934468085107, 7.4709635294117644, 6.209131304347826, 3.0046827272727277, 0.0), # 42
(5.894785142263428, 12.02914508522727, 9.296815679347825, 4.978236691176471, 2.8765162499999994, 0.0, 8.060241023238381, 11.506064999999998, 7.467355036764706, 6.1978771195652165, 3.0072862713068176, 0.0), # 43
(5.905144852941176, 12.03937568181818, 9.278805, 4.975659411764705, 2.8725370212765955, 0.0, 8.009419827586207, 11.490148085106382, 7.4634891176470575, 6.1858699999999995, 3.009843920454545, 0.0), # 44
(5.915286325127877, 12.049421164772726, 9.259697690217394, 4.972913602941176, 2.8683011968085106, 0.0, 7.956043853073464, 11.473204787234042, 7.459370404411764, 6.1731317934782615, 3.0123552911931815, 0.0), # 45
(5.925215601023019, 12.059280000000001, 9.239526521739132, 4.970002352941176, 2.8638140425531913, 0.0, 7.90022503748126, 11.455256170212765, 7.455003529411765, 6.159684347826087, 3.0148200000000003, 0.0), # 46
(5.934938722826087, 12.06895065340909, 9.218324266304347, 4.966928749999999, 2.859080824468085, 0.0, 7.842075318590705, 11.43632329787234, 7.450393124999999, 6.145549510869564, 3.0172376633522724, 0.0), # 47
(5.944461732736574, 12.07843159090909, 9.196123695652174, 4.9636958823529405, 2.854106808510638, 0.0, 7.7817066341829095, 11.416427234042551, 7.445543823529412, 6.130749130434782, 3.0196078977272727, 0.0), # 48
(5.953790672953963, 12.087721278409088, 9.17295758152174, 4.960306838235294, 2.8488972606382976, 0.0, 7.71923092203898, 11.39558904255319, 7.4404602573529415, 6.115305054347826, 3.021930319602272, 0.0), # 49
(5.96293158567775, 12.096818181818177, 9.148858695652175, 4.956764705882353, 2.8434574468085105, 0.0, 7.65476011994003, 11.373829787234042, 7.43514705882353, 6.099239130434783, 3.0242045454545443, 0.0), # 50
(5.971890513107417, 12.105720767045453, 9.123859809782608, 4.953072573529411, 2.837792632978723, 0.0, 7.588406165667167, 11.351170531914892, 7.429608860294118, 6.082573206521738, 3.026430191761363, 0.0), # 51
(5.980673497442456, 12.114427499999998, 9.097993695652173, 4.949233529411764, 2.8319080851063827, 0.0, 7.5202809970015, 11.32763234042553, 7.4238502941176465, 6.065329130434781, 3.0286068749999995, 0.0), # 52
(5.989286580882353, 12.122936846590909, 9.071293125, 4.945250661764706, 2.8258090691489364, 0.0, 7.450496551724138, 11.303236276595745, 7.417875992647058, 6.04752875, 3.030734211647727, 0.0), # 53
(5.9977358056266, 12.13124727272727, 9.043790869565216, 4.941127058823529, 2.8195008510638297, 0.0, 7.379164767616192, 11.278003404255319, 7.411690588235294, 6.0291939130434775, 3.0328118181818176, 0.0), # 54
(6.00602721387468, 12.139357244318182, 9.015519701086955, 4.93686580882353, 2.8129886968085103, 0.0, 7.306397582458771, 11.251954787234041, 7.405298713235295, 6.010346467391304, 3.0348393110795455, 0.0), # 55
(6.014166847826087, 12.147265227272724, 8.986512391304348, 4.9324699999999995, 2.8062778723404254, 0.0, 7.232306934032984, 11.225111489361701, 7.398705, 5.991008260869565, 3.036816306818181, 0.0), # 56
(6.022160749680308, 12.154969687500001, 8.95680171195652, 4.927942720588234, 2.7993736436170207, 0.0, 7.15700476011994, 11.197494574468083, 7.391914080882352, 5.9712011413043475, 3.0387424218750003, 0.0), # 57
(6.030014961636829, 12.16246909090909, 8.926420434782608, 4.923287058823529, 2.792281276595744, 0.0, 7.0806029985007495, 11.169125106382976, 7.384930588235295, 5.950946956521738, 3.0406172727272724, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# Initial entropy used to construct the root numpy SeedSequence
# (see https://numpy.org/doc/stable/reference/random/parallel.html)
entropy = 258194110137029475889902652135037600173
# Indices of the SeedSequence children spawned from the root sequence;
# one entry per independent random stream.
child_seed_index = (
    1, # 0
    46, # 1
)
| 113.540299
| 213
| 0.73015
| 5,147
| 38,036
| 5.393627
| 0.239363
| 0.311228
| 0.246389
| 0.466842
| 0.328518
| 0.326645
| 0.326141
| 0.326141
| 0.326141
| 0.326141
| 0
| 0.81982
| 0.118677
| 38,036
| 334
| 214
| 113.88024
| 0.008323
| 0.031838
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4599568de9d33307f2941439c89fdd2b50cdda0f
| 4,416
|
py
|
Python
|
tests/test_objects_list.py
|
omaralvarez/trakt.py
|
93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf
|
[
"MIT"
] | null | null | null |
tests/test_objects_list.py
|
omaralvarez/trakt.py
|
93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf
|
[
"MIT"
] | null | null | null |
tests/test_objects_list.py
|
omaralvarez/trakt.py
|
93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf
|
[
"MIT"
] | null | null | null |
from tests.core.helpers import authenticated_response
from trakt import Trakt
import responses
@responses.activate
def test_list_add():
    """Adding items to a list should POST to the list's items endpoint."""
    def register(method, url, cb):
        # All mocked endpoints share the JSON content type.
        responses.add_callback(method, url, callback=cb,
                               content_type='application/json')

    register(responses.GET, 'http://mock/users/me/lists/movies',
             authenticated_response('fixtures/users/me/lists/movies.json'))
    register(responses.POST, 'http://mock/users/me/lists/123456/items',
             authenticated_response(data='{"mock": "mock"}'))

    Trakt.base_url = 'http://mock'

    items = {'shows': [{'ids': {'tvdb': 121361}}]}
    with Trakt.configuration.auth('mock', 'mock'):
        result = Trakt['users/me/lists/movies'].get().add(items)

    assert result is not None
@responses.activate
def test_list_delete():
    """Deleting a list should DELETE the list endpoint and report success."""
    def register(method, url, cb):
        responses.add_callback(method, url, callback=cb,
                               content_type='application/json')

    register(responses.GET, 'http://mock/users/me/lists/movies',
             authenticated_response('fixtures/users/me/lists/movies.json'))
    register(responses.DELETE, 'http://mock/users/me/lists/123456',
             authenticated_response(data='{"mock": "mock"}'))

    Trakt.base_url = 'http://mock'

    with Trakt.configuration.auth('mock', 'mock'):
        outcome = Trakt['users/me/lists/movies'].get().delete()

    assert outcome is True
@responses.activate
def test_list_update():
    """Updating list attributes should PUT to the list endpoint."""
    endpoints = (
        (responses.GET, 'http://mock/users/me/lists/movies',
         authenticated_response('fixtures/users/me/lists/movies.json')),
        (responses.PUT, 'http://mock/users/me/lists/123456',
         authenticated_response('fixtures/users/me/lists/shows.json')),
    )
    for method, url, cb in endpoints:
        responses.add_callback(method, url, callback=cb,
                               content_type='application/json')

    Trakt.base_url = 'http://mock'

    with Trakt.configuration.auth('mock', 'mock'):
        result = Trakt['users/me/lists/movies'].get().update(name="Shows (2)")

    assert result is not None
@responses.activate
def test_list_remove():
    """Removing items should POST to the list's items/remove endpoint."""
    def register(method, url, cb):
        responses.add_callback(method, url, callback=cb,
                               content_type='application/json')

    register(responses.GET, 'http://mock/users/me/lists/movies',
             authenticated_response('fixtures/users/me/lists/movies.json'))
    register(responses.POST, 'http://mock/users/me/lists/123456/items/remove',
             authenticated_response(data='{"mock": "mock"}'))

    Trakt.base_url = 'http://mock'

    items = {'shows': [{'ids': {'tvdb': 121361}}]}
    with Trakt.configuration.auth('mock', 'mock'):
        result = Trakt['users/me/lists/movies'].get().remove(items)

    assert result is not None
@responses.activate
def test_list_like():
    """Liking a list should POST to the list's like endpoint."""
    def register(method, url, cb):
        responses.add_callback(method, url, callback=cb,
                               content_type='application/json')

    register(responses.GET, 'http://mock/users/me/lists/movies',
             authenticated_response('fixtures/users/me/lists/movies.json'))
    register(responses.POST, 'http://mock/users/me/lists/123456/like',
             authenticated_response(data='{"mock": "mock"}'))

    Trakt.base_url = 'http://mock'

    with Trakt.configuration.auth('mock', 'mock'):
        outcome = Trakt['users/me/lists/movies'].get().like()

    assert outcome is True
@responses.activate
def test_list_unlike():
    """Un-liking a list should DELETE the list's like endpoint."""
    def register(method, url, cb):
        responses.add_callback(method, url, callback=cb,
                               content_type='application/json')

    register(responses.GET, 'http://mock/users/me/lists/movies',
             authenticated_response('fixtures/users/me/lists/movies.json'))
    register(responses.DELETE, 'http://mock/users/me/lists/123456/like',
             authenticated_response(data='{"mock": "mock"}'))

    Trakt.base_url = 'http://mock'

    with Trakt.configuration.auth('mock', 'mock'):
        outcome = Trakt['users/me/lists/movies'].get().unlike()

    assert outcome is True
| 27.949367
| 79
| 0.643569
| 503
| 4,416
| 5.516899
| 0.109344
| 0.063063
| 0.108108
| 0.116757
| 0.932613
| 0.921081
| 0.921081
| 0.913874
| 0.913874
| 0.86018
| 0
| 0.01406
| 0.210824
| 4,416
| 158
| 80
| 27.949367
| 0.782209
| 0
| 0
| 0.669565
| 0
| 0
| 0.274847
| 0.083767
| 0
| 0
| 0
| 0
| 0.052174
| 1
| 0.052174
| false
| 0
| 0.026087
| 0
| 0.078261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
45bc50b731cf70112b5aafcabf3780fb8f683029
| 93
|
py
|
Python
|
probe_ably/core/tasks/utils/__init__.py
|
ai-systems/Probe-Ably
|
45b283ee5068289f9a1844cae9f109c000507723
|
[
"MIT"
] | 12
|
2021-03-26T14:56:42.000Z
|
2022-02-11T15:37:58.000Z
|
probe_ably/core/tasks/utils/__init__.py
|
ai-systems/Probe-Ably
|
45b283ee5068289f9a1844cae9f109c000507723
|
[
"MIT"
] | 1
|
2021-11-28T13:45:22.000Z
|
2021-11-28T13:45:22.000Z
|
probe_ably/core/tasks/utils/__init__.py
|
ai-systems/Probe-Ably
|
45b283ee5068289f9a1844cae9f109c000507723
|
[
"MIT"
] | null | null | null |
from .read_input_task import ReadInputTask
from .visualization_task import VisualiaztionTask
| 31
| 49
| 0.892473
| 11
| 93
| 7.272727
| 0.727273
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 93
| 2
| 50
| 46.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45d108d58b5811673fd7982ea764cff0e29030a6
| 2,983
|
py
|
Python
|
plugins/diffusion/tests/test_io.py
|
bsavelev/medipy
|
f0da3750a6979750d5f4c96aedc89ad5ae74545f
|
[
"CECILL-B"
] | null | null | null |
plugins/diffusion/tests/test_io.py
|
bsavelev/medipy
|
f0da3750a6979750d5f4c96aedc89ad5ae74545f
|
[
"CECILL-B"
] | null | null | null |
plugins/diffusion/tests/test_io.py
|
bsavelev/medipy
|
f0da3750a6979750d5f4c96aedc89ad5ae74545f
|
[
"CECILL-B"
] | 1
|
2022-03-04T05:47:08.000Z
|
2022-03-04T05:47:08.000Z
|
import os
import shutil
import tempfile
import unittest
import numpy
import medipy.base
import medipy.io
import medipy.diffusion
class TestIO(unittest.TestCase):
    """Round-trip save/load tests for diffusion tensor images.

    Each test builds a synthetic 4D "tensor_2" image (10x20x30 voxels with 6
    tensor components), writes it to a temporary directory, reads it back,
    and checks that the loader, metadata and voxel data survived the trip.
    """

    def test_itk_io(self):
        """Save and reload a tensor image through the ITK (.nii) loader."""
        tensors = medipy.base.Image(
            data=numpy.arange(10*20*30*6, dtype=numpy.single).reshape((10,20,30,6)),
            dti="tensor_2",
            origin=(1,2,3), spacing=(4,5,6), direction=medipy.base.coordinate_system.RAS)

        directory = tempfile.mkdtemp()
        medipy.io.save(tensors, os.path.join(directory, "tensors.nii"))

        other_tensors = medipy.io.load(
            os.path.join(directory, "tensors.nii"), None)
        # The ITK loader must be the one that handled the .nii file.
        self.assertTrue(isinstance(
            other_tensors.metadata["loader"]["loader"],
            medipy.diffusion.itk_io.ITK))

        # Spatial shape excludes the 6-element tensor-component axis.
        self.assertEqual(other_tensors.shape, (10,20,30))
        self.assertEqual(other_tensors.dtype, numpy.single)
        self.assertEqual(other_tensors.data_type, "vector")
        self.assertEqual(other_tensors.image_type, "tensor_2")
        numpy.testing.assert_array_almost_equal(other_tensors.origin, (1,2,3))
        numpy.testing.assert_array_almost_equal(other_tensors.spacing, (4,5,6))
        numpy.testing.assert_array_almost_equal(
            other_tensors.direction, medipy.base.coordinate_system.RAS)
        numpy.testing.assert_array_almost_equal(
            other_tensors.data,
            numpy.arange(10*20*30*6, dtype=numpy.single).reshape((10,20,30,6)))

        # Remove the temporary directory and the saved file.
        shutil.rmtree(directory)

    def test_vtk_io(self):
        """Save and reload a tensor image through the VTK (.vtk) loader."""
        tensors = medipy.base.Image(
            data=numpy.arange(10*20*30*6, dtype=numpy.single).reshape((10,20,30,6)),
            dti="tensor_2",
            origin=(1,2,3), spacing=(4,5,6), direction=medipy.base.coordinate_system.LPS)

        directory = tempfile.mkdtemp()
        medipy.io.save(tensors, os.path.join(directory, "tensors.vtk"))

        other_tensors = medipy.io.load(
            os.path.join(directory, "tensors.vtk"), None)
        # The VTK loader must be the one that handled the .vtk file.
        self.assertTrue(isinstance(
            other_tensors.metadata["loader"]["loader"],
            medipy.diffusion.vtk_io.Vtk))

        self.assertEqual(other_tensors.shape, (10,20,30))
        self.assertEqual(other_tensors.dtype, numpy.single)
        self.assertEqual(other_tensors.data_type, "vector")
        self.assertEqual(other_tensors.image_type, "tensor_2")
        numpy.testing.assert_array_almost_equal(other_tensors.origin, (1,2,3))
        numpy.testing.assert_array_almost_equal(other_tensors.spacing, (4,5,6))
        numpy.testing.assert_array_almost_equal(
            other_tensors.direction, medipy.base.coordinate_system.LPS)
        numpy.testing.assert_array_almost_equal(
            other_tensors.data,
            numpy.arange(10*20*30*6, dtype=numpy.single).reshape((10,20,30,6)))

        shutil.rmtree(directory)
# Run the test suite when the module is executed as a script.
if __name__ == "__main__" :
    unittest.main()
| 37.759494
| 89
| 0.642642
| 374
| 2,983
| 4.938503
| 0.184492
| 0.12994
| 0.032485
| 0.030319
| 0.894423
| 0.894423
| 0.881429
| 0.881429
| 0.881429
| 0.881429
| 0
| 0.041885
| 0.231646
| 2,983
| 78
| 90
| 38.24359
| 0.763962
| 0
| 0
| 0.610169
| 0
| 0
| 0.040228
| 0
| 0
| 0
| 0
| 0
| 0.305085
| 1
| 0.033898
| false
| 0
| 0.135593
| 0
| 0.186441
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2fb0d19430a1dfba3007eefc0c0eb056cf430ea1
| 6,522
|
py
|
Python
|
tests/scaffold/test_validation.py
|
haowen-xu/tfsnippet-pre-alpha
|
31eb2cf692ac25b95cc815aaca53754d6db42d9f
|
[
"MIT"
] | null | null | null |
tests/scaffold/test_validation.py
|
haowen-xu/tfsnippet-pre-alpha
|
31eb2cf692ac25b95cc815aaca53754d6db42d9f
|
[
"MIT"
] | null | null | null |
tests/scaffold/test_validation.py
|
haowen-xu/tfsnippet-pre-alpha
|
31eb2cf692ac25b95cc815aaca53754d6db42d9f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import unittest
import tensorflow as tf
from tfsnippet.scaffold import early_stopping
from tfsnippet.utils import (TemporaryDirectory,
set_variable_values,
get_variable_values)
from tests.helper import TestCase
def _populate_variables():
    """Create scalar int32 variables 'a', 'b', 'c' initialised to 1, 2, 3."""
    variables = [
        tf.get_variable(name, shape=(), dtype=tf.int32)
        for name in ('a', 'b', 'c')
    ]
    set_variable_values(variables, [1, 2, 3])
    return variables
class EarlyStoppingTestCase(TestCase):
    """Tests for the `early_stopping` context manager.

    The context snapshots the values of `param_vars` each time `update`
    reports a better metric, and restores the best snapshot when the
    context exits.  Variables outside `param_vars` (here `c`) are never
    touched by the restore.
    """

    def test_basic(self):
        """Core save-on-improvement / restore-on-exit behaviour."""
        with self.get_session():
            a, b, c = _populate_variables()
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])

            # test: param-vars must not be empty
            with self.assertRaisesRegex(
                    ValueError, '`param_vars` must not be empty.'):
                with early_stopping([]):
                    pass

            # test: early-stopping context without updating loss
            # (no update() call -> no snapshot -> nothing restored on exit)
            with early_stopping([a, b]):
                set_variable_values([a], [10])
            self.assertEqual(get_variable_values([a, b, c]), [10, 2, 3])

            # test: the first loss will always cause saving
            with early_stopping([a, b]) as es:
                set_variable_values([a], [10])
                self.assertTrue(es.update(1.))
                set_variable_values([a, b], [100, 20])
            self.assertAlmostEqual(es.best_metric, 1.)
            # exit restored the snapshot taken at update(1.)
            self.assertEqual(get_variable_values([a, b, c]), [10, 2, 3])

            # test: memorize the best loss
            set_variable_values([a, b, c], [1, 2, 3])
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])
            with early_stopping([a, b]) as es:
                set_variable_values([a], [10])
                self.assertTrue(es.update(1.))
                self.assertAlmostEqual(es.best_metric, 1.)
                set_variable_values([a, b], [100, 20])
                self.assertTrue(es.update(.5))
                self.assertAlmostEqual(es.best_metric, .5)
                set_variable_values([a, b, c], [1000, 200, 30])
                # .8 is worse than .5: not saved, best metric unchanged
                self.assertFalse(es.update(.8))
                self.assertAlmostEqual(es.best_metric, .5)
            self.assertAlmostEqual(es.best_metric, .5)
            # a, b restored to the .5 snapshot; c keeps its latest value
            self.assertEqual(get_variable_values([a, b, c]), [100, 20, 30])

            # test: initial_loss
            set_variable_values([a, b, c], [1, 2, 3])
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])
            with early_stopping([a, b], initial_metric=.6) as es:
                set_variable_values([a], [10])
                # 1. does not beat the provided initial metric .6
                self.assertFalse(es.update(1.))
                self.assertAlmostEqual(es.best_metric, .6)
                set_variable_values([a, b], [100, 20])
                self.assertTrue(es.update(.5))
                self.assertAlmostEqual(es.best_metric, .5)
            self.assertEqual(get_variable_values([a, b, c]), [100, 20, 3])

    def test_restore_on_error(self):
        """`restore_on_error` decides whether an exception triggers restore."""
        with self.get_session():
            a, b, c = _populate_variables()
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])

            # test: do not restore on error
            with self.assertRaisesRegex(ValueError, 'value error'):
                with early_stopping([a, b], restore_on_error=False) as es:
                    self.assertTrue(es.update(1.))
                    set_variable_values([a, b], [10, 20])
                    raise ValueError('value error')
            self.assertAlmostEqual(es.best_metric, 1.)
            # values left as-is because restore_on_error is False
            self.assertEqual(get_variable_values([a, b, c]), [10, 20, 3])

            # test: restore on error
            set_variable_values([a, b, c], [1, 2, 3])
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])
            with self.assertRaisesRegex(ValueError, 'value error'):
                with early_stopping([a, b], restore_on_error=True) as es:
                    self.assertTrue(es.update(1.))
                    set_variable_values([a, b], [10, 20])
                    raise ValueError('value error')
            self.assertAlmostEqual(es.best_metric, 1.)
            # snapshot at update(1.) restored despite the exception
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])

    def test_bigger_is_better(self):
        """`smaller_is_better=False` inverts the comparison direction."""
        with self.get_session():
            a, b, c = _populate_variables()
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])

            # test: memorize the best loss
            set_variable_values([a, b, c], [1, 2, 3])
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])
            with early_stopping([a, b], smaller_is_better=False) as es:
                set_variable_values([a], [10])
                self.assertTrue(es.update(.5))
                self.assertAlmostEqual(es.best_metric, .5)
                set_variable_values([a, b], [100, 20])
                # 1. > .5, so it is an improvement in this mode
                self.assertTrue(es.update(1.))
                self.assertAlmostEqual(es.best_metric, 1.)
                set_variable_values([a, b, c], [1000, 200, 30])
                self.assertFalse(es.update(.8))
                self.assertAlmostEqual(es.best_metric, 1.)
            self.assertAlmostEqual(es.best_metric, 1.)
            self.assertEqual(get_variable_values([a, b, c]), [100, 20, 30])

    def test_save_dir(self):
        """Snapshots go to `save_dir`; `cleanup` controls its removal."""
        with self.get_session():
            a, b, c = _populate_variables()
            self.assertEqual(get_variable_values([a, b, c]), [1, 2, 3])

            with TemporaryDirectory() as tempdir:
                # test cleanup save_dir
                save_dir = os.path.join(tempdir, '1')
                with early_stopping([a, b], save_dir=save_dir) as es:
                    self.assertTrue(es.update(1.))
                    self.assertTrue(
                        os.path.exists(os.path.join(save_dir, 'latest')))
                # default cleanup removes the directory on exit
                self.assertFalse(os.path.exists(save_dir))

                # test not cleanup save_dir
                save_dir = os.path.join(tempdir, '2')
                with early_stopping([a, b], save_dir=save_dir,
                                    cleanup=False) as es:
                    self.assertTrue(es.update(1.))
                    self.assertTrue(
                        os.path.exists(os.path.join(save_dir, 'latest')))
                # with cleanup=False the checkpoint survives the context
                self.assertTrue(
                    os.path.exists(os.path.join(save_dir, 'latest')))
# Run the test suite when the module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 43.771812
| 75
| 0.544312
| 802
| 6,522
| 4.240648
| 0.128429
| 0.024699
| 0.145545
| 0.131726
| 0.779771
| 0.774184
| 0.768303
| 0.752132
| 0.728609
| 0.678624
| 0
| 0.037868
| 0.323827
| 6,522
| 148
| 76
| 44.067568
| 0.733333
| 0.050751
| 0
| 0.652174
| 0
| 0
| 0.017155
| 0
| 0
| 0
| 0
| 0
| 0.417391
| 1
| 0.043478
| false
| 0.008696
| 0.052174
| 0
| 0.113043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2fc73255fc7175f53a2c57f4e7942a5944ee76dd
| 5,754
|
py
|
Python
|
TAE.py
|
behnamh217rn21/DTC
|
4f70d6b24722bd9f8331502d9cae00d35686a4d2
|
[
"MIT"
] | 153
|
2019-06-13T12:38:22.000Z
|
2022-03-23T02:35:11.000Z
|
TAE.py
|
behnamh217rn21/DTC
|
4f70d6b24722bd9f8331502d9cae00d35686a4d2
|
[
"MIT"
] | 21
|
2019-11-19T11:38:26.000Z
|
2022-02-13T21:07:11.000Z
|
TAE.py
|
behnamh217rn21/DTC
|
4f70d6b24722bd9f8331502d9cae00d35686a4d2
|
[
"MIT"
] | 48
|
2019-07-01T07:55:29.000Z
|
2022-03-02T21:45:20.000Z
|
"""
Implementation of the Deep Temporal Clustering model
Temporal Autoencoder (TAE)
@author Florent Forest (FlorentF9)
"""
from keras.models import Model
from keras.layers import Input, Conv1D, LeakyReLU, MaxPool1D, CuDNNLSTM, Bidirectional, TimeDistributed, Dense, Reshape
from keras.layers import UpSampling2D, Conv2DTranspose
def temporal_autoencoder(input_dim, timesteps, n_filters=50, kernel_size=10, strides=1, pool_size=10, n_units=(50, 1)):
    """
    Temporal Autoencoder (TAE) model with Convolutional and BiLSTM layers.

    # Arguments
        input_dim: input dimension
        timesteps: number of timesteps; must be a positive multiple of pool_size
            (the decoder input shape depends on timesteps // pool_size, so
            variable-length sequences are not supported by this builder)
        n_filters: number of filters in convolutional layer
        kernel_size: size of kernel in convolutional layer
        strides: strides in convolutional layer
        pool_size: pooling size in max pooling layer, must divide time series length
        n_units: numbers of units in the two BiLSTM layers
    # Return
        (ae_model, encoder_model, decoder_model): AE, encoder and decoder models
    """
    # Fix: `n_units` previously defaulted to a mutable list ([50, 1]); a tuple
    # is index-compatible and avoids the shared-mutable-default pitfall.
    assert timesteps % pool_size == 0

    # Input
    x = Input(shape=(timesteps, input_dim), name='input_seq')

    # Encoder: Conv1D feature extraction -> temporal pooling -> two BiLSTMs
    # (merge_mode='sum' keeps the latent width equal to n_units[i])
    encoded = Conv1D(n_filters, kernel_size, strides=strides, padding='same', activation='linear')(x)
    encoded = LeakyReLU()(encoded)
    encoded = MaxPool1D(pool_size)(encoded)
    encoded = Bidirectional(CuDNNLSTM(n_units[0], return_sequences=True), merge_mode='sum')(encoded)
    encoded = LeakyReLU()(encoded)
    encoded = Bidirectional(CuDNNLSTM(n_units[1], return_sequences=True), merge_mode='sum')(encoded)
    encoded = LeakyReLU(name='latent')(encoded)

    # Decoder: upsample the pooled axis back to `timesteps`, then project to
    # `input_dim` with a transposed convolution.
    decoded = Reshape((-1, 1, n_units[1]), name='reshape')(encoded)
    decoded = UpSampling2D((pool_size, 1), name='upsampling')(decoded)
    decoded = Conv2DTranspose(input_dim, (kernel_size, 1), padding='same', name='conv2dtranspose')(decoded)
    output = Reshape((-1, input_dim), name='output_seq')(decoded)

    # AE model
    autoencoder = Model(inputs=x, outputs=output, name='AE')

    # Encoder model (shares the trained encoder layers)
    encoder = Model(inputs=x, outputs=encoded, name='encoder')

    # Decoder model: feed a standalone latent input through the same (shared)
    # decoder layers fetched from the autoencoder by name.
    encoded_input = Input(shape=(timesteps // pool_size, n_units[1]), name='decoder_input')
    decoded = autoencoder.get_layer('reshape')(encoded_input)
    decoded = autoencoder.get_layer('upsampling')(decoded)
    decoded = autoencoder.get_layer('conv2dtranspose')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    decoder = Model(inputs=encoded_input, outputs=decoder_output, name='decoder')

    return autoencoder, encoder, decoder
def temporal_autoencoder_v2(input_dim, timesteps, n_filters=50, kernel_size=10, strides=1, pool_size=10, n_units=(50, 1)):
    """
    Temporal Autoencoder (TAE) model with Convolutional and BiLSTM layers.

    Variant of `temporal_autoencoder` that concatenates (rather than sums) the
    BiLSTM directions and adds a TimeDistributed Dense labeling stage in the
    decoder; the latent width is therefore 2 * n_units[1].

    # Arguments
        input_dim: input dimension
        timesteps: number of timesteps; must be a positive multiple of pool_size
        n_filters: number of filters in convolutional layer
        kernel_size: size of kernel in convolutional layer
        strides: strides in convolutional layer
        pool_size: pooling size in max pooling layer
        n_units: numbers of units in the two BiLSTM layers
    # Return
        (ae_model, encoder_model, decoder_model): AE, encoder and decoder models
    """
    # Fix: `n_units` previously defaulted to a mutable list ([50, 1]); a tuple
    # is index-compatible and avoids the shared-mutable-default pitfall.
    assert (timesteps % pool_size == 0)

    # Input
    x = Input(shape=(timesteps, input_dim), name='input_seq')

    # Encoder: Conv1D -> pooling -> two BiLSTMs with concatenated directions
    encoded = Conv1D(n_filters, kernel_size, strides=strides, padding='same', activation='linear')(x)
    encoded = LeakyReLU()(encoded)
    encoded = MaxPool1D(pool_size)(encoded)
    encoded = Bidirectional(CuDNNLSTM(n_units[0], return_sequences=True), merge_mode='concat')(encoded)
    encoded = LeakyReLU()(encoded)
    encoded = Bidirectional(CuDNNLSTM(n_units[1], return_sequences=True), merge_mode='concat')(encoded)
    encoded = LeakyReLU(name='latent')(encoded)

    # Decoder: per-timestep Dense projection, then upsample and deconvolve
    decoded = TimeDistributed(Dense(units=n_filters), name='dense')(encoded)  # sequence labeling
    decoded = LeakyReLU(name='act')(decoded)
    decoded = Reshape((-1, 1, n_filters), name='reshape')(decoded)
    decoded = UpSampling2D((pool_size, 1), name='upsampling')(decoded)
    decoded = Conv2DTranspose(input_dim, (kernel_size, 1), padding='same', name='conv2dtranspose')(decoded)
    output = Reshape((-1, input_dim), name='output_seq')(decoded)

    # AE model
    autoencoder = Model(inputs=x, outputs=output, name='AE')

    # Encoder model (shares the trained encoder layers)
    encoder = Model(inputs=x, outputs=encoded, name='encoder')

    # Decoder model: standalone latent input through the shared decoder layers.
    # Latent width is doubled because merge_mode='concat'.
    encoded_input = Input(shape=(timesteps // pool_size, 2 * n_units[1]), name='decoder_input')
    decoded = autoencoder.get_layer('dense')(encoded_input)
    decoded = autoencoder.get_layer('act')(decoded)
    decoded = autoencoder.get_layer('reshape')(decoded)
    decoded = autoencoder.get_layer('upsampling')(decoded)
    decoded = autoencoder.get_layer('conv2dtranspose')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    decoder = Model(inputs=encoded_input, outputs=decoder_output, name='decoder')

    return autoencoder, encoder, decoder
| 43.590909
| 181
| 0.715328
| 706
| 5,754
| 5.689802
| 0.154391
| 0.02589
| 0.047299
| 0.05178
| 0.868808
| 0.848643
| 0.836196
| 0.836196
| 0.808066
| 0.771222
| 0
| 0.012818
| 0.172923
| 5,754
| 131
| 182
| 43.923664
| 0.831267
| 0.322558
| 0
| 0.641509
| 0
| 0
| 0.084891
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 1
| 0.037736
| false
| 0
| 0.056604
| 0
| 0.132075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2fcde4aa9f5097aba12efe70f82fa5e688a50a08
| 16,582
|
py
|
Python
|
apl104lib.py
|
maikeriva/APL104
|
fe29a89e8965811313df6d06de8f878b10e7987e
|
[
"MIT"
] | 2
|
2020-01-10T06:52:02.000Z
|
2020-01-14T19:44:47.000Z
|
apl104lib.py
|
maikeriva/APL104
|
fe29a89e8965811313df6d06de8f878b10e7987e
|
[
"MIT"
] | null | null | null |
apl104lib.py
|
maikeriva/APL104
|
fe29a89e8965811313df6d06de8f878b10e7987e
|
[
"MIT"
] | null | null | null |
"""
apl104lib.py
Python library implementation of:
Interdiffusion across solid electrolyte-electrode interface
Applied Physics Letters 104 (2014)
Tested on:
- python 3.6.5
- numpy 1.14.2
- matplotlib 2.2.2
"""
import numpy as np
import matplotlib.pyplot as plt
import time
"""
1D model
"""
class Sample1D:
def __init__(sample,species=1,dim=200,res=64):
    """Initialise a 1D two-phase interdiffusion sample.

    NOTE: this class uses `sample` in place of the conventional `self`.

    species: number of chemical species
    dim: physical sample dimension (nm)
    res: number of grid points (harmonic resolution of the domain)
    """
    sample.species=species # Number of chemical species
    sample.dim=dim # Sample dimension (nm)
    sample.res=res # Harmonic resolution of the domain
    sample.z=np.zeros(species) # Ionic valence vector
    sample.c1_bulk=np.zeros(species) # Bulk concentrations vector (material 1)
    sample.c2_bulk=np.zeros(species) # Bulk concentrations vector (material 2)
    sample.D1=np.zeros(species) # Diffusion coefficients vector (material 1, nm²/s)
    sample.D2=np.zeros(species) # Diffusion coefficients vector (material 2, nm²/s)
    sample.L=0 # Phase evolution coefficient (nm³/(J*s))
    sample.W0=0 # Phase gradient coefficient (J/nm)
    sample.fc=0 # Free energy coefficient (J/(nm^3))
    sample.c=np.zeros((res,species)) # Concentrations domain (res x species)
    sample.p=np.zeros(res) # Phase domain (0..1 order parameter per grid point)
    # Plotting parameters (filled in by the user before plotting)
    sample.name=' '
    sample.specienames=[' ' for specie in range(sample.species)]
def h(sample):
    # Smoothstep-like interpolation of the phase field p: h(0)=0, h(1)=1,
    # with vanishing derivatives at both ends (standard phase-field form).
    return sample.p**3*(6*sample.p**2-15*sample.p+10)
def D(sample):
    # Diffusion matrix field: interpolate between the two materials'
    # scaled diffusion matrices (D1mat, D2mat — set by update()) using h(p).
    return sample.h()[...,np.newaxis,np.newaxis]*sample.D1mat+(1-sample.h())[...,np.newaxis,np.newaxis]*sample.D2mat
def f(sample):
    # Free energy density: quadratic wells around each material's bulk
    # composition (weighted by h(p)) plus a double-well barrier in p.
    # The last species is excluded (it is determined by the others).
    return np.sum((sample.c[...,0:-1]-sample.c1_bulk[0:-1])**2,axis=-1)*sample.h()+\
        np.sum((sample.c[...,0:-1]-sample.c2_bulk[0:-1])**2,axis=-1)*(1-sample.h())+\
        2*(sample.p**4-2*sample.p**3+sample.p**2)
def dfdp(sample):
    # Derivative of the free energy density f with respect to the phase p:
    # h'(p) times the difference of the two composition wells, plus the
    # derivative of the double-well barrier.
    return (30*sample.p**4-60*sample.p**3+30*sample.p**2)*\
        (np.sum((sample.c[...,0:-1]-sample.c1_bulk[0:-1])**2,axis=-1)-\
        np.sum((sample.c[...,0:-1]-sample.c2_bulk[0:-1])**2,axis=-1))+\
        (8*sample.p**3-12*sample.p**2+4*sample.p)
def update(sample):
# Update diffusion matrices. Call after editing the sample.
sample.D1mat=(sample.res/sample.dim)**2*(np.diag(sample.D1[0:-1])-(sample.z[0:-1]*sample.c1_bulk[0:-1]*sample.D1[0:-1])[:,np.newaxis]*\
(sample.z[0:-1]*(sample.D1[0:-1]-sample.D1[-1]))[np.newaxis,:]/np.sum(sample.z**2*sample.c1_bulk*sample.D1))
sample.D2mat=(sample.res/sample.dim)**2*(np.diag(sample.D2[0:-1])-(sample.z[0:-1]*sample.c2_bulk[0:-1]*sample.D2[0:-1])[:,np.newaxis]*\
(sample.z[0:-1]*(sample.D2[0:-1]-sample.D2[-1]))[np.newaxis,:]/np.sum(sample.z**2*sample.c2_bulk*sample.D2))
###
# Finite-difference explicit Euler FW solver
###
def efwsolve(sample,dt,steps):
# Prepare scaled coefficients
sample.L_s=sample.L*(sample.res/sample.dim)**3 # nm³/(J*s)
sample.W0_s=sample.W0*(sample.dim/sample.res) # J/(nm)
sample.fc_s=sample.fc*(sample.dim/sample.res)**3 # J/(nm³)
# Update sample internal data
sample.update()
# Performance improvement
matmuldest=np.ndarray((sample.res,sample.species-1,1))
# Euler forward explicit algorithm
for step in range(steps):
sample.c[...,0:-1]=sample.c[...,0:-1]+dt*np.gradient(np.matmul(sample.D(),np.gradient(sample.c[...,0:-1],axis=0)[...,np.newaxis],matmuldest)[...,0],axis=0)
sample.p=sample.p+dt*sample.L_s*(sample.W0_s*np.gradient(np.gradient(sample.p))-sample.fc_s*sample.dfdp())
# Compute last species once cycle has finished
sample.c[...,-1]=1-np.sum(sample.c[...,0:-1],axis=-1)
###
# Spectral semi-implicit Fourier solver with real numbers optimization
###
def rfftsolve(sample,dt,steps,log=False):
# Prepare scaled coefficients
sample.L_s=sample.L*(sample.res/sample.dim)**3 # nm³/(J*s)
sample.W0_s=sample.W0*(sample.dim/sample.res) # J/(nm)
sample.fc_s=sample.fc*(sample.dim/sample.res)**3 # J/(nm³)
# Update sample internal data
sample.update()
# Performance improvement
matmuldest=np.ndarray((sample.res,sample.species-1,1))
# Prepare fourier coefficients for real FFT
kx=np.linspace(0,sample.res/2,sample.res/2+1)*2*np.pi/sample.res
k=np.sqrt(kx**2)
if log:
# Prepare arrays for storage
clog=np.zeros((steps,sample.res,sample.species))
plog=np.zeros((steps,sample.res))
# Prepare first FFT
c_fft=np.fft.rfft(sample.c[...,0:-1],axis=0)
p_fft=np.fft.rfft(sample.p)
# Solving cycle
for step in range(steps):
# Phase evolution equation in fourier space
p_fft=(p_fft-dt*sample.L_s*sample.fc_s*np.fft.rfft(sample.dfdp()))/(1+dt*sample.L_s*sample.W0_s*k**2)
sample.p=np.fft.irfft(p_fft)
# Concentration evolution equation in fourier space
c_fft=c_fft+dt*1j*k[...,np.newaxis]*np.fft.rfft(np.matmul(sample.D(),np.fft.irfft(1j*k[...,np.newaxis]*c_fft,axis=0)[...,np.newaxis],matmuldest)[...,0],axis=0)
sample.c[...,0:-1]=np.fft.irfft(c_fft,axis=0)
# Compute last specie
sample.c[...,-1]=1-np.sum(sample.c[...,0:-1],axis=-1)
# Store logs
clog[step]=sample.c
plog[step]=sample.p
# Return logs
return clog,plog
else:
# Prepare first FFT
c_fft=np.fft.rfft(sample.c[...,0:-1],axis=0)
p_fft=np.fft.rfft(sample.p)
# Solving cycle
for step in range(steps):
# Phase evolution equation in fourier space
p_fft=(p_fft-dt*sample.L_s*sample.fc_s*np.fft.rfft(sample.dfdp()))/(1+dt*sample.L_s*sample.W0_s*k**2)
sample.p=np.fft.irfft(p_fft)
# Concentration evolution equation in fourier space
c_fft=c_fft+dt*1j*k[...,np.newaxis]*np.fft.rfft(np.matmul(sample.D(),np.fft.irfft(1j*k[...,np.newaxis]*c_fft,axis=0)[...,np.newaxis],matmuldest)[...,0],axis=0)
sample.c[...,0:-1]=np.fft.irfft(c_fft,axis=0)
# Compute last species once cycle has finished
sample.c[...,-1]=1-np.sum(sample.c[...,0:-1],axis=-1)
"""
Interface evaluation
"""
def cifeval(sample, tol=1e-3):
    """Locate the concentration interface within the first half of the domain.

    A grid point is "bulk" when its concentration is within ``tol`` of the
    corresponding bulk value. Returns ``(length, start, end)`` per species,
    in nm.
    """
    half = int(sample.res/2)
    nm_per_cell = sample.dim/sample.res
    front = sample.c[0:half]
    near_bulk1 = np.abs(front-sample.c1_bulk) < tol
    near_bulk2 = np.abs(np.flip(front, axis=0)-sample.c2_bulk) < tol
    ifstart = np.argmin(near_bulk1, axis=0)*nm_per_cell
    ifend = (sample.res/2-np.argmin(near_bulk2, axis=0))*nm_per_cell
    return ifend-ifstart, ifstart, ifend
def pifeval(sample, tol=1e-3):
    """Locate the phase interface within the first half of the domain.

    Points with ``p`` within ``tol`` of 1 (resp. 0) count as bulk material 1
    (resp. 2). Returns ``(length, start, end)`` in nm.
    """
    half = int(sample.res/2)
    nm_per_cell = sample.dim/sample.res
    front = sample.p[0:half]
    in_phase1 = np.abs(front-1) < tol
    in_phase2 = np.abs(np.flip(front, axis=0)-0) < tol
    ifstart = np.argmin(in_phase1, axis=0)*nm_per_cell
    ifend = (sample.res/2-np.argmin(in_phase2, axis=0))*nm_per_cell
    return ifend-ifstart, ifstart, ifend
def cifevallog(log, sample, tol=1e-3):
    """Concentration interface positions for every step of a logged run.

    ``log`` is the (steps, res, species) array returned by a solver with
    log=True; axis 1 is the spatial axis. Returns ``(length, start, end)``
    per step and species, in nm.
    """
    half = int(sample.res/2)
    nm_per_cell = sample.dim/sample.res
    front = log[:, 0:half, :]
    near_bulk1 = np.abs(front-sample.c1_bulk) < tol
    near_bulk2 = np.abs(np.flip(front, axis=1)-sample.c2_bulk) < tol
    ifstart = np.argmin(near_bulk1, axis=1)*nm_per_cell
    ifend = (sample.res/2-np.argmin(near_bulk2, axis=1))*nm_per_cell
    return ifend-ifstart, ifstart, ifend
def pifevallog(log, sample, tol=1e-3):
    """Phase interface positions for every step of a logged run.

    ``log`` is the (steps, res) phase array returned by a solver with
    log=True; axis 1 is the spatial axis. Returns ``(length, start, end)``
    per step, in nm.
    """
    half = int(sample.res/2)
    nm_per_cell = sample.dim/sample.res
    front = log[:, 0:half]
    in_phase1 = np.abs(front-1) < tol
    in_phase2 = np.abs(np.flip(front, axis=1)-0) < tol
    ifstart = np.argmin(in_phase1, axis=1)*nm_per_cell
    ifend = (sample.res/2-np.argmin(in_phase2, axis=1))*nm_per_cell
    return ifend-ifstart, ifstart, ifend
###
# Plotting functions
###
def summaryplot(sample, interface=True, ifspecie=0, iftol=1e-3, save=False, filename="summaryplot.pdf"):
    """Overview figure: phase profile (dashed, left axis) and concentration
    profiles (solid, right axis) over the sample coordinate.

    interface: overlay dotted vertical lines at the interface bounds of
        species ``ifspecie`` found by ``cifeval`` with tolerance ``iftol``.
    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    fig, ax1 = plt.subplots()
    # Phase on the left axis (dashed line).
    ax1.plot(np.linspace(0, sample.dim, sample.res), sample.p, linestyle='dashed')
    ax1.set_ylim([-0.1, 1.1])
    ax1.set_title("Results overview")
    ax1.set_xlabel("Coordinate (nm)")
    ax1.set_ylabel("Phase (dashed line)")
    ax1.grid()
    # Concentrations share the x axis on a twin y axis (solid lines).
    ax2 = ax1.twinx()
    ax2.plot(np.linspace(0, sample.dim, sample.res), sample.c)
    ax2.set_ylim([-0.1, 1.1])
    ax2.set_ylabel("Molar concentration (solid line)")
    ax2.legend(sample.specienames)
    if interface:
        iflength, ifstart, ifend = cifeval(sample, iftol)
        # Also mark the mirrored interface positions (dim - end, dim - start)
        # on the other side of the domain.
        ax2.vlines([ifstart[ifspecie], ifend[ifspecie], sample.dim-ifend[ifspecie], sample.dim-ifstart[ifspecie]], 0, 1, linestyle='dotted')
    if save:
        fig.savefig(filename)
    return fig
def pplot(sample, interface=True, iftol=1e-3, save=False, filename="pplot.pdf"):
    """Plot the phase profile of a 1D sample.

    interface: overlay dotted vertical lines at the phase-interface bounds
        found by ``pifeval`` with tolerance ``iftol``.
    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    fig = plt.figure()
    plt.plot(np.linspace(0, sample.dim, sample.res), sample.p)
    plt.ylim([-0.1, 1.1])
    plt.grid()
    plt.title("Phase profile")
    plt.xlabel("Coordinate (nm)")
    if interface:
        iflength, ifstart, ifend = pifeval(sample, iftol)
        # Second pair of lines marks the interface half a domain away.
        plt.vlines([ifstart, ifend, ifstart+sample.dim/2, ifend+sample.dim/2], 0, 1, linestyle='dotted')
    if save:
        fig.savefig(filename)
    return fig
def cplot(sample, interface=True, ifspecie=0, iftol=1e-3, save=False, filename="cplot.pdf"):
    """Plot the molar concentration profiles of all species (1D sample).

    interface: overlay dotted vertical lines at the interface bounds of
        species ``ifspecie`` found by ``cifeval`` with tolerance ``iftol``.
    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    fig = plt.figure()
    plt.plot(np.linspace(0, sample.dim, sample.res), sample.c)
    plt.ylim([-0.1, 1.1])
    plt.grid()
    plt.title("Concentration profiles")
    plt.xlabel("Coordinate (nm)")
    plt.ylabel("Molar concentration")
    plt.legend(sample.specienames)
    if interface:
        # Bug fix: cifeval takes (sample, tol); passing ifspecie as a third
        # positional argument raised TypeError. The species is selected below.
        iflength, ifstart, ifend = cifeval(sample, iftol)
        plt.vlines([ifstart[..., ifspecie], ifend[..., ifspecie],
                    ifstart[..., ifspecie]+sample.dim/2, ifend[..., ifspecie]+sample.dim/2],
                   0, 1, linestyle='dotted')
    if save:
        fig.savefig(filename)
    return fig
def fplot(sample, interface=True, ifspecie=0, iftol=1e-3, save=False, filename="fplot.pdf"):
    """Plot the free-energy density ``sample.f()`` over the sample coordinate.

    interface: overlay dotted vertical lines at the interface bounds of
        species ``ifspecie`` found by ``cifeval`` with tolerance ``iftol``.
    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    fig = plt.figure()
    plt.plot(np.linspace(0, sample.dim, sample.res), sample.f())
    plt.grid()
    plt.title("f plot")
    plt.xlabel("Coordinate (nm)")
    plt.ylabel("Energy (J/nm^3)")
    if interface:
        # Bug fix: cifeval takes (sample, tol); passing ifspecie as a third
        # positional argument raised TypeError. The species is selected below.
        iflength, ifstart, ifend = cifeval(sample, iftol)
        plt.vlines([ifstart[..., ifspecie], ifend[..., ifspecie],
                    ifstart[..., ifspecie]+sample.dim/2, ifend[..., ifspecie]+sample.dim/2],
                   0, 1, linestyle='dotted')
    if save:
        fig.savefig(filename)
    return fig
def dfdpplot(sample, interface=True, ifspecie=0, iftol=1e-3, save=False, filename="dfdpplot.pdf"):
    """Plot the free-energy derivative ``sample.dfdp()`` over the coordinate.

    interface: overlay dotted vertical lines at the interface bounds of
        species ``ifspecie`` found by ``cifeval`` with tolerance ``iftol``.
    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    fig = plt.figure()
    plt.plot(np.linspace(0, sample.dim, sample.res), sample.dfdp())
    plt.grid()
    plt.title("dfdp plot")
    plt.xlabel("Coordinate (nm)")
    plt.ylabel("Energy variation (J/nm^3)")
    if interface:
        # Bug fix: cifeval takes (sample, tol); passing ifspecie as a third
        # positional argument raised TypeError. The species is selected below.
        iflength, ifstart, ifend = cifeval(sample, iftol)
        plt.vlines([ifstart[..., ifspecie], ifend[..., ifspecie],
                    ifstart[..., ifspecie]+sample.dim/2, ifend[..., ifspecie]+sample.dim/2],
                   0, 1, linestyle='dotted')
    if save:
        fig.savefig(filename)
    return fig
def showplots():
    """Display all figures created so far (blocking call to ``plt.show()``)."""
    plt.show()
"""
2D model
"""
class Sample2D:
    """2D phase-field sample for interdiffusion across a solid interface.

    Same physics and parameters as Sample1D on a res x res grid, solved with
    a semi-implicit spectral scheme using the complex FFT (``fftsolve2D``).
    """
    def __init__(sample, species=1, dim=200, res=64):
        sample.species = species   # Number of chemical species
        sample.dim = dim           # Sample dimension (nm)
        sample.res = res           # Harmonic resolution of the domain
        sample.z = np.zeros(species)        # Ionic valence vector
        sample.c1_bulk = np.zeros(species)  # Bulk concentrations (material 1)
        sample.c2_bulk = np.zeros(species)  # Bulk concentrations (material 2)
        sample.D1 = np.zeros(species)       # Diffusion coefficients (material 1, nm^2/s)
        sample.D2 = np.zeros(species)       # Diffusion coefficients (material 2, nm^2/s)
        sample.L = 0    # Phase evolution coefficient, nm^3/(J*s)
        sample.W0 = 0   # Phase gradient coefficient, J/nm
        sample.fc = 0   # Free energy coefficient, J/(nm^3)
        sample.c = np.zeros([res, res, species])  # Concentrations domain
        sample.p = np.zeros([res, res])           # Phase domain
        # Plotting parameters
        sample.name = ' '
        sample.specienames = [' ' for specie in range(sample.species)]

    def h(sample):
        """Interpolation polynomial h(p) = p^3(6p^2 - 15p + 10); h(0)=0, h(1)=1."""
        return sample.p**3*(6*sample.p**2-15*sample.p+10)

    def D(sample):
        """Diffusion matrix field, interpolated between the two materials by h(p)."""
        return sample.h()[..., np.newaxis, np.newaxis]*sample.D1mat \
            + (1-sample.h())[..., np.newaxis, np.newaxis]*sample.D2mat

    def f(sample):
        """Dimensionless free-energy density: bulk deviation terms + double well."""
        return np.sum((sample.c[..., 0:-1]-sample.c1_bulk[0:-1])**2, axis=-1)*sample.h() \
            + np.sum((sample.c[..., 0:-1]-sample.c2_bulk[0:-1])**2, axis=-1)*(1-sample.h()) \
            + 2*(sample.p**4-2*sample.p**3+sample.p**2)

    def dfdp(sample):
        """Derivative of the free-energy density with respect to the phase field."""
        return (30*sample.p**4-60*sample.p**3+30*sample.p**2) \
            * (np.sum((sample.c[..., 0:-1]-sample.c1_bulk[0:-1])**2, axis=-1)
               - np.sum((sample.c[..., 0:-1]-sample.c2_bulk[0:-1])**2, axis=-1)) \
            + (8*sample.p**3-12*sample.p**2+4*sample.p)

    def update(sample):
        """Recompute the scaled diffusion matrices. Call after editing the sample."""
        sample.D1mat = (sample.res/sample.dim)**2*(np.diag(sample.D1[0:-1])
            - (sample.z[0:-1]*sample.c1_bulk[0:-1]*sample.D1[0:-1])[:, np.newaxis]
            * (sample.z[0:-1]*(sample.D1[0:-1]-sample.D1[-1]))[np.newaxis, :]
            / np.sum(sample.z**2*sample.c1_bulk*sample.D1))
        sample.D2mat = (sample.res/sample.dim)**2*(np.diag(sample.D2[0:-1])
            - (sample.z[0:-1]*sample.c2_bulk[0:-1]*sample.D2[0:-1])[:, np.newaxis]
            * (sample.z[0:-1]*(sample.D2[0:-1]-sample.D2[-1]))[np.newaxis, :]
            / np.sum(sample.z**2*sample.c2_bulk*sample.D2))

    ###
    # Spectral semi-implicit Fourier solver without real numbers optimization
    ###
    def fftsolve2D(sample, dt, steps, log=False):
        """Semi-implicit spectral solver for the 2D sample (complex FFT).

        dt: time step (s); steps: number of iterations.
        With log=True returns (clog, plog): per-step concentration and phase
        fields with a leading ``steps`` axis.

        NOTE(review): both the spectral gradient and divergence use the
        scalar wavenumber magnitude ``k`` — kept from the original; confirm
        against the reference paper.
        """
        # Coefficients rescaled to grid units.
        sample.L_s = sample.L*(sample.res/sample.dim)**3     # nm³/(J*s)
        sample.W0_s = sample.W0*(sample.dim/sample.res)      # J/(nm)
        sample.fc_s = sample.fc*(sample.dim/sample.res)**3   # J/(nm³)
        # Update sample internal data
        sample.update()
        # Preallocated np.matmul output buffer (avoids one allocation per step).
        matmuldest = np.empty((sample.res, sample.res, sample.species-1, 1), dtype=complex)
        # FFT wavenumbers in standard (positive then negative) order.
        # Bug fix: np.linspace requires an integer sample count; res/2 is a
        # float under Python 3 true division, so use res//2.
        kx = np.concatenate([np.linspace(0, sample.res/2, sample.res//2, False),
                             np.linspace(-sample.res/2, 0, sample.res//2, False)])*2*np.pi/sample.res
        ky = kx.copy()  # same grid spacing along both axes
        k = np.sqrt(kx[np.newaxis, :]**2+ky[:, np.newaxis]**2)
        if log:
            # Storage for the per-step logs.
            clog = np.zeros((steps, sample.res, sample.res, sample.species))
            plog = np.zeros((steps, sample.res, sample.res))
            # Prepare first FFT
            c_fft = np.fft.fftn(sample.c[..., 0:-1], axes=(0, 1))
            p_fft = np.fft.fftn(sample.p)
            for step in range(steps):
                # Phase evolution: implicit in the gradient term, explicit in dfdp.
                p_fft = (p_fft-dt*sample.L_s*sample.fc_s*np.fft.fftn(sample.dfdp())) \
                    / (1+dt*sample.L_s*sample.W0_s*k**2)
                sample.p = np.real(np.fft.ifftn(p_fft))
                # Concentration evolution equation in fourier space.
                c_fft = c_fft+dt*1j*k[..., np.newaxis]*np.fft.fftn(
                    np.matmul(sample.D(),
                              np.fft.ifftn(1j*k[..., np.newaxis]*c_fft, axes=(0, 1))[..., np.newaxis],
                              matmuldest)[..., 0], axes=(0, 1))
                # Bug fix: inverse-transform over the two spatial axes (0, 1);
                # the original axes=(0, -1) wrongly transformed the species axis.
                sample.c[..., 0:-1] = np.real(np.fft.ifftn(c_fft, axes=(0, 1)))
                # Last species closes the mass balance on every logged step.
                sample.c[..., -1] = 1-np.sum(sample.c[..., 0:-1], axis=-1)
                clog[step] = sample.c
                plog[step] = sample.p
            return clog, plog
        else:
            # Prepare first FFT
            c_fft = np.fft.fftn(sample.c[..., 0:-1], axes=(0, 1))
            p_fft = np.fft.fftn(sample.p)
            for step in range(steps):
                # Phase evolution equation in fourier space
                p_fft = (p_fft-dt*sample.L_s*sample.fc_s*np.fft.fftn(sample.dfdp())) \
                    / (1+dt*sample.L_s*sample.W0_s*k**2)
                sample.p = np.real(np.fft.ifftn(p_fft))
                # Concentration evolution equation in fourier space
                c_fft = c_fft+dt*1j*k[..., np.newaxis]*np.fft.fftn(
                    np.matmul(sample.D(),
                              np.fft.ifftn(1j*k[..., np.newaxis]*c_fft, axes=(0, 1))[..., np.newaxis],
                              matmuldest)[..., 0], axes=(0, 1))
                sample.c[..., 0:-1] = np.real(np.fft.ifftn(c_fft, axes=(0, 1)))
            # Last species closes the mass balance once the cycle has finished.
            sample.c[..., -1] = 1-np.sum(sample.c[..., 0:-1], axis=-1)
"""
2D contour plot functions
"""
def contourcplot2d(sample, specie=0, save=False, filename="contourcplot2d.pdf"):
    """Filled contour plot of the molar concentration of one species (2D sample).

    specie: index into ``sample.specienames`` / the last axis of ``sample.c``.
    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    # Bug fix: specienames is a list — index it instead of calling it.
    speciename = sample.specienames[specie]
    # Bug fix: keep the figure handle; ``fig`` was previously unbound, so
    # ``fig.savefig`` raised NameError when save=True.
    fig = plt.figure()
    plt.contourf(np.linspace(0, sample.dim, sample.res),
                 np.linspace(0, sample.dim, sample.res),
                 sample.c[..., specie])
    plt.colorbar()
    plt.title("Molar concentration of {}".format(speciename))
    plt.xlabel("Coordinate (nm)")
    plt.ylabel("Coordinate (nm)")
    if save:
        fig.savefig(filename)
    return fig
def contourpplot2d(sample, save=False, filename="contourpplot.pdf"):
    """Filled contour plot of the phase field of a 2D sample.

    save: also write the figure to ``filename``.
    Returns the matplotlib figure.
    """
    pfig = plt.figure()
    # Local renamed from ``pplot`` to avoid shadowing the sibling 1D
    # plotting function of the same name.
    contours = plt.contourf(np.linspace(0, sample.dim, sample.res),
                            np.linspace(0, sample.dim, sample.res),
                            sample.p)
    plt.colorbar(contours)
    plt.title("Phase")
    plt.xlabel("Coordinate (nm)")
    plt.ylabel("Coordinate (nm)")
    if save:
        # Bug fix: the figure is bound to ``pfig``; ``fig`` was undefined here.
        pfig.savefig(filename)
    return pfig
| 45.554945
| 180
| 0.622301
| 2,603
| 16,582
| 3.925086
| 0.095659
| 0.01429
| 0.024273
| 0.021141
| 0.833023
| 0.796418
| 0.77743
| 0.757169
| 0.725653
| 0.714985
| 0
| 0.040321
| 0.181884
| 16,582
| 363
| 181
| 45.680441
| 0.712811
| 0.131166
| 0
| 0.671875
| 0
| 0
| 0.0339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105469
| false
| 0
| 0.011719
| 0.03125
| 0.199219
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
64226494c732aeeceef7d12ba018e16839e8d97f
| 3,646
|
py
|
Python
|
old_code/street_cross_exp.py
|
pulkitag/egomotion
|
fad2ab94b0c2f5533c79a01d1b0546b8d0c64f19
|
[
"BSD-3-Clause"
] | 9
|
2017-11-25T14:24:23.000Z
|
2022-03-25T07:08:28.000Z
|
old_code/street_cross_exp.py
|
pulkitag/egomotion
|
fad2ab94b0c2f5533c79a01d1b0546b8d0c64f19
|
[
"BSD-3-Clause"
] | null | null | null |
old_code/street_cross_exp.py
|
pulkitag/egomotion
|
fad2ab94b0c2f5533c79a01d1b0546b8d0c64f19
|
[
"BSD-3-Clause"
] | 3
|
2017-10-13T02:30:28.000Z
|
2021-06-30T05:55:42.000Z
|
##Records/Performs the experiment which evaluate the pose model on the patch task and
## vice-versa.
import street_params as sp
import street_exp as se
import my_exp_ptch as mept
import my_exp_pose as mepo
import my_exp_v2 as mev2
def train_ptch_using_pose(isRun=False, deviceId=None):
    """Set up (and optionally run) patch-matching training initialized from a
    pose model (smallnetv2, pool4, crop 192, raw image size 256).

    isRun: when True, build the experiment and start training.
    deviceId: list of GPU ids; defaults to [0].
    Returns the configured experiment object.
    """
    # Bug fix: avoid a shared mutable default argument ([0]).
    if deviceId is None:
        deviceId = [0]
    ptPrms, ptCPrms = mept.smallnetv2_pool4_ptch_crp192_rawImSz256(
        isPythonLayer=True, lrAbove='common_fc', deviceId=deviceId)
    poPrms, poCPrms = mepo.smallnetv2_pool4_pose_euler_mx90_crp192_rawImSz256(
        isPythonLayer=True, extraFc=512)
    exp, modelFile = se.setup_experiment_from_previous(
        poPrms, poCPrms, ptPrms, ptCPrms, srcModelIter=60000)
    # Rename common_fc so that it is initialized randomly.
    exp.expFile_.netDef_.rename_layer('common_fc', 'common_fc_new')
    if isRun:
        exp.make(modelFile=modelFile)
        exp.run()
    return exp
def train_pose_using_ptch(isRun=False, deviceId=None):
    """Set up (and optionally run) pose training initialized from a
    patch-matching model (smallnetv5, fc5, crop 192, raw image size 256).

    isRun: when True, build the experiment and start training.
    deviceId: list of GPU ids; defaults to [0].
    Returns the configured experiment object.
    """
    # Bug fix: avoid a shared mutable default argument ([0]).
    if deviceId is None:
        deviceId = [0]
    poPrms, poCPrms = mepo.smallnetv5_fc5_pose_euler_mx90_crp192_rawImSz256(
        numFc5=512, lrAbove='common_fc', isPythonLayer=True, deviceId=deviceId)
    ptPrms, ptCPrms = mept.smallnetv5_fc5_ptch_crp192_rawImSz256(
        numFc5=512, isPythonLayer=True)
    exp, modelFile = se.setup_experiment_from_previous(
        ptPrms, ptCPrms, poPrms, poCPrms, srcModelIter=60000)
    # Rename common_fc so that it is initialized randomly.
    exp.expFile_.netDef_.rename_layer('common_fc', 'common_fc_new')
    if isRun:
        exp.make(modelFile=modelFile)
        exp.run()
    return exp
def train_ptch_using_pose_fc5(isRun=False, deviceId=None):
    """Set up (and optionally run) patch-matching training initialized from a
    pose model (smallnetv5 with fc5 layer, crop 192, raw image size 256).

    isRun: when True, build the experiment and start training.
    deviceId: list of GPU ids; defaults to [0].
    Returns the configured experiment object.
    """
    # Bug fix: avoid a shared mutable default argument ([0]).
    if deviceId is None:
        deviceId = [0]
    poPrms, poCPrms = mepo.smallnetv5_fc5_pose_euler_crp192_rawImSz256(
        numFc5=512, isPythonLayer=True)
    ptPrms, ptCPrms = mept.smallnetv5_fc5_ptch_crp192_rawImSz256(
        numFc5=512, isPythonLayer=True, lrAbove='common_fc', deviceId=deviceId)
    exp, modelFile = se.setup_experiment_from_previous(
        poPrms, poCPrms, ptPrms, ptCPrms, srcModelIter=60000)
    # Rename common_fc so that it is initialized randomly.
    exp.expFile_.netDef_.rename_layer('common_fc', 'common_fc_new')
    if isRun:
        exp.make(modelFile=modelFile)
        exp.run()
    return exp
def train_ptch_using_ptch_lt5(isRun=False, deviceId=None):
    """Set up (and optionally run) general patch-matching training initialized
    from a model trained only on near-frontal pairs (euler angles < 5).

    isRun: when True, build the experiment and start training.
    deviceId: list of GPU ids; defaults to [0].
    Returns the configured experiment object.
    """
    # Bug fix: avoid a shared mutable default argument ([0]).
    if deviceId is None:
        deviceId = [0]
    # The target experiment is to perform ptch matching on general angles.
    tgtPrms, tgtCPrms = mept.smallnetv5_fc5_ptch_crp192_rawImSz256(
        isPythonLayer=True, lrAbove='common_fc', deviceId=deviceId)
    # The source experiment is ptch matching with positives only from euler angles lt 5.
    srcPrms, srcCPrms = mept.smallnetv5_fc5_ptch_euler_mx5_crp192_rawImSz256(numFc5=512)
    exp, modelFile = se.setup_experiment_from_previous(
        srcPrms, srcCPrms, tgtPrms, tgtCPrms, srcModelIter=36000)
    # Rename common_fc so that it is initialized randomly.
    exp.expFile_.netDef_.rename_layer('common_fc', 'common_fc_new')
    if isRun:
        exp.make(modelFile=modelFile)
        exp.run()
    return exp
def train_ptch_using_ptch_lt5_pose_all(isRun=False, deviceId=None):
    """Set up (and optionally run) general patch-matching training initialized
    from a joint ptch(<5 deg)+pose(all angles) model.

    isRun: when True, build the experiment and start training.
    deviceId: list of GPU ids; defaults to [0].
    Returns the configured experiment object.
    """
    # Bug fix: avoid a shared mutable default argument ([0]).
    if deviceId is None:
        deviceId = [0]
    # The target experiment is to perform ptch matching on general angles.
    tgtPrms, tgtCPrms = mept.smallnetv5_fc5_ptch_crp192_rawImSz256(
        isPythonLayer=True, lrAbove='common_fc', deviceId=deviceId)
    # The source experiment is ptch matching with positives only from euler angles lt 5.
    srcPrms, srcCPrms = mev2.ptch_euler_mx5_pose_euler_smallnet_v5_fc5_exp1(numFc5=512)
    exp, modelFile = se.setup_experiment_from_previous(
        srcPrms, srcCPrms, tgtPrms, tgtCPrms, srcModelIter=36000)
    # Rename common_fc so that it is initialized randomly.
    exp.expFile_.netDef_.rename_layer('common_fc', 'common_fc_new')
    if isRun:
        exp.make(modelFile=modelFile)
        exp.run()
    return exp
| 45.012346
| 135
| 0.765222
| 500
| 3,646
| 5.312
| 0.196
| 0.060241
| 0.033886
| 0.035768
| 0.818901
| 0.791039
| 0.775226
| 0.748117
| 0.746988
| 0.746988
| 0
| 0.04731
| 0.153593
| 3,646
| 80
| 136
| 45.575
| 0.813351
| 0.176906
| 0
| 0.677419
| 0
| 0
| 0.051892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.080645
| 0
| 0.241935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ff5b4e0eba6b9304a2a09908cb2a3999b01dd80a
| 14,470
|
py
|
Python
|
koku/reporting/migrations/0103_azurecomputesummary_azurecostsummary_azurecostsummarybyaccount_azurecostsummarybylocation_azurecosts.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | 2
|
2022-01-12T03:42:39.000Z
|
2022-01-12T03:42:40.000Z
|
koku/reporting/migrations/0103_azurecomputesummary_azurecostsummary_azurecostsummarybyaccount_azurecostsummarybylocation_azurecosts.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | null | null | null |
koku/reporting/migrations/0103_azurecomputesummary_azurecostsummary_azurecostsummarybyaccount_azurecostsummarybylocation_azurecosts.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | 1
|
2021-07-21T09:33:59.000Z
|
2021-07-21T09:33:59.000Z
|
# Generated by Django 2.2.11 on 2020-03-25 18:52
import django.contrib.postgres.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Add Azure cost/usage summary models backed by materialized views.

    Every model below is declared with ``"managed": False`` because its
    storage is one of the PostgreSQL materialized views created by the
    RunSQL operation at the end of ``operations`` — not an ORM-managed
    table. All views aggregate ``reporting_azurecostentrylineitem_daily_summary``
    restricted to the current and previous month, and each gets a unique
    index so it can be refreshed CONCURRENTLY.
    """

    dependencies = [("reporting", "0102_auto_20200228_1812")]

    operations = [
        # Per-day, per-instance-type compute usage and cost.
        migrations.CreateModel(
            name="AzureComputeSummary",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("instance_type", models.CharField(max_length=50, null=True)),
                (
                    "instance_ids",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=256), null=True, size=None
                    ),
                ),
                ("instance_count", models.IntegerField(null=True)),
                ("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit_of_measure", models.CharField(max_length=63, null=True)),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_compute_summary", "managed": False},
        ),
        # Overall daily cost totals.
        migrations.CreateModel(
            name="AzureCostSummary",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_cost_summary", "managed": False},
        ),
        # Daily cost grouped by subscription.
        migrations.CreateModel(
            name="AzureCostSummaryByAccount",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("subscription_guid", models.CharField(max_length=50)),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_cost_summary_by_account", "managed": False},
        ),
        # Daily cost grouped by resource location.
        migrations.CreateModel(
            name="AzureCostSummaryByLocation",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("resource_location", models.CharField(max_length=50)),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_cost_summary_by_location", "managed": False},
        ),
        # Daily cost grouped by service name.
        migrations.CreateModel(
            name="AzureCostSummaryByService",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("service_name", models.TextField()),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_cost_summary_by_service", "managed": False},
        ),
        # Daily usage/cost for database services.
        migrations.CreateModel(
            name="AzureDatabaseSummary",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("service_name", models.TextField()),
                ("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit_of_measure", models.CharField(max_length=63, null=True)),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_database_summary", "managed": False},
        ),
        # Daily usage/cost for networking services.
        migrations.CreateModel(
            name="AzureNetworkSummary",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("service_name", models.TextField()),
                ("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit_of_measure", models.CharField(max_length=63, null=True)),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_network_summary", "managed": False},
        ),
        # Daily usage/cost for storage services.
        migrations.CreateModel(
            name="AzureStorageSummary",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateField()),
                ("usage_end", models.DateField()),
                ("service_name", models.TextField()),
                ("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit_of_measure", models.CharField(max_length=63, null=True)),
                ("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency", models.CharField(default="USD", max_length=10)),
            ],
            options={"db_table": "reporting_azure_storage_summary", "managed": False},
        ),
        # Create the materialized views that back the unmanaged models above.
        # NOTE(review): in the reporting_azure_database_summary view below,
        # the WHERE clause mixes OR and AND without parentheses; AND binds
        # tighter in SQL, so the date filter applies only to the ILIKE branch,
        # not to the IN branch — confirm this is intended.
        migrations.RunSQL(
            sql="""
CREATE MATERIALIZED VIEW reporting_azure_cost_summary AS(
    SELECT row_number() OVER(ORDER BY usage_start) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start
)
;
CREATE UNIQUE INDEX azure_cost_summary
ON reporting_azure_cost_summary (usage_start)
;
CREATE MATERIALIZED VIEW reporting_azure_cost_summary_by_account AS(
    SELECT row_number() OVER(ORDER BY usage_start, subscription_guid) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        subscription_guid,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start, subscription_guid
)
;
CREATE UNIQUE INDEX azure_cost_summary_account
ON reporting_azure_cost_summary_by_account (usage_start, subscription_guid)
;
CREATE MATERIALIZED VIEW reporting_azure_cost_summary_by_location AS(
    SELECT row_number() OVER(ORDER BY usage_start, resource_location) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        resource_location,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start, resource_location
)
;
CREATE UNIQUE INDEX azure_cost_summary_location
ON reporting_azure_cost_summary_by_location (usage_start, resource_location)
;
CREATE MATERIALIZED VIEW reporting_azure_cost_summary_by_service AS(
    SELECT row_number() OVER(ORDER BY usage_start, service_name) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        service_name,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start, service_name
)
;
CREATE UNIQUE INDEX azure_cost_summary_service
ON reporting_azure_cost_summary_by_service (usage_start, service_name)
;
CREATE MATERIALIZED VIEW reporting_azure_compute_summary AS(
    SELECT ROW_NUMBER() OVER(ORDER BY c.usage_start, c.instance_type) AS id,
        c.usage_start,
        c.usage_start as usage_end,
        c.instance_type,
        r.instance_ids,
        CARDINALITY(r.instance_ids) AS instance_count,
        c.usage_quantity,
        c.unit_of_measure,
        c.pretax_cost,
        c.markup_cost,
        c.currency
    FROM (
        -- this group by gets the counts
        SELECT usage_start,
            instance_type,
            SUM(usage_quantity) AS usage_quantity,
            MAX(unit_of_measure) AS unit_of_measure,
            SUM(pretax_cost) AS pretax_cost,
            SUM(markup_cost) AS markup_cost,
            MAX(currency) AS currency
        FROM reporting_azurecostentrylineitem_daily_summary
        WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
        AND instance_type IS NOT NULL
        GROUP
        BY usage_start,
            instance_type
    ) AS c
    JOIN (
        -- this group by gets the distinct resources running by day
        SELECT usage_start,
            instance_type,
            ARRAY_AGG(DISTINCT instance_id ORDER BY instance_id) as instance_ids
        FROM (
            SELECT usage_start,
                instance_type,
                UNNEST(instance_ids) AS instance_id
            FROM reporting_azurecostentrylineitem_daily_summary
            WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
            AND instance_type IS NOT NULL
        ) AS x
        GROUP
        BY usage_start,
            instance_type
    ) AS r
    ON c.usage_start = r.usage_start
    AND c.instance_type = r.instance_type
)
WITH DATA
;
CREATE UNIQUE INDEX azure_compute_summary
ON reporting_azure_compute_summary (usage_start, instance_type)
;
CREATE MATERIALIZED VIEW reporting_azure_storage_summary AS(
    SELECT row_number() OVER(ORDER BY usage_start, service_name) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        service_name,
        sum(usage_quantity) as usage_quantity,
        max(unit_of_measure) as unit_of_measure,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE service_name LIKE '%Storage%'
    AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start, service_name
)
;
CREATE UNIQUE INDEX azure_storage_summary
ON reporting_azure_storage_summary (usage_start, service_name)
;
CREATE MATERIALIZED VIEW reporting_azure_network_summary AS(
    SELECT row_number() OVER(ORDER BY usage_start, service_name) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        service_name,
        sum(usage_quantity) as usage_quantity,
        max(unit_of_measure) as unit_of_measure,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE service_name IN ('Virtual Network','VPN','DNS','Traffic Manager','ExpressRoute','Load Balancer','Application Gateway')
    AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start, service_name
)
;
CREATE UNIQUE INDEX azure_network_summary
ON reporting_azure_network_summary (usage_start, service_name)
;
CREATE MATERIALIZED VIEW reporting_azure_database_summary AS(
    SELECT row_number() OVER(ORDER BY usage_start, service_name) as id,
        usage_start as usage_start,
        usage_start as usage_end,
        service_name,
        sum(usage_quantity) as usage_quantity,
        max(unit_of_measure) as unit_of_measure,
        sum(pretax_cost) as pretax_cost,
        sum(markup_cost) as markup_cost,
        max(currency) as currency
    FROM reporting_azurecostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE service_name IN ('Cosmos DB','Cache for Redis') OR service_name ILIKE '%database%'
    AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
    GROUP BY usage_start, service_name
)
;
CREATE UNIQUE INDEX azure_database_summary
ON reporting_azure_database_summary (usage_start, service_name)
;
            """
        ),
    ]
| 43.716012
| 128
| 0.639737
| 1,662
| 14,470
| 5.291817
| 0.098075
| 0.079591
| 0.05685
| 0.070495
| 0.840819
| 0.780785
| 0.731666
| 0.720068
| 0.699375
| 0.684252
| 0
| 0.012479
| 0.257913
| 14,470
| 330
| 129
| 43.848485
| 0.806575
| 0.003179
| 0
| 0.61165
| 1
| 0.003236
| 0.593746
| 0.125087
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009709
| 0
| 0.019417
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ff767c05e0464633a3c3327ed0a5c1bfd230ded3
| 65
|
py
|
Python
|
graphpype/__init__.py
|
davidmeunier79/graphpype
|
800d1f8cbfdf3a18de77558c3b88eeb31735857e
|
[
"BSD-3-Clause"
] | 17
|
2017-12-26T18:51:43.000Z
|
2022-02-25T19:42:09.000Z
|
graphpype/__init__.py
|
davidmeunier79/graphpype
|
800d1f8cbfdf3a18de77558c3b88eeb31735857e
|
[
"BSD-3-Clause"
] | 44
|
2017-12-09T19:14:08.000Z
|
2021-08-17T14:42:48.000Z
|
graphpype/__init__.py
|
davidmeunier79/graphpype
|
800d1f8cbfdf3a18de77558c3b88eeb31735857e
|
[
"BSD-3-Clause"
] | 12
|
2017-05-28T20:38:27.000Z
|
2022-03-16T20:57:47.000Z
|
from . import pipelines # noqa
from . import interfaces # noqa
| 21.666667
| 32
| 0.723077
| 8
| 65
| 5.875
| 0.625
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215385
| 65
| 2
| 33
| 32.5
| 0.921569
| 0.138462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80ff3db18bef9a18287b2733b1c09bd2f373a218
| 13,287
|
py
|
Python
|
tests/containers/test_AudioContainer.py
|
gertdekkers/dcase_util
|
e5b80cc98b28facad2f3fff9acba126487b19879
|
[
"MIT"
] | null | null | null |
tests/containers/test_AudioContainer.py
|
gertdekkers/dcase_util
|
e5b80cc98b28facad2f3fff9acba126487b19879
|
[
"MIT"
] | null | null | null |
tests/containers/test_AudioContainer.py
|
gertdekkers/dcase_util
|
e5b80cc98b28facad2f3fff9acba126487b19879
|
[
"MIT"
] | null | null | null |
""" Unit tests for AudioContainer """
import nose.tools
import dcase_util
import os
import numpy
import tempfile
def test_load():
# Mono
audio = dcase_util.containers.AudioContainer(
filename=dcase_util.utils.Example.audio_filename()
).load(
mono=True
)
nose.tools.eq_(audio.fs, 44100)
nose.tools.eq_(len(audio.data.shape), 1)
nose.tools.eq_(audio.data.shape[0], 441001)
# Stereo
audio = dcase_util.containers.AudioContainer(
filename=dcase_util.utils.Example.audio_filename()
).load(
mono=False
)
nose.tools.eq_(audio.fs, 44100)
nose.tools.eq_(audio.data.shape[0], 2)
nose.tools.eq_(audio.data.shape[1], 441001)
# Re-sampling
audio = dcase_util.containers.AudioContainer(
filename=dcase_util.utils.Example.audio_filename()
).load(
fs=16000,
mono=True
)
nose.tools.eq_(audio.fs, 16000)
nose.tools.eq_(len(audio.data.shape), 1)
nose.tools.eq_(audio.data.shape[0], 160001)
# Segment
audio = dcase_util.containers.AudioContainer(
filename=dcase_util.utils.Example.audio_filename()
).load(
mono=True,
start=4.0,
stop=6.0
)
nose.tools.eq_(audio.fs, 44100)
nose.tools.eq_(len(audio.data.shape), 1)
nose.tools.eq_(audio.data.shape[0], 88200)
def test_load_youtube():
with dcase_util.utils.DisableLogger():
audio_container = dcase_util.containers.AudioContainer().load_from_youtube(
query_id='2ceUOv8A3FE',
start=1,
stop=5
)
nose.tools.eq_(audio_container.fs, 44100)
nose.tools.eq_(len(audio_container.data.shape), 2)
nose.tools.eq_(audio_container.streams, 2)
nose.tools.eq_(audio_container.shape, (2, 176400))
def test_container():
# Empty
a = dcase_util.utils.Example.audio_container()
if a:
pass
nose.tools.eq_(a.empty, False)
nose.tools.eq_(dcase_util.containers.AudioContainer().empty, True)
# Basic info
a = dcase_util.utils.Example.audio_container()
nose.tools.eq_(a.fs, 44100)
nose.tools.eq_(len(a.data.shape), 2)
nose.tools.eq_(a.data.shape, a.shape)
nose.tools.eq_(a.duration_ms, 2000)
nose.tools.eq_(a.duration_sec, 2)
nose.tools.eq_(a.duration_samples, 2*44100)
nose.tools.eq_(a.channels, 2)
# Focus #1.1
a = dcase_util.utils.Example.audio_container()
a.set_focus(start_seconds=0.5, stop_seconds=0.8)
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 13230))
# Focus #1.2
a = dcase_util.utils.Example.audio_container()
a.set_focus(start_seconds=0.5, duration_seconds=0.3)
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 13230))
# Focus #1.3
a = dcase_util.utils.Example.audio_container()
a.set_focus(start=0, duration=44100)
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 44100))
# Focus #1.4
a = dcase_util.utils.Example.audio_container()
a.set_focus(start=0, stop=44100)
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 44100))
# Focus #1.5
a = dcase_util.utils.Example.audio_container()
a.set_focus()
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 2*44100))
# Focus #2.1
a = dcase_util.utils.Example.audio_container()
a.focus_start_samples = int(0.2 * 44100)
a.focus_stop_samples = int(0.5 * 44100)
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 0.3 * 44100))
# Focus #2.2
a = dcase_util.utils.Example.audio_container()
a.focus_start_samples = 0.2 * 44100
a.focus_stop_samples = 0.5 * 44100
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 0.3 * 44100))
# Focus #2.3
a = dcase_util.utils.Example.audio_container()
a.focus_start_samples = 0.5 * 44100
a.focus_stop_samples = 0.2 * 44100
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 0.3 * 44100))
# Focus #2.4
a = dcase_util.utils.Example.audio_container()
a.focus_stop_samples = 0.2 * 44100
a.focus_start_samples = 0.5 * 44100
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 0.3 * 44100))
# Focus #2.5
a = dcase_util.utils.Example.audio_container()
a.focus_start_samples = 0.5 * 44100
a.focus_stop_samples = None
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 1.5 * 44100))
# Focus #2.6
a = dcase_util.utils.Example.audio_container()
a.focus_start_samples = None
a.focus_stop_samples = 0.5 * 44100
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 0.5 * 44100))
# Focus #2.7
a = dcase_util.utils.Example.audio_container()
a.focus_start_samples = 0
a.focus_stop_samples = 30 * 44100
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, a.length))
# Focus #3.1
a = dcase_util.utils.Example.audio_container()
a.focus_start_seconds = 1.0
a.focus_stop_seconds = 2.0
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, 1 * 44100))
# Focus #4.1
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 0
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 1)
nose.tools.eq_(a_focused.shape, (a.length, ))
numpy.testing.assert_array_almost_equal(a_focused, a.data[0, :])
# Focus #4.2
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 1
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 1)
nose.tools.eq_(a_focused.shape, (a.length, ))
numpy.testing.assert_array_almost_equal(a_focused, a.data[1, :])
# Focus #4.3
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 'left'
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 1)
nose.tools.eq_(a_focused.shape, (a.length, ))
numpy.testing.assert_array_almost_equal(a_focused, a.data[0, :])
# Focus #4.4
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 'right'
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 1)
nose.tools.eq_(a_focused.shape, (a.length, ))
numpy.testing.assert_array_almost_equal(a_focused, a.data[1, :])
# Focus #4.5
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 123
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 2)
nose.tools.eq_(a_focused.shape, (2, a.length))
# Focus #4.6
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 0
a_focused = a.get_focused()
nose.tools.eq_(len(a_focused.shape), 1)
nose.tools.eq_(a_focused.shape, (a.length, ))
# Channel average
a = dcase_util.utils.Example.audio_container()
a.mixdown()
nose.tools.eq_(a.channels, 1)
nose.tools.eq_(a.duration_ms, 2000)
nose.tools.eq_(a.duration_sec, 2)
nose.tools.eq_(a.duration_samples, 2*44100)
# Normalization
a = dcase_util.utils.Example.audio_container()
a.normalize(headroom=0.5)
nose.tools.eq_(a.duration_ms, 2000)
nose.tools.eq_(a.duration_sec, 2)
nose.tools.eq_(a.duration_samples, 2*44100)
nose.tools.assert_almost_equal(numpy.max(numpy.abs(a.data)), 0.66666661027952101, places=6)
# Normalization / Mono
a = dcase_util.utils.Example.audio_container().mixdown()
a.normalize(headroom=0.5)
nose.tools.eq_(a.duration_ms, 2000)
nose.tools.eq_(a.duration_sec, 2)
nose.tools.eq_(a.duration_samples, 2*44100)
nose.tools.assert_almost_equal(numpy.max(numpy.abs(a.data)), 0.63770331161958482, places=6)
# Re-sampling
a = dcase_util.utils.Example.audio_container()
a.resample(target_fs=16000)
nose.tools.eq_(a.fs, 16000)
nose.tools.eq_(a.duration_ms, 2000)
nose.tools.eq_(a.duration_sec, 2)
nose.tools.eq_(a.duration_samples, 2*16000)
# Select channel
# make_monophonic
a = dcase_util.utils.Example.audio_container()
x1 = a.data[0, :]
x2 = a.data[1, :]
a.mixdown()
nose.tools.eq_(a.fs, 44100)
nose.tools.eq_(len(a.data.shape), 1)
nose.tools.eq_(a.data.shape, a.shape)
nose.tools.eq_(a.duration_ms, 2000)
nose.tools.eq_(a.duration_sec, 2)
nose.tools.eq_(a.duration_samples, 2*44100)
nose.tools.eq_(a.channels, 1)
def test_save():
a_out = dcase_util.utils.Example.audio_container()
# 16 bit / wav
tmp = tempfile.NamedTemporaryFile('r+', suffix='.wav', prefix='16_', dir='/tmp', delete=False)
try:
a_out.save(filename=tmp.name, bit_depth=16)
a_in = dcase_util.containers.AudioContainer().load(filename=tmp.name)
nose.tools.eq_(a_out.shape, a_in.shape)
numpy.testing.assert_array_almost_equal(a_out.data, a_in.data, decimal=4)
finally:
os.unlink(tmp.name)
# 24 bit / wav
tmp = tempfile.NamedTemporaryFile('r+', suffix='.wav', prefix='24_', dir='/tmp', delete=False)
try:
a_out.save(filename=tmp.name, bit_depth=24)
a_in = dcase_util.containers.AudioContainer().load(filename=tmp.name)
nose.tools.eq_(a_out.shape, a_in.shape)
numpy.testing.assert_array_almost_equal(a_out.data, a_in.data, decimal=5)
finally:
os.unlink(tmp.name)
# 32 bit / wav
tmp = tempfile.NamedTemporaryFile('r+', suffix='.wav', prefix='32_', dir='/tmp', delete=False)
try:
a_out.save(filename=tmp.name, bit_depth=32)
a_in = dcase_util.containers.AudioContainer().load(filename=tmp.name)
nose.tools.eq_(a_out.shape, a_in.shape)
numpy.testing.assert_array_almost_equal(a_out.data, a_in.data, decimal=6)
finally:
os.unlink(tmp.name)
def test_log():
with dcase_util.utils.DisableLogger():
a = dcase_util.utils.Example.audio_container()
a.log()
a.filename = 'test.wav'
a.log()
a.focus_start_samples = 0.1
a.focus_stop_samples = 0.5
a.log()
a.focus_channel = 'left'
a.log()
def test_pad():
a = dcase_util.utils.Example.audio_container().mixdown()
a.pad(length_seconds=10)
nose.tools.eq_(a.duration_sec, 10)
a = dcase_util.utils.Example.audio_container()
a.pad(length_seconds=10)
nose.tools.eq_(a.duration_sec, 10)
a = dcase_util.utils.Example.audio_container_ch4()
a.pad(length_seconds=10)
nose.tools.eq_(a.duration_sec, 10)
def test_segments():
a = dcase_util.utils.Example.audio_container().mixdown()
segments, segment_meta = a.segments(segment_length=1000)
nose.tools.eq_(len(segments), 88)
nose.tools.eq_(len(segments), len(segment_meta))
segments, segment_meta = a.segments(segment_length_seconds=0.5)
nose.tools.eq_(len(segments), 3)
nose.tools.eq_(len(segments), len(segment_meta))
segments, segment_meta = a.segments(
segments=[
{'onset': 0.5, 'offset': 0.8}
]
)
nose.tools.eq_(len(segments), 1)
nose.tools.eq_(len(segments), len(segment_meta))
nose.tools.eq_(segment_meta[0]['onset'], 0.5)
nose.tools.eq_(segment_meta[0]['offset'], 0.8)
segments, segment_meta = a.segments(
segment_length_seconds=0.5,
skip_segments=[
{
'onset': 0.6,
'offset': 0.8
}
]
)
nose.tools.eq_(len(segments), 3)
nose.tools.eq_(len(segments), len(segment_meta))
nose.tools.eq_(segment_meta, [
{
'onset': 0.0,
'offset': 0.5
},
{
'onset': 0.8,
'offset': 1.3
},
{
'onset': 1.3,
'offset': 1.8
}
])
a = dcase_util.utils.Example.audio_container()
segments, segment_meta = a.segments(segment_length=1000)
nose.tools.eq_(len(segments), 88)
nose.tools.eq_(len(segments), len(segment_meta))
def test_frames():
a = dcase_util.utils.Example.audio_container().mixdown()
frames = a.frames(frame_length=1000, hop_length=1000)
nose.tools.eq_(frames.shape[0], 1000)
nose.tools.eq_(frames.shape[1], 88)
a = dcase_util.utils.Example.audio_container()
frames = a.frames(frame_length=1000, hop_length=1000)
nose.tools.eq_(frames.shape[0], 2)
nose.tools.eq_(frames.shape[1], 1000)
nose.tools.eq_(frames.shape[2], 88)
@nose.tools.raises(ValueError)
def test_focus_channel():
with dcase_util.utils.DisableLogger():
a = dcase_util.utils.Example.audio_container()
a.focus_channel = 'wrong'
@nose.tools.raises(IOError)
def test_load_error():
with dcase_util.utils.DisableLogger():
dcase_util.containers.AudioContainer(
filename='Test.test'
).load()
| 31.117096
| 98
| 0.656657
| 2,001
| 13,287
| 4.131434
| 0.072964
| 0.123019
| 0.143704
| 0.075481
| 0.874803
| 0.841055
| 0.803919
| 0.770412
| 0.738357
| 0.70473
| 0
| 0.055472
| 0.198164
| 13,287
| 426
| 99
| 31.190141
| 0.720481
| 0.03033
| 0
| 0.599379
| 0
| 0
| 0.011774
| 0
| 0
| 0
| 0
| 0
| 0.02795
| 1
| 0.031056
| false
| 0.003106
| 0.015528
| 0
| 0.046584
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
441bf6f5a1be97369c1e6f93074df179605f6411
| 39
|
py
|
Python
|
mizarlabs/transformers/__init__.py
|
MizarAI/mizar-labs
|
c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37
|
[
"MIT"
] | 18
|
2021-03-19T15:41:43.000Z
|
2022-03-20T14:23:07.000Z
|
mizarlabs/transformers/__init__.py
|
MizarAI/mizar-labs
|
c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37
|
[
"MIT"
] | 14
|
2021-03-17T14:16:02.000Z
|
2021-05-31T16:51:12.000Z
|
mizarlabs/transformers/__init__.py
|
MizarAI/mizar-labs
|
c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37
|
[
"MIT"
] | 3
|
2021-07-02T21:38:06.000Z
|
2022-01-10T09:56:18.000Z
|
from .utils import IdentityTransformer
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
443d208ce4e7d48d2dc3609083791d96721ec308
| 13,452
|
py
|
Python
|
tests/test_setup.py
|
nickzuber/add-reason
|
c0f902bc59a6b0eca044750d4a1c99c48754971f
|
[
"MIT"
] | 169
|
2017-12-18T21:48:01.000Z
|
2020-05-25T01:19:08.000Z
|
tests/test_setup.py
|
nickzuber/add-reason
|
c0f902bc59a6b0eca044750d4a1c99c48754971f
|
[
"MIT"
] | 7
|
2017-12-30T05:43:42.000Z
|
2018-02-23T02:49:14.000Z
|
tests/test_setup.py
|
nickzuber/add-reason
|
c0f902bc59a6b0eca044750d4a1c99c48754971f
|
[
"MIT"
] | 2
|
2018-02-08T16:25:17.000Z
|
2018-02-08T23:24:40.000Z
|
import os
import subprocess
from tests.framework.base_command_test_case import BaseCommandTestCase
from tests.framework.context_manager import cd
class TestInit(BaseCommandTestCase):
# This is a portion of the postinstall script we generate that should always be there only once, since
# our script should never write itself more than once.
should_exist_once_in_postinstall = "fs=require('fs');if(fs.existsSync(d)===false){fs.symlinkSync(s,d,'dir')}"
should_exist_once_in_build = "bsb -make-world"
name = "reason-package"
directory = "src/myCode"
def test_steps_pass(self):
with cd('./tests/root_for_testing'):
result = self.call("node", "../../index.js", "setup", self.directory, self.name)
self.assertTrue(result, 'Standard setup call did not pass successfully.')
def test_steps_pass_no_linking_flag(self):
with cd('./tests/root_for_testing'):
result = self.call("node", "../../index.js", "setup", self.directory, self.name, "--no-linking")
self.assertTrue(result, 'Standard --no-linking setup call did not pass successfully.')
def test_steps_pass_in_source_flag(self):
with cd('./tests/root_for_testing'):
result = self.call("node", "../../index.js", "setup", self.directory, self.name, "--in-source")
self.assertTrue(result, 'Standard --in-source setup call did not pass successfully.')
def test_bsconfig_was_created(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
exists = os.path.isfile('bsconfig.json')
self.assertTrue(exists, 'bsconfig.json was never created')
def test_merlin_was_created(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
exists = os.path.isfile('.merlin')
self.assertTrue(exists, '.merlin was never created')
### postinstall testing
def test_postinstall_was_correctly_added_to_package_file_with_no_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.no_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_empty_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.empty_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_other_script(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_script.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_other_postinstall(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_postinstall.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertIn(' && ', postinstall_script, 'Our postinstall script was probably not integrated correctly with an existing postinstall')
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_no_scripts_called_twice(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
### build command testing
def test_build_command_was_correctly_added_to_package_file_with_no_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.no_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our build script')
def test_build_command_was_correctly_added_to_package_file_with_empty_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.empty_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our postinstall script')
def test_build_command_was_correctly_added_to_package_file_with_other_script(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_script.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our postinstall script')
def test_build_command_was_correctly_added_to_package_file_with_other_build_command(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_postinstall.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
self.assertIn(' && ', build_command, 'Our postinstall script was probably not integrated correctly with an existing postinstall')
self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our postinstall script')
def test_build_command_was_correctly_added_to_package_file_with_no_scripts_called_twice(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_not_added_to_package_file_with_in_source_flag(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name, "--in-source")
contents = self.read_json('package.json')
self.assertNotIn('postinstall', contents['scripts'], 'Postinstall was created when it should not have been')
def test_proper_bsconfig_file_generated_with_in_source_flag(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name, "--in-source")
contents = self.read_file('bsconfig.json')
self.assertIn('in-source', contents, 'Config file doesn\'t have `in-source` set to true')
def test_proper_bsconfig_file_generated_with(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_file('bsconfig.json')
self.assertNotIn('in-source', contents, 'Config file wrongly has `in-source` set to true')
def test_postinstall_was_not_added_to_package_file_with_no_linking_flag(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name, "--no-linking")
contents = self.read_json('package.json')
self.assertNotIn('postinstall', contents['scripts'], 'Postinstall was created when it should not have been')
def test_config_was_not_created_when_given_bad_target(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", 'some/bad/target', self.name)
existsMerlin = self.exists('.merlin')
existsBsconfig = self.exists('bsconfig.json')
self.assertFalse(existsMerlin, '.merlin file was created even though we gave a bad target')
self.assertFalse(existsBsconfig, 'bsconfig.json file was created even though we gave a bad target')
def test_config_was_not_created_when_given_bad_target_no_linking(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", 'some/bad/target', self.name, "--no-linking")
existsMerlin = self.exists('.merlin')
existsBsconfig = self.exists('bsconfig.json')
self.assertFalse(existsMerlin, '.merlin file was created even though we gave a bad target')
self.assertFalse(existsBsconfig, 'bsconfig.json file was created even though we gave a bad target')
def test_postinstall_was_not_created_when_given_bad_target(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_postinstall.json", "package.json")
contents = self.read_json('package.json')
postinstall_before = contents['scripts']['postinstall']
self.call("node", "../../index.js", "setup", 'some/bad/target', self.name)
contents = self.read_json('package.json')
postinstall_after = contents['scripts']['postinstall']
self.assertEqual(postinstall_before, postinstall_after, 'The postinstall script was altered even though we gave bad target')
def tearDown(self):
with cd('./tests/root_for_testing'):
self.call("rm", "-f", "bsconfig.json")
self.call("rm", "-f", ".merlin")
self.call("rm", "-rf", "lib")
self.call("rm", "-f", "node_modules/reason-package")
self.call("cp", "package.empty_scripts.json", "package.json")
| 64.363636
| 148
| 0.717291
| 1,842
| 13,452
| 5.053746
| 0.084691
| 0.056719
| 0.030938
| 0.043829
| 0.893329
| 0.88119
| 0.875604
| 0.854979
| 0.847567
| 0.838866
| 0
| 0.000868
| 0.143993
| 13,452
| 208
| 149
| 64.673077
| 0.807555
| 0.040366
| 0
| 0.676301
| 0
| 0.017341
| 0.397953
| 0.070952
| 0
| 0
| 0
| 0
| 0.265896
| 1
| 0.132948
| false
| 0.034682
| 0.023121
| 0
| 0.184971
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92726e39c726bfb77d4b6e053ffdca72cb2c6fb3
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/rope/base/oi/type_hinting/providers/numpydocstrings.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/rope/base/oi/type_hinting/providers/numpydocstrings.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/rope/base/oi/type_hinting/providers/numpydocstrings.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/04/24/99/6e383995c777d21346cf27bed1be350c26cd82b9bd2745c6a780c01b54
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.458333
| 0
| 96
| 1
| 96
| 96
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
929757bef39e04b4376186e065d1cfa8ca63a044
| 13,187
|
py
|
Python
|
tests/test_population.py
|
comic31/MongoDBQueriesManager
|
34d2a0bd73777dc12ee860bbd8929254bed48791
|
[
"MIT"
] | 2
|
2021-04-29T12:05:36.000Z
|
2021-07-15T08:42:40.000Z
|
tests/test_population.py
|
comic31/MongoDBQueriesManager
|
34d2a0bd73777dc12ee860bbd8929254bed48791
|
[
"MIT"
] | null | null | null |
tests/test_population.py
|
comic31/MongoDBQueriesManager
|
34d2a0bd73777dc12ee860bbd8929254bed48791
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
# Copyright (c) Modos Team, 2020
import pytest
from mongo_queries_manager import mqm, LogicalPopulationError, LogicalSubPopulationError
class TestPopulation:
def test_empty_population(self):
query_result = mqm(string_query="populate=", populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0, 'projection': None, 'population': []}
def test_simple_population(self):
query_result = mqm(string_query="populate=user", populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None, 'population': [{'path': 'user', 'projection': None}]}
def test_multi_population(self):
query_result = mqm(string_query="populate=user,settings", populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None, 'population': [{'path': 'user', 'projection': None},
{'path': 'settings', 'projection': None}]}
def test_multi_population_2(self):
query_result = mqm(string_query="populate=user,user.settings", populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None, 'population': [{'path': 'user', 'projection': None,
'population': [{
'path': 'settings',
'projection': None}]}]}
def test_multi_population_3(self):
query_result = mqm(string_query="populate=user,user.settings,user.comments,user.info", populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None, 'population': [{'path': 'user', 'projection': None,
'population': [{
'path': 'settings',
'projection': None}, {
'path': 'comments',
'projection': None},
{
'path': 'info',
'projection': None}
]}]}
def test_multi_population_4(self):
query_result = mqm(string_query="populate=user,user.settings,user.settings.info,user.settings.info.rates",
populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None,
'population': [
{'path': 'user',
'projection': None,
'population': [
{
'path': 'settings',
'projection': None,
'population': [
{
'path': 'info',
'projection': None,
'population': [
{
'path': 'rates',
'projection': None
}]
}]
}]
}
]}
def test_multi_population_5(self):
query_result = mqm(
string_query="populate=user,user.settings,user.settings.notifications,user.settings.configuration",
populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None, 'population': [
{'path': 'user',
'projection': None,
'population': [
{
'path': 'settings',
'projection': None,
'population': [
{
'path': 'notifications',
'projection': None
},
{
'path': 'configuration',
'projection': None
}
]
}
]}]}
def test_multi_population_6(self):
query_result = mqm(
string_query="populate=user,user.life,user.life.info,user.settings,user.settings.notifications",
populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': None, 'population': [
{'path': 'user',
'projection': None,
'population': [
{
'path': 'life',
'projection': None,
'population': [
{
'path': 'info',
'projection': None
}
]
},
{
'path': 'settings',
'projection': None,
'population': [
{
'path': 'notifications',
'projection': None
}
]
}
]}]}
def test_simple_population_with_projection(self):
query_result = mqm(string_query="fields=-created_at,-updated_at,hives.label&populate=hives", populate=True)
assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
'projection': {'created_at': 0, 'updated_at': 0},
'population': [{'path': 'hives', 'projection': {'label': 1}}]}
def test_multi_population_with_multi_projection(self):
    """Field selections are dispatched onto each matching populated path."""
    result = mqm(string_query="fields=-created_at,-updated_at,hives.label,hives._id,"
                              "data.temperature&populate=hives,data", populate=True)
    expected = {
        'filter': {},
        'sort': None,
        'skip': 0,
        'limit': 0,
        'projection': {'created_at': 0, 'updated_at': 0},
        'population': [
            {'path': 'hives', 'projection': {'label': 1, '_id': 1}},
            {'path': 'data', 'projection': {'temperature': 1}},
        ],
    }
    assert result == expected
def test_multi_population_with_multi_projection_2(self):
    # Nested exclusions (-service.created_at, -service.description.*) land
    # on the matching nested population entries; populated paths that get
    # no field selection keep projection None.
    query_result = mqm(string_query="fields=-created_at,-updated_at,-service.created_at,-service.updated_at,"
                                    "-service.description.created_at,-service."
                                    "description.updated_at&populate=service,service.description,service."
                                    "description.picture,animal,animal.info", populate=True)
    assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0,
                            'projection': {'created_at': 0, 'updated_at': 0},
                            'population': [{
                                'path': 'service',
                                'projection': {'created_at': 0, 'updated_at': 0},
                                'population': [{
                                    'path': 'description',
                                    'projection': {'created_at': 0, 'updated_at': 0},
                                    'population': [
                                        {'path': 'picture', 'projection': None}]
                                }]},
                                {
                                    'path': 'animal',
                                    'projection': None,
                                    'population': [
                                        {'path': 'info', 'projection': None}]
                                }]
                            }
def test_bad_population_logic(self):
    """A dotted populate path whose parent ('service') was never requested
    must raise LogicalPopulationError."""
    with pytest.raises(LogicalPopulationError) as excinfo:
        mqm(string_query="populate=service.description", populate=True)
    assert str(excinfo.value) == 'Fail to find logical population item'
def test_bad_population_logic_2(self):
    """A deep path is rejected when an intermediate parent
    ('service.description.toto') was never requested."""
    query = "populate=service,service.description,service.description.toto.titi"
    with pytest.raises(LogicalPopulationError) as excinfo:
        mqm(string_query=query, populate=True)
    assert str(excinfo.value) == 'Fail to find logical population item'
def test_bad_sub_population_logic(self):
    """A sub-path under a misspelled parent ('service.descriptions' vs
    'service.description') must raise LogicalSubPopulationError."""
    query = ("populate=service,service.description,"
             "service.description.info,service.descriptions.info")
    with pytest.raises(LogicalSubPopulationError) as excinfo:
        mqm(string_query=query, populate=True)
    assert str(excinfo.value) == 'Fail to find logical sub population item'
def test_bad_sub_population_logic_2(self):
    """Same misspelled-parent check, but deeper in the population tree."""
    query = ("populate=service,service.description,service.description.info,"
             "service.description.info,service.description.info.toto,"
             "service.descriptions.info.titi")
    with pytest.raises(LogicalSubPopulationError) as excinfo:
        mqm(string_query=query, populate=True)
    assert str(excinfo.value) == 'Fail to find logical sub population item'
def test_sub_population_alex(self):
    # Mixed populate list with a single nested exclusion applying only to
    # 'company'; every other populated path keeps projection None.
    query_result = mqm(string_query="populate=animal,crossbreed,crossbreed.crossbreeds,company,service,"
                                    "service.service_description,pet&fields=-company.settings.booking",
                       populate=True)
    assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0, 'projection': None,
                            'population': [{'path': 'animal', 'projection': None},
                                           {'path': 'crossbreed', 'projection': None, 'population': [
                                               {'path': 'crossbreeds', 'projection': None}]},
                                           {'path': 'company', 'projection': {'settings.booking': 0}},
                                           {'path': 'service', 'projection': None, 'population': [
                                               {'path': 'service_description', 'projection': None}]},
                                           {'path': 'pet', 'projection': None}]}
def test_sub_population_alex_2(self):
    # Same scenario as test_sub_population_alex but with two exclusions on
    # 'company'; both are merged into that entry's projection dict.
    query_result = mqm(string_query="populate=animal,crossbreed,crossbreed.crossbreeds,company,service,"
                                    "service.service_description,pet&fields=-company.settings.booking,"
                                    "-company.settings.toto",
                       populate=True)
    assert query_result == {'filter': {}, 'sort': None, 'skip': 0, 'limit': 0, 'projection': None,
                            'population': [{'path': 'animal', 'projection': None},
                                           {'path': 'crossbreed', 'projection': None, 'population': [
                                               {'path': 'crossbreeds', 'projection': None}]},
                                           {'path': 'company', 'projection': {'settings.booking': 0,
                                                                              'settings.toto': 0}},
                                           {'path': 'service', 'projection': None, 'population': [
                                               {'path': 'service_description', 'projection': None}]},
                                           {'path': 'pet', 'projection': None}]}
| 55.407563
| 120
| 0.401911
| 895
| 13,187
| 5.75419
| 0.105028
| 0.127767
| 0.116505
| 0.130485
| 0.866796
| 0.848544
| 0.80233
| 0.777476
| 0.732621
| 0.669515
| 0
| 0.008511
| 0.483203
| 13,187
| 237
| 121
| 55.64135
| 0.747175
| 0.005005
| 0
| 0.522843
| 0
| 0.010152
| 0.248818
| 0.099787
| 0
| 0
| 0
| 0
| 0.086294
| 1
| 0.086294
| false
| 0
| 0.010152
| 0
| 0.101523
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2bc83f7a6460e5b4c817a612aa26c80f1e0aa5f7
| 1,076
|
py
|
Python
|
rest_framework_extensions/decorators.py
|
maryokhin/drf-extensions
|
8223db2bdddaf3cd99f951b2291210c5fd5b0e6f
|
[
"MIT"
] | null | null | null |
rest_framework_extensions/decorators.py
|
maryokhin/drf-extensions
|
8223db2bdddaf3cd99f951b2291210c5fd5b0e6f
|
[
"MIT"
] | null | null | null |
rest_framework_extensions/decorators.py
|
maryokhin/drf-extensions
|
8223db2bdddaf3cd99f951b2291210c5fd5b0e6f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import warnings
def link(endpoint=None, is_for_list=False, **kwargs):
    """
    Mark a ViewSet method as routable for GET requests.

    Pending deprecation: use detail_route instead. The decorator attaches
    routing metadata as attributes on the wrapped function and returns the
    function unchanged.
    """
    msg = 'link is pending deprecation. Use detail_route instead.'
    warnings.warn(msg, PendingDeprecationWarning, stacklevel=2)

    def decorator(func):
        # Routing metadata read later by the router.
        func.endpoint = endpoint or func.__name__
        func.is_for_list = is_for_list
        func.kwargs = kwargs
        func.bind_to_methods = ['get']
        return func

    return decorator
def action(methods=None, endpoint=None, is_for_list=False, **kwargs):
    """
    Used to mark a method on a ViewSet that should be routed for POST requests.

    Pending deprecation: use detail_route instead.

    :param methods: HTTP methods to bind (defaults to ['post']).
    :param endpoint: route name; defaults to the function's __name__.
    :param is_for_list: whether the route applies to the list view.
    :param kwargs: extra options stored on the function for the router.
    """
    msg = 'action is pending deprecation. Use detail_route instead.'
    warnings.warn(msg, PendingDeprecationWarning, stacklevel=2)
    # BUGFIX: the original default `methods=['post']` was a shared mutable
    # list — mutating one decorated function's bind_to_methods mutated the
    # default for every later call. Use a None sentinel and build a fresh
    # list per call instead.
    if methods is None:
        methods = ['post']

    def decorator(func):
        func.bind_to_methods = methods
        func.kwargs = kwargs
        func.endpoint = endpoint or func.__name__
        func.is_for_list = is_for_list
        return func

    return decorator
| 29.888889
| 79
| 0.671004
| 141
| 1,076
| 4.93617
| 0.333333
| 0.043103
| 0.077586
| 0.048851
| 0.856322
| 0.856322
| 0.856322
| 0.856322
| 0.856322
| 0.856322
| 0
| 0.003663
| 0.238848
| 1,076
| 35
| 80
| 30.742857
| 0.846154
| 0.160781
| 0
| 0.666667
| 0
| 0
| 0.134328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.047619
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2bd6bebfa58de8e850328790a29e3c49bb9c5ebf
| 257
|
py
|
Python
|
container/pyf/schemas/classroom.py
|
Pompino/react-components-23KB
|
3201a417c5160e1b77f29fc1eac74ae9dc10d6ad
|
[
"MIT"
] | 2
|
2021-10-30T18:18:33.000Z
|
2021-12-01T10:21:28.000Z
|
container/pyf/schemas/classroom.py
|
Pompino/react-components-23KB
|
3201a417c5160e1b77f29fc1eac74ae9dc10d6ad
|
[
"MIT"
] | null | null | null |
container/pyf/schemas/classroom.py
|
Pompino/react-components-23KB
|
3201a417c5160e1b77f29fc1eac74ae9dc10d6ad
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel as BaseSchema
from simpleschemas import UserGetSimpleSchema, GroupGetSimpleSchema, EventGetSimpleSchema, ClassRoomGetSimpleSchema
class ClassRoomGetSchema(ClassRoomGetSimpleSchema):
    """Read schema for classrooms; inherits all fields from ClassRoomGetSimpleSchema."""
    class Config:
        # pydantic v1: allow building the schema from ORM model instances
        # (attribute access) rather than only from dicts.
        orm_mode = True
| 32.125
| 115
| 0.836576
| 21
| 257
| 10.190476
| 0.809524
| 0.271028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132296
| 257
| 8
| 116
| 32.125
| 0.959641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2be0f2e2264d868d4ffaee8e3626c464c7c8ebbf
| 217
|
py
|
Python
|
core/extensions/xtp/__init__.py
|
yunnant/kungfu
|
03dba19c922a5950068bd2d223488b8543ad8dd1
|
[
"Apache-2.0"
] | 2,209
|
2017-11-15T07:51:14.000Z
|
2021-01-19T03:16:48.000Z
|
core/extensions/xtp/__init__.py
|
yunnant/kungfu
|
03dba19c922a5950068bd2d223488b8543ad8dd1
|
[
"Apache-2.0"
] | 45
|
2017-11-16T04:38:51.000Z
|
2021-01-18T22:20:33.000Z
|
core/extensions/xtp/__init__.py
|
yunnant/kungfu
|
03dba19c922a5950068bd2d223488b8543ad8dd1
|
[
"Apache-2.0"
] | 889
|
2017-11-15T08:04:38.000Z
|
2021-01-16T12:41:25.000Z
|
# NOTE(review): template file — `${PROJECT_NAME}` is a placeholder that is
# substituted by the project generator; this module is not valid Python
# until that substitution happens.
from . import ${PROJECT_NAME} as ext
from extensions import EXTENSION_REGISTRY_MD, EXTENSION_REGISTRY_TD
# Register this extension's MD and TD entry points under the vendor key 'xtp'.
EXTENSION_REGISTRY_MD.register_extension('xtp', ext.MD)
EXTENSION_REGISTRY_TD.register_extension('xtp', ext.TD)
| 43.4
| 67
| 0.843318
| 31
| 217
| 5.548387
| 0.419355
| 0.395349
| 0.22093
| 0.244186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069124
| 217
| 4
| 68
| 54.25
| 0.851485
| 0
| 0
| 0
| 0
| 0
| 0.02765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
920fd84acb260e96a57d6c63a4c7b8e5cb0ed55a
| 17,682
|
py
|
Python
|
poda/segmentation/InceptionV4ResnetV2.py
|
gideonmanurung/poda
|
0a64cfa474f82acb891454141bc537d81bc77092
|
[
"MIT"
] | null | null | null |
poda/segmentation/InceptionV4ResnetV2.py
|
gideonmanurung/poda
|
0a64cfa474f82acb891454141bc537d81bc77092
|
[
"MIT"
] | 4
|
2020-09-26T01:08:59.000Z
|
2022-02-10T01:40:42.000Z
|
poda/segmentation/InceptionV4ResnetV2.py
|
gideonmanurung/poda
|
0a64cfa474f82acb891454141bc537d81bc77092
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from poda.layers.convolutional import *
class InceptionV4ResnetV2(object):
    # NOTE(review): this class mixes two different layer APIs — poda's
    # convolution_2d/max_pool_2d (used via self.conv_block) and an
    # undefined `new_conv2d_layer` helper — and appears to be mid-refactor.
    # Several methods reference names that are never defined in this file;
    # individual defects are flagged inline below.
    def __init__(self, is_training = True):
        """[summary]
        Arguments:
            num_classes {[type]} -- [description]
        Keyword Arguments:
            input_tensor {[type]} -- [description] (default: {None})
            input_shape {tuple} -- [description] (default: {(None, 300, 300, 3)})
            learning_rate {float} -- [description] (default: {0.0001})
            is_training {bool} -- [description] (default: {True})
        """
        # Flag forwarded to layer helpers that behave differently in
        # training vs. inference (e.g. dropout/batch-norm).
        self.is_training = is_training

    def conv_block(self,
                   inputs,
                   filters,
                   kernel_size,
                   strides=(2,2),
                   padding='VALID',
                   dropout_rate=0.2,
                   activation='relu',
                   batch_normalization=True):
        """[summary]
        Arguments:
            inputs {[type]} -- [description]
            filters {[type]} -- [description]
            kernel_size {[type]} -- [description]
        Keyword Arguments:
            strides {tuple} -- [description] (default: {(2,2)})
            padding {str} -- [description] (default: {'valid'})
            batch_normalization {bool} -- [description] (default: {True})
            dropout_rate {float} -- [description] (default: {0.15})
            activation {str} -- [description] (default: {'relu'})
            is_training {bool} -- [description] (default: {True})
        """
        conv = convolution_2d(input_tensor=inputs, number_filters=filters, kernel_sizes=kernel_size,
                              stride_sizes=strides, paddings=padding, activations=activation)
        # BUG(review): the boolean parameter `batch_normalization` shadows
        # the layer function of the same name (presumably star-imported from
        # poda.layers.convolutional — verify), so this line calls a bool and
        # will raise TypeError. The flag should be renamed (e.g.
        # use_batch_norm) so the layer function is reachable.
        conv = batch_normalization(input_tensor=conv, is_trainable=batch_normalization)
        conv = dropout(input_tensor=conv, dropout_rates=dropout_rate)
        return conv

    def stem_block(self, input_tensor, batch_normalization=True):
        """[summary]
        Arguments:
            input_tensor {[type]} -- [description]
        Returns:
            [type] -- [description]
        """
        # Stem of Inception-v4: two parallel downsampling branches whose
        # outputs are concatenated along the channel axis at each stage.
        conv_1 = self.conv_block(inputs=input_tensor, filters=32, kernel_size=(3,3), strides=(2, 2), batch_normalization=batch_normalization)
        conv_2 = self.conv_block(inputs=conv_1, filters=32, kernel_size=(3,3), strides=(1,1), batch_normalization=batch_normalization)
        conv_3 = self.conv_block(inputs=conv_2, filters=64, kernel_size=(3,3), padding='same', strides=(1, 1), batch_normalization=batch_normalization)
        conv_4 = self.conv_block(inputs=conv_3, filters=96, kernel_size=(3,3), strides=(2,2), batch_normalization=batch_normalization)
        max_pool_1 = max_pool_2d(input_tensor=conv_3, pool_sizes=(3,3), stride_sizes=(2,2))
        concat_1 = tf.concat([conv_4, max_pool_1], -1)
        conv_5 = self.conv_block(inputs=concat_1, filters=64, kernel_size=(3,3), strides=(1,1), batch_normalization=batch_normalization)
        conv_6 = self.conv_block(inputs=conv_5, filters=64, kernel_size=(7,1), padding='same', strides=(1, 1), batch_normalization=batch_normalization)
        conv_7 = self.conv_block(inputs=conv_6, filters=64, kernel_size=(1,7), padding='same', strides=(1, 1), batch_normalization=batch_normalization)
        conv_8 = self.conv_block(inputs=conv_7, filters=96, kernel_size=(3,3), padding='same', strides=(1, 1), batch_normalization=batch_normalization)
        conv_9 = self.conv_block(inputs=concat_1, filters=64, kernel_size=(1,1), strides=(1,1), batch_normalization=batch_normalization)
        conv_10 = self.conv_block(inputs=conv_9, filters=96, kernel_size=(3,3), strides=(1,1), batch_normalization=batch_normalization)
        concat_2 = tf.concat([conv_8, conv_10], -1)
        # NOTE(review): max_pool_2d is called with `input_tensor=` above but
        # `inputs=` here — one of the two keyword names is likely wrong;
        # confirm against poda's max_pool_2d signature.
        max_pool_2 = max_pool_2d(inputs=concat_2, pool_sizes=(3,3), stride_sizes=(2,2))
        conv_11 = self.conv_block(inputs=concat_2, filters=192, kernel_size=(3,3), strides=(2,2), batch_normalization=batch_normalization)
        concat_3 = tf.concat([max_pool_2, conv_11], -1)
        return concat_3

    def inception_resnet_a(self, input_tensor, drop_out=0.80, batch_normalization=True):
        """[summary]
        Arguments:
            inputs {[type]} -- [description]
        Keyword Arguments:
            drop_out {float} -- [description] (default: {0.85})
            activation {str} -- [description] (default: {'NONE'})
            is_training {bool} -- [description] (default: {True})
            use_bias {bool} -- [description] (default: {True})
            use_batchnorm {bool} -- [description] (default: {True})
        """
        conv_1 = self.conv_block(inputs=input_tensor, filters=64, kernel_size=(1,1) , strides=(1,1), batch_normalization=batch_normalization)
        conv_2 = self.conv_block(inputs=conv_1, filters=96, kernel_size=(3,3) , strides=(1,1), batch_normalization=batch_normalization)
        conv_3 = self.conv_block(inputs=conv_2, filters=96, kernel_size=(3,3) , strides=(1,1), batch_normalization=batch_normalization)
        conv_4 = self.conv_block(inputs=input_tensor, filters=64, kernel_size=(1,1) , strides=(1,1), batch_normalization=batch_normalization)
        conv_5 = self.conv_block(inputs=conv_4, filters=96, kernel_size=(3,3) , strides=(1,1), batch_normalization=batch_normalization)
        conv_6 = self.conv_block(inputs=input_tensor, filters=96, kernel_size=(1,1) , strides=(1,1), batch_normalization=batch_normalization)
        conv_7 = self.conv_block(inputs=conv_2, filters=96, kernel_size=(3,3) , strides=(1,1), batch_normalization=batch_normalization)
        # BUG(review): everything below this point belongs to an older
        # implementation — `conv_right1`, `inputs`, `activation`, `use_bias`,
        # `use_batchnorm` and `new_conv2d_layer` are not defined in this
        # method (or this module), so execution reaches a NameError here.
        # The method looks half-migrated from new_conv2d_layer to
        # self.conv_block; the dead tail should be removed or finished.
        input_depth = conv_right1.get_shape().as_list()[-1]
        conv_right2, _ = new_conv2d_layer(input=conv_right1,
                                          filter_shape=[3, 3, input_depth, 48],
                                          name='inres_a_conv_r2',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = conv_right2.get_shape().as_list()[-1]
        conv_right3, _ = new_conv2d_layer(input=conv_right2,
                                          filter_shape=[3, 3, input_depth, 32],
                                          name='inres_a_conv_r3',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = inputs.get_shape().as_list()[-1]
        conv_mid1, _ = new_conv2d_layer(input=inputs,
                                        filter_shape=[1, 1, input_depth, 32],
                                        name='inres_a_conv_m1',
                                        dropout_val=drop_out,
                                        activation=activation,
                                        is_training=self.is_training,
                                        use_bias=use_bias,
                                        use_batchnorm=use_batchnorm)
        input_depth = conv_mid1.get_shape().as_list()[-1]
        conv_mid2, _ = new_conv2d_layer(input=conv_mid1,
                                        filter_shape=[3, 3, input_depth, 32],
                                        name='inres_a_conv_m2',
                                        dropout_val=drop_out,
                                        activation=activation,
                                        is_training=self.is_training,
                                        use_bias=use_bias,
                                        use_batchnorm=use_batchnorm)
        input_depth = inputs.get_shape().as_list()[-1]
        conv_left1, _ = new_conv2d_layer(input=inputs,
                                         filter_shape=[1, 1, input_depth, 32],
                                         name='inres_a_conv_l1',
                                         dropout_val=drop_out,
                                         activation=activation,
                                         is_training=self.is_training,
                                         use_bias=use_bias,
                                         use_batchnorm=use_batchnorm)
        concat_conv = tf.concat([conv_right3, conv_mid2, conv_left1], -1)
        input_depth = concat_conv.get_shape().as_list()[-1]
        output_depth = inputs.get_shape().as_list()[-1]
        # 1x1 projection back to the input depth so the residual add works.
        conv_mixed, _ = new_conv2d_layer(input=concat_conv,
                                         filter_shape=[1, 1, input_depth, output_depth],
                                         name='inres_a_conv_concat',
                                         dropout_val=drop_out,
                                         activation=activation,
                                         is_training=self.is_training,
                                         use_bias=use_bias,
                                         use_batchnorm=use_batchnorm)
        final_conv = inputs + conv_mixed
        return final_conv
        #return tf.nn.leaky_relu(final_conv)

    # Reduction A
    def reduction_a(self, inputs,
                    drop_out=0.80,
                    activation='NONE',
                    use_bias=True,
                    use_batchnorm=True):
        """[summary]
        Arguments:
            inputs {[type]} -- [description]
        Keyword Arguments:
            drop_out {float} -- [description] (default: {0.85})
            activation {str} -- [description] (default: {'NONE'})
            is_training {bool} -- [description] (default: {True})
            use_bias {bool} -- [description] (default: {True})
            use_batchnorm {bool} -- [description] (default: {True})
        """
        # NOTE(review): `new_conv2d_layer` is not defined in this module and
        # is not an obvious export of poda.layers.convolutional — verify it
        # is provided by the star import, otherwise this raises NameError.
        input_depth = inputs.get_shape().as_list()[-1]
        conv_right1, _ = new_conv2d_layer(input=inputs,
                                          filter_shape=[1, 1, input_depth, 256],
                                          name='reduc_a_conv_r1',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = conv_right1.get_shape().as_list()[-1]
        conv_right2, _ = new_conv2d_layer(input=conv_right1,
                                          filter_shape=[3, 3, input_depth, 256],
                                          name='reduc_a_conv_r2',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = conv_right2.get_shape().as_list()[-1]
        conv_right3, _ = new_conv2d_layer(input=conv_right2,
                                          filter_shape=[3, 3, input_depth, 384],
                                          name='reduc_a_conv_r3',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          strides=[1, 2, 2, 1],
                                          padding='VALID',
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = inputs.get_shape().as_list()[-1]
        conv_mid1, _ = new_conv2d_layer(input=inputs,
                                        filter_shape=[3, 3, input_depth, 384],
                                        name='reduc_a_conv_m1',
                                        dropout_val=drop_out,
                                        activation=activation,
                                        strides=[1, 2, 2, 1],
                                        padding='VALID',
                                        is_training=self.is_training,
                                        use_bias=use_bias,
                                        use_batchnorm=use_batchnorm)
        # Disabled max-pool branch kept by the original author as a
        # triple-quoted string (effectively commented out).
        """
        max_pool = tf.nn.max_pool(value=inputs,
                                  ksize=[1, 3, 3, 1],
                                  strides=[1, 1, 1, 15],
                                  padding='VALID',
                                  name='reduc_a_conv_mp')
        """
        # NOTE(review): 'hellloooooo' is a leftover debug op name.
        return tf.concat([conv_right3, conv_mid1], -1, name='hellloooooo')

    # Inception ResNet B
    def inception_resnet_b(self, inputs,
                           drop_out=0.80,
                           activation='NONE',
                           use_bias=True,
                           use_batchnorm=True):
        """[summary]
        Arguments:
            inputs {[type]} -- [description]
        Keyword Arguments:
            drop_out {float} -- [description] (default: {0.85})
            activation {str} -- [description] (default: {'NONE'})
            is_training {bool} -- [description] (default: {True})
            use_bias {bool} -- [description] (default: {True})
            use_batchnorm {bool} -- [description] (default: {True})
        """
        # NOTE(review): op names below reuse the 'inres_a_*' prefix from the
        # A block — likely a copy/paste leftover; may collide in the graph.
        input_depth = inputs.get_shape().as_list()[-1]
        conv_right1, _ = new_conv2d_layer(input=inputs,
                                          filter_shape=[1, 1, input_depth, 128],
                                          name='inres_a_conv_r1',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = conv_right1.get_shape().as_list()[-1]
        conv_right2, _ = new_conv2d_layer(input=conv_right1,
                                          filter_shape=[1, 7, input_depth, 160],
                                          name='inres_a_conv_r2',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = conv_right2.get_shape().as_list()[-1]
        conv_right3, _ = new_conv2d_layer(input=conv_right2,
                                          filter_shape=[7, 1, input_depth, 192],
                                          name='inres_a_conv_r3',
                                          dropout_val=drop_out,
                                          activation=activation,
                                          is_training=self.is_training,
                                          use_bias=use_bias,
                                          use_batchnorm=use_batchnorm)
        input_depth = inputs.get_shape().as_list()[-1]
        conv_mid1, _ = new_conv2d_layer(input=inputs,
                                        filter_shape=[1, 1, input_depth, 192],
                                        name='inres_a_conv_m1',
                                        dropout_val=drop_out,
                                        activation=activation,
                                        is_training=self.is_training,
                                        use_bias=use_bias,
                                        use_batchnorm=use_batchnorm)
        concat_conv = tf.concat([conv_right3, conv_mid1], -1)
        input_depth = concat_conv.get_shape().as_list()[-1]
        output_depth = inputs.get_shape().as_list()[-1]
        # 1x1 projection back to the input depth so the residual add works.
        conv_mixed, _ = new_conv2d_layer(input=concat_conv,
                                         filter_shape=[1, 1, input_depth, output_depth],
                                         name='inres_a_conv_mx',
                                         dropout_val=drop_out,
                                         activation=activation,
                                         is_training=self.is_training,
                                         use_bias=use_bias,
                                         use_batchnorm=use_batchnorm)
        final_conv = inputs + conv_mixed
        return final_conv
        #return tf.nn.leaky_relu(final_conv)

    def create_base_model(self, input=None):
        """[summary]
        Arguments:
            classes {[type]} -- [description]
        Returns:
            [type] -- [description]
        """
        # NOTE(review): the parameter name `input` shadows the Python builtin;
        # the debug prints below look like leftovers from development.
        with tf.variable_scope("stem"):
            net = self.stem_block(input)
            print (net, "===================>>>")
        with tf.variable_scope("inception_resnet_a"):
            for i in range(5):
                # BUG(review): inception_resnet_a's parameter is named
                # `input_tensor`, so calling it with `inputs=` raises
                # TypeError here.
                net = self.inception_resnet_a(inputs=net)
            print (net, "===================>>>")
        with tf.variable_scope("reduction_a"):
            net = self.reduction_a(inputs=net)
            print(net, "===================>>>")
        with tf.variable_scope("inception_resnet_b"):
            for i in range(10):
                net = self.inception_resnet_b(inputs=net)
            print (net, "===================>>>")
        return net
| 49.391061
| 151
| 0.490499
| 1,720
| 17,682
| 4.713953
| 0.084884
| 0.093241
| 0.037
| 0.042181
| 0.808954
| 0.76554
| 0.749877
| 0.73927
| 0.714726
| 0.699309
| 0
| 0.036208
| 0.406459
| 17,682
| 357
| 152
| 49.529412
| 0.736351
| 0.129567
| 0
| 0.634703
| 0
| 0
| 0.029148
| 0.006078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031963
| false
| 0
| 0.009132
| 0
| 0.073059
| 0.018265
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a626c17f1ddc96620ae5513ec6da9897863fb641
| 173
|
py
|
Python
|
rest_ml/firstApp/admin.py
|
Binucb/machineLearning_RestAPI
|
5ffb51febd9ac31e74977aa20cb1ab8c9e44560a
|
[
"MIT"
] | null | null | null |
rest_ml/firstApp/admin.py
|
Binucb/machineLearning_RestAPI
|
5ffb51febd9ac31e74977aa20cb1ab8c9e44560a
|
[
"MIT"
] | 4
|
2021-03-19T02:02:07.000Z
|
2021-06-04T22:54:44.000Z
|
rest_ml/firstApp/admin.py
|
Binucb/machineLearning_RestAPI
|
5ffb51febd9ac31e74977aa20cb1ab8c9e44560a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from firstApp import models
# Register your models here.
# Expose PersonDetail and MovieRating in the Django admin interface.
admin.site.register(models.PersonDetail)
admin.site.register(models.MovieRating)
| 21.625
| 40
| 0.82659
| 23
| 173
| 6.217391
| 0.565217
| 0.125874
| 0.237762
| 0.321678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098266
| 173
| 7
| 41
| 24.714286
| 0.916667
| 0.150289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a6f78b91c5bbb2598357f86be62fa5d4530bb764
| 177
|
py
|
Python
|
contact/fields.py
|
uktrade/great-domestic-ui
|
e4c1e4783d7321e170ecb6fd5f9eb6c30cd21f4c
|
[
"MIT"
] | null | null | null |
contact/fields.py
|
uktrade/great-domestic-ui
|
e4c1e4783d7321e170ecb6fd5f9eb6c30cd21f4c
|
[
"MIT"
] | 369
|
2019-02-18T15:53:55.000Z
|
2021-06-09T13:17:37.000Z
|
contact/fields.py
|
uktrade/great-domestic-ui
|
e4c1e4783d7321e170ecb6fd5f9eb6c30cd21f4c
|
[
"MIT"
] | 3
|
2019-03-11T12:04:22.000Z
|
2020-11-12T15:28:13.000Z
|
from directory_components.forms import DirectoryComponentsFieldMixin
from django import forms
class IntegerField(DirectoryComponentsFieldMixin, forms.IntegerField):
    """Django IntegerField combined with DirectoryComponentsFieldMixin
    (mixin behaviour defined in directory_components.forms)."""
    pass
| 22.125
| 70
| 0.858757
| 16
| 177
| 9.4375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107345
| 177
| 7
| 71
| 25.285714
| 0.955696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
470ee63186367deeb54b613f36979408b5081240
| 20
|
py
|
Python
|
__init__.py
|
rodrigo203203/person_track_yolo_detection
|
b6210467ca188af44b9c1b513fcbfa8af240a66a
|
[
"MIT"
] | null | null | null |
__init__.py
|
rodrigo203203/person_track_yolo_detection
|
b6210467ca188af44b9c1b513fcbfa8af240a66a
|
[
"MIT"
] | null | null | null |
__init__.py
|
rodrigo203203/person_track_yolo_detection
|
b6210467ca188af44b9c1b513fcbfa8af240a66a
|
[
"MIT"
] | null | null | null |
from . import prueba
| 20
| 20
| 0.8
| 3
| 20
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5bbc5268cd81744970ec1919181a5071a75fc258
| 22
|
py
|
Python
|
writing/__init__.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
writing/__init__.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
writing/__init__.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
from .writer import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5bc46372f756cc1ca3bfb90fe230442edd601420
| 3,999
|
py
|
Python
|
mlxtk/doit_analyses/expval.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | 2
|
2018-12-21T19:41:10.000Z
|
2019-11-25T15:26:27.000Z
|
mlxtk/doit_analyses/expval.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | 73
|
2017-12-22T13:30:16.000Z
|
2022-02-22T04:21:14.000Z
|
mlxtk/doit_analyses/expval.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import List, Union
import matplotlib.pyplot as plt
import numpy
from mlxtk.doit_analyses.collect import collect_values
from mlxtk.doit_analyses.plot import doit_plot_individual
from mlxtk.inout.expval import read_expval_hdf5
from mlxtk.parameter_selection import load_scan
from mlxtk.plot import PlotArgs2D
from mlxtk.plot.expval import plot_expval
from mlxtk.util import make_path
def collect_initial_expval(
    scan_dir: Union[Path, str],
    expval: Union[Path, str],
    output_file: Union[Path, str] = None,
    node: int = 1,
    dof: int = 1,
    missing_ok: bool = True,
):
    """Collect the first (initial-time) expectation value of each scan entry.

    Args:
        scan_dir: directory of the parameter scan.
        expval: path of the expectation-value file within each simulation.
        output_file: destination text file; derived from ``expval`` if None.
        node: unused; kept for interface compatibility with related helpers.
        dof: unused; kept for interface compatibility with related helpers.
        missing_ok: forwarded to ``collect_values``.

    Returns:
        Whatever ``collect_values`` returns for the assembled task.
    """
    expval = make_path(expval)
    if output_file is None:
        # BUGFIX: the original used name.rstrip(".exp.h5"), but str.rstrip
        # strips a *character set* — any trailing '.', 'e', 'x', 'p', 'h'
        # or '5' — which mangles names like "phase.exp.h5". Strip the exact
        # suffix instead.
        name = expval.name
        if name.endswith(".exp.h5"):
            name = name[: -len(".exp.h5")]
        folder_name = "expval_" + name
        if not folder_name.startswith("initial_"):
            folder_name = "initial_" + folder_name
        output_file = Path("data") / (folder_name) / (make_path(scan_dir).name + ".txt")

    def fetch(index, path, parameters):
        # First sample => initial-time value; split into real/imag parts.
        _, data = numpy.array(read_expval_hdf5(path / expval))
        return data[0].real, data[0].imag

    return collect_values(scan_dir, [expval], output_file, fetch, missing_ok=missing_ok)
def collect_final_expval(
    scan_dir: Union[Path, str],
    expval: Union[Path, str],
    output_file: Union[Path, str] = None,
    node: int = 1,
    dof: int = 1,
    missing_ok: bool = True,
):
    """Collect the last (final-time) expectation value of each scan entry.

    Args:
        scan_dir: directory of the parameter scan.
        expval: path of the expectation-value file within each simulation.
        output_file: destination text file; derived from ``expval`` if None.
        node: unused; kept for interface compatibility with related helpers.
        dof: unused; kept for interface compatibility with related helpers.
        missing_ok: forwarded to ``collect_values``.

    Returns:
        Whatever ``collect_values`` returns for the assembled task.
    """
    expval = make_path(expval)
    if output_file is None:
        # BUGFIX: the original used name.rstrip(".exp.h5"), but str.rstrip
        # strips a *character set*, mangling names ending in any of
        # '.exph5'. Strip the exact suffix instead.
        name = expval.name
        if name.endswith(".exp.h5"):
            name = name[: -len(".exp.h5")]
        folder_name = "expval_" + name
        if not folder_name.startswith("final_"):
            folder_name = "final_" + folder_name
        output_file = Path("data") / (folder_name) / (make_path(scan_dir).name + ".txt")

    def fetch(index, path, parameters):
        # Last sample => final-time value; split into real/imag parts.
        _, data = numpy.array(read_expval_hdf5(path / expval))
        return data[-1].real, data[-1].imag

    return collect_values(scan_dir, [expval], output_file, fetch, missing_ok=missing_ok)
def scan_plot_expval(
    scan_dir: Union[Path, str],
    expval: Union[Path, str],
    extensions: Union[List[str], None] = None,
    **kwargs,
):
    """Create per-simulation plots of an expectation value over time.

    Args:
        scan_dir: directory of the parameter scan.
        expval: path of the expectation-value file within each simulation.
        extensions: output image extensions (defaults to [".png"]).
        **kwargs: plot options; "coefficient" defaults to 1.0 and is also
            forwarded to the plotting task as an extra argument.
    """
    # FIX: avoid a shared mutable default argument; build a fresh list per
    # call (behaviour is unchanged for callers relying on the old default).
    if extensions is None:
        extensions = [".png"]
    scan_dir = make_path(scan_dir)
    expval = make_path(expval)
    kwargs["coefficient"] = kwargs.get("coefficient", 1.0)
    plotting_args = PlotArgs2D.from_dict(kwargs)
    selection = load_scan(scan_dir)

    def plot_func(index, path, parameters):
        del path
        del parameters
        # Read the simulation's expval file by scan index and plot it.
        data = read_expval_hdf5(
            str((scan_dir / "by_index" / str(index) / expval).with_suffix(".exp.h5"))
        )
        fig, axis = plt.subplots(1, 1)
        plot_expval(axis, *data, **kwargs)
        return fig, [axis]

    return doit_plot_individual(
        selection,
        f"expval_{str(expval)}".replace("/", "_"),
        [str(expval.with_suffix(".exp.h5"))],
        plot_func,
        plotting_args,
        extensions,
        decorator_funcs=kwargs.get("decorator_funcs", []),
        extra_args={"coefficient": kwargs["coefficient"]},
    )
def scan_plot_variance(
    scan_dir: Union[Path, str],
    variance: Union[Path, str],
    extensions: Union[List[str], None] = None,
    **kwargs,
):
    """Create per-simulation plots of a variance over time.

    Args:
        scan_dir: directory of the parameter scan.
        variance: path of the variance file within each simulation.
        extensions: output image extensions (defaults to [".png"]).
        **kwargs: plot options; "coefficient" defaults to 1.0 and is also
            forwarded to the plotting task as an extra argument.
    """
    # FIX: avoid a shared mutable default argument; build a fresh list per
    # call (behaviour is unchanged for callers relying on the old default).
    if extensions is None:
        extensions = [".png"]
    scan_dir = make_path(scan_dir)
    variance = make_path(variance)
    kwargs["coefficient"] = kwargs.get("coefficient", 1.0)
    plotting_args = PlotArgs2D.from_dict(kwargs)
    selection = load_scan(scan_dir)

    def plot_func(index, path, parameters):
        del path
        del parameters
        # Variance files share the expval HDF5 layout, just with .var.h5.
        data = read_expval_hdf5(
            str((scan_dir / "by_index" / str(index) / variance).with_suffix(".var.h5"))
        )
        fig, axis = plt.subplots(1, 1)
        plot_expval(axis, *data, **kwargs)
        return fig, [axis]

    return doit_plot_individual(
        selection,
        f"variance_{str(variance)}".replace("/", "_"),
        [str(variance.with_suffix(".var.h5"))],
        plot_func,
        plotting_args,
        extensions,
        decorator_funcs=kwargs.get("decorator_funcs", []),
        extra_args={"coefficient": kwargs["coefficient"]},
    )
| 28.564286
| 88
| 0.636409
| 506
| 3,999
| 4.794466
| 0.173913
| 0.046167
| 0.049464
| 0.026381
| 0.761748
| 0.72094
| 0.72094
| 0.72094
| 0.72094
| 0.72094
| 0
| 0.009804
| 0.234809
| 3,999
| 139
| 89
| 28.769784
| 0.783007
| 0
| 0
| 0.702703
| 0
| 0
| 0.072518
| 0.006002
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072072
| false
| 0
| 0.099099
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
752792d2db55ad4d968b8ab127cfa45e80d8b3e6
| 31
|
py
|
Python
|
invoice2data/__init__.py
|
GfxKai/invoice2data
|
caec851578e99d40b989cabba07cfdf61f71ac58
|
[
"MIT"
] | 1
|
2018-02-14T17:24:09.000Z
|
2018-02-14T17:24:09.000Z
|
invoice2data/__init__.py
|
GfxKai/invoice2data
|
caec851578e99d40b989cabba07cfdf61f71ac58
|
[
"MIT"
] | null | null | null |
invoice2data/__init__.py
|
GfxKai/invoice2data
|
caec851578e99d40b989cabba07cfdf61f71ac58
|
[
"MIT"
] | 1
|
2020-08-15T19:38:16.000Z
|
2020-08-15T19:38:16.000Z
|
from .main import extract_data
| 15.5
| 30
| 0.83871
| 5
| 31
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7537ad02c36716a036312fc684cd33bf734c864a
| 26
|
py
|
Python
|
src/perceptron/Neuron/__init__.py
|
SweetBubaleXXX/sweet-perceptron
|
473f4372fb6821c073da249a35a06b82378357f6
|
[
"MIT"
] | 1
|
2022-03-21T14:48:27.000Z
|
2022-03-21T14:48:27.000Z
|
src/perceptron/Neuron/__init__.py
|
SweetBubaleXXX/sweet-perceptron
|
473f4372fb6821c073da249a35a06b82378357f6
|
[
"MIT"
] | null | null | null |
src/perceptron/Neuron/__init__.py
|
SweetBubaleXXX/sweet-perceptron
|
473f4372fb6821c073da249a35a06b82378357f6
|
[
"MIT"
] | null | null | null |
from .Neuron import Neuron
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7542753a75521d181dd4d8301078e512e589a13d
| 125
|
py
|
Python
|
RiboGraphViz/__init__.py
|
rkretsch/RiboGraphViz
|
5a844dc9857bd4e20218ec65942ffbc899f8956f
|
[
"MIT"
] | 5
|
2020-09-11T23:32:59.000Z
|
2022-03-31T09:01:11.000Z
|
RiboGraphViz/__init__.py
|
rkretsch/RiboGraphViz
|
5a844dc9857bd4e20218ec65942ffbc899f8956f
|
[
"MIT"
] | 1
|
2021-08-13T20:40:46.000Z
|
2021-08-13T23:15:15.000Z
|
RiboGraphViz/__init__.py
|
rkretsch/RiboGraphViz
|
5a844dc9857bd4e20218ec65942ffbc899f8956f
|
[
"MIT"
] | 2
|
2021-07-28T20:03:36.000Z
|
2022-01-08T07:11:09.000Z
|
from .RiboGraphViz import RiboGraphViz as RGV
from .LoopExtruder import LoopExtruder
from .LoopExtruder import StackExtruder
| 31.25
| 45
| 0.864
| 14
| 125
| 7.714286
| 0.5
| 0.296296
| 0.407407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112
| 125
| 3
| 46
| 41.666667
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f33dc56a546a050d746f893b7bec5e3047c2293b
| 162
|
py
|
Python
|
server/tests/__init__.py
|
natowi/pepi
|
22df696209ac2545d1e8e1cf0c8822725acadb29
|
[
"Apache-2.0"
] | 4
|
2017-08-30T03:17:34.000Z
|
2019-09-24T08:57:41.000Z
|
server/tests/__init__.py
|
natowi/pepi
|
22df696209ac2545d1e8e1cf0c8822725acadb29
|
[
"Apache-2.0"
] | 14
|
2017-09-02T03:53:14.000Z
|
2022-03-11T23:19:04.000Z
|
server/tests/__init__.py
|
natowi/pepi
|
22df696209ac2545d1e8e1cf0c8822725acadb29
|
[
"Apache-2.0"
] | 3
|
2019-03-27T18:33:25.000Z
|
2021-07-17T02:18:19.000Z
|
from .test_server import MetaCameraServerContract
from .test_server_over_thrift import MetaCameraServerOverThrift
from .test_camera import AbstractCameraContract
| 40.5
| 63
| 0.907407
| 17
| 162
| 8.352941
| 0.588235
| 0.169014
| 0.197183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 162
| 3
| 64
| 54
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f364ccac5cd5496ec7276b703ef853b316a142bc
| 128
|
py
|
Python
|
mandala/core/exceptions.py
|
amakelov/mandala
|
a9ec051ef730ada4eed216c62a07b033126e78d5
|
[
"Apache-2.0"
] | 9
|
2022-02-22T19:24:01.000Z
|
2022-03-23T04:46:41.000Z
|
mandala/core/exceptions.py
|
amakelov/mandala
|
a9ec051ef730ada4eed216c62a07b033126e78d5
|
[
"Apache-2.0"
] | null | null | null |
mandala/core/exceptions.py
|
amakelov/mandala
|
a9ec051ef730ada4eed216c62a07b033126e78d5
|
[
"Apache-2.0"
] | null | null | null |
from ..common_imports import *
class SynchronizationError(Exception):
pass
class VRefNotInMemoryError(Exception):
pass
| 18.285714
| 38
| 0.78125
| 12
| 128
| 8.25
| 0.75
| 0.262626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148438
| 128
| 7
| 39
| 18.285714
| 0.908257
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f38cecc11daf613785082bebda6a274a61e723f2
| 22,356
|
py
|
Python
|
test/unit/live_cluster/test_manage_controller.py
|
jfwm2/aerospike-admin
|
3ce721bbd249eca73046345620941a6aef325589
|
[
"Apache-2.0"
] | null | null | null |
test/unit/live_cluster/test_manage_controller.py
|
jfwm2/aerospike-admin
|
3ce721bbd249eca73046345620941a6aef325589
|
[
"Apache-2.0"
] | null | null | null |
test/unit/live_cluster/test_manage_controller.py
|
jfwm2/aerospike-admin
|
3ce721bbd249eca73046345620941a6aef325589
|
[
"Apache-2.0"
] | null | null | null |
from lib.base_controller import ShellException
import unittest
from mock import MagicMock, patch
from lib.live_cluster.client.info import ASProtocolError, ASResponse
from lib.live_cluster.manage_controller import (
ManageACLCreateRoleController,
ManageACLCreateUserController,
ManageACLQuotasRoleController,
)
from lib.live_cluster.live_cluster_root_controller import LiveClusterRootController
from test.unit import util as test_util
class ManageACLCreateUserControllerTest(unittest.TestCase):
def setUp(self) -> None:
patch("lib.live_cluster.live_cluster_root_controller.Cluster").start()
self.root_controller = LiveClusterRootController()
self.controller = ManageACLCreateUserController()
self.cluster_mock = patch(
"lib.live_cluster.manage_controller.ManageACLCreateUserController.cluster"
).start()
self.logger_mock = patch("lib.base_controller.BaseController.logger").start()
self.view_mock = patch("lib.base_controller.BaseController.view").start()
self.mods = {"like": [], "with": [], "for": [], "line": []}
self.addCleanup(patch.stopall)
def test_no_roles_and_no_password(self):
getpass_mock = patch("lib.live_cluster.manage_controller.getpass").start()
getpass_mock.return_value = "pass"
self.cluster_mock.get_expected_principal.return_value = "principal"
self.cluster_mock.admin_create_user.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(["test-user"])
self.cluster_mock.admin_create_user.assert_called_with(
"test-user", "pass", [], nodes=["principal"]
)
self.view_mock.print_result.assert_called_with(
"Successfully created user test-user."
)
def test_with_roles_and_password(self):
self.cluster_mock.get_expected_principal.return_value = "principal"
self.cluster_mock.admin_create_user.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(
["test-user", "password", "pass", "roles", "role1", "role2", "role3"]
)
self.cluster_mock.admin_create_user.assert_called_with(
"test-user", "pass", ["role1", "role2", "role3"], nodes=["principal"]
)
self.view_mock.print_result.assert_called_with(
"Successfully created user test-user."
)
def test_with_role_and_password(self):
self.cluster_mock.get_expected_principal.return_value = "principal"
self.cluster_mock.admin_create_user.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(
["test-user", "password", "pass", "role", "role1", "role2", "role3"]
)
self.cluster_mock.admin_create_user.assert_called_with(
"test-user", "pass", ["role1", "role2", "role3"], nodes=["principal"]
)
self.view_mock.print_result.assert_called_with(
"Successfully created user test-user."
)
def test_logs_error_when_asprotocol_error_returned(self):
as_error = ASProtocolError(ASResponse.USER_ALREADY_EXISTS, "test-message")
log_message = "test-message : User already exists."
line = "test-user password pass"
self.cluster_mock.get_expected_principal.return_value = "principal"
self.cluster_mock.admin_create_user.return_value = {"principal_ip": as_error}
self.controller.execute(line.split())
self.cluster_mock.admin_create_user.assert_called_with(
"test-user", "pass", [], nodes=["principal"]
)
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
def test_raises_exception_when_exception_returned(self):
as_error = IOError("test-message")
line = "test-user password pass"
self.cluster_mock.get_expected_principal.return_value = "principal"
self.cluster_mock.admin_create_user.return_value = {"principal_ip": as_error}
test_util.assert_exception(
self, ShellException, "test-message", self.controller.execute, line.split()
)
self.cluster_mock.admin_create_user.assert_called_with(
"test-user", "pass", [], nodes=["principal"]
)
self.view_mock.print_result.assert_not_called()
class ManageACLCreateRoleControllerTest(unittest.TestCase):
def setUp(self) -> None:
patch("lib.live_cluster.live_cluster_root_controller.Cluster").start()
self.root_controller = LiveClusterRootController()
self.controller = ManageACLCreateRoleController()
self.cluster_mock = patch(
"lib.live_cluster.manage_controller.ManageACLCreateRoleController.cluster"
).start()
self.logger_mock = patch("lib.base_controller.BaseController.logger").start()
self.view_mock = patch("lib.base_controller.BaseController.view").start()
self.mods = {"like": [], "with": [], "for": [], "line": []}
self.cluster_mock.info_build_version.return_value = {"principal": "5.6.0.0"}
self.cluster_mock.get_expected_principal.return_value = "principal"
self.addCleanup(patch.stopall)
def test_logs_error_when_server_does_not_support_quotas(self):
log_message = "'read' and 'write' modifiers are not supported on aerospike versions <= 5.5"
line = "test-role priv test-priv read 100 write 200"
self.cluster_mock.info_build_version.side_effect = [
{"principal": "5.5.0.0"},
{"principal": "5.5.9.9"},
]
self.cluster_mock.get_expected_principal.side_effect = ["principal"] * 2
self.cluster_mock.admin_create_role.side_effect = [
{"principal_ip": ASResponse.OK}
] * 2
for _ in range(2):
self.controller.execute(line.split())
self.logger_mock.warning.assert_called_with(log_message)
def test_with_only_privilege(self):
self.cluster_mock.get_expected_principal.return_value = "principal"
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv test-priv"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["test-priv"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_privilege_with_namespace(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv test-priv ns test-ns"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["test-priv.test-ns"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_privilege_and_namespace_and_set(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv test-priv ns test-ns set test-set"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["test-priv.test-ns.test-set"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_privilege_and_set_logs_error(self):
self.controller.execute_help = MagicMock()
line = "test-role priv test-priv set test-set"
self.controller.execute(line.split())
self.logger_mock.error.assert_called_with(
"A set must be accompanied by a namespace."
)
def test_with_privilege_and_allowlist(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv test-priv ns test-ns set test-set allow 3.3.3.3 4.4.4.4"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["test-priv.test-ns.test-set"],
whitelist=["3.3.3.3", "4.4.4.4"],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_privilege_and_read_and_write_quota(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv test-priv ns test-ns set test-set read 111 write 222"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["test-priv.test-ns.test-set"],
whitelist=[],
read_quota=111,
write_quota=222,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_privilege_and_allowlist_and_read_and_write_quota(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv test-priv ns test-ns set test-set allow 3.3.3.3 4.4.4.4 read 111 write 222"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["test-priv.test-ns.test-set"],
whitelist=["3.3.3.3", "4.4.4.4"],
read_quota=111,
write_quota=222,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_read_privilege_only(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv read"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["read"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_write_privilege_only(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv write"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["write"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_conflicting_write_privilege_and_write_quota(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv write write 111"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["write"],
whitelist=[],
read_quota=None,
write_quota=111,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_conflicting_read_privilege_and_read_quota(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv read read 111"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["read"],
whitelist=[],
read_quota=111,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_with_conflicting_read_privilege_and_write_quota(self):
self.cluster_mock.admin_create_role.return_value = {
"principal_ip": ASResponse.OK
}
line = "test-role priv read write 111"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["read"],
whitelist=[],
read_quota=None,
write_quota=111,
nodes=["principal"],
)
self.view_mock.print_result.assert_called_with(
"Successfully created role test-role."
)
def test_logs_error_when_quotas_are_not_int(self):
log_message = "Quotas must be integers."
line = "test-role priv write write 100a read 100"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_not_called()
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
line = "test-role priv write write 100 read 100a"
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_not_called()
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
def test_logs_error_when_asprotocol_error_returned(self):
as_error = ASProtocolError(ASResponse.ROLE_ALREADY_EXISTS, "test-message")
log_message = "test-message : Role already exists."
line = "test-role priv sys-admin"
self.cluster_mock.admin_create_role.return_value = {"principal_ip": as_error}
self.controller.execute(line.split())
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["sys-admin"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
def test_raises_exception_when_exception_returned(self):
as_error = IOError("test-message")
line = "test-role priv sys-admin"
self.cluster_mock.admin_create_role.return_value = {"principal_ip": as_error}
test_util.assert_exception(
self, ShellException, "test-message", self.controller.execute, line.split()
)
self.cluster_mock.admin_create_role.assert_called_with(
"test-role",
privileges=["sys-admin"],
whitelist=[],
read_quota=None,
write_quota=None,
nodes=["principal"],
)
self.view_mock.print_result.assert_not_called()
class ManageACLRateLimitControllerTest(unittest.TestCase):
def setUp(self) -> None:
patch("lib.live_cluster.live_cluster_root_controller.Cluster").start()
self.root_controller = LiveClusterRootController()
self.controller = ManageACLQuotasRoleController()
self.cluster_mock = patch(
"lib.live_cluster.manage_controller.ManageACLQuotasRoleController.cluster"
).start()
self.logger_mock = patch("lib.base_controller.BaseController.logger").start()
self.view_mock = patch("lib.base_controller.BaseController.view").start()
self.mods = {"like": [], "with": [], "for": [], "line": []}
self.cluster_mock.info_build_version.return_value = {"principal": "5.6.0.0"}
self.cluster_mock.get_expected_principal.return_value = "principal"
self.addCleanup(patch.stopall)
def test_logs_error_when_server_does_not_support_quotas(self):
log_message = "'manage quotas' is not supported on aerospike versions <= 5.5"
line = "role test-role read 100 write 200"
self.cluster_mock.info_build_version.side_effect = [
{"principal": "5.5.0.0"},
{"principal": "5.5.9.9"},
]
self.cluster_mock.get_expected_principal.side_effect = ["principal"] * 2
self.cluster_mock.admin_set_quotas.side_effect = [
{"principal_ip": ASResponse.OK}
] * 2
for _ in range(2):
self.controller.execute(line.split())
self.logger_mock.error.assert_called_with(log_message)
def test_logs_error_with_read_and_write_not_provided(self):
log_message = "'read' or 'write' is required."
self.controller.execute(["role", "test-role"])
self.logger_mock.error.assert_called_with(log_message)
def test_success_with_read_and_write(self):
log_message = "Successfully set quotas for role test-role."
line = "role test-role read 100 write 200"
self.cluster_mock.admin_set_quotas.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"test-role", read_quota=100, write_quota=200, nodes=["principal"]
)
self.view_mock.print_result.assert_called_with(log_message)
def test_success_with_just_read(self):
log_message = "Successfully set quota for role test-role."
line = "role test-role read 100"
self.cluster_mock.admin_set_quotas.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"test-role", read_quota=100, write_quota=None, nodes=["principal"]
)
self.view_mock.print_result.assert_called_with(log_message)
def test_success_with_just_write(self):
log_message = "Successfully set quota for role test-role."
line = "role test-role write 100"
self.cluster_mock.admin_set_quotas.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"test-role", read_quota=None, write_quota=100, nodes=["principal"]
)
self.view_mock.print_result.assert_called_with(log_message)
def test_correct_call_with_conflicting_read_role_and_read_quota(self):
line = "role read read 100"
self.cluster_mock.admin_set_quotas.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"read", read_quota=100, write_quota=None, nodes=["principal"]
)
def test_correct_call_with_conflicting_write_role_and_write_quota(self):
line = "role write write 100"
self.cluster_mock.admin_set_quotas.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"write", read_quota=None, write_quota=100, nodes=["principal"]
)
def test_correct_call_with_conflicting_write_role_and_read_quota(self):
line = "role write read 100"
self.cluster_mock.admin_set_quotas.return_value = {
"principal_ip": ASResponse.OK
}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"write", read_quota=100, write_quota=None, nodes=["principal"]
)
def test_logs_error_when_quotas_are_not_int(self):
log_message = "Quotas must be integers."
line = "role test-role write 100a read 100"
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_not_called()
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
line = "role test-role write 100 read 100a"
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_not_called()
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
def test_logs_error_when_asprotocol_error_returned(self):
as_error = ASProtocolError(ASResponse.RATE_QUOTA_EXCEEDED, "test-message")
log_message = "test-message : Rate quota exceeded."
line = "role test-role write 100 read 100"
self.cluster_mock.admin_set_quotas.return_value = {"principal_ip": as_error}
self.controller.execute(line.split())
self.cluster_mock.admin_set_quotas.assert_called_with(
"test-role", read_quota=100, write_quota=100, nodes=["principal"]
)
self.logger_mock.error.assert_called_with(log_message)
self.view_mock.print_result.assert_not_called()
def test_raises_exception_when_exception_returned(self):
as_error = IOError("test-message")
line = "role test-role write 100 read 100"
self.cluster_mock.admin_set_quotas.return_value = {"principal_ip": as_error}
test_util.assert_exception(
self, ShellException, "test-message", self.controller.execute, line.split()
)
self.cluster_mock.admin_set_quotas.assert_called_with(
"test-role", read_quota=100, write_quota=100, nodes=["principal"]
)
self.view_mock.print_result.assert_not_called()
| 37.447236
| 106
| 0.649803
| 2,641
| 22,356
| 5.189322
| 0.056797
| 0.060197
| 0.082087
| 0.084641
| 0.921343
| 0.916308
| 0.896826
| 0.884276
| 0.871069
| 0.858592
| 0
| 0.013138
| 0.24414
| 22,356
| 596
| 107
| 37.510067
| 0.797905
| 0
| 0
| 0.6639
| 0
| 0.004149
| 0.185632
| 0.03404
| 0
| 0
| 0
| 0
| 0.147303
| 1
| 0.072614
| false
| 0.029046
| 0.014523
| 0
| 0.093361
| 0.056017
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34346eb8110c90b4aeba3424b6d1fd701f282806
| 253
|
py
|
Python
|
app/app/admin_views.py
|
AttiR/Flask-Web-Development
|
f8d6bf0f16b3858f21df87a3b09ed7dbe5d52636
|
[
"MIT"
] | null | null | null |
app/app/admin_views.py
|
AttiR/Flask-Web-Development
|
f8d6bf0f16b3858f21df87a3b09ed7dbe5d52636
|
[
"MIT"
] | null | null | null |
app/app/admin_views.py
|
AttiR/Flask-Web-Development
|
f8d6bf0f16b3858f21df87a3b09ed7dbe5d52636
|
[
"MIT"
] | null | null | null |
from app import app
from flask import render_template
@app.route("/admin/dashboard")
def dashboard():
return render_template("admin/dashboard.html")
@app.route("/admin/profile")
def profile():
return render_template("admin/profile.html")
| 25.3
| 50
| 0.735178
| 33
| 253
| 5.545455
| 0.393939
| 0.229508
| 0.142077
| 0.273224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 253
| 10
| 51
| 25.3
| 0.831818
| 0
| 0
| 0
| 0
| 0
| 0.267717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
346e7c5b1137d08aa04255422af6fff0a73b54d3
| 132
|
py
|
Python
|
defaultindexfile.py
|
himanshurajora/VChat-Node
|
b5bca8491248b074607ad222931fa7965823ae09
|
[
"MIT"
] | 2
|
2021-05-06T15:12:49.000Z
|
2021-05-06T15:16:45.000Z
|
defaultindexfile.py
|
himanshurajora/VChat-Node
|
b5bca8491248b074607ad222931fa7965823ae09
|
[
"MIT"
] | null | null | null |
defaultindexfile.py
|
himanshurajora/VChat-Node
|
b5bca8491248b074607ad222931fa7965823ae09
|
[
"MIT"
] | null | null | null |
print("hello world")
print("this is another stuff i am gonna write in it")
print("see ya")
print("this is where the new file is")
| 33
| 54
| 0.704545
| 25
| 132
| 3.72
| 0.76
| 0.193548
| 0.236559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174242
| 132
| 4
| 55
| 33
| 0.853211
| 0
| 0
| 0
| 0
| 0
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
346f47182c2c06cb08adcdc75ffcb05376a32e22
| 307
|
py
|
Python
|
paypalpayoutssdk/core/__init__.py
|
truthiswill/Payouts-Python-SDK
|
ba04ffafb8165a1b7cdfd5841f08a96dccdd190b
|
[
"BSD-Source-Code"
] | 23
|
2020-03-02T13:31:55.000Z
|
2022-03-06T11:25:21.000Z
|
paypalpayoutssdk/core/__init__.py
|
truthiswill/Payouts-Python-SDK
|
ba04ffafb8165a1b7cdfd5841f08a96dccdd190b
|
[
"BSD-Source-Code"
] | 4
|
2020-09-26T08:40:26.000Z
|
2022-03-01T17:29:51.000Z
|
paypalpayoutssdk/core/__init__.py
|
truthiswill/Payouts-Python-SDK
|
ba04ffafb8165a1b7cdfd5841f08a96dccdd190b
|
[
"BSD-Source-Code"
] | 21
|
2020-02-07T10:02:57.000Z
|
2021-09-09T18:05:02.000Z
|
from paypalpayoutssdk.core.access_token import *
from paypalpayoutssdk.core.access_token_request import *
from paypalpayoutssdk.core.refresh_token_request import *
from paypalpayoutssdk.core.environment import *
from paypalpayoutssdk.core.paypal_http_client import *
from paypalpayoutssdk.core.util import *
| 51.166667
| 57
| 0.86645
| 37
| 307
| 7
| 0.351351
| 0.46332
| 0.555985
| 0.579151
| 0.57529
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074919
| 307
| 6
| 58
| 51.166667
| 0.911972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3473e1d04173dce320ccfad6577589b952c4e190
| 37,934
|
py
|
Python
|
python/REDSHIFT_DB_ENCRYPTED/REDSHIFT_DB_ENCRYPTED_test.py
|
UrfTheManatee/aws-config-rules
|
fbbfede71bf90f14a8b448447d28b12a68a1f20a
|
[
"CC0-1.0"
] | null | null | null |
python/REDSHIFT_DB_ENCRYPTED/REDSHIFT_DB_ENCRYPTED_test.py
|
UrfTheManatee/aws-config-rules
|
fbbfede71bf90f14a8b448447d28b12a68a1f20a
|
[
"CC0-1.0"
] | null | null | null |
python/REDSHIFT_DB_ENCRYPTED/REDSHIFT_DB_ENCRYPTED_test.py
|
UrfTheManatee/aws-config-rules
|
fbbfede71bf90f14a8b448447d28b12a68a1f20a
|
[
"CC0-1.0"
] | null | null | null |
# Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import sys
import unittest
from botocore.exceptions import ClientError
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = "AWS::Redshift::Cluster"
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
REDSHIFT_CLIENT_MOCK = MagicMock()
PAGINATOR_MOCK = MagicMock()
class Boto3Mock:
@staticmethod
def client(client_name, *args, **kwargs):
if client_name == "config":
return CONFIG_CLIENT_MOCK
if client_name == "sts":
return STS_CLIENT_MOCK
if client_name == "redshift":
return REDSHIFT_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules["boto3"] = Boto3Mock()
RULE = __import__("REDSHIFT_DB_ENCRYPTED")
class ComplianceTest(unittest.TestCase):
# Unit test for no Cluster is present -- GHERKIN Scenario 1
def test_scenario_1(self):
clusters_is_empty = [{"Clusters": []}]
REDSHIFT_CLIENT_MOCK.get_paginator.return_value = PAGINATOR_MOCK
PAGINATOR_MOCK.paginate.return_value = clusters_is_empty
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
expected_response = [
build_expected_response(
"NOT_APPLICABLE",
"123456789012",
"AWS::::Account",
annotation="No clusters found.",
)
]
assert_successful_evaluation(self, response, expected_response)
# Unit test for if Encrypted to false -- GHERKIN Scenario 2
def test_scenario_2(self):
clusters_is_present = [
{
"Clusters": [
{
"ClusterIdentifier": "redshift-cluster-1",
"NodeType": "ra3.4xlarge",
"ClusterStatus": "available",
"ClusterAvailabilityStatus": "Available",
"MasterUsername": "awsuser",
"DBName": "dev",
"Endpoint": {
"Address": "redshift-cluster-1.crmh4vec7kyo.us-east-2.redshift.amazonaws.com",
"Port": 5439,
},
"ClusterCreateTime": "datetime.datetime(2022, 1, 7, 7, 35, 2, 232000, tzinfo=tzlocal())",
"AutomatedSnapshotRetentionPeriod": 1,
"ManualSnapshotRetentionPeriod": -1,
"ClusterSecurityGroups": [],
"VpcSecurityGroups": [
{
"VpcSecurityGroupId": "sg-065ea7b9c71408f17",
"Status": "active",
}
],
"ClusterParameterGroups": [
{
"ParameterGroupName": "myparametergroup",
"ParameterApplyStatus": "in-sync",
"ClusterParameterStatusList": [
{
"ParameterName": "use_fips_ssl",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "query_group",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "datestyle",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "extra_float_digits",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "search_path",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "statement_timeout",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "wlm_json_configuration",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "require_ssl",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "enable_user_activity_logging",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "max_cursor_result_set_size",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "auto_analyze",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "max_concurrency_scaling_clusters",
"ParameterApplyStatus": "in-sync",
},
{
"ParameterName": "enable_case_sensitive_identifier",
"ParameterApplyStatus": "in-sync",
},
],
}
],
"ClusterSubnetGroupName": "default",
"VpcId": "vpc-0c1fbf2379152d7f4",
"AvailabilityZone": "us-east-2c",
"PreferredMaintenanceWindow": "wed:08:30-wed:09:00",
"PendingModifiedValues": {},
"ClusterVersion": "1.0",
"AllowVersionUpgrade": True,
"NumberOfNodes": 2,
"PubliclyAccessible": False,
"Encrypted": False,
"ClusterPublicKey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCanvmYtbNIGA5PiAY4rF"
"6ppg1wR3QY0f860EPUpRSaoc07UHOV4S2QLk21m5KEQm15rTE6dxVWrkXBzNabgdAsuiAo+Abur3D3y8xSZ"
"STMpD4e0Kn3UQ9nKw/2WWKWslNjKyzsBRSHv0jdVgg7KjtxoKAYNu/PbH4WCv2bcX+2nz8jxxDg2IOS/A6I3D"
"3pha9Q/FX0MMPYDKwKWw4TZ83PsZQvGWkW37TKaiGHFUpRfpuL/W8gHVD0ZJo8cK+WBxQsG5CujHlyifMQPBG"
"mKiFW8IeHS2evKzPAqIUlUTUA/t8t7EeCw5rnby8raUj7qWbeGqJ55d9CjcndHgaY5TZV "
"Amazon-Redshift\n",
"ClusterNodes": [
{
"NodeRole": "LEADER",
"PrivateIPAddress": "172.31.34.184",
"PublicIPAddress": "3.133.24.130",
},
{
"NodeRole": "COMPUTE-0",
"PrivateIPAddress": "172.31.35.209",
"PublicIPAddress": "18.220.99.27",
},
{
"NodeRole": "COMPUTE-1",
"PrivateIPAddress": "172.31.33.222",
"PublicIPAddress": "18.224.176.198",
},
],
"ClusterRevisionNumber": "35480",
"Tags": [],
"EnhancedVpcRouting": False,
"IamRoles": [
{
"IamRoleArn": "arn:aws:iam::529010877102:role/service-role/"
"AmazonRedshift-CommandsAccessRole-20211209T063611",
"ApplyStatus": "in-sync",
}
],
"MaintenanceTrackName": "current",
"ElasticResizeNumberOfNodeOptions": "[3,4,5,6,7,8]",
"DeferredMaintenanceWindows": [],
"NextMaintenanceWindowStartTime": "datetime.datetime(2022, 2, 16, 8, 30, tzinfo=tzlocal())",
"AvailabilityZoneRelocationStatus": "disabled",
"ClusterNamespaceArn": "arn:aws:redshift:us-east-2:529010877102:namespace:95b46b68-0afa-4ca9-"
"baf2-b6bf714431bc",
"TotalStorageCapacityInMegaBytes": 256000000,
"AquaConfiguration": {
"AquaStatus": "disabled",
"AquaConfigurationStatus": "auto",
},
}
]
}
]
REDSHIFT_CLIENT_MOCK.get_paginator.return_value = PAGINATOR_MOCK
PAGINATOR_MOCK.paginate.return_value = clusters_is_present
parameters = {
"Parameters": [
{
"ParameterName": "auto_analyze",
"ParameterValue": "true",
"Description": "Use auto analyze",
"Source": "engine-default",
"DataType": "boolean",
"AllowedValues": "true,false",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "datestyle",
"ParameterValue": "ISO, MDY",
"Description": "Sets the display format for date and time values.",
"Source": "engine-default",
"DataType": "string",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "enable_case_sensitive_identifier",
"ParameterValue": "true",
"Description": "Preserve case sensitivity for database identifiers such as table or column names in parser",
"Source": "user",
"DataType": "boolean",
"AllowedValues": "true,false",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "enable_user_activity_logging",
"ParameterValue": "false",
"Description": "parameter for audit logging purpose",
"Source": "user",
"DataType": "boolean",
"AllowedValues": "true,false",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "extra_float_digits",
"ParameterValue": "0",
"Description": "Sets the number of digits displayed for floating-point values",
"Source": "engine-default",
"DataType": "integer",
"AllowedValues": "-15-2",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "max_concurrency_scaling_clusters",
"ParameterValue": "1",
"Description": "The maximum concurrency scaling clusters can be used.",
"Source": "engine-default",
"DataType": "integer",
"AllowedValues": "0-10",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "max_cursor_result_set_size",
"ParameterValue": "default",
"Description": "Sets the max cursor result set size",
"Source": "engine-default",
"DataType": "integer",
"AllowedValues": "0-14400000",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "query_group",
"ParameterValue": "default",
"Description": "This parameter applies a user-defined label to a group of queries that are run during the "
"same session..",
"Source": "engine-default",
"DataType": "string",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "require_ssl",
"ParameterValue": "false",
"Description": "require ssl for all databaseconnections",
"Source": "user",
"DataType": "boolean",
"AllowedValues": "true,false",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "search_path",
"ParameterValue": "$user, public",
"Description": "Sets the schema search order for names that are not schema-qualified.",
"Source": "engine-default",
"DataType": "string",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "statement_timeout",
"ParameterValue": "0",
"Description": "Aborts any statement that takes over the specified number of milliseconds.",
"Source": "engine-default",
"DataType": "integer",
"AllowedValues": "0,100-2147483647",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "use_fips_ssl",
"ParameterValue": "false",
"Description": "Use fips ssl library",
"Source": "engine-default",
"DataType": "boolean",
"AllowedValues": "true,false",
"ApplyType": "static",
"IsModifiable": True,
},
{
"ParameterName": "wlm_json_configuration",
"ParameterValue": '[{"auto_wlm":true}]',
"Description": "wlm json configuration",
"Source": "engine-default",
"DataType": "string",
"ApplyType": "static",
"IsModifiable": True,
},
],
"ResponseMetadata": {
"RequestId": "839a0ffc-1ea6-4700-94a2-76ead2a7cb5e",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "839a0ffc-1ea6-4700-94a2-76ead2a7cb5e",
"content-type": "text/xml",
"content-length": "5806",
"vary": "accept-encoding",
"date": "Tue, 15 Feb 2022 12:08:59 GMT",
},
"RetryAttempts": 0,
},
}
REDSHIFT_CLIENT_MOCK.describe_cluster_parameters = MagicMock(
return_value=parameters
)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
expected_response = [
build_expected_response(
"NON_COMPLIANT",
"redshift-cluster-1",
annotation="The database cluster is not encrypted.",
)
]
assert_successful_evaluation(self, response, expected_response)
# Unit test for if Encrypted to true -- GHERKIN Scenario 3
    def test_scenario_3(self):
        """GHERKIN Scenario 3: an encrypted cluster evaluates as COMPLIANT.

        The paginated describe_clusters mock returns a single cluster with
        ``"Encrypted": True``; the rule must therefore emit one COMPLIANT
        evaluation for "redshift-cluster-1" with no annotation.
        """
        # Full describe_clusters page as returned by the boto3 paginator;
        # only "ClusterIdentifier" and "Encrypted" matter to this rule —
        # the rest mirrors a real API response for fidelity.
        clusters_is_present = [
            {
                "Clusters": [
                    {
                        "ClusterIdentifier": "redshift-cluster-1",
                        "NodeType": "ra3.4xlarge",
                        "ClusterStatus": "available",
                        "ClusterAvailabilityStatus": "Available",
                        "MasterUsername": "awsuser",
                        "DBName": "dev",
                        "Endpoint": {
                            "Address": "redshift-cluster-1.crmh4vec7kyo.us-east-2.redshift.amazonaws.com",
                            "Port": 5439,
                        },
                        "ClusterCreateTime": "datetime.datetime(2022, 1, 7, 7, 35, 2, 232000, tzinfo=tzlocal())",
                        "AutomatedSnapshotRetentionPeriod": 1,
                        "ManualSnapshotRetentionPeriod": -1,
                        "ClusterSecurityGroups": [],
                        "VpcSecurityGroups": [
                            {
                                "VpcSecurityGroupId": "sg-065ea7b9c71408f17",
                                "Status": "active",
                            }
                        ],
                        "ClusterParameterGroups": [
                            {
                                "ParameterGroupName": "myparametergroup",
                                "ParameterApplyStatus": "in-sync",
                                "ClusterParameterStatusList": [
                                    {
                                        "ParameterName": "use_fips_ssl",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "query_group",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "datestyle",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "extra_float_digits",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "search_path",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "statement_timeout",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "wlm_json_configuration",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "require_ssl",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "enable_user_activity_logging",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "max_cursor_result_set_size",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "auto_analyze",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "max_concurrency_scaling_clusters",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                    {
                                        "ParameterName": "enable_case_sensitive_identifier",
                                        "ParameterApplyStatus": "in-sync",
                                    },
                                ],
                            }
                        ],
                        "ClusterSubnetGroupName": "default",
                        "VpcId": "vpc-0c1fbf2379152d7f4",
                        "AvailabilityZone": "us-east-2c",
                        "PreferredMaintenanceWindow": "wed:08:30-wed:09:00",
                        "PendingModifiedValues": {},
                        "ClusterVersion": "1.0",
                        "AllowVersionUpgrade": True,
                        "NumberOfNodes": 2,
                        "PubliclyAccessible": False,
                        # The property under test: encrypted cluster => COMPLIANT.
                        "Encrypted": True,
                        "ClusterPublicKey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCanvmYtbNIGA5PiAY4rF"
                        "6ppg1wR3QY0f860EPUpRSaoc07UHOV4S2QLk21m5KEQm15rTE6dxVWrkXBzNabgdAsuiAo+Abur3D3y8xSZ"
                        "STMpD4e0Kn3UQ9nKw/2WWKWslNjKyzsBRSHv0jdVgg7KjtxoKAYNu/PbH4WCv2bcX+2nz8jxxDg2IOS/A6I3D"
                        "3pha9Q/FX0MMPYDKwKWw4TZ83PsZQvGWkW37TKaiGHFUpRfpuL/W8gHVD0ZJo8cK+WBxQsG5CujHlyifMQPBG"
                        "mKiFW8IeHS2evKzPAqIUlUTUA/t8t7EeCw5rnby8raUj7qWbeGqJ55d9CjcndHgaY5TZV "
                        "Amazon-Redshift\n",
                        "ClusterNodes": [
                            {
                                "NodeRole": "LEADER",
                                "PrivateIPAddress": "172.31.34.184",
                                "PublicIPAddress": "3.133.24.130",
                            },
                            {
                                "NodeRole": "COMPUTE-0",
                                "PrivateIPAddress": "172.31.35.209",
                                "PublicIPAddress": "18.220.99.27",
                            },
                            {
                                "NodeRole": "COMPUTE-1",
                                "PrivateIPAddress": "172.31.33.222",
                                "PublicIPAddress": "18.224.176.198",
                            },
                        ],
                        "ClusterRevisionNumber": "35480",
                        "Tags": [],
                        "EnhancedVpcRouting": False,
                        "IamRoles": [
                            {
                                "IamRoleArn": "arn:aws:iam::529010877102:role/service-role/"
                                "AmazonRedshift-CommandsAccessRole-20211209T063611",
                                "ApplyStatus": "in-sync",
                            }
                        ],
                        "MaintenanceTrackName": "current",
                        "ElasticResizeNumberOfNodeOptions": "[3,4,5,6,7,8]",
                        "DeferredMaintenanceWindows": [],
                        "NextMaintenanceWindowStartTime": "datetime.datetime(2022, 2, 16, 8, 30, tzinfo=tzlocal())",
                        "AvailabilityZoneRelocationStatus": "disabled",
                        "ClusterNamespaceArn": "arn:aws:redshift:us-east-2:529010877102:namespace:95b46b68-0afa-4ca9-"
                        "baf2-b6bf714431bc",
                        "TotalStorageCapacityInMegaBytes": 256000000,
                        "AquaConfiguration": {
                            "AquaStatus": "disabled",
                            "AquaConfigurationStatus": "auto",
                        },
                    }
                ]
            }
        ]
        REDSHIFT_CLIENT_MOCK.get_paginator.return_value = PAGINATOR_MOCK
        PAGINATOR_MOCK.paginate.return_value = clusters_is_present
        # Canned describe_cluster_parameters response for the cluster's
        # parameter group (shape matches the real API).
        parameters = {
            "Parameters": [
                {
                    "ParameterName": "auto_analyze",
                    "ParameterValue": "true",
                    "Description": "Use auto analyze",
                    "Source": "engine-default",
                    "DataType": "boolean",
                    "AllowedValues": "true,false",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "datestyle",
                    "ParameterValue": "ISO, MDY",
                    "Description": "Sets the display format for date and time values.",
                    "Source": "engine-default",
                    "DataType": "string",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "enable_case_sensitive_identifier",
                    "ParameterValue": "true",
                    "Description": "Preserve case sensitivity for database identifiers such as table or column names in parser",
                    "Source": "user",
                    "DataType": "boolean",
                    "AllowedValues": "true,false",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "enable_user_activity_logging",
                    "ParameterValue": "false",
                    "Description": "parameter for audit logging purpose",
                    "Source": "user",
                    "DataType": "boolean",
                    "AllowedValues": "true,false",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "extra_float_digits",
                    "ParameterValue": "0",
                    "Description": "Sets the number of digits displayed for floating-point values",
                    "Source": "engine-default",
                    "DataType": "integer",
                    "AllowedValues": "-15-2",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "max_concurrency_scaling_clusters",
                    "ParameterValue": "1",
                    "Description": "The maximum concurrency scaling clusters can be used.",
                    "Source": "engine-default",
                    "DataType": "integer",
                    "AllowedValues": "0-10",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "max_cursor_result_set_size",
                    "ParameterValue": "default",
                    "Description": "Sets the max cursor result set size",
                    "Source": "engine-default",
                    "DataType": "integer",
                    "AllowedValues": "0-14400000",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "query_group",
                    "ParameterValue": "default",
                    "Description": "This parameter applies a user-defined label to a group of queries that are run during the "
                    "same session..",
                    "Source": "engine-default",
                    "DataType": "string",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "require_ssl",
                    "ParameterValue": "false",
                    "Description": "require ssl for all databaseconnections",
                    "Source": "user",
                    "DataType": "boolean",
                    "AllowedValues": "true,false",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "search_path",
                    "ParameterValue": "$user, public",
                    "Description": "Sets the schema search order for names that are not schema-qualified.",
                    "Source": "engine-default",
                    "DataType": "string",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "statement_timeout",
                    "ParameterValue": "0",
                    "Description": "Aborts any statement that takes over the specified number of milliseconds.",
                    "Source": "engine-default",
                    "DataType": "integer",
                    "AllowedValues": "0,100-2147483647",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "use_fips_ssl",
                    "ParameterValue": "false",
                    "Description": "Use fips ssl library",
                    "Source": "engine-default",
                    "DataType": "boolean",
                    "AllowedValues": "true,false",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
                {
                    "ParameterName": "wlm_json_configuration",
                    "ParameterValue": '[{"auto_wlm":true}]',
                    "Description": "wlm json configuration",
                    "Source": "engine-default",
                    "DataType": "string",
                    "ApplyType": "static",
                    "IsModifiable": True,
                },
            ],
            "ResponseMetadata": {
                "RequestId": "839a0ffc-1ea6-4700-94a2-76ead2a7cb5e",
                "HTTPStatusCode": 200,
                "HTTPHeaders": {
                    "x-amzn-requestid": "839a0ffc-1ea6-4700-94a2-76ead2a7cb5e",
                    "content-type": "text/xml",
                    "content-length": "5806",
                    "vary": "accept-encoding",
                    "date": "Tue, 15 Feb 2022 12:08:59 GMT",
                },
                "RetryAttempts": 0,
            },
        }
        REDSHIFT_CLIENT_MOCK.describe_cluster_parameters = MagicMock(
            return_value=parameters
        )
        # Run the rule on a scheduled event and verify the single COMPLIANT
        # evaluation (no annotation on compliant resources).
        response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
        expected_response = [
            build_expected_response(
                "COMPLIANT",
                "redshift-cluster-1",
            )
        ]
        assert_successful_evaluation(self, response, expected_response)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
    """Build a mock AWS Config configuration-change event for lambda_handler.

    :param invoking_event: JSON string stored under the ``invokingEvent`` key.
    :param rule_parameters: optional rule-parameters payload; the key is only
        attached when truthy, mirroring how AWS Config omits it when unset.
    :return: dict shaped like a configuration-change invocation event.
    """
    event = {
        "configRuleName": "myrule",
        "executionRoleArn": "roleArn",
        "eventLeftScope": False,
        "invokingEvent": invoking_event,
        "accountId": "123456789012",
        "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan",
        "resultToken": "token",
    }
    if rule_parameters:
        event["ruleParameters"] = rule_parameters
    return event
def build_lambda_scheduled_event(rule_parameters=None):
    """Build a mock AWS Config scheduled-notification event for lambda_handler.

    :param rule_parameters: optional rule-parameters payload; attached only
        when truthy, as AWS Config omits the key when no parameters are set.
    :return: dict shaped like a scheduled-notification invocation event.
    """
    invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
    event = dict(
        configRuleName="myrule",
        executionRoleArn="roleArn",
        eventLeftScope=False,
        invokingEvent=invoking_event,
        accountId="123456789012",
        configRuleArn="arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan",
        resultToken="token",
    )
    if rule_parameters:
        event["ruleParameters"] = rule_parameters
    return event
def build_expected_response(
    compliance_type,
    compliance_resource_id,
    compliance_resource_type=DEFAULT_RESOURCE_TYPE,
    annotation=None,
):
    """Build the evaluation dict the rule under test is expected to produce.

    :param compliance_type: "COMPLIANT", "NON_COMPLIANT", etc.
    :param compliance_resource_id: identifier of the evaluated resource.
    :param compliance_resource_type: AWS resource type; defaults to the
        module-level DEFAULT_RESOURCE_TYPE.
    :param annotation: optional explanation; the ``Annotation`` key is only
        present when a truthy annotation is supplied.
    :return: expected-evaluation dict.
    """
    expected = {
        "ComplianceType": compliance_type,
        "ComplianceResourceId": compliance_resource_id,
        "ComplianceResourceType": compliance_resource_type,
    }
    if annotation:
        expected["Annotation"] = annotation
    return expected
def assert_successful_evaluation(
    test_class, response, resp_expected, evaluations_count=1
):
    """Assert that a rule's evaluation output matches the expected shape.

    :param test_class: the TestCase instance providing assert helpers.
    :param response: a single evaluation dict, or a list of them.
    :param resp_expected: the matching expected dict / list of dicts.
    :param evaluations_count: expected list length when ``response`` is a list.
    """
    # Fix: ``assertEquals`` is a deprecated alias that was removed in
    # Python 3.12 — use ``assertEqual``, consistent with
    # assert_customer_error_response below.
    if isinstance(response, dict):
        test_class.assertEqual(
            resp_expected["ComplianceResourceType"], response["ComplianceResourceType"]
        )
        test_class.assertEqual(
            resp_expected["ComplianceResourceId"], response["ComplianceResourceId"]
        )
        test_class.assertEqual(
            resp_expected["ComplianceType"], response["ComplianceType"]
        )
        # The timestamp is generated at run time; only assert it is set.
        test_class.assertTrue(response["OrderingTimestamp"])
        if "Annotation" in resp_expected or "Annotation" in response:
            test_class.assertEqual(resp_expected["Annotation"], response["Annotation"])
    elif isinstance(response, list):
        test_class.assertEqual(evaluations_count, len(response))
        for i, response_expected in enumerate(resp_expected):
            test_class.assertEqual(
                response_expected["ComplianceResourceType"],
                response[i]["ComplianceResourceType"],
            )
            test_class.assertEqual(
                response_expected["ComplianceResourceId"],
                response[i]["ComplianceResourceId"],
            )
            test_class.assertEqual(
                response_expected["ComplianceType"], response[i]["ComplianceType"]
            )
            test_class.assertTrue(response[i]["OrderingTimestamp"])
            if "Annotation" in response_expected or "Annotation" in response[i]:
                test_class.assertEqual(
                    response_expected["Annotation"], response[i]["Annotation"]
                )
def assert_customer_error_response(
    test_class, response, customer_error_code=None, customer_error_message=None
):
    """Assert that ``response`` carries a customer-facing error payload.

    Optionally matches the exact error code/message; always requires both
    customer error fields to be present and truthy, and checks any internal
    error fields that happen to be included.
    """
    if customer_error_code:
        test_class.assertEqual(customer_error_code, response["customerErrorCode"])
    if customer_error_message:
        test_class.assertEqual(
            customer_error_message, response["customerErrorMessage"]
        )
    for required_key in ("customerErrorCode", "customerErrorMessage"):
        test_class.assertTrue(response[required_key])
    for optional_key in ("internalErrorMessage", "internalErrorDetails"):
        if optional_key in response:
            test_class.assertTrue(response[optional_key])
def sts_mock():
    """Reset STS_CLIENT_MOCK and stub assume_role with static credentials.

    Used by tests that exercise the rule's assume-role path without
    talking to real STS.
    """
    assume_role_response = {
        "Credentials": {
            "AccessKeyId": "string",
            "SecretAccessKey": "string",
            "SessionToken": "string",
        }
    }
    STS_CLIENT_MOCK.reset_mock(return_value=True)
    STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
    """Common tests for STS assume-role failure handling in the rule."""

    def test_sts_unknown_error(self):
        """An unexpected STS ClientError surfaces as a generic InternalError."""
        RULE.ASSUME_ROLE_MODE = True
        STS_CLIENT_MOCK.assume_role = MagicMock(
            side_effect=ClientError(
                {"Error": {"Code": "unknown-code", "Message": "unknown-message"}},
                "operation",
            )
        )
        response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {})
        assert_customer_error_response(self, response, "InternalError", "InternalError")

    def test_sts_access_denied(self):
        """AccessDenied from STS maps to a customer-facing AccessDenied error."""
        RULE.ASSUME_ROLE_MODE = True
        STS_CLIENT_MOCK.assume_role = MagicMock(
            side_effect=ClientError(
                {"Error": {"Code": "AccessDenied", "Message": "access-denied"}},
                "operation",
            )
        )
        response = RULE.lambda_handler(build_lambda_configurationchange_event("{}"), {})
        assert_customer_error_response(
            self,
            response,
            "AccessDenied",
            "AWS Config does not have permission to assume the IAM role.",
        )
# Allow running this test module directly with ``python <file>``.
if __name__ == "__main__":
    unittest.main()
| 45.267303
| 128
| 0.442242
| 2,281
| 37,934
| 7.199036
| 0.207804
| 0.010962
| 0.044333
| 0.049083
| 0.811705
| 0.779307
| 0.774801
| 0.76932
| 0.76932
| 0.76932
| 0
| 0.040169
| 0.457268
| 37,934
| 837
| 129
| 45.321386
| 0.757432
| 0.021906
| 0
| 0.639429
| 0
| 0.007782
| 0.342689
| 0.088663
| 0
| 0
| 0
| 0
| 0.031128
| 1
| 0.015564
| false
| 0
| 0.009079
| 0
| 0.037613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cad462b46d744a7bb97f4a59e074561b900bafaa
| 169
|
py
|
Python
|
app/app/calc.py
|
josekang/recipe-app-api
|
059e5b048d09943ccb11442d584d83a5f4e036df
|
[
"MIT"
] | null | null | null |
app/app/calc.py
|
josekang/recipe-app-api
|
059e5b048d09943ccb11442d584d83a5f4e036df
|
[
"MIT"
] | null | null | null |
app/app/calc.py
|
josekang/recipe-app-api
|
059e5b048d09943ccb11442d584d83a5f4e036df
|
[
"MIT"
] | null | null | null |
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
def subtract(x, y):
    """Subtract *y* from *x* and return the difference.

    Fixes the original docstring, which read "Subtract to numbers and
    return their results" — a typo and an inaccurate description.
    """
    # Order matters: this computes x - y, not y - x.
    return x - y
| 24.142857
| 55
| 0.621302
| 28
| 169
| 3.75
| 0.464286
| 0.07619
| 0.304762
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.248521
| 169
| 7
| 56
| 24.142857
| 0.826772
| 0.47929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
1b6f15802051aabf0f363bfe4453b067eb32ef94
| 2,294
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/A_26_03_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_26_03_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_26_03_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
# SMMPMBEC position-specific scoring matrix for HLA-A*26:03, 9-mer peptides.
# Keys 0-8 index peptide positions; each maps amino-acid one-letter codes to
# per-position scores. The -1 entry holds the model offset ('con').
A_26_03_9 = {0: {'A': 0.049, 'C': -0.208, 'E': -0.569, 'D': -0.387, 'G': 0.127, 'F': -0.129, 'I': 0.311, 'H': -0.25, 'K': 0.62, 'M': 0.204, 'L': 0.206, 'N': -0.038, 'Q': 0.063, 'P': 0.176, 'S': -0.032, 'R': 0.67, 'T': -0.177, 'W': -0.351, 'V': 0.02, 'Y': -0.305}, 1: {'A': -0.372, 'C': 0.11, 'E': 0.235, 'D': 0.145, 'G': 0.022, 'F': 0.207, 'I': -0.5, 'H': 0.442, 'K': 0.22, 'M': 0.223, 'L': -0.13, 'N': 0.135, 'Q': 0.136, 'P': -0.076, 'S': -0.174, 'R': 0.556, 'T': -0.688, 'W': 0.115, 'V': -0.854, 'Y': 0.247}, 2: {'A': -0.071, 'C': -0.005, 'E': 0.183, 'D': 0.2, 'G': 0.18, 'F': 0.111, 'I': -0.484, 'H': 0.036, 'K': 0.088, 'M': 0.015, 'L': 0.088, 'N': -0.021, 'Q': 0.153, 'P': 0.019, 'S': 0.027, 'R': 0.069, 'T': -0.037, 'W': -0.137, 'V': -0.281, 'Y': -0.132}, 3: {'A': -0.189, 'C': -0.129, 'E': -0.233, 'D': -0.091, 'G': -0.141, 'F': 0.189, 'I': 0.405, 'H': 0.025, 'K': 0.202, 'M': 0.083, 'L': 0.19, 'N': -0.053, 'Q': -0.179, 'P': 0.01, 'S': -0.142, 'R': -0.008, 'T': -0.082, 'W': 0.021, 'V': 0.149, 'Y': -0.028}, 4: {'A': -0.037, 'C': -0.047, 'E': 0.011, 'D': -0.168, 'G': -0.015, 'F': -0.108, 'I': 0.229, 'H': -0.119, 'K': 0.005, 'M': 0.16, 'L': 0.152, 'N': 0.083, 'Q': 0.118, 'P': -0.077, 'S': 0.124, 'R': -0.115, 'T': 0.004, 'W': -0.074, 'V': 0.169, 'Y': -0.296}, 5: {'A': 0.052, 'C': 0.035, 'E': 0.125, 'D': -0.028, 'G': -0.097, 'F': -0.337, 'I': -0.016, 'H': 0.104, 'K': 0.234, 'M': -0.06, 'L': -0.077, 'N': -0.015, 'Q': 0.178, 'P': 0.184, 'S': 0.116, 'R': 0.163, 'T': -0.06, 'W': -0.266, 'V': -0.026, 'Y': -0.209}, 6: {'A': -0.232, 'C': 0.01, 'E': 0.078, 'D': 0.113, 'G': -0.313, 'F': -0.064, 'I': -0.08, 'H': 0.141, 'K': 0.096, 'M': 0.021, 'L': -0.005, 'N': 0.01, 'Q': 0.045, 'P': -0.015, 'S': -0.155, 'R': -0.018, 'T': 0.017, 'W': 0.19, 'V': 0.097, 'Y': 0.064}, 7: {'A': -0.238, 'C': -0.005, 'E': 0.152, 'D': 0.201, 'G': -0.377, 'F': -0.115, 'I': 0.171, 'H': 0.008, 'K': 0.084, 'M': 0.064, 'L': 0.015, 'N': -0.251, 'Q': 0.039, 'P': -0.321, 'S': -0.091, 'R': 0.3, 'T': 0.027, 'W': 0.097, 
'V': 0.082, 'Y': 0.157}, 8: {'A': 0.25, 'C': 0.076, 'E': 0.233, 'D': 0.172, 'G': 0.166, 'F': -0.194, 'I': 0.014, 'H': -0.178, 'K': 0.203, 'M': -0.759, 'L': -0.11, 'N': -0.044, 'Q': 0.075, 'P': 0.459, 'S': 0.107, 'R': -0.419, 'T': 0.289, 'W': -0.154, 'V': 0.201, 'Y': -0.389}, -1: {'con': 4.43222}}
| 2,294
| 2,294
| 0.393636
| 557
| 2,294
| 1.615799
| 0.310592
| 0.02
| 0.011111
| 0.013333
| 0.031111
| 0
| 0
| 0
| 0
| 0
| 0
| 0.372723
| 0.162598
| 2,294
| 1
| 2,294
| 2,294
| 0.095783
| 0
| 0
| 0
| 0
| 0
| 0.079739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1b9f6f5d303b9ad2ece7d73d6e474d7eef30db56
| 23,567
|
py
|
Python
|
tests/test_atokaconn.py
|
openpolis/atokaconn
|
071aebfc408e4f661a9cd98ea91daf9e01b87bad
|
[
"MIT"
] | 1
|
2021-08-13T10:36:34.000Z
|
2021-08-13T10:36:34.000Z
|
tests/test_atokaconn.py
|
openpolis/atokaconn
|
071aebfc408e4f661a9cd98ea91daf9e01b87bad
|
[
"MIT"
] | null | null | null |
tests/test_atokaconn.py
|
openpolis/atokaconn
|
071aebfc408e4f661a9cd98ea91daf9e01b87bad
|
[
"MIT"
] | null | null | null |
import logging
from requests.exceptions import Timeout
from atokaconn import __version__, AtokaObjectDoesNotExist, AtokaResponseError, AtokaMultipleObjectsReturned, \
AtokaException, AtokaTimeoutException
from atokaconn import AtokaConn
from faker import Factory
from unittest import TestCase
from unittest.mock import patch
from tests.factories import AreaFactory, PersonFactory
from tests.mocked_responses import get_void_response, get_person_ok, get_person_multiple, \
get_companies_economics, get_companies
def test_version():
    """Pin the package version; bump this assertion on every release."""
    assert __version__ == '0.1.6'
faker = Factory.create("it_IT")  # a factory to create fake data (names, SSNs, dates) for tests
# Module-level logger, passed into AtokaConn in the logger-enabled tests.
logger = logging.getLogger(__name__)
class MockResponse:
    """Stand-in for a ``requests`` response, exposing only what tests need.

    Carries a canned JSON payload plus status metadata; ``json()`` mimics
    ``requests.Response.json``.
    """

    def __init__(self, json_data, status_code, ok, reason=None):
        self.json_data, self.status_code = json_data, status_code
        self.ok, self.reason = ok, reason

    def json(self):
        """Return the canned payload."""
        return self.json_data
class MockBrokenJsonResponse:
    """Mock requests-style response whose ``json()`` always raises.

    Used to exercise error paths where the response body cannot be parsed.
    """

    def __init__(self, json_data, status_code, ok, reason=None):
        self.json_data, self.status_code = json_data, status_code
        self.ok, self.reason = ok, reason

    def json(self):
        """Simulate an unparseable body."""
        raise Exception("Json is broken")
class ConnectionsTestCase(TestCase):
    """Base TestCase that patches ``requests.Session`` get/post for subclasses.

    The patchers are started once per class and stopped in ``tearDownClass``,
    so every test in a subclass sees ``cls.mock_get`` / ``cls.mock_post``.
    """

    @classmethod
    def setUpClass(cls):
        super(ConnectionsTestCase, cls).setUpClass()
        # Plain attribute assignment replaces the original's needless
        # setattr/getattr indirection — behavior is identical.
        cls.mock_get_patcher = patch('requests.Session.get')
        cls.mock_post_patcher = patch('requests.Session.post')
        cls.mock_get = cls.mock_get_patcher.start()
        cls.mock_post = cls.mock_post_patcher.start()

    @classmethod
    def tearDownClass(cls):
        cls.mock_get_patcher.stop()
        cls.mock_post_patcher.stop()
        super(ConnectionsTestCase, cls).tearDownClass()
class ATOKAConnTest(ConnectionsTestCase):
    def test_no_key_failure(self):
        """Constructing AtokaConn without an API key raises AtokaException."""
        with self.assertRaises(AtokaException):
            _ = AtokaConn()
    def test_get_person_from_tax_id_ok(self):
        """Test get_person_from_tax_id returns the correct result.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_person_ok(tax_id=tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        atoka_p = atoka_conn.get_person_from_tax_id(tax_id)
        self.assertEqual(atoka_p['base']['taxId'], tax_id)
    def test_get_person_from_tax_id_broken_json_failure(self):
        """Test get_person_from_tax_id fails when the response JSON is broken.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id; json() on this mock always raises
        self.mock_get.return_value = MockBrokenJsonResponse(
            get_person_ok(tax_id=tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(Exception):
            _ = atoka_conn.get_person_from_tax_id(tax_id)
    def test_get_person_from_tax_id_timeout_failure(self):
        """Test get_person_from_tax_id raises AtokaTimeoutException on timeout.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_person_ok(tax_id=tax_id),
            status_code=200,
            ok=True
        )
        self.mock_get.side_effect = Timeout()
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaTimeoutException):
            _ = atoka_conn.get_person_from_tax_id(tax_id)
        # clear the side effect so later tests on the shared mock are unaffected
        self.mock_get.side_effect = None
    def test_search_person_ok(self):
        """Test search_person returns the correct result.

        (Docstring fixed: it previously named get_person_from_tax_id.)
        """
        parent_area = AreaFactory(name='Lazio')
        area = AreaFactory(name='Roma', parent=parent_area)
        person = PersonFactory.create(
            family_name=faker.last_name_male(),
            given_name=faker.first_name_male(),
            birth_date=faker.date(pattern="%Y-%m-%d", end_datetime="-47y"),
            birth_location_area=area
        )
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_person_ok(tax_id=tax_id, search_params={
                "family_name": person.family_name,
                "given_name": person.given_name,
                "birth_date": person.birth_date,
            }),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        atoka_p = atoka_conn.search_person(person)
        self.assertEqual(atoka_p['name'], person.name)
        self.assertEqual(atoka_p['base']['taxId'], tax_id)
    def test_search_person_timeout_failure(self):
        """Test timeout during search_person invocations.
        """
        parent_area = AreaFactory(name='Lazio')
        area = AreaFactory(name='Roma', parent=parent_area)
        person = PersonFactory.create(
            family_name=faker.last_name_male(),
            given_name=faker.first_name_male(),
            birth_date=faker.date(pattern="%Y-%m-%d", end_datetime="-47y"),
            birth_location_area=area
        )
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_person_ok(tax_id=tax_id, search_params={
                "family_name": person.family_name,
                "given_name": person.given_name,
                "birth_date": person.birth_date,
            }),
            status_code=200,
            ok=True
        )
        self.mock_get.side_effect = Timeout()
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaTimeoutException):
            _ = atoka_conn.search_person(person)
        # clear the side effect so later tests on the shared mock are unaffected
        self.mock_get.side_effect = None
    def test_get_person_from_tax_id_fails_doesnotexist(self):
        """Test get_person_from_tax_id raises on an empty (void) result.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_void_response(),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaObjectDoesNotExist):
            atoka_conn.get_person_from_tax_id(tax_id)
    def test_get_person_from_tax_id_fails_notok(self):
        """Test get_person_from_tax_id raises on a non-OK HTTP response.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_void_response(),
            status_code=404,
            ok=False,
            reason="Requested URI was not found here",
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaResponseError):
            atoka_conn.get_person_from_tax_id(tax_id)
    def test_get_person_from_tax_id_fails_multiple(self):
        """Test get_person_from_tax_id raises when multiple people match.

        (Docstring fixed: it previously said "returns void result".)
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_person_multiple(),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaMultipleObjectsReturned):
            atoka_conn.get_person_from_tax_id(tax_id)
    def test_search_person_fails_doesnotexist(self):
        """Test search_person raises on a not-found (void) result.
        """
        parent_area = AreaFactory(name='Lazio')
        area = AreaFactory(name='Roma', parent=parent_area)
        person = PersonFactory.create(
            family_name=faker.last_name_male(),
            given_name=faker.first_name_male(),
            birth_date=faker.date(pattern="%Y-%m-%d", end_datetime="-47y"),
            birth_location_area=area
        )
        person.tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_void_response(),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaObjectDoesNotExist):
            atoka_conn.search_person(person)
    def test_search_person_fails_notok(self):
        """Test search_person raises on a non-OK HTTP response.
        """
        parent_area = AreaFactory(name='Lazio')
        area = AreaFactory(name='Roma', parent=parent_area)
        person = PersonFactory.create(
            family_name=faker.last_name_male(),
            given_name=faker.first_name_male(),
            birth_date=faker.date(pattern="%Y-%m-%d", end_datetime="-47y"),
            birth_location_area=area
        )
        person.tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_void_response(),
            status_code=404,
            ok=False,
            reason="Requested URI was not found here",
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaResponseError):
            atoka_conn.search_person(person)
    def test_search_person_fails_multiple(self):
        """Test search_person raises when multiple results are returned.
        """
        parent_area = AreaFactory(name='Lazio')
        area = AreaFactory(name='Roma', parent=parent_area)
        person = PersonFactory.create(
            family_name=faker.last_name_male(),
            given_name=faker.first_name_male(),
            birth_date=faker.date(pattern="%Y-%m-%d", end_datetime="-47y"),
            birth_location_area=area
        )
        person.tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_get.return_value = MockResponse(
            get_person_multiple(),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaMultipleObjectsReturned):
            atoka_conn.search_person(person)
    def test_get_companies_from_tax_id_ok(self):
        """Test get_companies_from_tax_ids returns one result with shares.

        (Docstring fixed: it previously named a non-existent
        "getcompany_from_tax_id".)
        """
        tax_id = "80002270660"
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_companies(tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing', logger=logger)
        atoka_p = atoka_conn.get_companies_from_tax_ids(tax_id.split(","), packages='base,shares', active="true")
        self.assertEqual(len(atoka_p), 1)
        self.assertEqual(atoka_p[0]['base']['taxId'], tax_id)
        self.assertEqual('shares' in atoka_p[0], True)
    def test_get_companies_from_tax_id_ok_extend_response(self):
        """Test get_companies_from_tax_id returns more than 50 results.
        """
        tax_id = "01234567890"
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_companies(tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing', logger=logger)
        atoka_p = atoka_conn.get_companies_from_tax_ids(tax_id.split(","), packages='base,shares', active="true")
        # NOTE(review): relies on get_companies(tax_id) producing >= 50 items
        # for this tax id — confirm against tests.mocked_responses.
        self.assertGreaterEqual(len(atoka_p), 50)
    def test_get_companies_from_tax_id_multiple_results(self):
        """Test get_companies_from_tax_ids returns more than one result.

        (Docstring fixed: it previously named a non-existent
        "getcompany_from_tax_id".)
        """
        tax_id = "02438750586"
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_companies(tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        atoka_p = atoka_conn.get_companies_from_tax_ids(tax_id.split(","), packages='base,shares', active="true")
        self.assertEqual(len(atoka_p), 2)
        self.assertEqual(atoka_p[0]['base']['taxId'], tax_id)
        self.assertEqual(atoka_p[1]['base']['taxId'], tax_id)
        self.assertEqual('shares' in atoka_p[0], True)
        self.assertEqual('shares' in atoka_p[1], False)
    def test_get_companies_from_tax_id_returns_empty_if_missing(self):
        """Test get_companies_from_tax_ids returns [] on a void result.

        (Docstring fixed: it previously named get_person_from_tax_id.)
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_void_response(),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        items = atoka_conn.get_companies_from_tax_ids(tax_id, packages='base,shares', active="true")
        self.assertEqual(items, [])
    def test_get_companies_from_tax_id_empty_if_post_response_notok(self):
        """Test get_companies_from_tax_ids returns [] when the response is not ok.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_void_response(),
            status_code=404,
            ok=False,
            reason="Requested URI was not found here",
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        items = atoka_conn.get_companies_from_tax_ids(tax_id, packages='base,shares', active="true")
        self.assertEqual(items, [])
    def test_get_companies_from_tax_id_empty_if_post_request_timeouts(self):
        """Test get_companies_from_tax_ids returns [] when the POST request times out.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_void_response(),
            status_code=404,
            ok=False,
            reason="Requested URI was not found here",
        )
        # do the test
        self.mock_post.side_effect = Timeout()
        atoka_conn = AtokaConn(key='testing')
        items = atoka_conn.get_companies_from_tax_ids(tax_id, packages='base,shares', active="true")
        self.assertEqual(items, [])
        # clear the side effect so later tests on the shared mock are unaffected
        self.mock_post.side_effect = None
    def test_get_companies_from_tax_id_empty_if_post_request_response_void(self):
        """Test get_companies_from_tax_ids returns [] when the POST request yields no response.
        """
        tax_id = faker.ssn()
        # mock atoka request using tax_id: no response object at all
        self.mock_post.return_value = None
        # do the test
        atoka_conn = AtokaConn(key='testing')
        items = atoka_conn.get_companies_from_tax_ids(tax_id, packages='base,shares', active="true")
        self.assertEqual(items, [])
    def test_get_items_from_ids_fails_wrong_ids_field_name(self):
        """Test get_items_from_ids fails when an unknown ids_field_name is passed.
        """
        tax_id = "02438750586"
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_companies(tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaException):
            _ = atoka_conn.get_items_from_ids(
                tax_id.split(","),
                item_type='companies',
                ids_field_name='cfs',
                batch_size=50,
                packages='base,shares', active="true"
            )
    def test_get_items_from_ids_fails_wrong_item_type(self):
        """Test get_items_from_ids fails when an unknown item_type is passed.
        """
        tax_id = "02438750586"
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_companies(tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaException):
            _ = atoka_conn.get_items_from_ids(
                tax_id.split(","),
                item_type='smurfs',
                ids_field_name='taxIds',
                batch_size=50,
                packages='base,shares', active="true"
            )
    def test_get_items_from_ids_fails_wrong_batch_size(self):
        """Test get_items_from_ids fails when a batch_size out of range is passed.
        """
        tax_id = "02438750586"
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_companies(tax_id),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        with self.assertRaises(AtokaException):
            _ = atoka_conn.get_items_from_ids(
                tax_id.split(","),
                item_type='companies',
                ids_field_name='taxIds',
                batch_size=100,
                packages='base,shares', active="true"
            )
    def test_get_items_from_ids_returns_empty_list_if_empty_ids(self):
        """Test get_items_from_ids returns an empty items list when an empty ids list is passed.
        """
        # mock atoka request using tax_id
        self.mock_post.return_value = MockResponse(
            get_void_response(),
            status_code=200,
            ok=True
        )
        # do the test
        atoka_conn = AtokaConn(key='testing')
        ids = atoka_conn.get_items_from_ids(
            [],
            item_type='companies',
            ids_field_name='taxIds',
            batch_size=50,
            packages='base,shares', active="true"
        )
        self.assertEqual(len(ids), 0)
def test_get_items_from_ids_ok_with_chunks_and_logger(self):
"""Test get_items_from_ids is ok when requests are grouped in chunks (batch_size=1)
"""
tax_id = "02438750586"
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_companies(tax_id),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing', max_batch_file_lines=1, logger=logger)
items = atoka_conn.get_items_from_ids(
["02438750586", "01234567890"],
item_type='companies',
ids_field_name='taxIds',
batch_size=1,
packages='base,shares', active="true"
)
self.assertEqual(len(items), 2)
self.assertEqual(items[0]['base']['taxId'], tax_id)
def test_get_companies_economics_ok(self):
"""Test get_companies_from_tax_ids with economics details has the correct information
"""
tax_ids = ['02241890223', '09988761004']
# mock atoka request using tax_id
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_companies_economics(),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing')
atoka_resp = atoka_conn.get_companies_from_tax_ids(tax_ids, packages='base,economics', active="true")
self.assertEqual(len(atoka_resp), 2)
c = atoka_resp[0]
self.assertEqual(c['base']['taxId'], tax_ids[0])
self.assertEqual('economics' in c, True)
ce = c['economics']
self.assertEqual('balanceSheets' in ce, True)
self.assertEqual(len(ce['balanceSheets']) > 1, True)
self.assertEqual('employees' in ce, True)
self.assertEqual(len(ce['employees']) > 1, True)
def test_get_companies_from_atoka_ids_ok(self):
"""Test test get_companies_from_atoka_ids returns one result
The test only needs to test the correct wrapping of get_items_from_ids,
so it mocks the usual response, not a correct one
"""
tax_id = "80002270660"
# mock atoka request using tax_id
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_companies(tax_id),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing')
atoka_p = atoka_conn.get_companies_from_atoka_ids(tax_id.split(","), packages='base,shares', active="true")
self.assertEqual(len(atoka_p), 1)
def test_get_people_from_tax_ids_ok(self):
"""Test test get_people_from_tax_ids returns one result
The test only needs to test the correct wrapping of get_items_from_ids,
so it mocks the usual response, not a correct one
"""
tax_id = "80002270660"
# mock atoka request using tax_id
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_companies(tax_id),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing')
atoka_p = atoka_conn.get_people_from_tax_ids(tax_id.split(","), packages='base,shares', active="true")
self.assertEqual(len(atoka_p), 1)
def test_get_people_from_atoka_ids_ok(self):
"""Test test get_people_from_atoka_ids returns one result
The test only needs to test the correct wrapping of get_items_from_ids,
so it mocks the usual response, not a correct one
"""
tax_id = "80002270660"
# mock atoka request using tax_id
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_companies(tax_id),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing')
atoka_p = atoka_conn.get_people_from_atoka_ids(tax_id.split(","), packages='base,shares', active="true")
self.assertEqual(len(atoka_p), 1)
def test_get_roles_from_atoka_ids_ok(self):
"""Test test get_roles_from_atoka_ids returns one result
The test only needs to test the correct wrapping of get_items_from_ids,
so it mocks the usual response, not a correct one
"""
tax_id = "80002270660"
# mock atoka request using tax_id
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_companies(tax_id),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing')
atoka_p = atoka_conn.get_roles_from_atoka_ids(tax_id.split(","), packages='base,shares', active="true")
self.assertEqual(len(atoka_p), 1)
def test_get_roles_from_atoka_ids_handles_doesnotexist(self):
"""Test get_roles_from_atoka_ids handles the AtokaObjectDoesNotExist exception and returns empty list
"""
# mock atoka request using tax_id
self.mock_post.return_value = MockResponse(
get_void_response(),
status_code=200,
ok=True
)
# do the test
atoka_conn = AtokaConn(key='testing')
items = atoka_conn.get_roles_from_atoka_ids([faker.ssn(), faker.ssn()])
self.assertEqual(len(items), 0)
| 33.909353
| 115
| 0.622608
| 2,908
| 23,567
| 4.730055
| 0.073246
| 0.051618
| 0.041876
| 0.054962
| 0.838386
| 0.817884
| 0.791567
| 0.771065
| 0.736459
| 0.727154
| 0
| 0.01777
| 0.286036
| 23,567
| 694
| 116
| 33.958213
| 0.799715
| 0.175245
| 0
| 0.669725
| 0
| 0
| 0.068581
| 0.001099
| 0
| 0
| 0
| 0
| 0.103211
| 1
| 0.084862
| false
| 0
| 0.020642
| 0.002294
| 0.116972
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1baf2fcaf7159ab076b1c4d2870222d7a2a80ece
| 245
|
py
|
Python
|
test/runtime/frontend_test/tensorflow_test/util.py
|
gunpowder78/webdnn
|
c659ea49007f91d178ce422a1eebe289516a71ee
|
[
"MIT"
] | 1
|
2018-07-26T13:52:21.000Z
|
2018-07-26T13:52:21.000Z
|
test/runtime/frontend_test/tensorflow_test/util.py
|
gunpowder78/webdnn
|
c659ea49007f91d178ce422a1eebe289516a71ee
|
[
"MIT"
] | null | null | null |
test/runtime/frontend_test/tensorflow_test/util.py
|
gunpowder78/webdnn
|
c659ea49007f91d178ce422a1eebe289516a71ee
|
[
"MIT"
] | null | null | null |
import logging
logging.getLogger("tensorflow").setLevel(logging.WARNING)
# noinspection PyUnresolvedReferences
import tensorflow as tf
# noinspection PyUnresolvedReferences
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
| 24.5
| 68
| 0.861224
| 23
| 245
| 9.173913
| 0.652174
| 0.322275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 245
| 9
| 69
| 27.222222
| 0.937778
| 0.289796
| 0
| 0
| 0
| 0
| 0.05848
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1bc0d0c400f5e2c75cba06c90e875d42e90539d7
| 163
|
py
|
Python
|
apps/responsible_disc/admin.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | null | null | null |
apps/responsible_disc/admin.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 23
|
2021-12-01T10:00:38.000Z
|
2021-12-11T11:43:13.000Z
|
apps/responsible_disc/admin.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from apps.responsible_disc import models
admin.site.register(models.Vulnerability)
admin.site.register(models.VulnerabilityLog)
| 23.285714
| 44
| 0.852761
| 21
| 163
| 6.571429
| 0.619048
| 0.130435
| 0.246377
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07362
| 163
| 6
| 45
| 27.166667
| 0.913907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1bc4003a82673af42ffaa542b30d9f8cf9ed719d
| 196
|
py
|
Python
|
braintree/exceptions/too_many_requests_error.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 182
|
2015-01-09T05:26:46.000Z
|
2022-03-16T14:10:06.000Z
|
braintree/exceptions/too_many_requests_error.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 95
|
2015-02-24T23:29:56.000Z
|
2022-03-13T03:27:58.000Z
|
braintree/exceptions/too_many_requests_error.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 93
|
2015-02-19T17:59:06.000Z
|
2022-03-19T17:01:25.000Z
|
from braintree.exceptions.braintree_error import BraintreeError
class TooManyRequestsError(BraintreeError):
"""
Raised when the rate limit request threshold is exceeded.
"""
pass
| 24.5
| 63
| 0.760204
| 20
| 196
| 7.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 196
| 7
| 64
| 28
| 0.919255
| 0.290816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1bce3cb96247b723262e79c696c42ce97633e06f
| 37
|
py
|
Python
|
specter/modules/SpecterTools/packagermodule.py
|
iplo/Specter
|
e1cfe48c9656efdddb3abbd2ea7d0bfa38d1380e
|
[
"MIT"
] | 2
|
2017-04-09T19:40:03.000Z
|
2017-04-21T16:49:57.000Z
|
specter/modules/SpecterTools/packagermodule.py
|
iplo/Specter
|
e1cfe48c9656efdddb3abbd2ea7d0bfa38d1380e
|
[
"MIT"
] | 1
|
2017-04-09T19:45:14.000Z
|
2017-04-22T12:37:37.000Z
|
specter/modules/SpecterTools/packagermodule.py
|
iplo/Specter
|
e1cfe48c9656efdddb3abbd2ea7d0bfa38d1380e
|
[
"MIT"
] | null | null | null |
def printname():
print("Packager")
| 12.333333
| 18
| 0.675676
| 4
| 37
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 3
| 19
| 12.333333
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
59ef67df6b2617a20373d356ccca8f0058036eac
| 189
|
py
|
Python
|
python/test_rel_import/dir3/module3b.py
|
galdebert/sandbox
|
1489ed6dfe0b7e44fbc4dc71942bf1d5377a9de9
|
[
"Apache-2.0"
] | 1
|
2015-09-15T21:41:57.000Z
|
2015-09-15T21:41:57.000Z
|
python/test_rel_import/dir3/module3b.py
|
autodefrost/sandbox
|
1489ed6dfe0b7e44fbc4dc71942bf1d5377a9de9
|
[
"Apache-2.0"
] | null | null | null |
python/test_rel_import/dir3/module3b.py
|
autodefrost/sandbox
|
1489ed6dfe0b7e44fbc4dc71942bf1d5377a9de9
|
[
"Apache-2.0"
] | null | null | null |
#from . import module3a # ok but pylint makes an error: Attempted relative import beyond top-level package (relative-beyond-top-level)
def func3b():
print('3b')
#module3a.func3a()
| 31.5
| 134
| 0.724868
| 26
| 189
| 5.269231
| 0.769231
| 0.131387
| 0.20438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031646
| 0.164021
| 189
| 5
| 135
| 37.8
| 0.835443
| 0.78836
| 0
| 0
| 0
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
94036ce97154cb2a51f593a1ed2d221389e7c264
| 282
|
py
|
Python
|
cupy/cuda/memory_hooks/__init__.py
|
svlandeg/cupy
|
484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363
|
[
"MIT"
] | 6,180
|
2016-11-01T14:22:30.000Z
|
2022-03-31T08:39:20.000Z
|
cupy/cuda/memory_hooks/__init__.py
|
svlandeg/cupy
|
484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363
|
[
"MIT"
] | 6,281
|
2016-12-22T07:42:31.000Z
|
2022-03-31T19:57:02.000Z
|
cupy/cuda/memory_hooks/__init__.py
|
svlandeg/cupy
|
484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363
|
[
"MIT"
] | 829
|
2017-02-23T05:46:12.000Z
|
2022-03-27T17:40:03.000Z
|
from cupy.cuda.memory_hooks import debug_print # NOQA
from cupy.cuda.memory_hooks import line_profile # NOQA
# import class and function
from cupy.cuda.memory_hooks.debug_print import DebugPrintHook # NOQA
from cupy.cuda.memory_hooks.line_profile import LineProfileHook # NOQA
| 40.285714
| 71
| 0.826241
| 42
| 282
| 5.357143
| 0.380952
| 0.142222
| 0.213333
| 0.32
| 0.497778
| 0.395556
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120567
| 282
| 6
| 72
| 47
| 0.907258
| 0.159574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
9405276746939238041e75e50590bbaf5faba208
| 20
|
py
|
Python
|
python/testData/editing/tripleQuotesInsideTripleQuotedStringLiteral.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/editing/tripleQuotesInsideTripleQuotedStringLiteral.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/editing/tripleQuotesInsideTripleQuotedStringLiteral.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
s = '''
'\\''''
'''
| 5
| 7
| 0.05
| 1
| 20
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 20
| 3
| 8
| 6.666667
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
941518d2b2d421b4b85957705d0030fc8262d4b3
| 20,870
|
py
|
Python
|
src/geocurrency/rates/tests.py
|
comradekingu/geocurrency
|
00131739555438b6926caea7c5b237bb23b9848d
|
[
"MIT"
] | null | null | null |
src/geocurrency/rates/tests.py
|
comradekingu/geocurrency
|
00131739555438b6926caea7c5b237bb23b9848d
|
[
"MIT"
] | null | null | null |
src/geocurrency/rates/tests.py
|
comradekingu/geocurrency
|
00131739555438b6926caea7c5b237bb23b9848d
|
[
"MIT"
] | null | null | null |
import datetime
import uuid
from datetime import date
from django.contrib.auth.models import User
from django.core.cache import cache
from django.conf import settings
from django.test import TestCase
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from .models import Rate, RateConverter
from .serializers import RateAmountSerializer
class RateTest(TestCase):
base_currency = 'EUR'
currency = 'USD'
def setUp(self) -> None:
settings.RATE_SERVICE = 'forex'
self.user, created = User.objects.get_or_create(
username='test',
email='test@ipd.com'
)
self.user.set_password('test')
self.user.save()
Token.objects.create(user=self.user)
self.key = uuid.uuid4()
self.amounts = [
{
'currency': 'USD',
'amount': 100,
'date_obj': '2020-07-22'
},
{
'currency': 'AUD',
'amount': 50,
'date_obj': '2020-07-23'
},
]
self.trash_amounts = [
{
'currency': 'USD',
'amount': 'toto',
'date_obj': '01/01/2020'
},
{
'currency': 'LOL',
'date_obj': '2020-07-23'
},
{
'date_obj': '2020-07-23'
},
{
'currency': 'JPY',
},
]
def test_fetch_rates(self):
rates = Rate.objects.fetch_rates(base_currency=self.base_currency)
self.assertIsNotNone(rates)
def test_fetch_rates_with_date(self):
rates = Rate.objects.fetch_rates(base_currency=self.base_currency, date_obj=date(year=2020, month=6, day=1))
self.assertIsNotNone(rates)
def test_fetch_rate(self):
rate = Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
self.assertIsNotNone(rate)
def test_fetch_rate_with_date(self):
rate = Rate.objects.fetch_rates(
base_currency=self.base_currency, currency=self.currency,
date_obj=date(year=2020, month=6, day=1)
)
self.assertIsNotNone(rate)
def test_find_direct_rate(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
rate = Rate.objects.find_direct_rate(base_currency=self.base_currency, currency=self.currency)
self.assertIsNotNone(rate, msg="no direct rate found")
def test_find_pivot_rate(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
Rate.objects.fetch_rates(base_currency=self.currency, currency='AUD')
rate = Rate.objects.find_pivot_rate(base_currency=self.base_currency, currency='AUD')
self.assertIsNotNone(rate, msg="no pivot rate found")
def test_rate_at_date(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
Rate.objects.fetch_rates(base_currency=self.currency, currency='AUD')
rate = Rate.objects.find_direct_rate(base_currency=self.base_currency, currency=self.currency)
self.assertIsNotNone(rate.pk, msg="no direct rate found")
rate = Rate.objects.find_pivot_rate(base_currency=self.base_currency, currency='AUD')
self.assertIsNotNone(rate.pk, msg="no pivot rate found")
def test_post_rate(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(response.status_code, 201)
response = client.get(
'/rates/',
format='json')
self.assertEqual(len(response.json()), 2)
def test_post_rate_without_key(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.post(
'/rates/',
data={
'key': '',
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_list_request(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
client = APIClient()
response = client.get(
'/rates/',
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_stats_request(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
client = APIClient()
response = client.get(
'/rates/stats/',
data={},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_connected_list_request(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
self.assertIn('id', post_response.json())
response = client.get(
'/rates/',
format='json')
anon_client = APIClient()
anon_response = anon_client.get(
'/rates/',
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(anon_response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), len(anon_response.json()) + 2)
def test_list_user_request(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
self.assertIn('id', post_response.json())
response = client.get(
'/rates/',
format='json')
anon_client = APIClient()
anon_response = anon_client.get(
'/rates/',
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(anon_response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), len(anon_response.json()) + 2)
def test_list_with_key_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
response = client.get(
'/rates/',
data={'key': self.key},
format='json')
self.assertEqual(response.json()[0]['key'], self.key)
def test_list_with_key_or_null_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
response = client.get(
'/rates/',
data={'key_or_null': self.key},
format='json')
self.assertEqual(len(response.json()), 3)
def test_list_with_key_isnull_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
response = client.get(
'/rates/',
data={'key_isnull': self.key},
format='json')
self.assertEqual(response.json()[0]['key'], None)
def test_list_with_key_and_currency_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
response = client.get(
'/rates/',
data={'key': self.key, 'currency': 'USD'},
format='json')
self.assertEqual(len(response.json()), 1)
def test_stats_with_key_and_currency_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-02',
'value': 1.20,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
response = client.get(
'/rates/stats/',
data={'key': self.key, 'currency': 'EUR'},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_with_key_and_base_currency_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'value_date': '2020-01-01',
'value': 1.10,
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
response = client.get(
'/rates/',
data={'key': self.key, 'base_currency': 'USD'},
format='json')
self.assertEqual(len(response.json()), 1)
def test_retrieve_request(self):
client = APIClient()
response = client.get(
'/rates/',
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_bulk_create_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/bulk/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'from_date': '2020-01-01',
'to_date': '2020-09-01',
'value': 1.10
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(post_response.json()),
(datetime.date(year=2020, month=9, day=1) - datetime.date(year=2020, month=1, day=1)).days + 1)
def test_latest_currency_request(self):
client = APIClient()
token = Token.objects.get(user__username=self.user.username)
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
post_response = client.post(
'/rates/bulk/',
data={
'key': self.key,
'currency': 'USD',
'base_currency': 'EUR',
'from_date': '2020-01-01',
'to_date': '2020-09-01',
'value': 1.10
}
)
self.assertEqual(post_response.status_code, status.HTTP_201_CREATED)
response = client.get(
'/rates/?currency_latest_values=USD'
)
self.assertEqual(len(response.json()), 1)
response = client.get(
'/rates/?base_currency_latest_values=EUR'
)
self.assertEqual(len(response.json()), 1)
class RateConverterTest(TestCase):
base_currency = 'EUR'
currency = 'USD'
def setUp(self) -> None:
self.user, created = User.objects.get_or_create(
username='test',
email='test@ipd.com'
)
self.converter = RateConverter(user=self.user, base_currency='EUR')
self.amounts = [
{
'currency': 'USD',
'amount': 100,
'date_obj': '2020-07-22'
},
{
'currency': 'AUD',
'amount': 50,
'date_obj': '2020-07-23'
},
]
self.trash_amounts = [
{
'currency': 'USD',
'amount': 'toto',
'date_obj': '01/01/2020'
},
{
'currency': 'LOL',
'date_obj': '2020-07-23'
},
{
'date_obj': '2020-07-23'
},
{
'currency': 'JPY',
},
]
def test_created(self):
self.assertEqual(self.converter.status, self.converter.INITIATED_STATUS)
def test_add_data(self):
errors = self.converter.add_data(self.amounts)
self.assertEqual(errors, [])
self.assertEqual(self.converter.status, self.converter.INSERTING_STATUS)
self.assertIsNotNone(self.converter.cached_currencies)
self.assertIsNotNone(cache.get(self.converter.id))
def test_trash_amounts(self):
converter = RateConverter(user=self.user, base_currency='EUR')
errors = converter.add_data(self.trash_amounts)
self.assertEqual(len(errors), 4)
self.assertIn("amount", errors[0])
self.assertIn("currency", errors[1])
self.assertNotIn("date_obj", errors[2])
self.assertNotIn("currency", errors[3])
def test_convert(self):
result = self.converter.convert()
self.assertEqual(result.id, self.converter.id)
self.assertEqual(result.target, 'EUR')
self.assertEqual(self.converter.status, self.converter.FINISHED)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.detail), len(self.converter.data))
converted_sum = sum([d.converted_value for d in result.detail])
self.assertEqual(result.sum, converted_sum)
def test_convert_pivot(self):
converter = RateConverter(self.user, base_currency='JPY')
amounts = [
{
'currency': 'AUD',
'amount': 50,
'date_obj': '2020-07-23'
},
]
converter.add_data(amounts)
result = converter.convert()
self.assertEqual(result.id, converter.id)
self.assertEqual(result.target, 'JPY')
self.assertEqual(converter.status, converter.FINISHED)
self.assertEqual(result.errors, [])
self.assertEqual(len(result.detail), len(converter.data))
converted_sum = sum([d.converted_value for d in result.detail])
self.assertEqual(result.sum, converted_sum)
def test_convert_request(self):
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
Rate.objects.fetch_rates(base_currency=self.currency, currency='AUD')
amounts = RateAmountSerializer(self.amounts, many=True)
client = APIClient()
response = client.post(
'/rates/convert/',
data={
'data': amounts.data,
'target': 'EUR',
},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('sum', response.json())
self.assertEqual(len(response.json().get('detail')), len(self.amounts))
def test_convert_batch_request(self):
batch_id = uuid.uuid4()
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
Rate.objects.fetch_rates(base_currency=self.currency, currency='AUD')
client = APIClient()
amounts = RateAmountSerializer(self.amounts, many=True)
response = client.post(
'/rates/convert/',
data={
'data': amounts.data,
'target': 'EUR',
'batch_id': batch_id,
},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('id', response.json())
self.assertEqual(response.json().get('status'), RateConverter.INSERTING_STATUS)
self.assertEqual(response.json().get('id'), str(batch_id))
response = client.post(
'/rates/convert/',
data={
'data': amounts.data,
'batch_id': batch_id,
'target': 'EUR',
'eob': True
},
format='json')
self.assertEqual(response.json().get('status'), RateConverter.FINISHED)
self.assertEqual(len(response.json().get('detail')), 2 * len(self.amounts))
def test_watch_request(self):
batch_id = uuid.uuid4()
Rate.objects.fetch_rates(base_currency=self.base_currency, currency=self.currency)
Rate.objects.fetch_rates(base_currency=self.currency, currency='AUD')
client = APIClient()
amounts = RateAmountSerializer(self.amounts, many=True)
response = client.post(
'/rates/convert/',
data={
'data': amounts.data,
'target': 'EUR',
'batch_id': batch_id,
},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('id', response.json())
self.assertEqual(response.json().get('status'), RateConverter.INSERTING_STATUS)
self.assertEqual(response.json().get('id'), str(batch_id))
response = client.get(
f'/watch/{str(batch_id)}/',
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json().get('status'), RateConverter.INSERTING_STATUS)
self.assertEqual(response.json().get('id'), str(batch_id))
| 37.468582
| 120
| 0.566459
| 2,196
| 20,870
| 5.197632
| 0.070128
| 0.069388
| 0.036446
| 0.050464
| 0.86061
| 0.841861
| 0.802436
| 0.772998
| 0.764237
| 0.749255
| 0
| 0.026194
| 0.306708
| 20,870
| 556
| 121
| 37.535971
| 0.762665
| 0
| 0
| 0.65251
| 0
| 0
| 0.097604
| 0.0046
| 0
| 0
| 0
| 0
| 0.15444
| 1
| 0.061776
| false
| 0.001931
| 0.023166
| 0
| 0.096525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
941a3fde75eb9abd61d2da57ff5caf55b245627c
| 1,740
|
py
|
Python
|
moog/env_wrappers/abstract_wrapper.py
|
juanpablordz/moog.github.io
|
d7995d3563492378d0877ce8d16f5ca9a8031794
|
[
"Apache-2.0",
"MIT"
] | 22
|
2021-02-26T18:19:35.000Z
|
2022-03-05T19:01:00.000Z
|
moog/env_wrappers/abstract_wrapper.py
|
juanpablordz/moog.github.io
|
d7995d3563492378d0877ce8d16f5ca9a8031794
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-04-01T06:15:02.000Z
|
2021-04-23T13:14:12.000Z
|
moog/env_wrappers/abstract_wrapper.py
|
juanpablordz/moog.github.io
|
d7995d3563492378d0877ce8d16f5ca9a8031794
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-05-02T02:20:39.000Z
|
2021-05-06T16:24:35.000Z
|
"""Abstract wrapper.
This file contains AbstractEnvironmentWrapper, an abstract base class for
environment wrappers that mimics the interface of the underlying environment.
"""
import abc
class AbstractEnvironmentWrapper(abc.ABC):
"""Abstract environment wrapper class.
All environment wrappers must inherit from this class.
"""
def __init__(self, environment):
self._environment = environment
def reset(self):
return self._environment.reset()
def step(self, action):
return self._environment.step(action)
def observation(self):
return self._environment.observation()
def observation_spec(self):
return self._environment.observation_spec()
def action_spec(self):
return self._environment.action_spec()
@property
def state(self):
return self._environment.state
@property
def meta_state(self):
return self._environment.meta_state
@property
def state_initializer(self):
return self._environment.state_initializer
@property
def physics(self):
return self._environment.physics
@property
def task(self):
return self._environment.task
@property
def action_space(self):
return self._environment.action_space
@property
def observers(self):
return self._environment.observers
@property
def game_rules(self):
return self._environment.game_rules
@property
def environment(self):
return self._environment
@property
def step_count(self):
return self._environment.step_count
@property
def reset_next_step(self):
return self._environment.reset_next_step
| 22.597403
| 77
| 0.683333
| 188
| 1,740
| 6.117021
| 0.239362
| 0.234783
| 0.292174
| 0.326087
| 0.258261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243103
| 1,740
| 76
| 78
| 22.894737
| 0.873197
| 0.150575
| 0
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.361702
| false
| 0
| 0.021277
| 0.340426
| 0.744681
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
9453a06e79f1363f657d9429737a9a558bc3a9e5
| 1,482
|
py
|
Python
|
pa1-skeleton/pa1-data/8/www.stanford.edu_class_cs221_progAssignments_PA3_analysis.py
|
yzhong94/cs276-spring-2019
|
a4780a9f88b8c535146040fe11bb513c91c5693b
|
[
"MIT"
] | null | null | null |
pa1-skeleton/pa1-data/8/www.stanford.edu_class_cs221_progAssignments_PA3_analysis.py
|
yzhong94/cs276-spring-2019
|
a4780a9f88b8c535146040fe11bb513c91c5693b
|
[
"MIT"
] | null | null | null |
pa1-skeleton/pa1-data/8/www.stanford.edu_class_cs221_progAssignments_PA3_analysis.py
|
yzhong94/cs276-spring-2019
|
a4780a9f88b8c535146040fe11bb513c91c5693b
|
[
"MIT"
] | null | null | null |
analysis py licensing information please do not distribute or publish solutions to this project you are free to use and extend these projects for educational purposes the pacman ai projects were developed at uc berkeley primarily by john denero denero cs berkeley edu and dan klein klein cs berkeley edu for more info see http inst eecs berkeley edu cs188 sp09 pacman html analysis questions change these default values to obtain the specified policies through value iteration def question2a answerdiscount 0.9 answernoise 0.2 answerlivingreward 0.0 return answerdiscount answernoise answerlivingreward if not possible return not possible def question2b answerdiscount 0.9 answernoise 0.2 answerlivingreward 0.0 return answerdiscount answernoise answerlivingreward if not possible return not possible def question2c answerdiscount 0.9 answernoise 0.2 answerlivingreward 0.0 return answerdiscount answernoise answerlivingreward if not possible return not possible def question2d answerdiscount 0.9 answernoise 0.2 answerlivingreward 0.0 return answerdiscount answernoise answerlivingreward if not possible return not possible def question2e answerdiscount 0.9 answernoise 0.2 answerlivingreward 0.0 return answerdiscount answernoise answerlivingreward if not possible return not possible if __name__ __main__ print answers to analysis questions import analysis for q in q for q in dir analysis if q startswith question response getattr analysis q print question s t s q str response
| 741
| 1,481
| 0.853576
| 217
| 1,482
| 5.792627
| 0.414747
| 0.08751
| 0.063644
| 0.107399
| 0.518695
| 0.518695
| 0.518695
| 0.518695
| 0.518695
| 0.518695
| 0
| 0.03125
| 0.136302
| 1,482
| 1
| 1,482
| 1,482
| 0.950781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
84cc32ee525f608a0c3d16f25e8391b031a0c411
| 44
|
py
|
Python
|
python/tinypostman/__init__.py
|
francc/tinypacks
|
96c63e872068ac70222887d6a36b56f01048febb
|
[
"MIT-0"
] | 23
|
2015-06-16T21:33:44.000Z
|
2021-06-20T01:19:10.000Z
|
python/tinypostman/__init__.py
|
francc/tinypacks
|
96c63e872068ac70222887d6a36b56f01048febb
|
[
"MIT-0"
] | null | null | null |
python/tinypostman/__init__.py
|
francc/tinypacks
|
96c63e872068ac70222887d6a36b56f01048febb
|
[
"MIT-0"
] | 8
|
2015-09-04T20:07:29.000Z
|
2020-06-22T04:48:30.000Z
|
#!/usr/bin/python
from tinypostman import *
| 14.666667
| 25
| 0.75
| 6
| 44
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 2
| 26
| 22
| 0.846154
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ca0a7666701e48d9a8acdf35dda9b4f2d842f32c
| 46
|
py
|
Python
|
zgres/tests/test_show.py
|
jinty/zgres
|
88730e94bb543ec4d48c27523d02e3136b332173
|
[
"MIT"
] | 12
|
2015-11-08T21:29:52.000Z
|
2018-10-25T04:45:58.000Z
|
zgres/tests/test_show.py
|
jinty/zgres
|
88730e94bb543ec4d48c27523d02e3136b332173
|
[
"MIT"
] | null | null | null |
zgres/tests/test_show.py
|
jinty/zgres
|
88730e94bb543ec4d48c27523d02e3136b332173
|
[
"MIT"
] | 6
|
2015-10-25T05:59:12.000Z
|
2021-01-06T08:02:46.000Z
|
def test_import():
from zgres import show
| 15.333333
| 26
| 0.717391
| 7
| 46
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 46
| 2
| 27
| 23
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ca171731bedf825af0644d55db11dce2ad1c068e
| 102
|
py
|
Python
|
tests/dd.py
|
laiyuanliang/myBlog
|
0eb3a5e8ba857589a623c087fe2ca696b8339346
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dd.py
|
laiyuanliang/myBlog
|
0eb3a5e8ba857589a623c087fe2ca696b8339346
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dd.py
|
laiyuanliang/myBlog
|
0eb3a5e8ba857589a623c087fe2ca696b8339346
|
[
"BSD-3-Clause"
] | null | null | null |
import os
#direc = os.path.join(os.path.dirname(__file__), 'd.sql')
print(os.path.abspath(__file__))
| 20.4
| 57
| 0.72549
| 17
| 102
| 3.882353
| 0.647059
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 102
| 4
| 58
| 25.5
| 0.702128
| 0.54902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
ca30866aa21d788fd9582b7722ca56601dbe7379
| 5,090
|
py
|
Python
|
output/models/nist_data/list_pkg/id/schema_instance/nistschema_sv_iv_list_id_enumeration_3_xsd/nistschema_sv_iv_list_id_enumeration_3.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/id/schema_instance/nistschema_sv_iv_list_id_enumeration_3_xsd/nistschema_sv_iv_list_id_enumeration_3.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/id/schema_instance/nistschema_sv_iv_list_id_enumeration_3_xsd/nistschema_sv_iv_list_id_enumeration_3.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-list-ID-enumeration-3-NS"
class NistschemaSvIvListIdEnumeration3Type(Enum):
MLEADERS_FFACT_PRODUC_FAND_MEMORY_THAT_AND_MEA_ACHIEVED_SIGNATURE_P_TCOMPATIBILITY_TO_ABOUT_NETWORKING_ROBUST_FROM_FOR_TH_XWHICH_OF_BUSINESS_INCLUDE_THESE_DEVICES_THE = (
"mleaders",
"ffact_produc",
"fand-memory_that-and.mea",
"_achieved_signature-p",
"tcompatibility_to_about-networking-robust-from_for_th",
"xwhich-of-business_include.these_devices_the",
)
VA_OF_USERS_THE_AND_AN_TO_FOR_VOICED_PROFILE_OF_XTHE_POSSIBLE_SUCCESS_WEB_OF_INCLUDING_I_YWIDE_DEFINES_BUS_AND_SMALL_BOTTLENECKS_THE_FIL_DOF_THE_TO_WID_LED_LOCALIZED_THE_TRANSFORMING_REGISTRIES_BY_OTO_INFORMATION_G_SENSORS_A_OLDER_OVER_INDUSTRY_PROVIDE_ENOUGH_INFLUENCE_NEWCOM = (
"va-of-users-the.and.an_to-for-voiced.profile-of_",
"xthe.possible.success.web.of_including_i",
"ywide_defines.bus",
"_and.small-_bottlenecks.the_fil",
"dof.the-to_wid",
"_led-localized_the.transforming_registries_by-",
"oto-information-g_sensors.a.older.over.industry.provide-enough-",
"_influence_newcom",
)
TREPOSITORY_AN_C_ICOMPUTING_FOR_AS_TESTING_MAKE_SOFTWARE_OF_INTERNATIONAL_NRETRIEVE_SENSE_TO_THEIR_SOL_XAPPLICATIONS_OF_AND_THE_FRAMEWORKS_THE_SPECIFIC_S_THESE_WE_RET_TSECOND_GENERATION_HAS_TO_LARGE_THE_THEM_RELAT_BCAN_MUST_CCOMPATIBILITY_SHIFT_G_AS_PARTNER_TKNOWN_FOR_WITH_SYSTEM_AREAS_CHO = (
"trepository_an.c",
"icomputing_for-as.testing_make.software_of-international",
"nretrieve-sense-to_their_sol",
"xapplications-of.and_the-frameworks_the-specific-s.these_we-ret",
"tsecond-generation.has-to.large.the_them.relat",
"bcan-must",
"ccompatibility_shift.g.as.partner",
"tknown_for.with.system-areas.cho",
)
LSPECIFICATIONS_TOOL_NEW_WIRELESS_THE_ARE_H_CCONSISTENCY_TO_PERSONAL_RBACK_IN_OF_THE_ON_AS_PROTOTYPE_PROVIDES_IN_LED_THE_PROVIDE_HUSER_ALL_TECHNOLOGY_THE_MUST_AS_BE_PROFILE_FOR_COMPETEN_XSTRUCTURE_USES_CUSED_AND_OF_IS_COMPUTING_ASPECTS_THOSE_E_NAND_BE_REPOSITORY_TESTS_ITS_CTHE_STAKEHOLDERS_DIRECTIONS_DEFINE_OF_BECOME_SOFTWA = (
"lspecifications-tool_new.wireless.the.are.h",
"cconsistency.to-personal",
"rback_in-of_the-on.as-prototype-provides-in_led-the-provide",
"huser-all.technology-the_must_as-be.profile_for-competen",
"xstructure-uses",
"cused_and_of.is.computing.aspects.those.e",
"nand-be_repository_tests.its",
"cthe-stakeholders.directions.define-of_become_softwa",
)
BRIGOROUS_THAT_CAN_TO_CRE_CCOST_OF_MANIPULATE_SEN_KINVOLVED_ORGANIZATIONS_AND_ISSUES_UWELL_DEVICES_HAS_WITH_BENEFITS_AUTOMATIC_MOST_STA_AND_SPECIFICATIONS_INDIVIDUAL_PORTABLE_SERIES_USE_QAND_THE_COME_THE_FILE_AU = (
"brigorous.that-can.to_cre",
"ccost.of_manipulate_sen",
"kinvolved.organizations-and.issues",
"uwell-devices.has-with-benefits_automatic.most_sta",
"_and_specifications.individual.portable_series.use",
"qand-the_come-the.file_au",
)
PLED_AND_THESE_AMONG_REPUTATION_FULL_AN_RECOGNITION_AND_INDICATION_UNDERSTAND_INDUSTRY_APPL_YAN_VCREATION_MEANS_TOOLS_ON_WITH_IS_THE_FIVE_WILL_H_NUNBIASED_RESULT_FROM_OUR_GENERATION_FILES_ALLOW_R_QBOTH_COMPLETION_PROCESSORS_RI_CF_QTO_EMBEDDED_ANY_EFFECTIVELY_AREAS_OF = (
"pled_and_these.among-reputation.full_an",
"_recognition_and-indication.understand-industry-appl",
"yan",
"vcreation_means_tools_on.with_is.the_five_will_h",
"nunbiased-result_from.our.generation-files_allow-r",
"qboth-completion_processors-ri",
"cf",
"qto-embedded.any_effectively_areas_of.",
)
MGUIDELINES_FOR_CHOICES_MARKET_MANIP_URETRIEVES_THE_WILL_OF_COST_FR_PL_PBETWEEN_TO_ENFORCEM_VFOR_HELP_WITH_AND_PRECISE_DEVELOPED_THAT_IN_USED_REVIE = (
"mguidelines-for.choices.market.manip",
"uretrieves.the.will_of_cost-fr",
"pl",
"pbetween-to.enforcem",
"vfor.help.with.and-precise-developed-that_in_used.revie",
)
@dataclass
class Out:
class Meta:
name = "out"
namespace = "NISTSchema-SV-IV-list-ID-enumeration-3-NS"
any_element: Optional[object] = field(
default=None,
metadata={
"type": "Wildcard",
"namespace": "##any",
}
)
@dataclass
class NistschemaSvIvListIdEnumeration3:
class Meta:
name = "NISTSchema-SV-IV-list-ID-enumeration-3"
namespace = "NISTSchema-SV-IV-list-ID-enumeration-3-NS"
value: Optional[NistschemaSvIvListIdEnumeration3Type] = field(
default=None,
metadata={
"required": True,
}
)
| 50.39604
| 333
| 0.729862
| 638
| 5,090
| 5.260188
| 0.363636
| 0.014303
| 0.016687
| 0.021454
| 0.889452
| 0.889452
| 0.889452
| 0.879917
| 0.879917
| 0.841478
| 0
| 0.001695
| 0.188605
| 5,090
| 100
| 334
| 50.9
| 0.810896
| 0
| 0
| 0.10989
| 0
| 0
| 0.363261
| 0.32947
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032967
| 0
| 0.186813
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ca776bb9333fc39e5aa2a696195e371b76534d94
| 138
|
py
|
Python
|
pyyeti/nastran/__init__.py
|
twmacro/pyye
|
c4febd44be836bd87368da13c1fb0cf82838b687
|
[
"BSD-3-Clause"
] | 17
|
2016-03-02T18:29:13.000Z
|
2022-03-18T08:41:56.000Z
|
pyyeti/nastran/__init__.py
|
twmacro/pyye
|
c4febd44be836bd87368da13c1fb0cf82838b687
|
[
"BSD-3-Clause"
] | 2
|
2021-04-15T02:11:10.000Z
|
2021-12-06T12:49:57.000Z
|
pyyeti/nastran/__init__.py
|
twmacro/pyye
|
c4febd44be836bd87368da13c1fb0cf82838b687
|
[
"BSD-3-Clause"
] | 6
|
2020-06-11T17:09:50.000Z
|
2022-02-07T19:15:07.000Z
|
"""
A collection of tools for working with Nastran files
"""
from .bulk import *
from .n2p import *
from .op2 import *
from . import op4
| 15.333333
| 52
| 0.702899
| 21
| 138
| 4.619048
| 0.714286
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027273
| 0.202899
| 138
| 8
| 53
| 17.25
| 0.854545
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ca9a6019f7e38198676cf58bba1a1504560edd2d
| 2,811
|
py
|
Python
|
yolov2_caffe/udacity_darknet_label_converter.py
|
dedoogong/asrada
|
55fbc6acae562d534ee0dcbc6b2931d77abe5203
|
[
"Apache-2.0"
] | 2
|
2018-02-05T08:16:51.000Z
|
2020-01-11T08:48:28.000Z
|
yolov2_caffe/udacity_darknet_label_converter.py
|
dedoogong/asrada
|
55fbc6acae562d534ee0dcbc6b2931d77abe5203
|
[
"Apache-2.0"
] | null | null | null |
yolov2_caffe/udacity_darknet_label_converter.py
|
dedoogong/asrada
|
55fbc6acae562d534ee0dcbc6b2931d77abe5203
|
[
"Apache-2.0"
] | 1
|
2019-06-23T07:03:07.000Z
|
2019-06-23T07:03:07.000Z
|
import os
import cv2 as cv
import csv
import numpy as np
csv_dir_1 = '/media/lee/ETC_300_150GB/FaceDB/object-dataset/labels.csv'
csv_root_dir_1 = '/media/lee/ETC_300_150GB/FaceDB/object-dataset/'
csv_dir_2 = '/media/lee/ETC_300_150GB/FaceDB/object-detection-crowdai/labels.csv'
csv_root_dir_2 = '/media/lee/ETC_300_150GB/FaceDB/object-detection-crowdai/'
with open(csv_dir_1 , 'r') as f: # f == pts file
reader = csv.reader(f, dialect='excel', delimiter=' ')
for row in reader:
imageFullPath=csv_root_dir_1+row[0]
img = cv.imread(imageFullPath)
img_height = float(img.shape[0])
img_width = float(img.shape[1])
ori_x_min = float(row[1])
ori_y_min = float(row[2])
ori_x_max = float(row[3])
ori_y_max = float(row[4])
front_back = int(row[5])
label = row[6]
class_id = -1
if label == 'pedestrian':
class_id = 0
elif label == 'car':
class_id = 1
elif label == 'biker':
class_id = 2
elif label == 'truck':
class_id = 5
elif label == 'trafficLight':
class_id = 6
normed_cx = (ori_x_max+ori_x_min)/(2*img_width)
normed_cy = (ori_y_max+ori_y_min)/(2*img_height)
normed_w = (ori_x_max-ori_x_min)/img_width
normed_h = (ori_y_max-ori_y_min)/img_height
data=str( class_id ) + ' ' + str(normed_cx ) + ' ' +str(normed_cy ) + ' ' + str(normed_w ) + ' ' + str(normed_h ) + '\n'
f1=open(imageFullPath.replace('jpg','txt'),'a')
f1.write(data)
f1.close()
with open(csv_dir_2, 'r') as f: # f == pts file
reader = csv.reader(f, dialect='excel', delimiter=' ')
for row in reader:
imageFullPath = csv_root_dir_2 + row[0].split(',')[4]
img = cv.imread(imageFullPath)
img_height = float(img.shape[0])
img_width = float(img.shape[1])
ori_x_min = float(row[0].split(',')[0])
ori_y_min = float(row[0].split(',')[1])
ori_x_max = float(row[0].split(',')[2])
ori_y_max = float(row[0].split(',')[3])
label = row[0].split(',')[5]
class_id = -1
if label == 'Pedestrian':
class_id = 0
elif label == 'Car':
class_id = 1
elif label == 'Truck':
class_id = 5
normed_cx = (ori_x_max + ori_x_min) / (2 * img_width)
normed_cy = (ori_y_max + ori_y_min) / (2 * img_height)
normed_w = (ori_x_max - ori_x_min) / img_width
normed_h = (ori_y_max - ori_y_min) / img_height
data = str(class_id) + ' ' + str(normed_cx) + ' ' + str(normed_cy) + ' ' + str(normed_w) + ' ' + str(normed_h) + '\n'
f1 = open(imageFullPath.replace('jpg', 'txt'), 'a')
f1.write(data)
f1.close()
| 33.464286
| 128
| 0.567414
| 421
| 2,811
| 3.510689
| 0.194774
| 0.032476
| 0.028417
| 0.037889
| 0.873478
| 0.774696
| 0.751015
| 0.751015
| 0.751015
| 0.751015
| 0
| 0.037475
| 0.278549
| 2,811
| 84
| 129
| 33.464286
| 0.691322
| 0.009605
| 0
| 0.507463
| 0
| 0
| 0.117541
| 0.081955
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.059701
| 0
| 0.059701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0460fadd2b2a71f8b43a411436e4f494440bcb0c
| 139
|
py
|
Python
|
settings/__init__.py
|
symeonp/choronzon
|
a9eed1ea2cab6bf2c3e020adf4951f6d01eecf72
|
[
"BSD-3-Clause"
] | null | null | null |
settings/__init__.py
|
symeonp/choronzon
|
a9eed1ea2cab6bf2c3e020adf4951f6d01eecf72
|
[
"BSD-3-Clause"
] | null | null | null |
settings/__init__.py
|
symeonp/choronzon
|
a9eed1ea2cab6bf2c3e020adf4951f6d01eecf72
|
[
"BSD-3-Clause"
] | 1
|
2020-02-29T13:55:25.000Z
|
2020-02-29T13:55:25.000Z
|
import platform
if platform.system() == 'Linux':
from system import *
elif platform.system() == 'Windows':
from winsystem import *
| 23.166667
| 36
| 0.683453
| 16
| 139
| 5.9375
| 0.5625
| 0.294737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18705
| 139
| 5
| 37
| 27.8
| 0.840708
| 0
| 0
| 0
| 0
| 0
| 0.086331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0466266d2220c9c760b9dacf4d32bb0347fb9a33
| 164
|
py
|
Python
|
opyoid/bindings/self_binding/__init__.py
|
illuin-tech/opyoid
|
a2ca485e1820ba0d12a86ba91100aa097a1e5736
|
[
"MIT"
] | 37
|
2020-08-25T07:22:41.000Z
|
2022-03-18T03:05:53.000Z
|
opyoid/bindings/self_binding/__init__.py
|
illuin-tech/opyoid
|
a2ca485e1820ba0d12a86ba91100aa097a1e5736
|
[
"MIT"
] | 18
|
2020-10-04T17:33:24.000Z
|
2021-12-16T16:28:35.000Z
|
opyoid/bindings/self_binding/__init__.py
|
illuin-tech/opyoid
|
a2ca485e1820ba0d12a86ba91100aa097a1e5736
|
[
"MIT"
] | 2
|
2021-01-26T19:58:15.000Z
|
2021-11-30T01:10:25.000Z
|
from .from_class_provider import FromClassProvider
from .self_binding import SelfBinding
from .self_binding_to_provider_adapter import SelfBindingToProviderAdapter
| 41
| 74
| 0.908537
| 19
| 164
| 7.473684
| 0.578947
| 0.112676
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 164
| 3
| 75
| 54.666667
| 0.934211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04b62cbb1b4ac4c14a7ab80ed6743b18700bb823
| 24,977
|
py
|
Python
|
Main Program.py
|
akri16/adi-mone-buzzer
|
b49df404eef589b5487b198b7aadf3777b9092d2
|
[
"MIT"
] | null | null | null |
Main Program.py
|
akri16/adi-mone-buzzer
|
b49df404eef589b5487b198b7aadf3777b9092d2
|
[
"MIT"
] | null | null | null |
Main Program.py
|
akri16/adi-mone-buzzer
|
b49df404eef589b5487b198b7aadf3777b9092d2
|
[
"MIT"
] | null | null | null |
import os
os.startfile('p.mp3')
from random import *
from qa import *
from intro import *
from functions import blank
from RULES import*
def asdf1 (t,h):
t1=eval(''.join((t,'n1')))
t2=eval(''.join((t,'n2')))
s=0
if h==1:
print '(1)',k1
print '(2)',k2
print '(3)',k3
print '(4)',k4
pas=[k1,k2,k3,k4]
global pas
else:
pas.remove(zz2)
lok=0
for lkj in pas:
lok=lok+1
print lok,lkj
if h==1:
user_in=raw_input('Select the desired subject')
while user_in not in ('1','2','3','4'):
print 'INVALID input'
user_in=raw_input('Select the desired subject')
zz=pas
zz1=int(user_in)-1
zz2=zz[zz1]
print 'Guruji question please'
print 'Which of the following are',zz2,'?'
zz3=sub1.index(zz2)
zz4=zz3+1
zz5=eval(''.join(('n',str(zz4))))
print zz5
global zz2
else :
user_in=raw_input('Select the desired subject')
while user_in not in ('1','2','3'):
print 'INVALID input'
user_in=raw_input('Select the desired subject')
zz1=int(user_in)-1
print pas
zz2=pas[zz1]
print zz2
print 'Guruji question please'
print 'Which of the following are',zz2,'?'
zz3=sub1.index(zz2)
zz4=zz3+1
zz5=eval(''.join(('n',str(zz4))))
print zz5
global zz2
hyt=['1','2','3','4','5','6','7','8','9','10']
print t1,'it is your turn now'
print 'Type the correct answer:'
sln1=raw_input('sol1:')
while sln1 not in hyt:
print 'INVALID input'
sln1=raw_input('sol1:')
hyt.remove(sln1)
sln2=raw_input('sol2:')
while sln2 not in hyt:
print 'INVALID input'
sln2=raw_input('sol2:')
hyt.remove(sln2)
sln3=raw_input('sol3:')
while sln3 not in hyt:
print 'INVALID input'
sln3=raw_input('sol3:')
hyt.remove(sln3)
sln4=raw_input('sol4:')
while sln4 not in hyt:
print 'INVALID input'
sln4=raw_input('sol4:')
hyt.remove(sln4)
sln5=raw_input('sol5:')
while sln5 not in hyt:
print 'INVALID input'
sln5=raw_input('sol5:')
print t1,'type "change" to change the solutions else proceed'
cnf=raw_input()
while (cnf !='change') and (cnf !=''):
print 'INVALID input'
print t1,'type "change" to change the solutions else proceed'
cnf=raw_input()
hyt=['1','2','3','4','5','6','7','8','9','10']
if cnf=='change':
print 'retype the correct answers'
sln1=raw_input('sol1:')
while sln1 not in hyt:
print 'INVALID input'
sln1=raw_input('sol1:')
hyt.remove(sln1)
sln2=raw_input('sol2:')
while sln2 not in hyt:
print 'INVALID input'
sln2=raw_input('sol1:')
hyt.remove(sln2)
sln3=raw_input('sol3:')
while sln3 not in hyt:
print 'INVALID input'
sln3=raw_input('sol1:')
hyt.remove(sln3)
sln4=raw_input('sol4:')
while sln4 not in hyt:
print 'INVALID input'
sln4=raw_input('sol1:')
hyt.remove(sln4)
sln5=raw_input('sol5:')
while sln5 not in hyt:
print 'INVALID input'
sln5=raw_input('sol1:')
if cnf=='' or cnf=='change':
blank()
print t2,'its your turn.....'
print t2,'type "change" to change the solutions else proceed'
cnf1=raw_input()
while (cnf1 !='change')and (cnf1 !=''):
print 'INVALID input'
print t2,'type "change" to change the solutions else proceed'
cnf1=raw_input()
if cnf1=='change':
s=0
hyt=['1','2','3','4','5','6','7','8','9','10']
print 'retype the correct answers'
sLn1=raw_input('sol1:')
while sLn1 not in hyt:
print 'INVALID input'
sLn1=raw_input('sol1:')
hyt.remove(sLn1)
sLn2=raw_input('sol2:')
while sLn2 not in hyt :
print 'INVALID input'
sLn1=raw_input('sol1:')
hyt.remove(sLn2)
sLn3=raw_input('sol3:')
while sLn3 not in hyt:
print 'INVALID input'
sLn1=raw_input('sol1:')
hyt.remove(sLn3)
sLn4=raw_input('sol4:')
while sLn4 not in hyt:
print 'INVALID input'
sLn1=raw_input('sol1:')
hyt.remove(sLn4)
sLn5=raw_input('sol5:')
while sLn5 not in hyt:
print 'INVALID input'
sLn1=raw_input('sol1:')
if cnf1=='' or cnf1=='change':
blank()
print 'Now lets see to the answers','Dont be afraid!!!'
xcv=[sln1,sln2,sln3,sln4,sln5]
if cnf1=='':
for afgh in xcv:
if int(afgh)in (eval(''.join(('Ans',str(zz4))))):
ast=zz5[(int(afgh))-1]
print ast,'--> double correct-->2000 points'
s=s+2000
else:
ast=zz5[(int(afgh))-1]
print ast,'-->Sorry,wrong answer'
elif cnf1=='change':
mnb=[sLn1,sLn2,sLn3,sLn4,sLn5]
for y in xcv :
if (y in mnb) and (int(y) in(eval(''.join(('Ans',str(zz4)))))):
s=s+2000
mnb.remove(y)
mnj=zz5[(int(y))-1]
print mnj,'--> double correct-->2000 points'
for pl in mnb:
if int(pl) in (eval(''.join(('Ans',str(zz4))))):
s=s+1000
act=zz5[(int(pl))-1]
print act,'--> correct-->1000 points'
else:
act=zz5[(int(pl))-1]
print act,'-->Sorry,wrong answer'
print 'Now lets see which all are right answers'
print 'So the correct answers are--->'
for asw in (eval(''.join(('Ans',str(zz4))))):
print zz5[asw-1]
blank()
if s==10000:
print 'ya!hoo!,you`ve got a bonus!!!'
s=s+5000
if t=='g1':
score1=s
global score1
elif t=='g2':
score2=s
global score2
t=eval(t)
print 'team', t,'your team score is',s
def osub1 (R):
x1=randint(0,13)
if R=='r1':
k1=sub1[x1]
else :k1=sub3[x1]
print '(1)',k1
x2=randint(0,13)
while x2==x1:
x2=randint(0,13)
if R=='r1':
k2=sub1[x2]
else :k2=sub3[x2]
print '(2)',k2
x3=randint(0,13)
while (x3==x1) or (x3==x2):
x3=randint(0,13)
if R=='r1':
k3=sub1[x3]
else :k3=sub3[x3]
print '(3)',k3
x4=randint(0,13)
while (x4==x1)or (x4==x2)or(x4==x3):
x4=randint(0,13)
if R=='r1':
k4=sub1[x4]
else :k4=sub3[x4]
print '(4)',k4
global k1
global k2
global k3
global k4
#------------------------------
def main ():
print'ADI MONA BUZZER FOR ROUND-1-->TYPE IT RIGHT'
rul1()
print ('guruji subjects please')
osub1 ('r1')
blank()
print 'So your buzzer question is........'
x=randint(0,5)
print bq[x]
b=(raw_input('type 1,2,3,or 4 for player 1 player 2 player3 and player 4 respectively:'))
while (b not in ('1','2','3','4'))or b=='' :
print'INVALID input'
b=raw_input('type the right input:')
b=int(b)
an_no=('a',str(x+1))
an_no=eval(''.join(an_no))
if b==1:
print g1n2,'type the appropriate answer:'
s1=raw_input()
while len(s1)==0 or len(s1)>25:
print 'INVALID Input'
print g1n2,'type the appropriate answer:'
s1=raw_input()
if an_no in s1 :
print 'Right Answer'
print 'Team',g1,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g1',1)
blank()
asew='g2'
pm=g1
else:
print 'Sorry wrong answer'
print 'Team',g2,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g2',1)
blank()
asew='g1'
pm=g1
elif b==2:
print g1n1,'type the appropriate answer:'
s2=raw_input()
while len(s2)==0 or len(s2)>25:
print 'INVALID Input'
print g1n1,'type the appropriate answer:'
s2=raw_input()
if an_no in s2 :
print 'Right Answer'
print 'Team',g1,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g1',1)
blank()
asew='g2'
else:
print 'Sorry wrong answer'
print 'Team',g2,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g2',1)
blank()
asew='g1'
pm=g1
elif b==3:
print g2n2,'type the appropriate answer:'
u1=raw_input()
while len(u1)==0 or len(u1)>25:
print 'INVALID Input'
print g2n2,'type the appropriate answer:'
u1=raw_input()
if an_no in u1 :
print 'Right Answer'
print 'Team',g2,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g2',1)
blank()
asew='g1'
pm=g1
else:
print 'Sorry wrong answer'
print 'Team',g1,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g1',1)
blank()
asew='g2'
pm=g2
elif b==4:
print g2n1,'type the appropriate answer:'
u2=raw_input()
while len(u2)==0 or len(u2)>25:
print 'INVALID Input'
print g2n2,'type the appropriate answer:'
u2=raw_input()
if an_no in u2 :
print 'Right Answer'
print 'Team',g2,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g2',1)
blank()
asew='g1'
pm=g1
else:
print 'Sorry wrong answer'
print 'Team',g1,'get on to the play station'
print 'Lets review the subjects'
blank()
asdf1('g1',1)
blank()
asew='g2'
pm=g2
print 'So the right answer is-->'
print an_no
if asew ==g1:
print 'Team',g2,',you can get back to your team station'
print 'Team',g1,'get on to the play station'
else:
print 'Team',g1,',you can get back to your team station'
print 'Team',g2,'get on to the play station'
print 'Lets review the subjects'
asdf1(asew,2)
print 'So by the end of round 1 lets see the scores'
print 'Team',g1,'-->',score1
print 'Team',g2,'-->',score2
if score1!=score2:
qw=max( score1,score2)
wq=min(score1,score2)
if qw==score1:
mnk=g1
mpk=g2
else :
mnk=g2
mpk=g1
print mnk,'you are leading with',qw, 'Keep playing!!! '
print mpk,'don`t worry,you have earned',wq,'.One more round left.Keep playing!!!'
else:print 'Team',g1,'and Team',g2,'you have earned a tie of', score1
blank()
print 'Now lets play ROUND-2 --> KNOCK OUT ROUND'
blank()
print 'ADI MONA BUZZER for ROUND-2 --> KNOCK OUT ROUND'
blank()
rul2()
print 'Best of luck!!!!'
blank()
print 'Guruji Question Please'
print 'So your first question is.....'
just=range(0,21)
global just
asdf2()
blank()
print 'Now your second question .....'
asdf2()
blank()
print 'Here goes your third question.....'
asdf2()
blank()
print 'Now your fourth question...'
asdf2()
blank()
print 'And the final question of this round....'
print 'Here goes it'
asdf2()
blank()
print 'So by the end of Round 2, lets see your team scores'
print 'Team',g1,'-->',score1,'Points'
print 'Team',g2,'-->',score2,'Points'
if score1!=score2:
qw=max( score1,score2)
wq=min(score1,score2)
if qw==score1:
mnk=g1
mpk=g2
else :
mnk=g2
mpk=g1
print mnk,'you are leading with',qw, 'Keep playing!!! '
print 'So Team',mnk,'You are qualified for JACKPOT ROUND'
print mpk,'don`t worry,you have earned',wq,'you have done your best!!!'
else:
print 'Team',g1,'and Team',g2,'you have earned a tie of', score1
print 'So lets have a TIE BREAK question for JACKPOT qualification'
print 'So now lets play ROUND-3 JACKPOT ROUND'
print 'ADI MONA BUZZER for ROUND-3 -->JACKPOT ROUND'
blank()
rul3()
print 'Team',mnk,'get on to the play station'
blank()
asdf3()
print 'So by the end of JACKPOT ROUND ,lets view the scores'
print 'Team',g1,'--->',score1,'Points'
print 'Team',g2,'--->',score2,'Points'
if score1!=score2:
qw=max( score1,score2)
wq=min(score1,score2)
if qw==score1:
mnk=g1
mpk=g2
else :
mnk=g2
mpk=g1
print 'Team',mnk,'you are leading with',qw
print mpk,'don`t worry,you have earned',wq,'you have done your best!!!'
blank()
print'So today we had tough contestents'
blank()
print'They have cooperated with us well to provide the global a fruitful and enjoyable game'
print 'Thank you all my contestents'
blank()
print 'Now I thank the holy GURUJI who have spreaded knowledge to us'
blank()
print'Last but not the least, I thank all the global malayalees'
blank()
print'My buddies sweet hearts ,love sky high and keep falling in love again and again and again'
blank()
print 'SO THERE COME TO END THIS ADI MONA BUZZER PROGRAM!!!'
blank()
print 'Thank you for your kind cooperation'
blank()
dfrew=raw_input('Would you like to drop a feedback to us?(y/n)')
while dfrew!='y'and dfrew!='n':
print 'INVALID input'
dfrew=raw_input('Would you like to drop a feedback to us?(y/n)')
if dfrew=='y':
feedback=raw_input('Enter your feedback and name')
target=open('feedback.txt',"w")
target.write(feedback+"\n")
print 'Thank you for your kind feedback'
else:print 'Ok Thank You'
blank()
print 'I Amit signing off',',good bye'
os.system("TASKKILL /F /IM wmplayer.exe")
global mnk
print '----------------------THE END--------------------------'
asde=raw_input('Do you want to run the programme again?(y/n):')
while asde!='y' and asde!='n':
print'INVALID INPUT'
asde=raw_input('Do you want to run the programme again?(y/n):')
if asde=='y':
from intro import*
main()
else:
print 'Ok,Thank you'
quit()
def asdf2():
global score1
global score2
ran=choice(just)
_just= sub2[ran]
print _just
just.remove(ran)
bp=(raw_input('type 1,2,3,or 4 for player 1 player 2 player3 and player 4 respectively:'))
while (bp not in ('1','2','3','4')) :
print'INVALID input'
bp=raw_input('type the right input:')
bp=int(bp)
an_n=('p',str(ran+1))
an_n=eval(''.join(an_n))
if bp==1:
print g1n2,'type the appropriate answer:'
s1=raw_input()
while len(s1)==0 or len(s1)>25:
print 'INVALID Input'
print g1n2,'type the appropriate answer:'
s1=raw_input()
if an_n in s1 :
print 'Right Answer'
blank()
score1=score1+4000
print 'Team',g1 ,'You get 4000 points'
else:
print 'Sorry wrong answer'
blank()
score1=score1-3000
blank()
print 'Team',g1,'I am sorry,you get -3000 points'
print 'Team',g1,'your current score is',score1
elif bp==2:
print g1n1,'type the appropriate answer:'
s2=raw_input()
while len(s2)==0 or len(s2)>25:
print 'INVALID Input'
print g1n1,'type the appropriate answer:'
s2=raw_input()
if an_n in s2 :
print 'Right Answer'
blank()
print 'Team',g1 ,'You get 4000 points'
s1=score1+4000
else:
print 'Sorry wrong answer'
blank()
score1=score1-3000
print 'Team',g1,'I am sorry,you get -3000 points'
print 'Team',g1,'your current score is',score1
elif bp==3:
print g2n2,'type the appropriate answer:'
u1=raw_input()
while len(u1)==0 or len(u1)>25:
print 'INVALID Input'
print g2n2,'type the appropriate answer:'
u1=raw_input()
if an_n in u1 :
print 'Right Answer'
blank()
score2=score2+4000
print 'Team',g2 ,'You get 4000 points'
else:
print 'Sorry wrong answer'
blank()
score2=score2-3000
print 'Team',g2,'I am sorry,you get -3000 points'
print 'Team',g2,'your current score is',score2
elif bp==4:
print g2n1,'type the appropriate answer:'
u2=raw_input()
while len(u2)==0 or len(u2)>25:
print 'INVALID Input'
print g2n2,'type the appropriate answer:'
u2=raw_input()
if an_n in u2 :
print 'Right Answer'
blank()
score2=score2+4000
print 'Team',g2 ,'You get 4000 points'
else:
print 'Sorry wrong answer'
blank()
score2=score2-3000
print 'Team',g2,'I am sorry,you get -3000 points'
print 'Team',g2,'your current score is',score2
print 'So the right answer is-->'
print an_n
global sub2
def tb():
ran=choice(just)
_just= sub2[ran]
print _just
sub2.remove(_just)
bp=(raw_input('type 1,2,3,or 4 for player 1 player 2 player3 and player 4 respectively:'))
while (bp not in ('1','2','3','4')) :
print'INVALID input'
bp=raw_input('type the right input:')
bp=int(bp)
an_n=('p',str(just+1))
an_n=eval(''.join(an_n))
if b==1:
print g1n2,'type the appropriate answer:'
s1=raw_input()
while len(s1)==0 or len(s1)>25:
print 'INVALID Input'
print g1n2,'type the appropriate answer:'
s1=raw_input()
if an_no in s1 :
print 'Right Answer'
print 'Team',g1,'you are qualified for JACKPOT ROUND'
print 'Team',g1,'get on to the play station'
mnk=g1
else:
print 'Sorry wrong answer'
print 'Now,Team',g2,'you are qualified for JACKPOT ROUND'
print 'Team',g2,'get on to the play station'
mnk=g2
elif b==2:
print g1n1,'type the appropriate answer:'
s2=raw_input()
while len(s2)==0 or len(s2)>25:
print 'INVALID Input'
print g1n1,'type the appropriate answer:'
s2=raw_input()
if an_no in s2 :
print 'Right Answer'
print 'Team',g1,'you are qualified for JACKPOT ROUND'
print 'Team',g1,'get on to the play station'
mnk=g1
else:
print 'Sorry wrong answer'
print 'Team',g2,'get on to the play station'
mnk=g2
elif b==3:
print g2n2,'type the appropriate answer:'
u1=raw_input()
while len(u1)==0 or len(u1)>25:
print 'INVALID Input'
print g2n2,'type the appropriate answer:'
u1=raw_input()
if an_no in u1 :
print 'Right Answer'
print 'Team',g2,'you are qualified for JACKPOT ROUND'
print 'Team',g2,'get on to the play station'
mnk=g2
else:
print 'Sorry wrong answer'
print 'Team',g1,'get on to the play station'
mnk=g1
elif b==4:
print g2n1,'type the appropriate answer:'
u2=raw_input()
while len(u2)==0 or len(u2)>25:
print 'INVALID Input'
print g2n2,'type the appropriate answer:'
u2=raw_input()
if an_no in u2 :
print 'Right Answer'
print 'Team',g2,'you are qualified for JACKPOT ROUND'
print 'Team',g2,'get on to the play station'
mnk=g2
else:
print 'Sorry wrong answer'
print 'Team',g1,'get on to the play station'
mnk=g1
global mnk
print 'So the right answer is-->'
print an_n
def asdf3():
print 'So lets see today`s subjects'
blank()
osub1('r3')
blank()
if mnk==g1:
pl1=g1n1
pl2=g2n2
elif mnk==g2:
pl1=g2n1
pl2=g2n2
cmos=[pl1,pl2]
pq=choice(cmos)
cmos.remove(pq)
print pq,'Its your turn now'
user_in=raw_input('Select the desired subject')
while user_in not in ('1','2','3','4'):
print 'INVALID input'
user_in=raw_input('Select the desired subject')
pas=(k1,k2,k3,k4)
zz=pas
zz1=int(user_in)-1
zz2=zz[zz1]
print 'Guruji question please'
print 'Which of the following are',zz2,'?'
zz3=sub3.index(zz2)
zz4=zz3+1
zz5=eval(''.join(('m',str(zz4))))
print zz5
global zz2
print 'Type the correct answer:'
sln1=raw_input('sol1:')
while sln1 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln1=raw_input('sol1:')
sln2=raw_input('sol2:')
while sln2 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln2=raw_input('sol2:')
sln3=raw_input('sol3:')
while sln3 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln3=raw_input('sol3:')
sln4=raw_input('sol4:')
while sln4 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln4=raw_input('sol4:')
sln5=raw_input('sol5:')
while sln5 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln5=raw_input('sol5:')
print pq,'type "change" to change the solutions else proceed'
cnf=raw_input()
while (cnf !='change') and (cnf !=''):
print 'INVALID input'
print t1,'type "change" to change the solutions else proceed'
cnf=raw_input()
if cnf=='change':
print 'retype the correct answers'
sln1=raw_input('sol1:')
while sln1 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln1=raw_input('sol1:')
sln2=raw_input('sol2:')
while sln2 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln2=raw_input('sol1:')
sln3=raw_input('sol3:')
while sln3 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln3=raw_input('sol1:')
sln4=raw_input('sol4:')
while sln4 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln4=raw_input('sol1:')
sln5=raw_input('sol5:')
while sln5 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sln5=raw_input('sol1:')
lk=cmos[0]
if cnf=='' or cnf=='change':
blank()
print lk,'its your turn.....'
print lk,'type "change" to change the solutions else proceed'
cnf1=raw_input()
while (cnf1 !='change')and (cnf1 !=''):
print 'INVALID input'
print t2,'type "change" to change the solutions else proceed'
cnf1=raw_input()
if cnf1=='change':
s=0
print 'retype the correct answers'
sln1=raw_input('sol1:')
while sLn1 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sLn1=raw_input('sol1:')
sln2=raw_input('sol2:')
while sLn2 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sLn1=raw_input('sol1:')
sln3=raw_input('sol3:')
while sLn3 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sLn1=raw_input('sol1:')
sln4=raw_input('sol4:')
while sLn4 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sLn1=raw_input('sol1:')
sln5=raw_input('sol5:')
while sln5 not in ('1','2','3','4','5','6','7','8','9','10'):
print 'INVALID input'
sLn1=raw_input('sol1:')
if cnf1=='' or cnf1=='change':
blank()
print 'Now lets see to the answers','Dont be afraid!!!'
xcv=[sln1,sln2,sln3,sln4,sln5]
if cnf1=='' or cnf1=='change':
for afgh in xcv:
ast=zz5[(int(afgh))-1]
if int(afgh)in (eval(''.join(('sol',str(zz4))))):
print ast,'You have got it right'
else:print 'I am Sorry',ast,'is wrong answer'
kljh=(eval(''.join(('sol',str(zz4)))))
print 'So the correct answers are--->'
for asw in kljh:
print zz5[asw-1]
if sln1 in kljh and sln2 in kljh and sln3 in kljh and sln4 in kljh :
print 'Team',mnk,'You have won a JACKPOT!!!!!!!!!!!!!!!!!'
if mnk==g1:
score1='JACKPOT +',score1
elif mnk==g2:
score2='JACKPOT +',score2
print'-----------YA!HOOO!!!,YOU HAVE WON JACKPOT!,JACKPOT!!,JACKPOT!!!-----------'
else:print 'I am sorry you have lost the JACKPOT ROUND'
main()
| 24.977
| 97
| 0.539016
| 3,573
| 24,977
| 3.726001
| 0.09096
| 0.0655
| 0.068955
| 0.043266
| 0.783895
| 0.761812
| 0.725607
| 0.704574
| 0.688876
| 0.670548
| 0
| 0.064237
| 0.310045
| 24,977
| 999
| 98
| 25.002002
| 0.708292
| 0.001201
| 0
| 0.739744
| 0
| 0
| 0.309308
| 0.005732
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.008974
| null | null | 0.337179
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04b98009e7a3d9bca27a451c6b65771903ef34d0
| 6,239
|
py
|
Python
|
tests/test_require_org_member.py
|
PropelAuth/propelauth-fastapi
|
631bcfd923f25967214409ec8f87201096be9230
|
[
"MIT"
] | null | null | null |
tests/test_require_org_member.py
|
PropelAuth/propelauth-fastapi
|
631bcfd923f25967214409ec8f87201096be9230
|
[
"MIT"
] | null | null | null |
tests/test_require_org_member.py
|
PropelAuth/propelauth-fastapi
|
631bcfd923f25967214409ec8f87201096be9230
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from uuid import uuid4
from fastapi import Depends
from starlette.responses import PlainTextResponse
from tests.auth_helpers import create_access_token, orgs_to_org_id_map, random_org, random_user_id
from tests.conftest import HTTP_BASE_AUTH_URL
from propelauth_fastapi import UserRole
ROUTE_NAME = "/require_org_member_route"
def test_require_org_member_without_auth(app, auth, client, rsa_keys):
    """Requests with no Authorization header are rejected with 401."""
    create_route_expecting_user_and_org(app, auth, None, None, None)
    some_org_id = str(uuid4())
    resp = client.get(route_for(some_org_id))
    assert resp.status_code == 401
def test_require_org_member_with_auth_but_no_org_membership(app, auth, client, rsa_keys):
    """A valid token that carries no org memberships yields 403."""
    create_route_expecting_user_and_org(app, auth, None, None, None)
    some_org_id = str(uuid4())
    token = create_access_token({"user_id": random_user_id()}, rsa_keys.private_pem)
    resp = client.get(route_for(some_org_id), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 403
def test_require_org_member_with_auth_and_org_member(app, auth, client, rsa_keys):
    """An Owner of the requested org passes the check and reaches the handler."""
    user_id = random_user_id()
    org = random_org("Owner")
    create_route_expecting_user_and_org(app, auth, user_id, org, UserRole.Owner)
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem)
    resp = client.get(route_for(org["org_id"]), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
    assert resp.text == "ok"
def test_require_org_member_with_auth_but_wrong_org_id(app, auth, client, rsa_keys):
    """Membership in one org does not grant access to a different org_id."""
    user_id = random_user_id()
    org = random_org("Owner")
    other_org_id = str(uuid4())
    create_route_expecting_user_and_org(app, auth, user_id, org, UserRole.Owner)
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem)
    # Pass wrong org_id as a path parameter
    resp = client.get(route_for(other_org_id), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 403
def test_require_org_member_with_auth_but_no_permission(app, auth, client, rsa_keys):
    """A plain Member is rejected when the route requires the Admin role."""
    user_id = random_user_id()
    org = random_org("Member")
    create_route_expecting_user_and_org(app, auth, user_id, org, UserRole.Admin)
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem)
    resp = client.get(route_for(org["org_id"]), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 403
def test_require_org_member_with_auth_with_permission(app, auth, client, rsa_keys):
    """An Admin passes a route that requires the Admin role."""
    user_id = random_user_id()
    org = random_org("Admin")
    create_route_expecting_user_and_org(app, auth, user_id, org, UserRole.Admin)
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem)
    resp = client.get(route_for(org["org_id"]), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
    assert resp.text == "ok"
def test_require_org_member_with_bad_header(app, auth, client, rsa_keys):
    """A non-Bearer authorization scheme is rejected with 401."""
    create_route_expecting_user_and_org(app, auth, None, None, None)
    user_id = random_user_id()
    org = random_org("Admin")
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem)
    resp = client.get(route_for(org["org_id"]), headers={"Authorization": f"token {token}"})
    assert resp.status_code == 401
def test_require_org_member_with_wrong_token(app, auth, client, rsa_keys):
    """A malformed bearer token is rejected with 401."""
    create_route_expecting_user_and_org(app, auth, None, None, None)
    some_org_id = str(uuid4())
    resp = client.get(route_for(some_org_id), headers={"Authorization": "Bearer whatisthis"})
    assert resp.status_code == 401
def test_require_org_member_with_expired_token(app, auth, client, rsa_keys):
    """An already-expired token is rejected with 401."""
    user_id = random_user_id()
    org = random_org("Owner")
    create_route_expecting_user_and_org(app, auth, user_id, org, UserRole.Owner)
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem,
                                expires_in=timedelta(minutes=-1))
    resp = client.get(route_for(org["org_id"]), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 401
def test_require_user_with_bad_issuer(app, auth, client, rsa_keys):
    """A token minted by the wrong issuer is rejected with 401."""
    user_id = random_user_id()
    org = random_org("Owner")
    create_route_expecting_user_and_org(app, auth, user_id, org, UserRole.Owner)
    claims = {
        "user_id": user_id,
        "org_id_to_org_member_info": orgs_to_org_id_map([org]),
    }
    token = create_access_token(claims, rsa_keys.private_pem,
                                issuer=HTTP_BASE_AUTH_URL)
    resp = client.get(route_for(org["org_id"]), headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 401
def create_route_expecting_user_and_org(app, auth, user_id, org, user_role):
    """Register a GET route that enforces org membership and, when it is
    reached, asserts the resolved user and org match the expected values."""
    @app.get(ROUTE_NAME)
    async def route(org_id, current_user=Depends(auth.require_user)):
        member_org = auth.require_org_member(current_user, org_id, user_role)
        assert current_user.user_id == user_id
        assert member_org.org_id == org["org_id"]
        assert member_org.org_name == org["org_name"]
        assert member_org.user_role == user_role
        return PlainTextResponse("ok")
def route_for(org_id):
    """Build the test route URL with *org_id* as a query parameter."""
    return f"{ROUTE_NAME}?org_id={org_id}"
| 36.91716
| 104
| 0.741946
| 955
| 6,239
| 4.369634
| 0.092147
| 0.062305
| 0.045291
| 0.050324
| 0.816439
| 0.787203
| 0.784807
| 0.784807
| 0.78313
| 0.770189
| 0
| 0.006835
| 0.155794
| 6,239
| 168
| 105
| 37.136905
| 0.785457
| 0.00593
| 0
| 0.663793
| 0
| 0
| 0.087903
| 0.032258
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.103448
| false
| 0
| 0.060345
| 0.008621
| 0.181034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04dbbb817466a90a9dede798d66c632f8c1fae56
| 147
|
py
|
Python
|
quizmous_api/version.py
|
szykol/quizmous-api
|
b33704701f258752bab0cddff44e7f357d6d5a99
|
[
"MIT"
] | 4
|
2020-05-17T19:26:55.000Z
|
2021-12-04T17:58:17.000Z
|
quizmous_api/version.py
|
szykol/quizmous-api
|
b33704701f258752bab0cddff44e7f357d6d5a99
|
[
"MIT"
] | null | null | null |
quizmous_api/version.py
|
szykol/quizmous-api
|
b33704701f258752bab0cddff44e7f357d6d5a99
|
[
"MIT"
] | null | null | null |
from json import load

def get_api_version():
    """Return the parsed contents of the installed API version file."""
    version_path = '/usr/local/api/version.json'
    with open(version_path, 'r') as version_file:
        return load(version_file)
| 24.5
| 66
| 0.70068
| 23
| 147
| 4.304348
| 0.695652
| 0.20202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176871
| 147
| 5
| 67
| 29.4
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.183673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
b6f98dc527c7501d7e3880fb20d885cc038421f3
| 53
|
py
|
Python
|
PythonCrashCourse/3List/clip_test.py
|
dzylikecode/Python_Tutorial
|
bff425b11d6eeaa5733c1c710a570f83c52e4d97
|
[
"MIT"
] | null | null | null |
PythonCrashCourse/3List/clip_test.py
|
dzylikecode/Python_Tutorial
|
bff425b11d6eeaa5733c1c710a570f83c52e4d97
|
[
"MIT"
] | null | null | null |
PythonCrashCourse/3List/clip_test.py
|
dzylikecode/Python_Tutorial
|
bff425b11d6eeaa5733c1c710a570f83c52e4d97
|
[
"MIT"
] | null | null | null |
# Demonstrate slicing off the first and last elements of a list.
# Renamed from `list`, which shadowed the builtin of the same name.
numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8]
print(numbers[1:-1])
| 17.666667
| 34
| 0.45283
| 14
| 53
| 1.714286
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268293
| 0.226415
| 53
| 2
| 35
| 26.5
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6d072f189ad7ba990ab26410eca98ffec6db6f31
| 29
|
py
|
Python
|
crawlino/modules/sources_module/__init__.py
|
BBVA/crawlino
|
685f57e6b3e9356484ead2681bb178f651d2f371
|
[
"Apache-2.0"
] | 1
|
2018-11-11T21:07:54.000Z
|
2018-11-11T21:07:54.000Z
|
crawlino/modules/sources_module/__init__.py
|
BBVA/crawlino
|
685f57e6b3e9356484ead2681bb178f651d2f371
|
[
"Apache-2.0"
] | null | null | null |
crawlino/modules/sources_module/__init__.py
|
BBVA/crawlino
|
685f57e6b3e9356484ead2681bb178f651d2f371
|
[
"Apache-2.0"
] | null | null | null |
from .plugins_model import *
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6d4588df711bf8609b1c23abeddd81d4f0629456
| 292
|
py
|
Python
|
Python/Strings/designer-door-mat.py
|
mateusnr/hackerrank-solutions
|
2fa60bae480d8afb46e3d99929707a7d9d92858f
|
[
"CC0-1.0"
] | 1
|
2015-08-01T04:03:47.000Z
|
2015-08-01T04:03:47.000Z
|
Python/Strings/designer-door-mat.py
|
mateusnr/hackerrank-solutions
|
2fa60bae480d8afb46e3d99929707a7d9d92858f
|
[
"CC0-1.0"
] | null | null | null |
Python/Strings/designer-door-mat.py
|
mateusnr/hackerrank-solutions
|
2fa60bae480d8afb46e3d99929707a7d9d92858f
|
[
"CC0-1.0"
] | 4
|
2020-05-04T15:12:21.000Z
|
2021-02-18T11:58:30.000Z
|
# HackerRank "Designer Door Mat": print the dashed .|. pattern with a
# centered WELCOME band for an N x M mat read from stdin.
rows, width = map(int, input().split())

def _stripe(k):
    # One mat row: k copies of '.|.' centered between dash padding.
    pad = '-' * int((width - (3 * k)) / 2)
    return pad + k * '.|.' + pad

for k in range(1, rows, 2):
    print(_stripe(k))
edge = '-' * int((width - 7) / 2)
print(edge + "WELCOME" + edge)
for k in range(rows - 2, -1, -2):
    print(_stripe(k))
| 48.666667
| 82
| 0.380137
| 55
| 292
| 2.018182
| 0.290909
| 0.216216
| 0.18018
| 0.216216
| 0.378378
| 0.378378
| 0.378378
| 0.378378
| 0.378378
| 0.378378
| 0
| 0.073276
| 0.205479
| 292
| 6
| 82
| 48.666667
| 0.405172
| 0
| 0
| 0.333333
| 0
| 0
| 0.064846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b65a8e8dd9515dcfca09370480910afbd6d840cc
| 19
|
py
|
Python
|
bayesian_networks_intro.py
|
https-seyhan/Bayesian-Networks
|
bf69e2cf52eba37ffea5596b767454899422bd9f
|
[
"MIT"
] | null | null | null |
bayesian_networks_intro.py
|
https-seyhan/Bayesian-Networks
|
bf69e2cf52eba37ffea5596b767454899422bd9f
|
[
"MIT"
] | null | null | null |
bayesian_networks_intro.py
|
https-seyhan/Bayesian-Networks
|
bf69e2cf52eba37ffea5596b767454899422bd9f
|
[
"MIT"
] | null | null | null |
import pymc3 as pm
| 9.5
| 18
| 0.789474
| 4
| 19
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.210526
| 19
| 1
| 19
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b667077f312cf39b99ae8493d597f29204857a42
| 126
|
py
|
Python
|
mcetl/cli_utils/__init__.py
|
materials-commons/pymcetl
|
b4311ba50bb35bc36527b9d313a91778f9550a92
|
[
"MIT"
] | null | null | null |
mcetl/cli_utils/__init__.py
|
materials-commons/pymcetl
|
b4311ba50bb35bc36527b9d313a91778f9550a92
|
[
"MIT"
] | null | null | null |
mcetl/cli_utils/__init__.py
|
materials-commons/pymcetl
|
b4311ba50bb35bc36527b9d313a91778f9550a92
|
[
"MIT"
] | null | null | null |
try:
from pathlib import Path
Path().expanduser()
except (ImportError, AttributeError):
from pathlib2 import Path
| 21
| 37
| 0.722222
| 14
| 126
| 6.5
| 0.714286
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.198413
| 126
| 5
| 38
| 25.2
| 0.891089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b686af997b0efadf779a323fd3ef06eed8d7c64f
| 143
|
py
|
Python
|
azure/cognitive/Emotion.py
|
crwilcox/MicrosoftCognitive
|
62913e9c6d04d0b698d4b548c517cea07396eaf3
|
[
"Apache-2.0"
] | null | null | null |
azure/cognitive/Emotion.py
|
crwilcox/MicrosoftCognitive
|
62913e9c6d04d0b698d4b548c517cea07396eaf3
|
[
"Apache-2.0"
] | null | null | null |
azure/cognitive/Emotion.py
|
crwilcox/MicrosoftCognitive
|
62913e9c6d04d0b698d4b548c517cea07396eaf3
|
[
"Apache-2.0"
] | null | null | null |
# Intentional stub: this module was never implemented; importing it fails
# fast and points callers at Microsoft's reference notebooks instead.
raise NotImplementedError("Not Implemented. Jupyter Notebooks on these APIs are here: https://github.com/Microsoft/Cognitive-Emotion-Python ")
| 71.5
| 142
| 0.818182
| 18
| 143
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083916
| 143
| 1
| 143
| 143
| 0.89313
| 0
| 0
| 0
| 0
| 1
| 0.79021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6a3482ded0b1416ab665e9ddc374c24d4db10ed
| 197
|
py
|
Python
|
subject/contrib/plugins/artifacts_sample/__init__.py
|
laoyigrace/subject
|
e6ed989fdc250917a19788112b22322b73b3550f
|
[
"Apache-2.0"
] | null | null | null |
subject/contrib/plugins/artifacts_sample/__init__.py
|
laoyigrace/subject
|
e6ed989fdc250917a19788112b22322b73b3550f
|
[
"Apache-2.0"
] | null | null | null |
subject/contrib/plugins/artifacts_sample/__init__.py
|
laoyigrace/subject
|
e6ed989fdc250917a19788112b22322b73b3550f
|
[
"Apache-2.0"
] | null | null | null |
from subject.contrib.plugins.artifacts_sample.v1 import artifact as art1
from subject.contrib.plugins.artifacts_sample.v2 import artifact as art2

# Both supported versions of the sample artifact, exposed as one list —
# presumably consumed by the plugin-discovery machinery (TODO confirm).
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact]
| 32.833333
| 72
| 0.837563
| 28
| 197
| 5.785714
| 0.535714
| 0.135802
| 0.222222
| 0.308642
| 0.493827
| 0.493827
| 0
| 0
| 0
| 0
| 0
| 0.03352
| 0.091371
| 197
| 5
| 73
| 39.4
| 0.871508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fcc7975a3e747f5bd52d538e404d99777f6c893c
| 180
|
py
|
Python
|
tests/extract_method/test_9.py
|
Amin-MAG/CodART
|
a964a506d031f6eea505df081b9ba946f490d021
|
[
"MIT"
] | 1
|
2021-10-10T23:56:49.000Z
|
2021-10-10T23:56:49.000Z
|
tests/extract_method/test_9.py
|
Amin-MAG/CodART
|
a964a506d031f6eea505df081b9ba946f490d021
|
[
"MIT"
] | null | null | null |
tests/extract_method/test_9.py
|
Amin-MAG/CodART
|
a964a506d031f6eea505df081b9ba946f490d021
|
[
"MIT"
] | 1
|
2021-08-21T11:25:49.000Z
|
2021-08-21T11:25:49.000Z
|
"""
extracting lines containing method calls
test status: failed
"""
from refactorings.extract_method import extract_method
import os
import errno
def main():
    """Placeholder entry point; the extract-method scenario does nothing yet."""
    return None
| 12.857143
| 54
| 0.733333
| 22
| 180
| 5.909091
| 0.772727
| 0.2
| 0.292308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205556
| 180
| 13
| 55
| 13.846154
| 0.909091
| 0.338889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1e125e069951aa5943282e8630e9541ec6266a68
| 9,024
|
py
|
Python
|
openstackinabox/tests/services/keystone/v2/user/test_update.py
|
BenjamenMeyer/openstackinabox
|
b5097695719b818dd06e3773899f80a15e7e71c1
|
[
"Apache-2.0"
] | 1
|
2017-11-19T20:31:48.000Z
|
2017-11-19T20:31:48.000Z
|
openstackinabox/tests/services/keystone/v2/user/test_update.py
|
TestInABox/openstackinabox
|
00dcac601d14e1cfc240840dd92895ee322caf96
|
[
"Apache-2.0"
] | 38
|
2016-05-05T18:03:21.000Z
|
2020-04-11T03:33:01.000Z
|
openstackinabox/tests/services/keystone/v2/user/test_update.py
|
BenjamenMeyer/openstackinabox
|
b5097695719b818dd06e3773899f80a15e7e71c1
|
[
"Apache-2.0"
] | 1
|
2015-05-28T14:53:46.000Z
|
2015-05-28T14:53:46.000Z
|
"""
Stack-In-A-Box: Basic Test
"""
import json
import unittest
import requests
import stackinabox.util.requests_mock.core
from stackinabox.stack import StackInABox
from openstackinabox.services.keystone import KeystoneV2Service
class TestKeystoneV2UserUpdate(unittest.TestCase):
    """Tests for the openstackinabox Keystone v2 user-update endpoint.

    The original nine tests copy-pasted the URL construction, token
    switching, and field population; those are extracted into private
    helpers.  Public test names and behavior are unchanged.
    """

    def setUp(self):
        super(TestKeystoneV2UserUpdate, self).setUp()
        self.keystone = KeystoneV2Service()
        self.headers = {
            'x-auth-token': self.keystone.model.tokens.admin_token
        }
        self.tenant_id = self.keystone.model.tenants.add(
            tenant_name='neo',
            description='The One'
        )
        self.user_info = {
            'user': {
                'username': 'trinity',
                'enabled': True,
                'email': 'trinity@theone.matrix',
                'OS-KSADM:password': 'Inl0veWithNeo'
            }
        }
        self.user_info['user']['userid'] = self.keystone.model.users.add(
            tenant_id=self.tenant_id,
            username=self.user_info['user']['username'],
            email=self.user_info['user']['email'],
            password=self.user_info['user']['OS-KSADM:password'],
            enabled=self.user_info['user']['enabled']
        )
        self.keystone.model.tokens.add(
            tenant_id=self.tenant_id,
            user_id=self.user_info['user']['userid']
        )
        StackInABox.register_service(self.keystone)

    def tearDown(self):
        super(TestKeystoneV2UserUpdate, self).tearDown()
        StackInABox.reset_services()

    def _url(self):
        # Update endpoint for whatever userid is currently in self.user_info.
        return 'http://localhost/keystone/v2.0/users/{0}'.format(
            self.user_info['user']['userid'])

    def _post_update(self, body, headers=None):
        # POST an already-JSON-encoded body to the update endpoint.
        return requests.post(self._url(), headers=headers, data=body)

    def _use_user_token(self):
        # Switch self.headers to the token of the user created in setUp().
        user_data = self.keystone.model.tokens.get_by_user_id(
            self.user_info['user']['userid'])
        self.headers['x-auth-token'] = user_data['token']

    def _fill_update_fields(self):
        # Populate the standard set of updatable fields used by the tests.
        self.user_info['user']['email'] = 'trinity@lost.matrix'
        self.user_info['user']['id'] = self.user_info['user']['userid']
        self.user_info['user']['enabled'] = False
        self.user_info['user']['OS-KSADM:password'] = 'neocortex'

    def test_user_update_no_token(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            res = self._post_update(json.dumps(self.user_info))
            self.assertEqual(res.status_code, 403)

    def test_user_update_invalid_token(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self.headers['x-auth-token'] = 'new_token'
            res = self._post_update(json.dumps(self.user_info),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 401)

    def test_user_update_no_user(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            res = self._post_update(json.dumps({'family': {}}),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 400)

    def test_user_update_no_user_id(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            res = self._post_update(json.dumps({'user': {}}),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 400)

    def test_user_update_invalid_user_id(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            # Point at a user that does not exist before filling the fields,
            # so both the URL and the 'id' field use the bogus id.
            self.user_info['user']['userid'] = '1234567890'
            self._fill_update_fields()
            res = self._post_update(json.dumps(self.user_info),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 404)

    def test_user_update(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            self._fill_update_fields()
            res = self._post_update(json.dumps(self.user_info),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 200)

    def test_user_update_no_enabled(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            self._fill_update_fields()
            del self.user_info['user']['enabled']
            res = self._post_update(json.dumps(self.user_info),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 200)

    def test_user_update_no_email(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            self._fill_update_fields()
            del self.user_info['user']['email']
            res = self._post_update(json.dumps(self.user_info),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 200)

    def test_user_update_no_password(self):
        with stackinabox.util.requests_mock.core.activate():
            stackinabox.util.requests_mock.core.requests_mock_registration(
                'localhost')
            self._use_user_token()
            self._fill_update_fields()
            del self.user_info['user']['OS-KSADM:password']
            res = self._post_update(json.dumps(self.user_info),
                                    headers=self.headers)
            self.assertEqual(res.status_code, 200)
| 47.746032
| 75
| 0.565935
| 976
| 9,024
| 5.039959
| 0.095287
| 0.091075
| 0.136613
| 0.159382
| 0.858711
| 0.810734
| 0.78085
| 0.76174
| 0.747103
| 0.747103
| 0
| 0.011031
| 0.296764
| 9,024
| 188
| 76
| 48
| 0.764103
| 0.002881
| 0
| 0.623529
| 0
| 0
| 0.149611
| 0.002336
| 0
| 0
| 0
| 0
| 0.052941
| 1
| 0.064706
| false
| 0.047059
| 0.035294
| 0
| 0.105882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e7952c2f8bcfb01e23c83ef5580af74e47174fb
| 19
|
py
|
Python
|
clib/__init__.py
|
bracca95/Probabilistic-Face-Embeddings
|
3a4ae6f5e7b0287dbba55f14618cbfaf7ccb7b41
|
[
"MIT"
] | 312
|
2019-04-22T03:29:27.000Z
|
2022-03-30T07:29:04.000Z
|
clib/__init__.py
|
guanfangdong/Probabilistic-Face-Embeddings
|
23191e9b068dbf495a37daa071a1383f12f2799b
|
[
"MIT"
] | 15
|
2019-04-28T20:57:46.000Z
|
2021-10-13T07:26:55.000Z
|
clib/__init__.py
|
guanfangdong/Probabilistic-Face-Embeddings
|
23191e9b068dbf495a37daa071a1383f12f2799b
|
[
"MIT"
] | 57
|
2019-04-23T02:38:07.000Z
|
2022-03-21T13:05:06.000Z
|
from .mls import *
| 9.5
| 18
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ebbc67bb522a03771db9858ec68b68ddccdf463
| 257
|
py
|
Python
|
trees/tssb/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
trees/tssb/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
trees/tssb/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
from trees.tssb.tssb import TSSB
from trees.tssb.itssb import InteractiveTSSB
from trees.tssb.gibbs import GibbsSampler
from trees.tssb.parameter import GaussianParameterProcess
from trees.tssb.df import DepthFunction, QuadraticDepth
import trees.tssb.util
| 36.714286
| 57
| 0.859922
| 35
| 257
| 6.314286
| 0.4
| 0.244344
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089494
| 257
| 6
| 58
| 42.833333
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9493617586973d6676a50a84d7aa4061b3d71aec
| 114
|
py
|
Python
|
atlas/providers/env.py
|
citruspi/Atlas
|
ae9d47e7410e7bb50b8891e6cbe1803620f46588
|
[
"Unlicense"
] | null | null | null |
atlas/providers/env.py
|
citruspi/Atlas
|
ae9d47e7410e7bb50b8891e6cbe1803620f46588
|
[
"Unlicense"
] | null | null | null |
atlas/providers/env.py
|
citruspi/Atlas
|
ae9d47e7410e7bb50b8891e6cbe1803620f46588
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os
def env(variable):
return os.environ.get(variable)
| 11.4
| 35
| 0.640351
| 16
| 114
| 4.5625
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.184211
| 114
| 9
| 36
| 12.666667
| 0.774194
| 0.359649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
44f4a198ac8866b45cb790c1bbd52360070ab988
| 165
|
py
|
Python
|
test1.py
|
ytyaru/Python.pylangstudy.Subjects.Data.Partial.201705221751
|
fd63dd3b7c0e1151365a1c66d32cc15d8c699916
|
[
"CC0-1.0"
] | null | null | null |
test1.py
|
ytyaru/Python.pylangstudy.Subjects.Data.Partial.201705221751
|
fd63dd3b7c0e1151365a1c66d32cc15d8c699916
|
[
"CC0-1.0"
] | null | null | null |
test1.py
|
ytyaru/Python.pylangstudy.Subjects.Data.Partial.201705221751
|
fd63dd3b7c0e1151365a1c66d32cc15d8c699916
|
[
"CC0-1.0"
] | null | null | null |
def Dict1():
return {'key1': 'value1'}
def Dict2():
return {'key2': 'value1'}
# print(Dict1().update(Dict2())) # None
d = Dict1()
d.update(Dict2())
print(d)
| 18.333333
| 39
| 0.587879
| 22
| 165
| 4.409091
| 0.5
| 0.226804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 0.163636
| 165
| 8
| 40
| 20.625
| 0.630435
| 0.218182
| 0
| 0
| 0
| 0
| 0.15873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.285714
| 0.571429
| 0.142857
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
781153a4c14bf8e15626011bb1b33fe4dddcd3d6
| 141
|
py
|
Python
|
SimPEG/utils/meshutils.py
|
prisae/simpeg
|
5cdd1b496bddcf3d9acd714b901a57bad6fb1ef9
|
[
"MIT"
] | 3
|
2021-08-04T02:27:41.000Z
|
2022-01-12T00:20:07.000Z
|
SimPEG/utils/meshutils.py
|
thast/simpeg
|
8021082b8b53f3c08fa87fc085547bdd56437c6b
|
[
"MIT"
] | 2
|
2020-06-16T00:11:37.000Z
|
2020-07-10T19:45:09.000Z
|
SimPEG/utils/meshutils.py
|
thast/simpeg
|
8021082b8b53f3c08fa87fc085547bdd56437c6b
|
[
"MIT"
] | 1
|
2021-12-29T00:06:07.000Z
|
2021-12-29T00:06:07.000Z
|
from .code_utils import deprecate_module
deprecate_module("meshutils", "mesh_utils", "0.16.0", future_warn=True)
from .mesh_utils import *
| 23.5
| 71
| 0.780142
| 21
| 141
| 4.952381
| 0.619048
| 0.211538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031496
| 0.099291
| 141
| 5
| 72
| 28.2
| 0.787402
| 0
| 0
| 0
| 0
| 0
| 0.177305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7821f2a895ed1e8f9878313a8ef1a8553a2f9dd9
| 163
|
py
|
Python
|
app/auth/__init__.py
|
luxutao/staffms
|
6fe2a263fca4a817fbd18965327bd8ad5326dc6b
|
[
"Apache-2.0"
] | 1
|
2019-12-25T11:11:33.000Z
|
2019-12-25T11:11:33.000Z
|
app/auth/__init__.py
|
luxutao/staffms
|
6fe2a263fca4a817fbd18965327bd8ad5326dc6b
|
[
"Apache-2.0"
] | null | null | null |
app/auth/__init__.py
|
luxutao/staffms
|
6fe2a263fca4a817fbd18965327bd8ad5326dc6b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python3
# -*- conding: utf-8 -*-
from flask import Blueprint
auth_api = Blueprint('auth', __name__, url_prefix='/api/auth')
from . import views
| 20.375
| 62
| 0.699387
| 23
| 163
| 4.695652
| 0.73913
| 0.240741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.128834
| 163
| 8
| 63
| 20.375
| 0.746479
| 0.282209
| 0
| 0
| 0
| 0
| 0.112069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
788b6c2d64fc46b3979208b77756c41de2149fb3
| 261
|
py
|
Python
|
Python/04/Py04_03.py
|
Pantoofle/Language-Practice
|
f5d4b3f5eb745f0e9abf50f2ddb08fd902225f07
|
[
"MIT"
] | null | null | null |
Python/04/Py04_03.py
|
Pantoofle/Language-Practice
|
f5d4b3f5eb745f0e9abf50f2ddb08fd902225f07
|
[
"MIT"
] | null | null | null |
Python/04/Py04_03.py
|
Pantoofle/Language-Practice
|
f5d4b3f5eb745f0e9abf50f2ddb08fd902225f07
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
x = 2.21
print(str(x), "Gigowatts !!!", str(x), "Gigowatts !! Nom de Zeus ! Marty !")
print("%s Gigowatts !!! %s Gigowatts !! Nom de Zeus ! Marty !" %(x, x))
print("{} Gigowatts !!! {} Gigowatts !! Nom de Zeus ! Marty !".format(x, x))
| 32.625
| 76
| 0.578544
| 39
| 261
| 3.871795
| 0.410256
| 0.238411
| 0.278146
| 0.357616
| 0.456954
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0.180077
| 261
| 7
| 77
| 37.285714
| 0.686916
| 0.084291
| 0
| 0
| 0
| 0
| 0.651261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
78c3a2c3801c686e3f64453a63ceafc93bc89b29
| 812
|
py
|
Python
|
src/gelanis/sql/_expressions/aggregate/aggregations.py
|
svaningelgem/gelanis
|
12360ead579816e5a2764dc9f995449bacf67ecc
|
[
"Apache-2.0"
] | 1
|
2021-07-30T11:23:43.000Z
|
2021-07-30T11:23:43.000Z
|
src/gelanis/sql/_expressions/aggregate/aggregations.py
|
svaningelgem/gelanis
|
12360ead579816e5a2764dc9f995449bacf67ecc
|
[
"Apache-2.0"
] | 3
|
2021-03-05T14:45:38.000Z
|
2021-03-10T16:19:38.000Z
|
src/gelanis/sql/_expressions/aggregate/aggregations.py
|
svaningelgem/gelanis
|
12360ead579816e5a2764dc9f995449bacf67ecc
|
[
"Apache-2.0"
] | 1
|
2021-03-17T19:43:05.000Z
|
2021-03-17T19:43:05.000Z
|
from .. import Expression
from ...types import ArrayType
class Aggregation(Expression):
@property
def is_an_aggregation(self):
return True
def merge(self, row, schema):
raise NotImplementedError
def mergeStats(self, other, schema):
raise NotImplementedError
def eval(self, row, schema):
raise NotImplementedError
def args(self):
raise NotImplementedError
def data_type(self, schema):
# TODO: Check if we can generalize this. By default, this should be fine, but needs to be overridden in each
# subclass where it deviates from this standard.
# pylint: disable=E1101
return ArrayType(
elementType=schema[str(self.column)].dataType,
containsNull=self.column.is_nullable
)
| 27.066667
| 116
| 0.660099
| 92
| 812
| 5.782609
| 0.619565
| 0.180451
| 0.203008
| 0.18609
| 0.150376
| 0.150376
| 0
| 0
| 0
| 0
| 0
| 0.006745
| 0.269704
| 812
| 29
| 117
| 28
| 0.890388
| 0.219212
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0
| 1
| 0.315789
| false
| 0
| 0.105263
| 0.105263
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
1531fd8a220b7ae58756e479981c53b088532678
| 110
|
py
|
Python
|
visualization/utils.py
|
hemiku/Visualization
|
b21bfc738278c1ce8f2df52e41230dcd58c8913e
|
[
"MIT"
] | null | null | null |
visualization/utils.py
|
hemiku/Visualization
|
b21bfc738278c1ce8f2df52e41230dcd58c8913e
|
[
"MIT"
] | 1
|
2022-02-20T12:37:19.000Z
|
2022-02-20T12:37:19.000Z
|
visualization/utils.py
|
hemiku/Visualization
|
b21bfc738278c1ce8f2df52e41230dcd58c8913e
|
[
"MIT"
] | null | null | null |
def letters( input):
return ''.join(filter(str.isalpha, input))
def strip_input_name( input ):
return
| 22
| 46
| 0.690909
| 15
| 110
| 4.933333
| 0.666667
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172727
| 110
| 5
| 47
| 22
| 0.813187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
1543094846a6d68b30a0f9477428ed13a96860d0
| 117
|
py
|
Python
|
rpd/factories/__init__.py
|
ooliver1/RPD
|
e4900eed75ee636385749b883fe63e1cb48d81bd
|
[
"MIT"
] | null | null | null |
rpd/factories/__init__.py
|
ooliver1/RPD
|
e4900eed75ee636385749b883fe63e1cb48d81bd
|
[
"MIT"
] | null | null | null |
rpd/factories/__init__.py
|
ooliver1/RPD
|
e4900eed75ee636385749b883fe63e1cb48d81bd
|
[
"MIT"
] | null | null | null |
"""
rpd.factories
~~~~~~~~~~~~~
Factory Module for RPD.
"""
from .event_factory import *
from .rest_factory import *
| 14.625
| 28
| 0.649573
| 14
| 117
| 5.285714
| 0.642857
| 0.351351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136752
| 117
| 7
| 29
| 16.714286
| 0.732673
| 0.435897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
154c05f9fb11353a8fc87e83851f5359f0313358
| 112
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_11_15_exm_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_11_15_exm_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_11_15_exm_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
# jythonc -w C:\ImageJ\plugins\Jython -C C:\ImageJ\jikes
-J "-bootclasspath C:\ImageJ\jre\lib\rt.jar -nowarn"
| 37.333333
| 56
| 0.705357
| 19
| 112
| 4.157895
| 0.736842
| 0.265823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 112
| 2
| 57
| 56
| 0.79
| 0.482143
| 0
| 0
| 0
| 0
| 0.839286
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
156f5e23167ce4f7b01f9c8ef4c3bd0d428f1aa3
| 164
|
py
|
Python
|
scripts/mlp/centroidal/muscod.py
|
stonneau/multicontact-locomotion-planning
|
a2c5dd35955a44c5a454d114c9dcaf0fec19424f
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/mlp/centroidal/muscod.py
|
stonneau/multicontact-locomotion-planning
|
a2c5dd35955a44c5a454d114c9dcaf0fec19424f
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/mlp/centroidal/muscod.py
|
stonneau/multicontact-locomotion-planning
|
a2c5dd35955a44c5a454d114c9dcaf0fec19424f
|
[
"BSD-2-Clause"
] | null | null | null |
import mlp.config as cfg
def generateCentroidalTrajectory(cs, cs_initGuess=None, fullBody=None, viewer=None, first_iter = True):
raise NotImplemented("TODO")
| 27.333333
| 103
| 0.780488
| 21
| 164
| 6
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 164
| 5
| 104
| 32.8
| 0.875
| 0
| 0
| 0
| 1
| 0
| 0.02439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
159bab273e13b63b6ad11de09064927878b70d09
| 7,240
|
py
|
Python
|
plugin_kube/__init__.py
|
DNXLabs/plugin-kube
|
1c674dd6b5dd8f809bd1b0932c8e22773522556a
|
[
"Apache-2.0"
] | null | null | null |
plugin_kube/__init__.py
|
DNXLabs/plugin-kube
|
1c674dd6b5dd8f809bd1b0932c8e22773522556a
|
[
"Apache-2.0"
] | null | null | null |
plugin_kube/__init__.py
|
DNXLabs/plugin-kube
|
1c674dd6b5dd8f809bd1b0932c8e22773522556a
|
[
"Apache-2.0"
] | null | null | null |
import click
from one.one import cli
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
from one.utils.config import get_config_value
container = Container()
image = Image()
environment = EnvironmentAws()
AWS_IMAGE = image.get_image('aws')
KUBE_TOOLS_IMAGE = 'dnxsolutions/docker-kube-tools:0.3.2'
def __init__():
cli.add_command(kubectl)
cli.add_command(helm)
cli.add_command(kube_shell)
cli.add_command(kube_proxy)
def get_kube_config(aws_default_region, cluster_name, envs):
kubeconfig = get_config_value('plugins.kube.parameters.kubeconfig', '') or '/work/.kube-config'
command = 'eks --region %s update-kubeconfig --name %s --kubeconfig %s' % (aws_default_region, cluster_name, kubeconfig)
container.create(
image=AWS_IMAGE,
command=command,
volumes=['.:/work'],
environment=envs
)
@click.command(name='kubectl', help='Kubectl wrap command entry.')
@click.argument('args', nargs=-1)
@click.option('-n', '--cluster-name', 'cluster_name', default=None, type=str, help='AWS EKS cluster name.')
@click.option('-w', '--workspace', default=None, type=str, help='Workspace to use.')
@click.option('-r', '--aws-role', 'aws_role', default=None, type=str, help='AWS role to use.')
@click.option('-a', '--aws-assume-role', 'aws_assume_role', default=None, type=str, help='AWS assume role.')
@click.option('-R', '--aws-default-region', 'aws_default_region', default=None, type=str, help='AWS default region to use.')
def kubectl(args, cluster_name, workspace, aws_role, aws_assume_role, aws_default_region):
cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')
envs = environment.build(workspace=workspace, aws_role=aws_role, aws_assume_role=aws_assume_role).get_env()
envs['KUBECONFIG'] = get_config_value('plugins.kube.parameters.kubeconfig', '') or '/work/.kube-config'
get_kube_config(aws_default_region, cluster_name, envs)
entrypoint = 'kubectl'
command = ''
for arg in args:
command += '%s ' % (arg)
container.create(
image=KUBE_TOOLS_IMAGE,
command=command,
entrypoint=entrypoint,
volumes=['.:/work'],
environment=envs
)
@click.command(name='helm', help='Helm wrap command entry.')
@click.argument('args', nargs=-1)
@click.option('-n', '--cluster-name', 'cluster_name', default=None, type=str, help='AWS EKS cluster name.')
@click.option('-w', '--workspace', default=None, type=str, help='Workspace to use.')
@click.option('-r', '--aws-role', 'aws_role', default=None, type=str, help='AWS role to use.')
@click.option('-a', '--aws-assume-role', 'aws_assume_role', default=None, type=str, help='AWS assume role.')
@click.option('-R', '--aws-default-region', 'aws_default_region', default=None, type=str, help='AWS default region to use.')
def helm(args, cluster_name, workspace, aws_role, aws_assume_role, aws_default_region):
cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')
envs = environment.build(workspace=workspace, aws_role=aws_role, aws_assume_role=aws_assume_role).get_env()
envs['KUBECONFIG'] = get_config_value('plugins.kube.parameters.kubeconfig', '') or '/work/.kube-config'
get_kube_config(aws_default_region, cluster_name, envs)
entrypoint = 'helm'
command = ''
for arg in args:
command += '%s ' % (arg)
container.create(
image=KUBE_TOOLS_IMAGE,
command=command,
entrypoint=entrypoint,
volumes=['.:/work'],
environment=envs
)
@click.command(name='kube-shell', help='Shell entry to EKS environment.')
@click.option('-n', '--cluster-name', 'cluster_name', default=None, type=str, help='AWS EKS cluster name.')
@click.option('-w', '--workspace', default=None, type=str, help='Workspace to use.')
@click.option('-r', '--aws-role', 'aws_role', default=None, type=str, help='AWS role to use.')
@click.option('-a', '--aws-assume-role', 'aws_assume_role', default=None, type=str, help='AWS assume role.')
@click.option('-R', '--aws-default-region', 'aws_default_region', default=None, type=str, help='AWS default region to use.')
def kube_shell(cluster_name, workspace, aws_role, aws_assume_role, aws_default_region):
cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')
envs = environment.build(workspace=workspace, aws_role=aws_role, aws_assume_role=aws_assume_role).get_env()
envs['KUBECONFIG'] = get_config_value('plugins.kube.parameters.kubeconfig', '') or '/work/.kube-config'
get_kube_config(aws_default_region, cluster_name, envs)
entrypoint = '/bin/bash'
container.create(
image=KUBE_TOOLS_IMAGE,
entrypoint=entrypoint,
ports=['8001:8001'],
volumes=['.:/work'],
environment=envs
)
@click.command(name='kube-proxy', help='Proxy entry to EKS environment.')
@click.option('-n', '--cluster-name', 'cluster_name', default=None, type=str, help='AWS EKS cluster name.')
@click.option('-w', '--workspace', default=None, type=str, help='Workspace to use.')
@click.option('-r', '--aws-role', 'aws_role', default=None, type=str, help='AWS role to use.')
@click.option('-a', '--aws-assume-role', 'aws_assume_role', default=None, type=str, help='AWS assume role.')
@click.option('-R', '--aws-default-region', 'aws_default_region', default=None, type=str, help='AWS default region to use.')
@click.option('-p', '--port', 'port', default='8001:8001', type=str, help='Proxy port to expose.')
def kube_proxy(cluster_name, workspace, aws_role, aws_assume_role, aws_default_region, port):
cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')
envs = environment.build(workspace=workspace, aws_role=aws_role, aws_assume_role=aws_assume_role).get_env()
envs['KUBECONFIG'] = get_config_value('plugins.kube.parameters.kubeconfig', '') or '/work/.kube-config'
get_kube_config(aws_default_region, cluster_name, envs)
entrypoint = 'kubectl'
command = 'proxy --address 0.0.0.0 --port %s' % port.split(":")[1]
container.create(
image=KUBE_TOOLS_IMAGE,
entrypoint=entrypoint,
command=command,
ports=[port],
volumes=['.:/work'],
environment=envs
)
| 50.277778
| 124
| 0.715331
| 1,003
| 7,240
| 4.939182
| 0.079761
| 0.065402
| 0.094469
| 0.072669
| 0.8478
| 0.84235
| 0.84235
| 0.833872
| 0.801978
| 0.793904
| 0
| 0.004126
| 0.129558
| 7,240
| 143
| 125
| 50.629371
| 0.781974
| 0
| 0
| 0.675
| 0
| 0
| 0.292127
| 0.093094
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
159c652139885ce492019dbb08e43f59fbd4718f
| 19
|
py
|
Python
|
bad/listOutOfBounds.py
|
Alberto42/Interpreter
|
a56c4d905672572734a8470ef607b66727489f15
|
[
"BSD-3-Clause"
] | null | null | null |
bad/listOutOfBounds.py
|
Alberto42/Interpreter
|
a56c4d905672572734a8470ef607b66727489f15
|
[
"BSD-3-Clause"
] | null | null | null |
bad/listOutOfBounds.py
|
Alberto42/Interpreter
|
a56c4d905672572734a8470ef607b66727489f15
|
[
"BSD-3-Clause"
] | null | null | null |
l = [0]
print(l[1])
| 9.5
| 11
| 0.473684
| 5
| 19
| 1.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.157895
| 19
| 2
| 11
| 9.5
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ec7bf5524ae0b7edeed7e6a2ecd35d8c77cb0f33
| 218
|
py
|
Python
|
src/myexceptions.py
|
delete/spymanager
|
c5886141000ad8b6b8e7c8ae6bc2ad631da33c72
|
[
"MIT"
] | 7
|
2017-02-01T00:34:30.000Z
|
2022-01-28T22:05:14.000Z
|
src/myexceptions.py
|
delete/spymanager
|
c5886141000ad8b6b8e7c8ae6bc2ad631da33c72
|
[
"MIT"
] | 8
|
2017-01-08T21:06:43.000Z
|
2020-10-18T13:20:13.000Z
|
src/myexceptions.py
|
delete/spylist
|
c5886141000ad8b6b8e7c8ae6bc2ad631da33c72
|
[
"MIT"
] | 1
|
2018-10-24T00:37:08.000Z
|
2018-10-24T00:37:08.000Z
|
class GroupNotFoundException(Exception):
pass
class UserNotFoundException(Exception):
pass
class AlreadyExistsOnDatabaseException(Exception):
pass
class ChatIdOrTextCannotBeEmpty(Exception):
pass
| 14.533333
| 50
| 0.788991
| 16
| 218
| 10.75
| 0.4375
| 0.302326
| 0.313953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155963
| 218
| 14
| 51
| 15.571429
| 0.934783
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ec9ea6c4e8a18d90e2b074f39f761faafa07e859
| 176
|
py
|
Python
|
markdown_external_link_finder/__main__.py
|
MatMoore/markdown-external-link-finder
|
bb04d2573e150d8efe61063deafa3119c5f2ef3f
|
[
"MIT"
] | null | null | null |
markdown_external_link_finder/__main__.py
|
MatMoore/markdown-external-link-finder
|
bb04d2573e150d8efe61063deafa3119c5f2ef3f
|
[
"MIT"
] | 4
|
2019-06-04T22:36:17.000Z
|
2021-06-25T15:34:31.000Z
|
markdown_external_link_finder/__main__.py
|
MatMoore/markdown-external-link-finder
|
bb04d2573e150d8efe61063deafa3119c5f2ef3f
|
[
"MIT"
] | null | null | null |
import glob
from .extract import extract_markdown_links
markdown_files = glob.glob('**/*.md', recursive=True)
for url in extract_markdown_links(markdown_files):
print(url)
| 29.333333
| 53
| 0.784091
| 25
| 176
| 5.28
| 0.56
| 0.227273
| 0.30303
| 0.424242
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107955
| 176
| 6
| 54
| 29.333333
| 0.840764
| 0
| 0
| 0
| 0
| 0
| 0.039548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
01a7f4e684575fc5f27ecaaf024bb9c8377dbebd
| 69
|
py
|
Python
|
ui/__init__.py
|
neuromancer/lisa
|
817efdf2ffd69de983f2b4f12d4db2885bd1b308
|
[
"Apache-2.0"
] | 2
|
2017-11-05T22:14:19.000Z
|
2019-05-07T15:33:06.000Z
|
ui/__init__.py
|
kevinmel2000/lisa
|
817efdf2ffd69de983f2b4f12d4db2885bd1b308
|
[
"Apache-2.0"
] | 1
|
2018-02-26T19:24:54.000Z
|
2018-02-26T19:47:08.000Z
|
ui/__init__.py
|
kevinmel2000/lisa
|
817efdf2ffd69de983f2b4f12d4db2885bd1b308
|
[
"Apache-2.0"
] | 4
|
2017-11-05T22:14:23.000Z
|
2021-05-21T16:59:24.000Z
|
from ui.notification import notify
def say(x):
return notify(x)
| 13.8
| 34
| 0.724638
| 11
| 69
| 4.545455
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188406
| 69
| 4
| 35
| 17.25
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
01b4680ca55594868c63cb44eaf96b1ae3894315
| 3,085
|
py
|
Python
|
kitchen/tests/test_init.py
|
honzas83/kitchen
|
749953fe28895ed51cd7283800a9bb591c269da2
|
[
"BSD-3-Clause"
] | null | null | null |
kitchen/tests/test_init.py
|
honzas83/kitchen
|
749953fe28895ed51cd7283800a9bb591c269da2
|
[
"BSD-3-Clause"
] | null | null | null |
kitchen/tests/test_init.py
|
honzas83/kitchen
|
749953fe28895ed51cd7283800a9bb591c269da2
|
[
"BSD-3-Clause"
] | null | null | null |
import kitchen
import lasagne
import numpy as np
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.utils.testing import assert_equal
from nose.tools import raises
class NetInitNormal(kitchen.Network, kitchen.ADADelta, kitchen.BinaryCrossentropy):
def create_layers(self, X_dim, y_dim, random_state):
initW = kitchen.init.Normal(random_state=random_state)
initb = kitchen.init.Normal(random_state=random_state)
l0 = lasagne.layers.InputLayer(shape=(None, X_dim))
l02 = kitchen.layers.DropoutLayer(l0, p=0.5, random_state=random_state)
l1 = lasagne.layers.DenseLayer(l02, num_units=128, nonlinearity=lasagne.nonlinearities.LeakyRectify(), W=initW, b=initb)
l12 = kitchen.layers.DropoutLayer(l1, p=0.5, random_state=random_state)
l3 = lasagne.layers.DenseLayer(l12, num_units=y_dim, nonlinearity=lasagne.nonlinearities.sigmoid, W=initW, b=initb)
return l0, l3
def test_fit_normal():
X = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([[0], [1]])
net = NetInitNormal(random_state=42)
net.fit(X, y)
net.loss(X, y)
y_pred = net.predict(X)
class NetGlorotFail(kitchen.Network, kitchen.ADADelta, kitchen.BinaryCrossentropy):
def create_layers(self, X_dim, y_dim, random_state):
initW = kitchen.init.GlorotNormal(random_state=random_state)
initb = kitchen.init.GlorotNormal(random_state=random_state)
l0 = lasagne.layers.InputLayer(shape=(None, X_dim))
l02 = kitchen.layers.DropoutLayer(l0, p=0.5, random_state=random_state)
l1 = lasagne.layers.DenseLayer(l02, num_units=128, nonlinearity=lasagne.nonlinearities.LeakyRectify(), W=initW, b=initb)
l12 = kitchen.layers.DropoutLayer(l1, p=0.5, random_state=random_state)
l3 = lasagne.layers.DenseLayer(l12, num_units=y_dim, nonlinearity=lasagne.nonlinearities.sigmoid, W=initW, b=initb)
return l0, l3
@raises(RuntimeError)
def test_fit_glorot_fail():
X = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([[0], [1]])
net = NetGlorotFail(random_state=42)
net.fit(X, y)
class NetGlorotFail2(kitchen.Network, kitchen.ADADelta, kitchen.BinaryCrossentropy):
def create_layers(self, X_dim, y_dim, random_state):
initW = kitchen.init.GlorotNormal(random_state=random_state, c01b=True)
initb = kitchen.init.Uniform(random_state=random_state)
l0 = lasagne.layers.InputLayer(shape=(None, X_dim))
l02 = kitchen.layers.DropoutLayer(l0, p=0.5, random_state=random_state)
l1 = lasagne.layers.DenseLayer(l02, num_units=128, nonlinearity=lasagne.nonlinearities.LeakyRectify(), W=initW, b=initb)
l12 = kitchen.layers.DropoutLayer(l1, p=0.5, random_state=random_state)
l3 = lasagne.layers.DenseLayer(l12, num_units=y_dim, nonlinearity=lasagne.nonlinearities.sigmoid, W=initW, b=initb)
return l0, l3
@raises(RuntimeError)
def test_fit_glorot_fail2():
X = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([[0], [1]])
net = NetGlorotFail2(random_state=42)
net.fit(X, y)
| 39.551282
| 128
| 0.706969
| 432
| 3,085
| 4.905093
| 0.194444
| 0.155734
| 0.096272
| 0.124587
| 0.84285
| 0.84285
| 0.84285
| 0.77631
| 0.77631
| 0.77631
| 0
| 0.039178
| 0.164344
| 3,085
| 77
| 129
| 40.064935
| 0.782777
| 0
| 0
| 0.581818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.109091
| false
| 0
| 0.109091
| 0
| 0.327273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
01db0918d6d3710e18091923be0871474e8b909d
| 51
|
py
|
Python
|
_init_.py
|
devsahu99/hybrid_recommender
|
0e1b32de8bda5ababa83a366fa740528c4c63c5d
|
[
"MIT"
] | 1
|
2021-02-02T09:15:19.000Z
|
2021-02-02T09:15:19.000Z
|
_init_.py
|
devsahu99/hybrid_recommender
|
0e1b32de8bda5ababa83a366fa740528c4c63c5d
|
[
"MIT"
] | null | null | null |
_init_.py
|
devsahu99/hybrid_recommender
|
0e1b32de8bda5ababa83a366fa740528c4c63c5d
|
[
"MIT"
] | 1
|
2019-05-16T12:46:08.000Z
|
2019-05-16T12:46:08.000Z
|
from .hybrid_recommender import hybrid_recommender
| 25.5
| 50
| 0.901961
| 6
| 51
| 7.333333
| 0.666667
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 1
| 51
| 51
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf147825412b56b22c16ab18fb1054a03a991aa6
| 19
|
py
|
Python
|
part_c/c_constants.py
|
cconnerolson/aerospace_assignment_6
|
adeb4aaee29198cac06321f2a7a23efd20cf56e6
|
[
"MIT"
] | 1
|
2020-11-28T05:16:22.000Z
|
2020-11-28T05:16:22.000Z
|
part_c/c_constants.py
|
cconnerolson/aerospace_assignment_6
|
adeb4aaee29198cac06321f2a7a23efd20cf56e6
|
[
"MIT"
] | null | null | null |
part_c/c_constants.py
|
cconnerolson/aerospace_assignment_6
|
adeb4aaee29198cac06321f2a7a23efd20cf56e6
|
[
"MIT"
] | 1
|
2020-11-28T05:16:29.000Z
|
2020-11-28T05:16:29.000Z
|
A_e = 4.478 # m**2
| 19
| 19
| 0.473684
| 6
| 19
| 1.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.357143
| 0.263158
| 19
| 1
| 19
| 19
| 0.214286
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1723ef876c0d25797b84c463a68b426d30239207
| 326
|
py
|
Python
|
model_field_meta/mixins.py
|
melvinkcx/django-model-field-meta
|
27ba8928bd90eeb3340cccafcc4c99f32a2e1c5e
|
[
"MIT"
] | 8
|
2019-11-07T08:23:22.000Z
|
2022-02-20T12:59:03.000Z
|
model_field_meta/mixins.py
|
melvinkcx/django-model-field-meta
|
27ba8928bd90eeb3340cccafcc4c99f32a2e1c5e
|
[
"MIT"
] | 7
|
2019-11-19T01:14:03.000Z
|
2021-06-09T18:41:49.000Z
|
model_field_meta/mixins.py
|
melvinkcx/django-model-field-meta
|
27ba8928bd90eeb3340cccafcc4c99f32a2e1c5e
|
[
"MIT"
] | 1
|
2020-12-07T02:59:43.000Z
|
2020-12-07T02:59:43.000Z
|
class FieldMetaMixin:
@classmethod
def get_field_meta(cls, field_name):
return cls._meta.get_field(field_name).get_meta()
@classmethod
def has_field_meta(cls, field_name):
return hasattr(cls._meta.get_field(field_name), "_meta") \
and cls.get_field_meta(field_name) is not None
| 32.6
| 66
| 0.693252
| 46
| 326
| 4.543478
| 0.347826
| 0.215311
| 0.114833
| 0.162679
| 0.488038
| 0.488038
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211656
| 326
| 9
| 67
| 36.222222
| 0.81323
| 0
| 0
| 0.25
| 0
| 0
| 0.015337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
1736c11c89c70a1c7e787faaddd0bcfff5d92330
| 158
|
py
|
Python
|
sbroccoli/tasks/__init__.py
|
modamod/special-broccoli
|
7b82e15cf9ebef73e7bbb4e251780acddae75428
|
[
"MIT"
] | null | null | null |
sbroccoli/tasks/__init__.py
|
modamod/special-broccoli
|
7b82e15cf9ebef73e7bbb4e251780acddae75428
|
[
"MIT"
] | null | null | null |
sbroccoli/tasks/__init__.py
|
modamod/special-broccoli
|
7b82e15cf9ebef73e7bbb4e251780acddae75428
|
[
"MIT"
] | null | null | null |
from invoke import Collection
from . import utils, aws
ns = Collection()
ns.add_collection(ns.from_module(utils))
ns.add_collection(ns.from_module(aws))
| 26.333333
| 41
| 0.772152
| 24
| 158
| 4.916667
| 0.375
| 0.305085
| 0.254237
| 0.288136
| 0.457627
| 0.457627
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120253
| 158
| 6
| 42
| 26.333333
| 0.848921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.