hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bbe5f77438b6a7c9f1988585984769150b227f0a
| 28
|
py
|
Python
|
devel/lib/python2.7/dist-packages/hdl_graph_slam/msg/__init__.py
|
shiyangyang24/hdl_graph_slam_plus
|
0e46016bd6da8234b6371cee4b3d9f15bb6a333c
|
[
"Apache-2.0"
] | 2
|
2019-06-08T16:20:15.000Z
|
2020-07-04T09:18:07.000Z
|
devel/lib/python2.7/dist-packages/hdl_graph_slam/msg/__init__.py
|
shiyangyang24/hdl_graph_slam_plus
|
0e46016bd6da8234b6371cee4b3d9f15bb6a333c
|
[
"Apache-2.0"
] | null | null | null |
devel/lib/python2.7/dist-packages/hdl_graph_slam/msg/__init__.py
|
shiyangyang24/hdl_graph_slam_plus
|
0e46016bd6da8234b6371cee4b3d9f15bb6a333c
|
[
"Apache-2.0"
] | 1
|
2020-02-04T09:22:10.000Z
|
2020-02-04T09:22:10.000Z
|
from ._FloorCoeffs import *
| 14
| 27
| 0.785714
| 3
| 28
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a5202760df1f69a450a688fe2a819c65b9c914ab
| 28
|
py
|
Python
|
__init__.py
|
OdooCommunityWidgets/product_image_list_view
|
e969fb0b05ef4bee0e5bce500a34b02f7864c123
|
[
"MIT"
] | 2
|
2015-03-25T18:24:51.000Z
|
2017-01-02T15:00:24.000Z
|
__init__.py
|
OdooCommunityWidgets/product_image_list_view
|
e969fb0b05ef4bee0e5bce500a34b02f7864c123
|
[
"MIT"
] | 3
|
2015-04-02T06:27:54.000Z
|
2015-06-29T07:37:41.000Z
|
__init__.py
|
OdooCommunityWidgets/product_image_list_view
|
e969fb0b05ef4bee0e5bce500a34b02f7864c123
|
[
"MIT"
] | 7
|
2015-05-31T19:17:10.000Z
|
2018-10-29T12:59:41.000Z
|
import product
import stock
| 9.333333
| 14
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 2
| 15
| 14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a541b65b144786c3da4e8ac1cf036da38541873b
| 5,217
|
py
|
Python
|
set_transformer/set_transformer/models.py
|
michaelsdr/sinkformers
|
80c9f68003eadc30d62ce581cebb1afaeb4c4bc3
|
[
"MIT"
] | 18
|
2021-12-25T21:59:11.000Z
|
2022-03-28T17:23:26.000Z
|
set_transformer/set_transformer/models.py
|
michaelsdr/sinkformers
|
80c9f68003eadc30d62ce581cebb1afaeb4c4bc3
|
[
"MIT"
] | 1
|
2022-02-08T02:59:34.000Z
|
2022-02-08T02:59:34.000Z
|
set_transformer/set_transformer/models.py
|
michaelsdr/sinkformers
|
80c9f68003eadc30d62ce581cebb1afaeb4c4bc3
|
[
"MIT"
] | 1
|
2021-12-27T21:58:27.000Z
|
2021-12-27T21:58:27.000Z
|
from set_transformer.modules import *
import torch.nn as nn
class DeepSet(nn.Module):
def __init__(self, dim_input, num_outputs, dim_output, dim_hidden=128):
super(DeepSet, self).__init__()
self.num_outputs = num_outputs
self.dim_output = dim_output
self.enc = nn.Sequential(
nn.Linear(dim_input, dim_hidden),
nn.ReLU(),
nn.Linear(dim_hidden, dim_hidden),
nn.ReLU(),
nn.Linear(dim_hidden, dim_hidden),
nn.ReLU(),
nn.Linear(dim_hidden, dim_hidden))
self.dec = nn.Sequential(
nn.Linear(dim_hidden, dim_hidden),
nn.ReLU(),
nn.Linear(dim_hidden, dim_hidden),
nn.ReLU(),
nn.Linear(dim_hidden, dim_hidden),
nn.ReLU(),
nn.Linear(dim_hidden, num_outputs*dim_output))
def forward(self, X):
X = self.enc(X).mean(-2)
X = self.dec(X).reshape(-1, self.num_outputs, self.dim_output)
return X
class SetTransformer(nn.Module):
def __init__(self, dim_input, num_outputs, dim_output,
num_inds=32, dim_hidden=128, num_heads=4, ln=False):
super(SetTransformer, self).__init__()
self.enc = nn.Sequential(
SAB(dim_input, dim_hidden, num_heads, ln=ln),
SAB(dim_hidden, dim_hidden, num_heads, ln=ln))
self.dec = nn.Sequential(
PMA(dim_hidden, num_heads, num_outputs, ln=ln),
SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
nn.Linear(dim_hidden, dim_output))
def forward(self, X):
return self.dec(self.enc(X))
class SetTransformerLegacy(nn.Module):
def __init__(self, dim_input, num_outputs, dim_output,
num_inds=32, dim_hidden=128, num_heads=4, ln=False):
super(SetTransformerLegacy, self).__init__()
self.enc = nn.Sequential(
ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln),
ISAB(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln))
self.dec = nn.Sequential(
PMA(dim_hidden, num_heads, num_outputs, ln=ln),
SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
nn.Linear(dim_hidden, dim_output))
def forward(self, X):
return self.dec(self.enc(X))
class ModelNet(nn.Module):
def __init__(
self,
dim_input=3,
num_outputs=1,
dim_output=40,
num_inds=32,
dim_hidden=128,
num_heads=4,
ln=False,
save_attn_0 = 'attn_0.npy',
save_attn_1 = 'attn_1.npy',
):
super(ModelNet, self).__init__()
self.enc = nn.Sequential(
ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln, save_attn_0=None, save_attn_1=None),
ISAB(dim_hidden, dim_hidden, num_heads,num_inds, ln=ln, save_attn_0=save_attn_0, save_attn_1=save_attn_1),
)
self.dec = nn.Sequential(
nn.Dropout(),
PMA(dim_hidden, num_heads, num_outputs, ln=ln),
nn.Dropout(),
nn.Linear(dim_hidden, dim_output),
)
def forward(self, X):
Y = self.enc(X)
return self.dec(Y).squeeze()
class ModelNetSink(nn.Module):
def __init__(
self,
dim_input=3,
num_outputs=1,
dim_output=40,
num_inds=32,
dim_hidden=128,
num_heads=4,
ln=True,
n_it=1
):
super(ModelNetSink, self).__init__()
sinkhornkeops = SinkhornDistance(eps=eps, max_iter=n_it, cost=dotmat)
self.enc = nn.Sequential(
ISABSINK(dim_input, dim_hidden, num_heads, num_inds, ln=ln, sinkhorn=sinkhornkeops),
ISABSINK(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln, sinkhorn=sinkhornkeops),
)
self.dec = nn.Sequential(
nn.Dropout(),
PMASINK(dim_hidden, num_heads, num_outputs, ln=ln, sinkhorn=sinkhornkeops),
nn.Dropout(),
nn.Linear(dim_hidden, dim_output),
)
def forward(self, X):
Y = self.enc(X)
return self.dec(Y).squeeze()
class ModelNetSabSink(nn.Module):
def __init__(
self,
dim_input=3,
num_outputs=1,
dim_output=40,
num_inds=32,
dim_hidden=128,
num_heads=4,
ln=False,
n_it=1,
):
super(ModelNetSabSink, self).__init__()
sinkhornkeops = SinkhornDistance(eps=eps, max_iter=n_it, cost=distmat2)
self.enc = nn.Sequential(
SABSINK(dim_input, dim_hidden, num_heads, ln=ln, sinkhorn=sinkhornkeops),
SABSINK(dim_hidden, dim_hidden, num_heads, ln=ln, sinkhorn=sinkhornkeops),
)
self.dec = nn.Sequential(
nn.Dropout(),
PMASINK(dim_hidden, num_heads, num_outputs, ln=ln, sinkhorn=sinkhornkeops),
nn.Dropout(),
nn.Linear(dim_hidden, dim_output),
)
def forward(self, X):
return self.dec(self.enc(X)).squeeze()
| 34.78
| 118
| 0.581177
| 675
| 5,217
| 4.191111
| 0.108148
| 0.168611
| 0.084836
| 0.114175
| 0.822552
| 0.785437
| 0.757511
| 0.757511
| 0.727819
| 0.715801
| 0
| 0.016529
| 0.304198
| 5,217
| 149
| 119
| 35.013423
| 0.76281
| 0
| 0
| 0.651852
| 0
| 0
| 0.003834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.014815
| 0.022222
| 0.192593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a555e876b98f1ad9a405403048825034e6cbe9d3
| 113
|
py
|
Python
|
stests/generators/wg_211/__main__.py
|
goral09/stests
|
4de26485535cadf1b708188a7133a976536ccba3
|
[
"Apache-2.0"
] | 4
|
2020-03-10T15:28:17.000Z
|
2021-10-02T11:41:17.000Z
|
stests/generators/wg_211/__main__.py
|
goral09/stests
|
4de26485535cadf1b708188a7133a976536ccba3
|
[
"Apache-2.0"
] | 1
|
2020-03-25T11:31:44.000Z
|
2020-03-25T11:31:44.000Z
|
stests/generators/wg_211/__main__.py
|
goral09/stests
|
4de26485535cadf1b708188a7133a976536ccba3
|
[
"Apache-2.0"
] | 9
|
2020-02-25T18:43:42.000Z
|
2021-08-10T17:08:42.000Z
|
from stests.generators import launcher
from stests.generators.wg_211 import meta
launcher.start_generator(meta)
| 22.6
| 41
| 0.858407
| 16
| 113
| 5.9375
| 0.625
| 0.210526
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029126
| 0.088496
| 113
| 4
| 42
| 28.25
| 0.893204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a55d34f42f9a435d68846ac9b6309aa2ff02592f
| 3,814
|
py
|
Python
|
ubfquiz/cron.py
|
himasnhu1/example
|
27db7941c5f7bd16ffb407654818012e43d82f7e
|
[
"MIT"
] | null | null | null |
ubfquiz/cron.py
|
himasnhu1/example
|
27db7941c5f7bd16ffb407654818012e43d82f7e
|
[
"MIT"
] | 7
|
2021-04-08T21:17:18.000Z
|
2022-01-13T03:39:23.000Z
|
quiz/cron.py
|
saksham1991999/upscbasicfunda
|
b17e288081cb4ca9dd79d198cd0b22136c0794bb
|
[
"MIT"
] | null | null | null |
from .models import *
import datetime
def auto_sumbit_task():
quiztakers = QuizTaker.objects.filter(completed=False)
for i in quiztakers:
if i.quiz.live==False:
if datetime.datetime.now()>i.starttime+i.quiz.duration):
quiztaker = QuizTaker.objects.get(id=i.id)
quiztaker.complete=True
quiztaker.date_finished=datetime.now()
correct_answers = 0
for users_answer in models.UsersAnswer.objects.filter(quiz_taker=quiztaker):
answer = models.Answer.objects.get(question=users_answer.question, is_correct=True)
if users_answer.answer == answer:
correct_answers += 1
quiztaker.score = int(correct_answers / quiztaker.quiz.question_set.count() * 100)
aggregate = models.QuizTaker.objects.filter(quiz_id =quiztaker.quiz.id,score__gt=quiztaker.score).aggregate(ranking=Count('score'))
quiztaker.quiz_day_rank = int(aggregate['ranking'] + 1)
quiztaker.save()
if i.quiz.live == True:
slots = QuizSlot.objects.filter(quiz=i.quiz)
temp=False
slot=None
lastslot=slots[0]
for j in slots:
if j.start_datetime>lastslot.start_datetime:
lastslot=j
if j.start_datetime>=datetime.datetime.now():
temp=True
slot=j
break
if temp == False:
if datetime.datetime.now()>(lastslot.start_datetime+quiztaker.quiz.duration):
quiztaker = QuizTaker.objects.get(id=i.id)
quiztaker.complete=True
quiztaker.date_finished=datetime.now()
correct_answers = 0
for users_answer in models.UsersAnswer.objects.filter(quiz_taker=quiztaker):
answer = models.Answer.objects.get(question=users_answer.question, is_correct=True)
if users_answer.answer == answer:
correct_answers += 1
quiztaker.score = int(correct_answers / quiztaker.quiz.question_set.count() * 100)
aggregate = models.QuizTaker.objects.filter(quiz_id =quiztaker.quiz.id,score__gt=quiztaker.score).aggregate(ranking=Count('score'))
quiztaker.quiz_day_rank = int(aggregate['ranking'] + 1)
quiztaker.save()
else:
if datetime.datetime.now()>(slot.start_datetime+i.quiz.duration):
quiztaker = QuizTaker.objects.get(id=i.id)
quiztaker.complete=True
quiztaker.date_finished=datetime.now()
correct_answers = 0
for users_answer in models.UsersAnswer.objects.filter(quiz_taker=quiztaker):
answer = models.Answer.objects.get(question=users_answer.question, is_correct=True)
if users_answer.answer == answer:
correct_answers += 1
quiztaker.score = int(correct_answers / quiztaker.quiz.question_set.count() * 100)
aggregate = models.QuizTaker.objects.filter(quiz_id =quiztaker.quiz.id,score__gt=quiztaker.score).aggregate(ranking=Count('score'))
quiztaker.quiz_day_rank = int(aggregate['ranking'] + 1)
quiztaker.save()
def temp_task():
Tester.objects.create(name="testing")
| 50.853333
| 155
| 0.541426
| 373
| 3,814
| 5.394102
| 0.171582
| 0.064612
| 0.059145
| 0.031312
| 0.781312
| 0.755467
| 0.755467
| 0.755467
| 0.755467
| 0.755467
| 0
| 0.007894
| 0.368904
| 3,814
| 75
| 156
| 50.853333
| 0.828002
| 0
| 0
| 0.590164
| 0
| 0
| 0.011271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.032787
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3c0bcfc40407cc7a2d6ad8b3cff2bb3b631f7464
| 68
|
py
|
Python
|
library/__init__.py
|
BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot
|
1932c649c81a5a1eab735d7abdee0761c2853940
|
[
"MIT"
] | 1
|
2022-02-18T00:42:14.000Z
|
2022-02-18T00:42:14.000Z
|
library/__init__.py
|
BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot
|
1932c649c81a5a1eab735d7abdee0761c2853940
|
[
"MIT"
] | 1
|
2020-07-07T03:47:44.000Z
|
2020-07-07T03:47:44.000Z
|
library/__init__.py
|
BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot
|
1932c649c81a5a1eab735d7abdee0761c2853940
|
[
"MIT"
] | 1
|
2022-02-18T00:42:19.000Z
|
2022-02-18T00:42:19.000Z
|
from .fancytext import fancy
from .display_time import display_time
| 22.666667
| 38
| 0.852941
| 10
| 68
| 5.6
| 0.6
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 39
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3c2c7eac9b817c33e2f8149c59edb240f2aa2f3e
| 112
|
py
|
Python
|
plotly/graph_objs/histogram/marker/__init__.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/graph_objs/histogram/marker/__init__.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/graph_objs/histogram/marker/__init__.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
from ._line import Line
from ._colorbar import ColorBar
from plotly.graph_objs.histogram.marker import colorbar
| 28
| 55
| 0.848214
| 16
| 112
| 5.75
| 0.5625
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 112
| 3
| 56
| 37.333333
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3c511fb3cf7e6910382bcd6b249af96985dd6204
| 164
|
py
|
Python
|
goals/admin.py
|
abrookins/quest
|
302e985ed4702d977990bc5438c1a6d0521d236e
|
[
"MIT"
] | 38
|
2020-08-12T12:15:51.000Z
|
2022-03-29T20:19:34.000Z
|
goals/admin.py
|
abrookins/quest
|
302e985ed4702d977990bc5438c1a6d0521d236e
|
[
"MIT"
] | 6
|
2021-03-19T10:51:50.000Z
|
2021-09-22T19:34:49.000Z
|
goals/admin.py
|
abrookins/quest
|
302e985ed4702d977990bc5438c1a6d0521d236e
|
[
"MIT"
] | 6
|
2021-05-24T09:58:24.000Z
|
2022-02-25T20:57:47.000Z
|
from quest.admin import admin_site
from .models import Goal, Task, TaskStatus
admin_site.register(Goal)
admin_site.register(Task)
admin_site.register(TaskStatus)
| 20.5
| 42
| 0.829268
| 24
| 164
| 5.5
| 0.416667
| 0.272727
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091463
| 164
| 7
| 43
| 23.428571
| 0.885906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b1d280a72026daafc5b6faf95ba3219fe0a1ffca
| 6,966
|
py
|
Python
|
api/tests/constants.py
|
CMPUT404W22AMNRY/CMPUT404-project-socialdistribution
|
61d5c8aa2c7f038c137fc86c8b194d92a33d90e3
|
[
"W3C-20150513"
] | 1
|
2022-01-14T04:37:54.000Z
|
2022-01-14T04:37:54.000Z
|
api/tests/constants.py
|
CMPUT404W22AMNRY/CMPUT404-project-socialdistribution
|
61d5c8aa2c7f038c137fc86c8b194d92a33d90e3
|
[
"W3C-20150513"
] | 88
|
2022-02-19T00:16:44.000Z
|
2022-03-29T03:05:08.000Z
|
api/tests/constants.py
|
CMPUT404W22AMNRY/CMPUT404-project-socialdistribution
|
61d5c8aa2c7f038c137fc86c8b194d92a33d90e3
|
[
"W3C-20150513"
] | null | null | null |
from django.core.files.uploadedfile import SimpleUploadedFile
from posts.models import Post, ContentType
def get_test_image_jpeg():
jpeg = SimpleUploadedFile('img.jpeg', '', content_type='image/jpeg')
return jpeg
# by dsalaj on Stack Overflow at https://stackoverflow.com/a/42502775
def get_test_image_png():
valid_png_hex = ['\x89', 'P', 'N', 'G', '\r', '\n', '\x1a', '\n', '\x00',
'\x00', '\x00', '\r', 'I', 'H', 'D', 'R', '\x00',
'\x00', '\x00', '\x01', '\x00', '\x00', '\x00', '\x01',
'\x08', '\x02', '\x00', '\x00', '\x00', '\x90',
'w', 'S', '\xde', '\x00', '\x00', '\x00', '\x06', 'b', 'K',
'G', 'D', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\xf9', 'C', '\xbb', '\x7f', '\x00', '\x00',
'\x00', '\t', 'p', 'H', 'Y', 's', '\x00',
'\x00', '\x0e', '\xc3', '\x00', '\x00', '\x0e', '\xc3',
'\x01', '\xc7', 'o', '\xa8', 'd', '\x00', '\x00',
'\x00', '\x07', 't', 'I', 'M', 'E', '\x07', '\xe0', '\x05',
'\r', '\x08', '%', '/', '\xad', '+', 'Z',
'\x89', '\x00', '\x00', '\x00', '\x0c', 'I', 'D', 'A', 'T',
'\x08', '\xd7', 'c', '\xf8', '\xff', '\xff',
'?', '\x00', '\x05', '\xfe', '\x02', '\xfe', '\xdc', '\xcc',
'Y', '\xe7', '\x00', '\x00', '\x00', '\x00',
'I', 'E', 'N', 'D', '\xae', 'B', '`', '\x82']
valid_png_bin = bytes("".join(valid_png_hex), "utf-8")
png = SimpleUploadedFile(name="test.png", content=valid_png_bin, content_type='image/png')
return png
POST_IMG_DATA = {
'title': 'Test Image',
'description': 'This post is an image :P',
'content_type': ContentType.PNG,
'content': 'No',
'img_content': get_test_image_png(),
'categories': 'test',
'visibility': Post.Visibility.PUBLIC,
'unlisted': False,
}
# TODO: Update this when our groupmates have updated their interface
SAMPLE_REMOTE_POSTS = '''
[{
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6/posts/a8cd37e4-be1c-4f86-99cb-b20b1440606f",
"type": "post",
"author": {
"type": "author",
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"url": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"host": "https://psdt11.herokuapp.com/",
"display_name": "Jarrett Knauer",
"github": "https://github.com/jlknauer"
},
"comment_src": [
{
"type": "comment",
"author": {
"type": "author",
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"url": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"host": "https://psdt11.herokuapp.com/",
"display_name": "Jarrett Knauer",
"github": "https://github.com/jlknauer"
},
"comment": "First comment on the post!",
"published": "2022-03-23T00:01:32Z",
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6/posts/a8cd37e4-be1c-4f86-99cb-b20b1440606f/comments/e1b71a73-f302-4999-916a-2f5d57c4c626"
}
],
"title": "Hello from Team 11",
"source": "",
"origin": "",
"description": "This is a test post",
"content_type": "text/plain",
"content": "Web dev sucks",
"count": 0,
"comments": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6/posts/a8cd37e4-be1c-4f86-99cb-b20b1440606f/comments",
"published": "2022-03-23T00:01:32Z",
"visibility": "PUBLIC",
"unlisted": false
}]'''
SAMPLE_REMOTE_POST = '''
{
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6/posts/a8cd37e4-be1c-4f86-99cb-b20b1440606f",
"type": "post",
"author": {
"type": "author",
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"url": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"host": "https://psdt11.herokuapp.com/",
"display_name": "Jarrett Knauer",
"github": "https://github.com/jlknauer"
},
"comment_src": [
{
"type": "comment",
"author": {
"type": "author",
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"url": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6",
"host": "https://psdt11.herokuapp.com/",
"display_name": "Jarrett Knauer",
"github": "https://github.com/jlknauer"
},
"comment": "First comment on the post!",
"published": "2022-03-23T00:01:32Z",
"id": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6/posts/a8cd37e4-be1c-4f86-99cb-b20b1440606f/comments/e1b71a73-f302-4999-916a-2f5d57c4c626"
}
],
"title": "Hello from Team 11",
"source": "",
"origin": "",
"description": "This is a test post",
"content_type": "text/plain",
"content": "Web dev sucks",
"count": 0,
"comments": "https://psdt11.herokuapp.com/authors/28b32de4-e5cc-4840-a6ea-8c05dca9dae6/posts/a8cd37e4-be1c-4f86-99cb-b20b1440606f/comments",
"published": "2022-03-23T00:01:32Z",
"visibility": "PUBLIC",
"unlisted": false
}
'''
SAMPLE_REMOTE_AUTHORS = '''
{
"type": "authors",
"items": [
{
"type": "author",
"id": "https://cmput-404-w22-project-group09.herokuapp.com/service/authors/8e7209b2-5682-4b18-8908-4b1ef1bd3365",
"url": "https://cmput-404-w22-project-group09.herokuapp.com/authors/8e7209b2-5682-4b18-8908-4b1ef1bd3365",
"host": "https://cmput-404-w22-project-group09.herokuapp.com/",
"displayName": "Group 10",
"github": "https://cmput-404-w22-project-group09.herokuapp.com/",
"profileImage": "https://cmput-404-w22-project-group09.herokuapp.com/"
},
{
"type": "author",
"id": "https://cmput-404-w22-project-group09.herokuapp.com/service/authors/4ffc1055-b513-43ce-9fc4-5e3095acb3fd",
"url": "https://cmput-404-w22-project-group09.herokuapp.com/authors/4ffc1055-b513-43ce-9fc4-5e3095acb3fd",
"host": "https://cmput-404-w22-project-group09.herokuapp.com/",
"displayName": "Jejoon Ryu",
"github": "https://github.com/rjejoon",
"profileImage": "https://avatars.githubusercontent.com/u/55664235?v=4"
}
]
}'''
SAMPLE_REMOTE_AUTHOR = '''
{
"type": "author",
"id": "https://cmput-404-w22-project-group09.herokuapp.com/service/authors/8e7209b2-5682-4b18-8908-4b1ef1bd3365",
"url": "https://cmput-404-w22-project-group09.herokuapp.com/authors/8e7209b2-5682-4b18-8908-4b1ef1bd3365",
"host": "https://cmput-404-w22-project-group09.herokuapp.com/",
"displayName": "Group 10",
"github": "https://cmput-404-w22-project-group09.herokuapp.com/",
"profileImage": "https://cmput-404-w22-project-group09.herokuapp.com/"
}'''
| 41.963855
| 176
| 0.591013
| 811
| 6,966
| 5.02836
| 0.235512
| 0.091221
| 0.088279
| 0.10152
| 0.754046
| 0.748161
| 0.732467
| 0.732467
| 0.732467
| 0.732467
| 0
| 0.149807
| 0.182601
| 6,966
| 165
| 177
| 42.218182
| 0.566386
| 0.019236
| 0
| 0.556291
| 0
| 0.13245
| 0.774052
| 0.013472
| 0
| 0
| 0
| 0.006061
| 0
| 1
| 0.013245
| false
| 0
| 0.013245
| 0
| 0.039735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b1d69852c2d4c4897ce04594829deb7338efd007
| 41,446
|
py
|
Python
|
tira-protocol/build/python/tira_host_pb2.py
|
maexe/tira
|
2018fb08d9f8b07f68fd4dadc4633d1ff25a88a3
|
[
"MIT"
] | null | null | null |
tira-protocol/build/python/tira_host_pb2.py
|
maexe/tira
|
2018fb08d9f8b07f68fd4dadc4633d1ff25a88a3
|
[
"MIT"
] | null | null | null |
tira-protocol/build/python/tira_host_pb2.py
|
maexe/tira
|
2018fb08d9f8b07f68fd4dadc4633d1ff25a88a3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tira_host.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database: every generated message/enum below is registered
# here so it can be resolved by full name at runtime.
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# File-level descriptor for tira_host.proto. `serialized_pb` is the
# wire-format FileDescriptorProto emitted by protoc; all `serialized_start`/
# `serialized_end` offsets in the descriptors below index into these bytes,
# so none of these literals may be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tira_host.proto',
  package='tira.generated',
  syntax='proto3',
  serialized_options=b'\n\"de.webis.tira.client.web.generatedB\020TiraHostMessagesH\001',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x0ftira_host.proto\x12\x0etira.generated\x1a\x1bgoogle/protobuf/empty.proto\"\xc5\x01\n\tVmDetails\x12\x0c\n\x04vmId\x18\x01 \x01(\t\x12\x0e\n\x06userId\x18\x02 \x01(\t\x12\x0f\n\x07ovaFile\x18\x03 \x01(\t\x12\x15\n\rbulkCommandId\x18\x04 \x01(\t\x12\x11\n\tadminName\x18\x05 \x01(\t\x12\x0f\n\x07\x61\x64minPw\x18\x06 \x01(\t\x12\x10\n\x08userName\x18\x07 \x01(\t\x12\x0e\n\x06userPw\x18\x08 \x01(\t\x12\n\n\x02ip\x18\t \x01(\t\x12\x0f\n\x07sshPort\x18\n \x01(\t\x12\x0f\n\x07rdpPort\x18\x0b \x01(\t\"\xbd\x01\n\nRunDetails\x12\x16\n\x0esubmissionFile\x18\x01 \x01(\t\x12\x16\n\x0einputDatasetId\x18\x02 \x01(\t\x12\x14\n\x0cinputRunPath\x18\x03 \x01(\t\x12\x15\n\routputDirName\x18\x04 \x01(\t\x12\x11\n\tsandboxed\x18\x05 \x01(\t\x12\r\n\x05runId\x18\x06 \x01(\t\x12\x14\n\x0csnapshotName\x18\x07 \x01(\t\x12\x1a\n\x12optionalParameters\x18\x08 \x01(\t\"]\n\x0bTransaction\x12&\n\x06status\x18\x01 \x01(\x0e\x32\x16.tira.generated.Status\x12\x15\n\rtransactionId\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\"v\n\x07VmState\x12&\n\x06status\x18\x01 \x01(\x0e\x32\x16.tira.generated.Status\x12$\n\x05state\x18\x02 \x01(\x0e\x32\x15.tira.generated.State\x12\x0c\n\x04vmId\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"\xef\x01\n\x06VmInfo\x12&\n\x06status\x18\x01 \x01(\x0e\x32\x16.tira.generated.Status\x12\x0f\n\x07guestOs\x18\x02 \x01(\t\x12\x12\n\nmemorySize\x18\x03 \x01(\t\x12\x14\n\x0cnumberOfCpus\x18\x04 \x01(\t\x12\x0f\n\x07sshPort\x18\x05 \x01(\t\x12\x0f\n\x07rdpPort\x18\x06 \x01(\t\x12\x0c\n\x04host\x18\x07 \x01(\t\x12\x15\n\rsshPortStatus\x18\x08 \x01(\x08\x12\x15\n\rrdpPortStatus\x18\t \x01(\x08\x12$\n\x05state\x18\n \x01(\x0e\x32\x15.tira.generated.State\"\xd4\x02\n\x0c\x43ommandState\x12\x10\n\x08hostname\x18\x01 \x01(\t\x12\x36\n\x08\x63ommands\x18\x02 \x03(\x0b\x32$.tira.generated.CommandState.Command\x1a\xf9\x01\n\x07\x43ommand\x12\n\n\x02id\x18\x01 \x01(\t\x12\x15\n\rcommandString\x18\x02 \x01(\t\x12\x11\n\tstartTime\x18\x03 \x01(\t\x12\x0f\n\x07\x65ndTime\x18\x04 \x01(\t\x12;\n\x06status\x18\x05 \x01(\x0e\x32+.tira.generated.CommandState.Command.Status\x12\x0f\n\x07logFile\x18\x06 \x01(\t\x12\x12\n\nreturnCode\x18\x07 \x01(\x05\x12\x15\n\rbulkCommandId\x18\x08 \x01(\t\".\n\x06Status\x12\x0b\n\x07RUNNING\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02*!\n\x06Status\x12\x0b\n\x07SUCCESS\x10\x00\x12\n\n\x06\x46\x41ILED\x10\x01*\x96\x01\n\x05State\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\x0f\n\x0bPOWERED_OFF\x10\x02\x12\x0f\n\x0bPOWERING_ON\x10\x03\x12\x10\n\x0cPOWERING_OFF\x10\x04\x12\x0e\n\nSANDBOXING\x10\x05\x12\x10\n\x0cUNSANDBOXING\x10\x06\x12\r\n\tEXECUTING\x10\x07\x12\x0c\n\x08\x41RCHIVED\x10\x08\x32\xab\x08\n\x0fTiraHostService\x12\x45\n\tvm_backup\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x45\n\tvm_create\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x45\n\tvm_delete\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12>\n\x07vm_info\x12\x19.tira.generated.VmDetails\x1a\x16.tira.generated.VmInfo\"\x00\x12@\n\x07vm_list\x12\x16.google.protobuf.Empty\x1a\x1b.tira.generated.Transaction\"\x00\x12\x46\n\nvm_metrics\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x46\n\nvm_sandbox\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12G\n\x0bvm_shutdown\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12G\n\x0bvm_snapshot\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x44\n\x08vm_start\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x43\n\x07vm_stop\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12H\n\x0cvm_unsandbox\x12\x19.tira.generated.VmDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12H\n\x0brun_execute\x12\x1a.tira.generated.RunDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x45\n\x08run_eval\x12\x1a.tira.generated.RunDetails\x1a\x1b.tira.generated.Transaction\"\x00\x12\x39\n\x05\x61live\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x32\xfd\x01\n\x16TiraApplicationService\x12\x43\n\tset_state\x12\x17.tira.generated.VmState\x1a\x1b.tira.generated.Transaction\"\x00\x12J\n\x10\x63onfirm_creation\x12\x17.tira.generated.VmState\x1a\x1b.tira.generated.Transaction\"\x00\x12R\n\x14\x63omplete_transaction\x12\x1b.tira.generated.Transaction\x1a\x1b.tira.generated.Transaction\"\x00\x42\x38\n\"de.webis.tira.client.web.generatedB\x10TiraHostMessagesH\x01\x62\x06proto3'
  ,
  dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
# Top-level `tira.generated.Status` enum (SUCCESS=0, FAILED=1), wrapped so
# callers can use `Status.Value('SUCCESS')` / `Status.Name(0)` etc.
_STATUS = _descriptor.EnumDescriptor(
  name='Status',
  full_name='tira.generated.Status',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FAILED', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1256,
  serialized_end=1289,
)
_sym_db.RegisterEnumDescriptor(_STATUS)
Status = enum_type_wrapper.EnumTypeWrapper(_STATUS)
# Top-level `tira.generated.State` enum: lifecycle states of a managed VM.
_STATE = _descriptor.EnumDescriptor(
  name='State',
  full_name='tira.generated.State',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNDEFINED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='RUNNING', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='POWERED_OFF', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='POWERING_ON', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='POWERING_OFF', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SANDBOXING', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='UNSANDBOXING', index=6, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='EXECUTING', index=7, number=7,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ARCHIVED', index=8, number=8,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1292,
  serialized_end=1442,
)
_sym_db.RegisterEnumDescriptor(_STATE)
State = enum_type_wrapper.EnumTypeWrapper(_STATE)
# Module-level integer aliases protoc emits for every top-level enum value,
# so callers may write e.g. `tira_host_pb2.RUNNING` directly. Note SUCCESS/
# FAILED come from Status, the rest from State.
SUCCESS = 0
FAILED = 1
UNDEFINED = 0
RUNNING = 1
POWERED_OFF = 2
POWERING_ON = 3
POWERING_OFF = 4
SANDBOXING = 5
UNSANDBOXING = 6
EXECUTING = 7
ARCHIVED = 8
# Nested enum `CommandState.Command.Status` (RUNNING/SUCCESS/FAILED) — the
# per-command execution status; distinct from the top-level Status enum,
# which lacks RUNNING.
_COMMANDSTATE_COMMAND_STATUS = _descriptor.EnumDescriptor(
  name='Status',
  full_name='tira.generated.CommandState.Command.Status',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='RUNNING', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FAILED', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1208,
  serialized_end=1254,
)
_sym_db.RegisterEnumDescriptor(_COMMANDSTATE_COMMAND_STATUS)
# Message `tira.generated.VmDetails`: identity/credentials/connection info
# for a VM. All 11 fields are singular proto3 strings (type=9/TYPE_STRING).
_VMDETAILS = _descriptor.Descriptor(
  name='VmDetails',
  full_name='tira.generated.VmDetails',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='vmId', full_name='tira.generated.VmDetails.vmId', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='userId', full_name='tira.generated.VmDetails.userId', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='ovaFile', full_name='tira.generated.VmDetails.ovaFile', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='bulkCommandId', full_name='tira.generated.VmDetails.bulkCommandId', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='adminName', full_name='tira.generated.VmDetails.adminName', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='adminPw', full_name='tira.generated.VmDetails.adminPw', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='userName', full_name='tira.generated.VmDetails.userName', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='userPw', full_name='tira.generated.VmDetails.userPw', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='ip', full_name='tira.generated.VmDetails.ip', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sshPort', full_name='tira.generated.VmDetails.sshPort', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='rdpPort', full_name='tira.generated.VmDetails.rdpPort', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=65,
  serialized_end=262,
)
# Message `tira.generated.RunDetails`: parameters of one evaluation run
# (submission, dataset, output location, run id). All fields are proto3
# strings — including `sandboxed`, which the .proto declares as string,
# not bool.
_RUNDETAILS = _descriptor.Descriptor(
  name='RunDetails',
  full_name='tira.generated.RunDetails',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='submissionFile', full_name='tira.generated.RunDetails.submissionFile', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='inputDatasetId', full_name='tira.generated.RunDetails.inputDatasetId', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='inputRunPath', full_name='tira.generated.RunDetails.inputRunPath', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='outputDirName', full_name='tira.generated.RunDetails.outputDirName', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sandboxed', full_name='tira.generated.RunDetails.sandboxed', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='runId', full_name='tira.generated.RunDetails.runId', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='snapshotName', full_name='tira.generated.RunDetails.snapshotName', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='optionalParameters', full_name='tira.generated.RunDetails.optionalParameters', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=265,
  serialized_end=454,
)
# Message `tira.generated.Transaction`: generic RPC acknowledgement carrying
# a Status enum (linked to _STATUS after construction, below), a transaction
# id, and a human-readable message.
_TRANSACTION = _descriptor.Descriptor(
  name='Transaction',
  full_name='tira.generated.Transaction',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='tira.generated.Transaction.status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='transactionId', full_name='tira.generated.Transaction.transactionId', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='message', full_name='tira.generated.Transaction.message', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=456,
  serialized_end=549,
)
# Message `tira.generated.VmState`: reports a VM's lifecycle State plus an
# RPC Status (both enum fields, type=14; wired to _STATUS/_STATE below).
_VMSTATE = _descriptor.Descriptor(
  name='VmState',
  full_name='tira.generated.VmState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='tira.generated.VmState.status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='state', full_name='tira.generated.VmState.state', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='vmId', full_name='tira.generated.VmState.vmId', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='message', full_name='tira.generated.VmState.message', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=551,
  serialized_end=669,
)
# Message `tira.generated.VmInfo`: hardware/connection facts about a VM.
# Mostly strings; sshPortStatus/rdpPortStatus are bools (type=8) and
# status/state are enums (type=14, linked to _STATUS/_STATE below).
_VMINFO = _descriptor.Descriptor(
  name='VmInfo',
  full_name='tira.generated.VmInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='tira.generated.VmInfo.status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='guestOs', full_name='tira.generated.VmInfo.guestOs', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='memorySize', full_name='tira.generated.VmInfo.memorySize', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='numberOfCpus', full_name='tira.generated.VmInfo.numberOfCpus', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sshPort', full_name='tira.generated.VmInfo.sshPort', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='rdpPort', full_name='tira.generated.VmInfo.rdpPort', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host', full_name='tira.generated.VmInfo.host', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sshPortStatus', full_name='tira.generated.VmInfo.sshPortStatus', index=7,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='rdpPortStatus', full_name='tira.generated.VmInfo.rdpPortStatus', index=8,
      number=9, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='state', full_name='tira.generated.VmInfo.state', index=9,
      number=10, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=672,
  serialized_end=911,
)
# Nested message `CommandState.Command`: one shell command executed on a
# host — strings plus an int32 returnCode (type=5) and the nested Status
# enum (type=14). containing_type is patched to _COMMANDSTATE below.
_COMMANDSTATE_COMMAND = _descriptor.Descriptor(
  name='Command',
  full_name='tira.generated.CommandState.Command',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='tira.generated.CommandState.Command.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='commandString', full_name='tira.generated.CommandState.Command.commandString', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='startTime', full_name='tira.generated.CommandState.Command.startTime', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='endTime', full_name='tira.generated.CommandState.Command.endTime', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='status', full_name='tira.generated.CommandState.Command.status', index=4,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='logFile', full_name='tira.generated.CommandState.Command.logFile', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='returnCode', full_name='tira.generated.CommandState.Command.returnCode', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='bulkCommandId', full_name='tira.generated.CommandState.Command.bulkCommandId', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _COMMANDSTATE_COMMAND_STATUS,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1005,
  serialized_end=1254,
)
# Message `tira.generated.CommandState`: a hostname plus a repeated
# (label=3) list of nested Command messages (field type=11/TYPE_MESSAGE;
# its message_type is wired to _COMMANDSTATE_COMMAND below).
_COMMANDSTATE = _descriptor.Descriptor(
  name='CommandState',
  full_name='tira.generated.CommandState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='hostname', full_name='tira.generated.CommandState.hostname', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='commands', full_name='tira.generated.CommandState.commands', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_COMMANDSTATE_COMMAND, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=914,
  serialized_end=1254,
)
# Resolve the cross-references that could not be set while the descriptors
# above were being constructed: enum-typed fields, nested-type containment,
# and the repeated-message field's element type. Then register every
# message/enum type with DESCRIPTOR and the symbol database.
_TRANSACTION.fields_by_name['status'].enum_type = _STATUS
_VMSTATE.fields_by_name['status'].enum_type = _STATUS
_VMSTATE.fields_by_name['state'].enum_type = _STATE
_VMINFO.fields_by_name['status'].enum_type = _STATUS
_VMINFO.fields_by_name['state'].enum_type = _STATE
_COMMANDSTATE_COMMAND.fields_by_name['status'].enum_type = _COMMANDSTATE_COMMAND_STATUS
_COMMANDSTATE_COMMAND.containing_type = _COMMANDSTATE
_COMMANDSTATE_COMMAND_STATUS.containing_type = _COMMANDSTATE_COMMAND
_COMMANDSTATE.fields_by_name['commands'].message_type = _COMMANDSTATE_COMMAND
DESCRIPTOR.message_types_by_name['VmDetails'] = _VMDETAILS
DESCRIPTOR.message_types_by_name['RunDetails'] = _RUNDETAILS
DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION
DESCRIPTOR.message_types_by_name['VmState'] = _VMSTATE
DESCRIPTOR.message_types_by_name['VmInfo'] = _VMINFO
DESCRIPTOR.message_types_by_name['CommandState'] = _COMMANDSTATE
DESCRIPTOR.enum_types_by_name['Status'] = _STATUS
DESCRIPTOR.enum_types_by_name['State'] = _STATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from their descriptors via the
# reflection metaclass; these are the names callers import from this module
# (VmDetails, RunDetails, Transaction, VmState, VmInfo, CommandState, and
# the nested CommandState.Command). Each is registered in the symbol
# database so it can be found by full name.
VmDetails = _reflection.GeneratedProtocolMessageType('VmDetails', (_message.Message,), {
  'DESCRIPTOR' : _VMDETAILS,
  '__module__' : 'tira_host_pb2'
  # @@protoc_insertion_point(class_scope:tira.generated.VmDetails)
  })
_sym_db.RegisterMessage(VmDetails)
RunDetails = _reflection.GeneratedProtocolMessageType('RunDetails', (_message.Message,), {
  'DESCRIPTOR' : _RUNDETAILS,
  '__module__' : 'tira_host_pb2'
  # @@protoc_insertion_point(class_scope:tira.generated.RunDetails)
  })
_sym_db.RegisterMessage(RunDetails)
Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), {
  'DESCRIPTOR' : _TRANSACTION,
  '__module__' : 'tira_host_pb2'
  # @@protoc_insertion_point(class_scope:tira.generated.Transaction)
  })
_sym_db.RegisterMessage(Transaction)
VmState = _reflection.GeneratedProtocolMessageType('VmState', (_message.Message,), {
  'DESCRIPTOR' : _VMSTATE,
  '__module__' : 'tira_host_pb2'
  # @@protoc_insertion_point(class_scope:tira.generated.VmState)
  })
_sym_db.RegisterMessage(VmState)
VmInfo = _reflection.GeneratedProtocolMessageType('VmInfo', (_message.Message,), {
  'DESCRIPTOR' : _VMINFO,
  '__module__' : 'tira_host_pb2'
  # @@protoc_insertion_point(class_scope:tira.generated.VmInfo)
  })
_sym_db.RegisterMessage(VmInfo)
CommandState = _reflection.GeneratedProtocolMessageType('CommandState', (_message.Message,), {
  'Command' : _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), {
    'DESCRIPTOR' : _COMMANDSTATE_COMMAND,
    '__module__' : 'tira_host_pb2'
    # @@protoc_insertion_point(class_scope:tira.generated.CommandState.Command)
    })
  ,
  'DESCRIPTOR' : _COMMANDSTATE,
  '__module__' : 'tira_host_pb2'
  # @@protoc_insertion_point(class_scope:tira.generated.CommandState)
  })
_sym_db.RegisterMessage(CommandState)
_sym_db.RegisterMessage(CommandState.Command)
# Clear file-level options so the service stubs below see the defaults.
DESCRIPTOR._options = None
_TIRAHOSTSERVICE = _descriptor.ServiceDescriptor(
name='TiraHostService',
full_name='tira.generated.TiraHostService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1445,
serialized_end=2512,
methods=[
_descriptor.MethodDescriptor(
name='vm_backup',
full_name='tira.generated.TiraHostService.vm_backup',
index=0,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_create',
full_name='tira.generated.TiraHostService.vm_create',
index=1,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_delete',
full_name='tira.generated.TiraHostService.vm_delete',
index=2,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_info',
full_name='tira.generated.TiraHostService.vm_info',
index=3,
containing_service=None,
input_type=_VMDETAILS,
output_type=_VMINFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_list',
full_name='tira.generated.TiraHostService.vm_list',
index=4,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_metrics',
full_name='tira.generated.TiraHostService.vm_metrics',
index=5,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_sandbox',
full_name='tira.generated.TiraHostService.vm_sandbox',
index=6,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_shutdown',
full_name='tira.generated.TiraHostService.vm_shutdown',
index=7,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_snapshot',
full_name='tira.generated.TiraHostService.vm_snapshot',
index=8,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_start',
full_name='tira.generated.TiraHostService.vm_start',
index=9,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_stop',
full_name='tira.generated.TiraHostService.vm_stop',
index=10,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='vm_unsandbox',
full_name='tira.generated.TiraHostService.vm_unsandbox',
index=11,
containing_service=None,
input_type=_VMDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='run_execute',
full_name='tira.generated.TiraHostService.run_execute',
index=12,
containing_service=None,
input_type=_RUNDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='run_eval',
full_name='tira.generated.TiraHostService.run_eval',
index=13,
containing_service=None,
input_type=_RUNDETAILS,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='alive',
full_name='tira.generated.TiraHostService.alive',
index=14,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_TIRAHOSTSERVICE)
DESCRIPTOR.services_by_name['TiraHostService'] = _TIRAHOSTSERVICE
_TIRAAPPLICATIONSERVICE = _descriptor.ServiceDescriptor(
name='TiraApplicationService',
full_name='tira.generated.TiraApplicationService',
file=DESCRIPTOR,
index=1,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2515,
serialized_end=2768,
methods=[
_descriptor.MethodDescriptor(
name='set_state',
full_name='tira.generated.TiraApplicationService.set_state',
index=0,
containing_service=None,
input_type=_VMSTATE,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='confirm_creation',
full_name='tira.generated.TiraApplicationService.confirm_creation',
index=1,
containing_service=None,
input_type=_VMSTATE,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='complete_transaction',
full_name='tira.generated.TiraApplicationService.complete_transaction',
index=2,
containing_service=None,
input_type=_TRANSACTION,
output_type=_TRANSACTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_TIRAAPPLICATIONSERVICE)
DESCRIPTOR.services_by_name['TiraApplicationService'] = _TIRAAPPLICATIONSERVICE
# @@protoc_insertion_point(module_scope)
| 44.138445
| 4,601
| 0.752497
| 5,302
| 41,446
| 5.576198
| 0.064127
| 0.055403
| 0.100896
| 0.083105
| 0.806224
| 0.73012
| 0.694808
| 0.670793
| 0.666328
| 0.659665
| 0
| 0.038556
| 0.123896
| 41,446
| 938
| 4,602
| 44.185501
| 0.77566
| 0.015948
| 0
| 0.702032
| 1
| 0.001129
| 0.217058
| 0.179553
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006772
| 0
| 0.006772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3ce81065ff58f6764eef90ee507e72e333aa897c
| 249
|
py
|
Python
|
roerich/__init__.py
|
HSE-LAMBDA/roerich
|
17e178292593d1ea6a821b99705620ba066abd2a
|
[
"BSD-2-Clause"
] | 10
|
2020-12-01T13:58:27.000Z
|
2022-01-17T12:01:31.000Z
|
roerich/__init__.py
|
HSE-LAMBDA/roerich
|
17e178292593d1ea6a821b99705620ba066abd2a
|
[
"BSD-2-Clause"
] | 3
|
2021-03-07T14:06:22.000Z
|
2022-01-18T14:23:16.000Z
|
roerich/__init__.py
|
HSE-LAMBDA/roerich
|
17e178292593d1ea6a821b99705620ba066abd2a
|
[
"BSD-2-Clause"
] | 2
|
2020-12-01T14:04:36.000Z
|
2022-03-24T12:52:32.000Z
|
from .algorithms import OnlineNNClassifier, OnlineNNRuLSIF
from .rulsif import RuLSIF
from .dataset import generate_dataset
from .viz import display
__all__ = [
'OnlineNNClassifier', 'OnlineNNRuLSIF', 'RuLSIF', 'generate_dataset', 'display'
]
| 24.9
| 83
| 0.783133
| 25
| 249
| 7.56
| 0.44
| 0.338624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128514
| 249
| 9
| 84
| 27.666667
| 0.870968
| 0
| 0
| 0
| 1
| 0
| 0.24498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3cf59d390049dc9016a6aa194bd07ea57bfbd806
| 7
|
py
|
Python
|
12_module_basic/06_package/a/b/mod3.py
|
hemuke/python
|
bc99f2b5aee997083ae31f59a2b33db48c8255f3
|
[
"Apache-2.0"
] | null | null | null |
12_module_basic/06_package/a/b/mod3.py
|
hemuke/python
|
bc99f2b5aee997083ae31f59a2b33db48c8255f3
|
[
"Apache-2.0"
] | null | null | null |
12_module_basic/06_package/a/b/mod3.py
|
hemuke/python
|
bc99f2b5aee997083ae31f59a2b33db48c8255f3
|
[
"Apache-2.0"
] | null | null | null |
m = 59
| 3.5
| 6
| 0.428571
| 2
| 7
| 1.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.428571
| 7
| 1
| 7
| 7
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a717015b76bf02466cfd55afb4e445d7b767b2dc
| 27
|
py
|
Python
|
prroipool/__init__.py
|
FscoreLab/PreciseRoIPooling
|
52246e71681e67ee074b78a771dc7d0968f50039
|
[
"MIT"
] | null | null | null |
prroipool/__init__.py
|
FscoreLab/PreciseRoIPooling
|
52246e71681e67ee074b78a771dc7d0968f50039
|
[
"MIT"
] | null | null | null |
prroipool/__init__.py
|
FscoreLab/PreciseRoIPooling
|
52246e71681e67ee074b78a771dc7d0968f50039
|
[
"MIT"
] | 1
|
2021-02-28T06:36:57.000Z
|
2021-02-28T06:36:57.000Z
|
from .prroi_pool import *
| 9
| 25
| 0.740741
| 4
| 27
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 2
| 26
| 13.5
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59660e781c48734cfc23cb799487f409525cf63e
| 52
|
py
|
Python
|
src/core/__init__.py
|
1ramen/bfython
|
6a423223a6e969331baa01eb1f34ba13b1266764
|
[
"MIT"
] | 3
|
2017-12-05T15:52:50.000Z
|
2019-02-19T11:36:20.000Z
|
src/core/__init__.py
|
1ramen/bfython
|
6a423223a6e969331baa01eb1f34ba13b1266764
|
[
"MIT"
] | 1
|
2017-11-29T04:40:26.000Z
|
2017-11-29T04:40:26.000Z
|
src/core/__init__.py
|
1ramen/bfython
|
6a423223a6e969331baa01eb1f34ba13b1266764
|
[
"MIT"
] | null | null | null |
import core.environment
import core.parser
import IO
| 17.333333
| 23
| 0.865385
| 8
| 52
| 5.625
| 0.625
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 3
| 24
| 17.333333
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59bac52a3a294599d614fd1dac520b48a5bc28ae
| 38
|
py
|
Python
|
wsgi.py
|
chowdhurya/git-loc-server
|
519d91e23002f128b8f07270c45c60053e40ffe7
|
[
"BSD-3-Clause"
] | 1
|
2016-03-27T05:19:07.000Z
|
2016-03-27T05:19:07.000Z
|
wsgi.py
|
chowdhurya/git-loc-server
|
519d91e23002f128b8f07270c45c60053e40ffe7
|
[
"BSD-3-Clause"
] | null | null | null |
wsgi.py
|
chowdhurya/git-loc-server
|
519d91e23002f128b8f07270c45c60053e40ffe7
|
[
"BSD-3-Clause"
] | null | null | null |
from gitloc import app as application
| 19
| 37
| 0.842105
| 6
| 38
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 1
| 38
| 38
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59e36fc1f5c29dfc2beb02d9c07df72c34129561
| 19,942
|
py
|
Python
|
images.py
|
summerandwinter/poempythonweb
|
9f25047fb9ceb19400df59ac8b9d212fc688d6c6
|
[
"MIT"
] | null | null | null |
images.py
|
summerandwinter/poempythonweb
|
9f25047fb9ceb19400df59ac8b9d212fc688d6c6
|
[
"MIT"
] | null | null | null |
images.py
|
summerandwinter/poempythonweb
|
9f25047fb9ceb19400df59ac8b9d212fc688d6c6
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from datetime import datetime
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.shortcuts import render
from django.urls import reverse
from django.views import View
from leancloud import Object
from leancloud import Query
from leancloud.errors import LeanCloudError
from PIL import Image, ImageColor, ImageFont, ImageDraw, ImageFilter
from io import BytesIO
from textwrap import *
import re
# 模糊
def filter_blur(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.BLUR)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 轮廓
def filter_contour(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.CONTOUR)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 细节
def filter_detail(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.DETAIL)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 边缘增强
def filter_edge_enhance(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.EDGE_ENHANCE)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 边缘增强
def filter_edge_enhance_more(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.EDGE_ENHANCE_MORE)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 浮雕
def filter_emboss(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.EMBOSS)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
#寻找边缘
def filter_find_edges(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.FIND_EDGES)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
#柔化
def filter_smooth(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.SMOOTH)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
#柔化
def filter_smooth_more(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.SMOOTH_MORE)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 锐化
def filter_sharpen(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.SHARPEN)
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 高斯模糊
def filter_gaussian_blur(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.GaussianBlur(4))
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
# 反遮罩锐化
def filter_unsharp_mask(request):
image_data = Image.open("photo.jpg")
fliter_data = image_data.filter(ImageFilter.UnsharpMask())
msstream=BytesIO()
fliter_data.save(msstream,"jpeg")
fliter_data.close()
return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
def template(request):
w = 640
h = 862
iw = 600
ih = 340
title = '每日一言'
content = '觉得最失落的,大概是你还在为你们的未来出谋划策,他却已慢慢后退不再与你并肩。'
spacing = 20
content = fill(content, 15)
author = '- 天天码图 -'
copyright = '微信小程序「天天码图」'
title_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 35)
content_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 30)
author_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 25)
copyright_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 25)
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
tw,th = draw.multiline_textsize(title, font=title_fnt)
aw,ah = draw.multiline_textsize(author, font=author_fnt)
cw,ch = draw.multiline_textsize(content, font=content_fnt, spacing=spacing)
crw,crh = draw.multiline_textsize(copyright, font=copyright_fnt)
h = 635+th+ch+crh+ah;
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
photo = Image.open("photo.jpg").convert('RGBA')
(pw, ph) = photo.size
if pw/ph>iw/ih:
box = ((pw-ph*iw/ih)/2,0,(pw+ph*iw/ih)/2,ph)
else:
box = (0,(ph-pw*ih/iw)/2,pw,(ph+pw*ih/iw)/2)
photo = photo.crop(box)
photo = photo.resize((iw,ih))
base.paste(photo,box=(20,20))
# get a drawing context
draw = ImageDraw.Draw(base)
# draw text in the middle of the image, half opacity
draw.multiline_text((w/2-tw/2,420), title, font=title_fnt, fill=(0,0,0,255), align='center')
draw.multiline_text((w/2-cw/2,420+th+45), content, font=content_fnt, fill=(0,0,0,255), align='center', spacing=spacing)
draw.multiline_text((w/2-aw/2,420+th+45+ch+115), author, font=author_fnt, fill=(0,0,0,255), align='center')
draw.multiline_text((w-crw,420+th+45+ch+115+ah+50), copyright, font=copyright_fnt, fill=(189,189,189,255), align='center')
# get BytesIO
msstream = BytesIO()
# save image data to output stream
base.save(msstream,"png")
# release memory
base.close()
return HttpResponse(msstream.getvalue(),content_type="image/png")
def template2(request):
w = 640
h = 1020
iw = 600
ih = 340
title = '每日一言'
content = '觉得最失落的,大概是你还在为你们的未来出谋划策,他却已慢慢后退不再与你并肩。'
spacing = 20
padding = 2
author = '- 天天码图 -'
copyright = '微信小程序「天天码图」'
title_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 35)
content_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 30)
author_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 25)
copyright_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 25)
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
aw,ah = draw.multiline_textsize(author, font=author_fnt)
crw,crh = draw.multiline_textsize(copyright, font=copyright_fnt)
photo = Image.open("photo.jpg").convert('RGBA')
(pw, ph) = photo.size
if pw/ph>iw/ih:
box = ((pw-ph*iw/ih)/2,0,(pw+ph*iw/ih)/2,ph)
else:
box = (0,(ph-pw*ih/iw)/2,pw,(ph+pw*ih/iw)/2)
photo = photo.crop(box)
photo = photo.resize((iw,ih))
base.paste(photo,box=(20,20))
# get a drawing context
draw = ImageDraw.Draw(base)
# split the title
tlines = wrap(title, 1)
# current title height
tnh = 420
# get width and height of single title word
stw,sth = title_fnt.getsize("已")
for tline in tlines:
draw.text((w-115-stw,tnh), tline, fill=(0,0,0,255), font=title_fnt)
tnh = tnh+sth
# get width and height of single content word
scw,sch = content_fnt.getsize("已")
clines = wrap(content, 14)
# current width of content
cnw = w-115-stw-115-scw
for cline in clines:
# current height of content
cnh = 420
cwords = wrap(cline, 1)
for cword in cwords:
pattern = re.compile("[,。、]+")
if pattern.search(cword):
draw.text((cnw,cnh), cword, fill=(0,0,0,255), font=content_fnt)
# draw.text((cnw+30-12,cnh-30+12), cword, fill=(0,0,0,255), font=content_fnt)
else:
draw.text((cnw,cnh), cword, fill=(0,0,0,255), font=content_fnt)
cnh = cnh+sch+padding
cnw = cnw-scw-spacing
# draw text in the middle of the image, half opacity
# draw.multiline_text((w/2-tw/2,420), title, font=title_fnt, fill=(0,0,0,255), align='center')
# draw.multiline_text((w/2-cw/2,420+th+45), content, font=content_fnt, fill=(0,0,0,255), align='center', spacing=spacing)
draw.multiline_text((w/2-aw/2,h-50-15-crh-ah), author, font=author_fnt, fill=(0,0,0,255), align='center')
draw.multiline_text((w-crw,h-15-crh), copyright, font=copyright_fnt, fill=(189,189,189,255), align='center')
# get BytesIO
msstream = BytesIO()
# save image data to output stream
base.save(msstream,"png")
# release memory
base.close()
return HttpResponse(msstream.getvalue(),content_type="image/png")
def template3(request):
w = 640
h = 862
iw = 600
ih = 340
bw = 300
bh = 300
title = '每日一言'
content = '觉得最失落的,大概是你还在为你们的未来出谋划策,他却已慢慢后退不再与你并肩。'
spacing = 20
content = fill(content, 15)
author = '- 天天码图 -'
copyright = '微信小程序「天天码图」'
title_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 35)
content_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 30)
author_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 25)
copyright_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 25)
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
tw,th = draw.multiline_textsize(title, font=title_fnt)
aw,ah = draw.multiline_textsize(author, font=author_fnt)
cw,ch = draw.multiline_textsize(content, font=content_fnt, spacing=spacing)
crw,crh = draw.multiline_textsize(copyright, font=copyright_fnt)
h = 695+th+ch+crh+ah;
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
photo = Image.open("photo.jpg").convert('RGBA')
pw, ph = photo.size
if pw > ph:
box = ((pw-ph*bw/bh)/2,0,(pw+ph*bw/bh)/2,ph)
else:
box = (0,(ph-pw*bh/bw)/2,pw,(ph+pw*bh/bw)/2)
photo = photo.crop(box)
photo = photo.resize((bw*4,bh*4))
circle = Image.new('L', (bw*4, bh*4), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, bw*4, bh*4), fill=255)
alpha = Image.new('L', (bw*4, bh*4), 255)
alpha.paste(circle, (0, 0))
photo.putalpha(alpha)
photo = photo.resize((bw,bh),Image.ANTIALIAS)
base.paste(photo,box=(170,120),mask=photo)
# get a drawing context
draw = ImageDraw.Draw(base)
# draw text in the middle of the image, half opacity
draw.multiline_text((w/2-tw/2,480), title, font=title_fnt, fill=(0,0,0,255), align='center')
draw.multiline_text((w/2-cw/2,480+th+45), content, font=content_fnt, fill=(0,0,0,255), align='center', spacing=spacing)
draw.multiline_text((w/2-aw/2,480+th+45+ch+115), author, font=author_fnt, fill=(0,0,0,255), align='center')
draw.multiline_text((w-crw,480+th+45+ch+115+ah+50), copyright, font=copyright_fnt, fill=(189,189,189,255), align='center')
# get BytesIO
msstream = BytesIO()
# save image data to output stream
base.save(msstream,"png")
# release memory
base.close()
return HttpResponse(msstream.getvalue(),content_type="image/png")
def template4(request):
w = 640
h = 1080
iw = 600
ih = 340
bw = 300
bh = 300
padding = 2
title = '每日一言'
content = '觉得最失落的,大概是你还在为你们的未来出谋划策,他却已慢慢后退不再与你并肩。'
spacing = 20
content = fill(content, 15)
author = '- 天天码图 -'
copyright = '微信小程序「天天码图」'
title_fnt = ImageFont.truetype('font/zh/WangQingHua.ttf', 35)
content_fnt = ImageFont.truetype('font/zh/WangQingHua.ttf', 30)
author_fnt = ImageFont.truetype('font/zh/WangQingHua.ttf', 25)
copyright_fnt = ImageFont.truetype('font/zh/WangQingHua.ttf', 25)
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
aw,ah = draw.multiline_textsize(author, font=author_fnt)
crw,crh = draw.multiline_textsize(copyright, font=copyright_fnt)
photo = Image.open("photo.jpg").convert('RGBA')
pw, ph = photo.size
if pw > ph:
box = ((pw-ph*bw/bh)/2,0,(pw+ph*bw/bh)/2,ph)
else:
box = (0,(ph-pw*bh/bw)/2,pw,(ph+pw*bh/bw)/2)
photo = photo.crop(box)
photo = photo.resize((bw*4,bh*4))
circle = Image.new('L', (bw*4, bh*4), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, bw*4, bh*4), fill=255)
alpha = Image.new('L', (bw*4, bh*4), 255)
alpha.paste(circle, (0, 0))
photo.putalpha(alpha)
photo = photo.resize((bw,bh),Image.ANTIALIAS)
base.paste(photo,box=(170,120),mask=photo)
# get a drawing context
draw = ImageDraw.Draw(base)
# split the title
tlines = wrap(title, 1)
# current title height
tnh = 480
# get width and height of single title word
stw,sth = title_fnt.getsize("已")
for tline in tlines:
draw.text((w-115-stw,tnh), tline, fill=(0,0,0,255), font=title_fnt)
tnh = tnh+sth
# get width and height of single content word
scw,sch = content_fnt.getsize("已")
clines = wrap(content, 14)
# current width of content
cnw = w-115-stw-115-scw
for cline in clines:
# current height of content
cnh = 480
cwords = wrap(cline, 1)
for cword in cwords:
pattern = re.compile("[,。、]+")
if pattern.search(cword):
draw.text((cnw,cnh), cword, fill=(0,0,0,255), font=content_fnt)
# draw.text((cnw+30-12,cnh-30+12), cword, fill=(0,0,0,255), font=content_fnt)
else:
draw.text((cnw,cnh), cword, fill=(0,0,0,255), font=content_fnt)
cnh = cnh+sch+padding
cnw = cnw-scw-spacing
# draw text in the middle of the image, half opacity
# draw.multiline_text((w/2-tw/2,420), title, font=title_fnt, fill=(0,0,0,255), align='center')
# draw.multiline_text((w/2-cw/2,420+th+45), content, font=content_fnt, fill=(0,0,0,255), align='center', spacing=spacing)
draw.multiline_text((w/2-aw/2,h-50-15-crh-ah), author, font=author_fnt, fill=(0,0,0,255), align='center')
draw.multiline_text((w-crw,h-15-crh), copyright, font=copyright_fnt, fill=(189,189,189,255), align='center')
# get BytesIO
msstream = BytesIO()
# save image data to output stream
base.save(msstream,"png")
# release memory
base.close()
return HttpResponse(msstream.getvalue(),content_type="image/png")
def template5(request,font):
w = 640
h = 1080
iw = 600
ih = 340
bw = 300
bh = 300
padding = 2
title = '西江月·夜行黄沙道中'
author = '辛弃疾'
category = '#婉约#豪放#夏天#'
content = '''帘外雨潺潺,
春意阑珊。
罗衾不耐五更寒。
梦里不知身是客,
一晌贪欢。
独自莫凭栏,
无限江山,
别时容易见时难。
流水落花春去也,
天上人间。'''
spacing = 20
#content = content.replace(',','')
#content = content.replace('。','')
#content = content.replace('\r','。')
#content = fill(content, 14)
copyright = '微信小程序「天天码图」'
title_fnt = ImageFont.truetype('font/zh/'+font+'.ttf', 35)
author_fnt = ImageFont.truetype('font/zh/'+font+'.ttf', 25)
content_fnt = ImageFont.truetype('font/zh/'+font+'.ttf', 30)
copyright_fnt = ImageFont.truetype('font/zh/YueSong.ttf', 15)
clines = content.split('\n')
tlines = wrap(title, 1)
alines = wrap(author, 1)
# get width and height of single title word
stw,sth = title_fnt.getsize("已")
# get width and height of single content word
saw,sah = author_fnt.getsize("已")
scw,sch = content_fnt.getsize("已")
scrw,scrh = copyright_fnt.getsize("已")
wmh = len(tlines)*(sth+padding)
wmw = len(clines)*(scw+spacing)
for cline in clines:
clineh = len(cline)*sch
if clineh > wmh:
wmh = clineh
w = wmw+115+115+stw+115
h = wmh+80+80+scrh+15
base = Image.new('RGBA',(w,h),(255,255,255,255))
draw = ImageDraw.Draw(base)
# get a drawing context
draw = ImageDraw.Draw(base)
# split the title
# current title height
tnh = 80
for tline in tlines:
draw.text((w-115-stw,tnh), tline, fill=(0,0,0,255), font=title_fnt)
tnh = tnh+sth
anh = 80+sah
for aline in alines:
draw.text((w-115-stw-saw-10,anh), aline, fill=(0,0,0,255), font=author_fnt)
anh = anh+sah
#clines = wrap(content, 14)
# current width of content
cnw = w-115-stw-115-scw
lnh = 80
for cline in clines:
# current height of content
cnh = 80
cwords = wrap(cline, 1)
for cword in cwords:
if(cword != ',' and cword !='。'):
draw.text((cnw,cnh), cword, fill=(0,0,0,255), font=content_fnt)
cnh = cnh+sch+padding
else:
#draw.text((cnw,cnh), cword, fill=(0,0,0,255), font=content_fnt)
cnh = cnh+sch+padding
lnh = cnh
cnw = cnw-scw-spacing
copyrihtW,copyrightH = draw.multiline_textsize(copyright, font=copyright_fnt)
draw.multiline_text((w-copyrihtW,h-15-copyrightH), copyright, font=copyright_fnt, fill=(189,189,189,255), align='center')
stamp = Image.open("stamp.png").convert('RGBA')
stamp = stamp.resize((50,50))
base.paste(stamp,box=(cnw+scw+int((50-scw)/2),lnh-25),mask=stamp)
# get BytesIO
msstream = BytesIO()
# save image data to output stream
base.save(msstream,"png")
# release memory
base.close()
return HttpResponse(msstream.getvalue(),content_type="image/png")
def image_text(request):
fontSize = 40
w = 640
h = 640
text = '当一艘船沉入海底\n当一个人成了谜\n你不知道\n他们为何离去\n那声再见竟是他最后一句'
meta = '后会无期·G.E.M.邓紫棋'
copyright = '— 微信小程序 : 天天码图 —'
# 按长度(字数)换行
# text = fill(text,11)
# make a blank image as the background
base = Image.new('RGBA',(w,h),(255,255,255,255))
# get an image
photo = Image.open("photo.jpg").convert('RGBA')
(pw, ph) = photo.size
if pw/ph>w/h:
box = ((pw-ph)/2,0,(pw+ph)/2,ph)
else:
box = (0,(ph-pw)/2,pw,(pw+ph)/2)
photo = photo.crop(box)
photo = photo.resize((w,h))
# blur filter
photo = photo.filter(ImageFilter.GaussianBlur())
base.paste(photo)
# make a blank image for text, initailized to half-transparent text color
txt = Image.new('RGBA', (w, h), (0,0,0,100))
# get a font
fnt = ImageFont.truetype('font/zh/LiJin.ttf',fontSize)
meta_fnt = ImageFont.truetype('font/zh/PingFang.ttf',20)
copyright_fnt = ImageFont.truetype('font/zh/TongXin.ttf',14)
# get size of the text
# (tw, th) = fnt.getsize(text)
# get a drawing context
draw = ImageDraw.Draw(txt)
tw,th = draw.multiline_textsize(text, fnt)
mw,mh = draw.multiline_textsize(meta, meta_fnt)
cpw,cph =draw.multiline_textsize(copyright, copyright_fnt)
# draw text in the middle of the image, half opacity
draw.multiline_text(((w-tw)/2,(h-th)/2), text, font=fnt, fill=(255,255,255,255), align='center',spacing=15)
draw.multiline_text(((w-mw)/2,h-mh-30), meta, font=meta_fnt, fill=(255,255,255,150), align='center')
draw.multiline_text(((w-cpw)/2,h-cph-10), copyright, font=copyright_fnt, fill=(255,255,255,150), align='center')
# composite base image and text image
out = Image.alpha_composite(base, txt)
# get BytesIO
msstream = BytesIO()
# save image data to output stream
out.save(msstream,"png")
# release memory
out.close()
return HttpResponse(msstream.getvalue(),content_type="image/png")
| 34.561525
| 126
| 0.637148
| 2,869
| 19,942
| 4.354827
| 0.099686
| 0.008644
| 0.006003
| 0.013446
| 0.836161
| 0.821434
| 0.808068
| 0.792781
| 0.764447
| 0.7494
| 0
| 0.052119
| 0.212015
| 19,942
| 576
| 127
| 34.621528
| 0.742714
| 0.119647
| 0
| 0.690476
| 0
| 0
| 0.082537
| 0.016484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0
| 0.033333
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab8891174aec997a2c655b53e29e6c2e74ae923f
| 97
|
py
|
Python
|
install/hooks/hook-threesdk.py
|
gneumann333/jumpscaleX_core
|
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
|
[
"Apache-2.0"
] | 1
|
2020-06-21T11:18:52.000Z
|
2020-06-21T11:18:52.000Z
|
install/hooks/hook-threesdk.py
|
gneumann333/jumpscaleX_core
|
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
|
[
"Apache-2.0"
] | 644
|
2019-08-25T10:19:56.000Z
|
2020-12-23T09:41:04.000Z
|
install/hooks/hook-threesdk.py
|
gneumann333/jumpscaleX_core
|
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
|
[
"Apache-2.0"
] | 11
|
2019-08-29T21:38:50.000Z
|
2020-06-21T11:18:55.000Z
|
hiddenimports = ["packaging.requirements", "pkg_resources.py2_warn", "pathlib", "_cffi_backend"]
| 48.5
| 96
| 0.773196
| 10
| 97
| 7.1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.061856
| 97
| 1
| 97
| 97
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.659794
| 0.453608
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab8a44584aa902e4f0f45c1b9e09ad6d0fb230fd
| 9,244
|
py
|
Python
|
functional_tests/mpsserverfunctests/IntentionsTests.py
|
Strumenta/MPSServer
|
b5f7bf5918d94541eae1782c17c0797b76f91699
|
[
"Apache-1.1"
] | 15
|
2020-04-02T15:45:34.000Z
|
2021-11-04T10:06:35.000Z
|
functional_tests/mpsserverfunctests/IntentionsTests.py
|
Strumenta/MPSServer
|
b5f7bf5918d94541eae1782c17c0797b76f91699
|
[
"Apache-1.1"
] | 43
|
2020-03-25T17:55:52.000Z
|
2022-02-08T12:09:25.000Z
|
functional_tests/mpsserverfunctests/IntentionsTests.py
|
Strumenta/MPSServer
|
b5f7bf5918d94541eae1782c17c0797b76f91699
|
[
"Apache-1.1"
] | 2
|
2020-04-11T11:23:22.000Z
|
2021-11-19T20:17:32.000Z
|
import asyncio
import json
import time
import unittest
import requests
import websockets
from BaseTest import BaseTest, BASE_URL, BASE_WS_URL_CUSTOM, BASE_WS_URL_JSONRPC, BaseAsyncTest
class IntentionsHttpTestCase(BaseTest):
@classmethod
def setUpClass(cls):
BaseTest.setUpClass()
def setUp(self):
pass
def reloadAll(self):
pass
def test_create_intentions_block(self):
r = requests.post(
"%s/intentions/ProtocolLanguage.sandbox/5465070037663859703/createBlock"
% BASE_URL
)
self.assertEqual(200, r.status_code)
data = r.json()
self.assertEqual(True, data["success"])
uuid = data["value"]
time.sleep(0.5)
r = requests.get("%s/intentions/%s" % (BASE_URL, uuid))
self.assertEqual(200, r.status_code)
data = r.json()
self.assertEqual(True, data["success"], "returned %s" % str(data))
self.assertEqual(
[
{"index": 0, "description": "Intention on Protocol Element"},
{"index": 1, "description": "Intention on Protocol"},
],
data["value"],
)
class IntentionsWsTestCase(BaseAsyncTest):
    """Functional tests for the intentions API over websockets.

    Each scenario is exercised twice: over the custom message protocol
    (BASE_WS_URL_CUSTOM) and over JSON-RPC (BASE_WS_URL_JSONRPC). The
    CreateIntentionsBlock handshake that every test repeats is factored
    into the two private helpers below.
    """

    # Reference to the node an intentions block is created for.
    NODE = {
        "model": "ProtocolLanguage.sandbox",
        "id": {"regularId": 5465070037663859703},
    }
    # Intentions the server is expected to list for that node.
    EXPECTED_INTENTIONS = [
        {"index": 0, "description": "Intention on Protocol Element"},
        {"index": 1, "description": "Intention on Protocol"},
    ]

    @classmethod
    def setUpClass(cls):
        BaseAsyncTest.setUpClass()

    def setUp(self):
        # No per-test fixture needed; the server is shared across tests.
        pass

    def reloadAll(self):
        # Intentionally a no-op for these tests.
        pass

    def _run(self, coro):
        """Drive a test coroutine to completion on the default event loop."""
        asyncio.get_event_loop().run_until_complete(coro)

    async def _create_block_custom(self, websocket):
        """Create an intentions block (custom protocol) and return its UUID."""
        await websocket.send(
            json.dumps({"type": "CreateIntentionsBlock", "node": self.NODE})
        )
        response = json.loads(await websocket.recv())
        self.assertEqual("CreateIntentionsBlockAnswer", response["type"])
        return response["blockUUID"]

    async def _create_block_jsonrpc(self, websocket, request_id=None):
        """Create an intentions block (JSON-RPC) and return the raw response.

        ``request_id`` is sent as the JSON-RPC ``id`` field when provided.
        """
        message = {"method": "CreateIntentionsBlock", "params": {"node": self.NODE}}
        if request_id is not None:
            message["id"] = request_id
        await websocket.send(json.dumps(message))
        response = json.loads(await websocket.recv())
        self.assertEqual("CreateIntentionsBlockAnswer", response["result"]["type"])
        return response

    def test_create_intentions_block_custom(self):
        async def f():
            websocket = await websockets.connect(BASE_WS_URL_CUSTOM)
            uuid = await self._create_block_custom(websocket)
            await websocket.send(
                json.dumps({"type": "GetIntentionsBlock", "blockUUID": uuid})
            )
            response = json.loads(await websocket.recv())
            self.assertEqual("GetIntentionsBlockAnswer", response["type"])
            self.assertEqual(self.EXPECTED_INTENTIONS, response["intentions"])
            await websocket.close()

        self._run(f())

    def test_delete_intentions_block_custom(self):
        async def f():
            websocket = await websockets.connect(BASE_WS_URL_CUSTOM)
            uuid = await self._create_block_custom(websocket)
            # No reply is read for the delete; the next message received
            # is the answer to GetIntentionsBlock.
            await websocket.send(
                json.dumps({"type": "DeleteIntentionsBlock", "blockUUID": uuid})
            )
            await websocket.send(
                json.dumps({"type": "GetIntentionsBlock", "blockUUID": uuid})
            )
            response = json.loads(await websocket.recv())
            self.assertEqual("GetIntentionsBlockAnswer", response["type"])
            # Fetching a deleted block must report failure.
            self.assertFalse(response["result"]["success"])
            await websocket.close()

        self._run(f())

    def test_execute_intention_custom(self):
        async def f():
            websocket = await websockets.connect(BASE_WS_URL_CUSTOM)
            uuid = await self._create_block_custom(websocket)
            # No answer is awaited here; the test only checks the request
            # can be sent without the connection failing.
            await websocket.send(
                json.dumps(
                    {"type": "ExecuteIntention", "blockUUID": uuid, "index": 0}
                )
            )
            await websocket.close()

        self._run(f())

    def test_create_intentions_block_jsonrpc(self):
        async def f():
            websocket = await websockets.connect(BASE_WS_URL_JSONRPC)
            response = await self._create_block_jsonrpc(
                websocket, request_id="req-a-123"
            )
            # The answer must echo the request id back.
            self.assertEqual("req-a-123", response["id"])
            uuid = response["result"]["blockUUID"]
            await websocket.send(
                json.dumps(
                    {
                        "method": "GetIntentionsBlock",
                        "params": {"blockUUID": uuid},
                        "id": "req-a-124",
                    }
                )
            )
            response = json.loads(await websocket.recv())
            self.assertEqual("GetIntentionsBlockAnswer", response["result"]["type"])
            self.assertEqual(
                self.EXPECTED_INTENTIONS, response["result"]["intentions"]
            )
            self.assertEqual("req-a-124", response["id"])
            await websocket.close()

        self._run(f())

    def test_delete_intentions_block_jsonrpc(self):
        async def f():
            websocket = await websockets.connect(BASE_WS_URL_JSONRPC)
            response = await self._create_block_jsonrpc(
                websocket, request_id="req-a-125"
            )
            uuid = response["result"]["blockUUID"]
            # No reply is read for the delete; the next message received
            # is the answer to GetIntentionsBlock.
            await websocket.send(
                json.dumps(
                    {"method": "DeleteIntentionsBlock", "params": {"blockUUID": uuid}}
                )
            )
            await websocket.send(
                json.dumps(
                    {"method": "GetIntentionsBlock", "params": {"blockUUID": uuid}}
                )
            )
            response = json.loads(await websocket.recv())
            self.assertEqual("GetIntentionsBlockAnswer", response["result"]["type"])
            # Fetching a deleted block must report failure.
            self.assertFalse(response["result"]["result"]["success"])
            await websocket.close()

        self._run(f())

    def test_execute_intention_jsonrpc(self):
        async def f():
            websocket = await websockets.connect(BASE_WS_URL_JSONRPC)
            response = await self._create_block_jsonrpc(websocket)
            uuid = response["result"]["blockUUID"]
            # No answer is awaited here; the test only checks the request
            # can be sent without the connection failing.
            await websocket.send(
                json.dumps(
                    {
                        "method": "ExecuteIntention",
                        "params": {"blockUUID": uuid, "index": 0},
                    }
                )
            )
            await websocket.close()

        self._run(f())
if __name__ == "__main__":
    import os
    import sys

    # Make modules in the current working directory (e.g. BaseTest)
    # importable when this file is executed directly.
    sys.path.append(os.getcwd())
    unittest.main()
| 37.124498
| 101
| 0.493834
| 706
| 9,244
| 6.344193
| 0.150142
| 0.093771
| 0.056263
| 0.068765
| 0.841482
| 0.836794
| 0.829649
| 0.823845
| 0.823845
| 0.823845
| 0
| 0.029124
| 0.390848
| 9,244
| 248
| 102
| 37.274194
| 0.766294
| 0
| 0
| 0.560538
| 0
| 0
| 0.175032
| 0.069234
| 0
| 0
| 0
| 0
| 0.09417
| 1
| 0.058296
| false
| 0.017937
| 0.040359
| 0
| 0.107623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab9248f9a2550f6ff5f3d4d7eadab6212246fa18
| 90
|
py
|
Python
|
calculation/gmhazard_calc/gmhazard_calc/site/__init__.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
calculation/gmhazard_calc/gmhazard_calc/site/__init__.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | 8
|
2021-10-13T02:33:23.000Z
|
2022-03-29T21:01:08.000Z
|
calculation/gmhazard_calc/gmhazard_calc/site/__init__.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
from .site import get_site_from_coords, get_site_from_name
from .SiteInfo import SiteInfo
| 30
| 58
| 0.866667
| 15
| 90
| 4.8
| 0.466667
| 0.194444
| 0.305556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 90
| 2
| 59
| 45
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
abb306264973f5286b70b8211165083199e16ed3
| 21,480
|
py
|
Python
|
stix2patterns/v21/grammars/STIXPatternLexer.py
|
sthagen/cti-pattern-validator
|
def38cb3cec5cd73bc26535f6a595abb0af8f4a5
|
[
"BSD-3-Clause"
] | 22
|
2016-10-12T15:09:19.000Z
|
2022-01-11T08:52:05.000Z
|
stix2patterns/v21/grammars/STIXPatternLexer.py
|
sthagen/cti-pattern-validator
|
def38cb3cec5cd73bc26535f6a595abb0af8f4a5
|
[
"BSD-3-Clause"
] | 68
|
2016-11-29T15:53:16.000Z
|
2022-03-31T18:23:24.000Z
|
stix2patterns/v21/grammars/STIXPatternLexer.py
|
sthagen/cti-pattern-validator
|
def38cb3cec5cd73bc26535f6a595abb0af8f4a5
|
[
"BSD-3-Clause"
] | 23
|
2016-11-08T19:19:36.000Z
|
2021-04-20T06:09:37.000Z
|
# Generated from STIXPattern.g4 by ANTLR 4.9.2
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN for the STIXPattern lexer.

    Generated from STIXPattern.g4 by ANTLR 4.9.2 — do not edit by hand.
    The payload below is opaque serialized-automaton data consumed by
    ATNDeserializer; any change to it corrupts the lexer.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\28")
        buf.write("\u01f8\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
        buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
        buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
        buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
        buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
        buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
        buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
        buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
        buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
        buf.write("\3\2\3\2\3\2\3\2\7\2z\n\2\f\2\16\2}\13\2\5\2\177\n\2\3")
        buf.write("\3\5\3\u0082\n\3\3\3\3\3\3\3\7\3\u0087\n\3\f\3\16\3\u008a")
        buf.write("\13\3\5\3\u008c\n\3\3\4\3\4\7\4\u0090\n\4\f\4\16\4\u0093")
        buf.write("\13\4\3\4\3\4\6\4\u0097\n\4\r\4\16\4\u0098\3\5\5\5\u009c")
        buf.write("\n\5\3\5\7\5\u009f\n\5\f\5\16\5\u00a2\13\5\3\5\3\5\6\5")
        buf.write("\u00a6\n\5\r\5\16\5\u00a7\3\6\3\6\3\6\7\6\u00ad\n\6\f")
        buf.write("\6\16\6\u00b0\13\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
        buf.write("\7\7\u00bb\n\7\f\7\16\7\u00be\13\7\3\7\3\7\3\7\3\7\3\7")
        buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00d1")
        buf.write("\n\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\7\b\u00db\n\b\f\b")
        buf.write("\16\b\u00de\13\b\3\b\3\b\3\t\3\t\5\t\u00e4\n\t\3\n\3\n")
        buf.write("\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\5\n\u00f1\n\n\3\n")
        buf.write("\3\n\3\n\3\n\3\n\3\n\3\n\5\n\u00fa\n\n\3\n\3\n\3\n\3\n")
        buf.write("\3\n\5\n\u0101\n\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\5\n")
        buf.write("\u010b\n\n\3\n\3\n\6\n\u010f\n\n\r\n\16\n\u0110\5\n\u0113")
        buf.write("\n\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\r\3")
        buf.write("\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16")
        buf.write("\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20")
        buf.write("\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21")
        buf.write("\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
        buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24")
        buf.write("\3\24\3\24\3\24\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26")
        buf.write("\3\26\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30")
        buf.write("\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32")
        buf.write("\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34")
        buf.write("\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35")
        buf.write("\3\35\3\35\3\36\3\36\7\36\u0193\n\36\f\36\16\36\u0196")
        buf.write("\13\36\3\37\3\37\7\37\u019a\n\37\f\37\16\37\u019d\13\37")
        buf.write("\3 \3 \3 \5 \u01a2\n \3!\3!\3!\3!\5!\u01a8\n!\3\"\3\"")
        buf.write("\3#\3#\3#\3$\3$\3%\3%\3%\3&\3&\3\'\3\'\3(\3(\3)\3)\3*")
        buf.write("\3*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3\60\3\61\3\61")
        buf.write("\3\62\3\62\3\63\3\63\3\64\3\64\3\65\3\65\3\65\3\66\3\66")
        buf.write("\3\67\6\67\u01d8\n\67\r\67\16\67\u01d9\3\67\3\67\38\3")
        buf.write("8\38\38\78\u01e2\n8\f8\168\u01e5\138\38\38\38\38\38\3")
        buf.write("9\39\39\39\79\u01f0\n9\f9\169\u01f3\139\39\39\3:\3:\3")
        buf.write("\u01e3\2;\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25")
        buf.write("\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+")
        buf.write("\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E")
        buf.write("$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\2i\2k\2")
        buf.write("m\65o\66q\67s8\3\2\21\3\2\63;\3\2\62;\4\2))^^\3\2\62\64")
        buf.write("\3\2\63\64\3\2\62\63\3\2\62\65\3\2\62\67\5\2C\\aac|\6")
        buf.write("\2\62;C\\aac|\7\2//\62;C\\aac|\5\2\62;CHch\6\2--\61;C")
        buf.write("\\c|\f\2\13\17\"\"\u0087\u0087\u00a2\u00a2\u1682\u1682")
        buf.write("\u2002\u200c\u202a\u202b\u2031\u2031\u2061\u2061\u3002")
        buf.write("\u3002\4\2\f\f\17\17\2\u0214\2\3\3\2\2\2\2\5\3\2\2\2\2")
        buf.write("\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3")
        buf.write("\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2")
        buf.write("\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2")
        buf.write("\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2")
        buf.write("\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63")
        buf.write("\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2")
        buf.write("\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2")
        buf.write("\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3")
        buf.write("\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y")
        buf.write("\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2")
        buf.write("c\3\2\2\2\2e\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2")
        buf.write("\2s\3\2\2\2\3u\3\2\2\2\5\u0081\3\2\2\2\7\u008d\3\2\2\2")
        buf.write("\t\u009b\3\2\2\2\13\u00a9\3\2\2\2\r\u00b3\3\2\2\2\17\u00d4")
        buf.write("\3\2\2\2\21\u00e3\3\2\2\2\23\u00e5\3\2\2\2\25\u0117\3")
        buf.write("\2\2\2\27\u011b\3\2\2\2\31\u011e\3\2\2\2\33\u0122\3\2")
        buf.write("\2\2\35\u012d\3\2\2\2\37\u0132\3\2\2\2!\u013a\3\2\2\2")
        buf.write("#\u0145\3\2\2\2%\u014e\3\2\2\2\'\u0155\3\2\2\2)\u015a")
        buf.write("\3\2\2\2+\u015d\3\2\2\2-\u0163\3\2\2\2/\u0168\3\2\2\2")
        buf.write("\61\u0170\3\2\2\2\63\u0175\3\2\2\2\65\u017b\3\2\2\2\67")
        buf.write("\u0182\3\2\2\29\u018a\3\2\2\2;\u0190\3\2\2\2=\u0197\3")
        buf.write("\2\2\2?\u01a1\3\2\2\2A\u01a7\3\2\2\2C\u01a9\3\2\2\2E\u01ab")
        buf.write("\3\2\2\2G\u01ae\3\2\2\2I\u01b0\3\2\2\2K\u01b3\3\2\2\2")
        buf.write("M\u01b5\3\2\2\2O\u01b7\3\2\2\2Q\u01b9\3\2\2\2S\u01bb\3")
        buf.write("\2\2\2U\u01bd\3\2\2\2W\u01bf\3\2\2\2Y\u01c1\3\2\2\2[\u01c3")
        buf.write("\3\2\2\2]\u01c5\3\2\2\2_\u01c7\3\2\2\2a\u01c9\3\2\2\2")
        buf.write("c\u01cb\3\2\2\2e\u01cd\3\2\2\2g\u01cf\3\2\2\2i\u01d1\3")
        buf.write("\2\2\2k\u01d4\3\2\2\2m\u01d7\3\2\2\2o\u01dd\3\2\2\2q\u01eb")
        buf.write("\3\2\2\2s\u01f6\3\2\2\2u~\7/\2\2v\177\7\62\2\2w{\t\2\2")
        buf.write("\2xz\t\3\2\2yx\3\2\2\2z}\3\2\2\2{y\3\2\2\2{|\3\2\2\2|")
        buf.write("\177\3\2\2\2}{\3\2\2\2~v\3\2\2\2~w\3\2\2\2\177\4\3\2\2")
        buf.write("\2\u0080\u0082\7-\2\2\u0081\u0080\3\2\2\2\u0081\u0082")
        buf.write("\3\2\2\2\u0082\u008b\3\2\2\2\u0083\u008c\7\62\2\2\u0084")
        buf.write("\u0088\t\2\2\2\u0085\u0087\t\3\2\2\u0086\u0085\3\2\2\2")
        buf.write("\u0087\u008a\3\2\2\2\u0088\u0086\3\2\2\2\u0088\u0089\3")
        buf.write("\2\2\2\u0089\u008c\3\2\2\2\u008a\u0088\3\2\2\2\u008b\u0083")
        buf.write("\3\2\2\2\u008b\u0084\3\2\2\2\u008c\6\3\2\2\2\u008d\u0091")
        buf.write("\7/\2\2\u008e\u0090\t\3\2\2\u008f\u008e\3\2\2\2\u0090")
        buf.write("\u0093\3\2\2\2\u0091\u008f\3\2\2\2\u0091\u0092\3\2\2\2")
        buf.write("\u0092\u0094\3\2\2\2\u0093\u0091\3\2\2\2\u0094\u0096\7")
        buf.write("\60\2\2\u0095\u0097\t\3\2\2\u0096\u0095\3\2\2\2\u0097")
        buf.write("\u0098\3\2\2\2\u0098\u0096\3\2\2\2\u0098\u0099\3\2\2\2")
        buf.write("\u0099\b\3\2\2\2\u009a\u009c\7-\2\2\u009b\u009a\3\2\2")
        buf.write("\2\u009b\u009c\3\2\2\2\u009c\u00a0\3\2\2\2\u009d\u009f")
        buf.write("\t\3\2\2\u009e\u009d\3\2\2\2\u009f\u00a2\3\2\2\2\u00a0")
        buf.write("\u009e\3\2\2\2\u00a0\u00a1\3\2\2\2\u00a1\u00a3\3\2\2\2")
        buf.write("\u00a2\u00a0\3\2\2\2\u00a3\u00a5\7\60\2\2\u00a4\u00a6")
        buf.write("\t\3\2\2\u00a5\u00a4\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7")
        buf.write("\u00a5\3\2\2\2\u00a7\u00a8\3\2\2\2\u00a8\n\3\2\2\2\u00a9")
        buf.write("\u00aa\7j\2\2\u00aa\u00ae\5K&\2\u00ab\u00ad\5i\65\2\u00ac")
        buf.write("\u00ab\3\2\2\2\u00ad\u00b0\3\2\2\2\u00ae\u00ac\3\2\2\2")
        buf.write("\u00ae\u00af\3\2\2\2\u00af\u00b1\3\2\2\2\u00b0\u00ae\3")
        buf.write("\2\2\2\u00b1\u00b2\5K&\2\u00b2\f\3\2\2\2\u00b3\u00b4\7")
        buf.write("d\2\2\u00b4\u00bc\5K&\2\u00b5\u00b6\5k\66\2\u00b6\u00b7")
        buf.write("\5k\66\2\u00b7\u00b8\5k\66\2\u00b8\u00b9\5k\66\2\u00b9")
        buf.write("\u00bb\3\2\2\2\u00ba\u00b5\3\2\2\2\u00bb\u00be\3\2\2\2")
        buf.write("\u00bc\u00ba\3\2\2\2\u00bc\u00bd\3\2\2\2\u00bd\u00d0\3")
        buf.write("\2\2\2\u00be\u00bc\3\2\2\2\u00bf\u00c0\5k\66\2\u00c0\u00c1")
        buf.write("\5k\66\2\u00c1\u00c2\5k\66\2\u00c2\u00c3\5k\66\2\u00c3")
        buf.write("\u00d1\3\2\2\2\u00c4\u00c5\5k\66\2\u00c5\u00c6\5k\66\2")
        buf.write("\u00c6\u00c7\5k\66\2\u00c7\u00c8\3\2\2\2\u00c8\u00c9\7")
        buf.write("?\2\2\u00c9\u00d1\3\2\2\2\u00ca\u00cb\5k\66\2\u00cb\u00cc")
        buf.write("\5k\66\2\u00cc\u00cd\3\2\2\2\u00cd\u00ce\7?\2\2\u00ce")
        buf.write("\u00cf\7?\2\2\u00cf\u00d1\3\2\2\2\u00d0\u00bf\3\2\2\2")
        buf.write("\u00d0\u00c4\3\2\2\2\u00d0\u00ca\3\2\2\2\u00d1\u00d2\3")
        buf.write("\2\2\2\u00d2\u00d3\5K&\2\u00d3\16\3\2\2\2\u00d4\u00dc")
        buf.write("\5K&\2\u00d5\u00db\n\4\2\2\u00d6\u00d7\7^\2\2\u00d7\u00db")
        buf.write("\7)\2\2\u00d8\u00d9\7^\2\2\u00d9\u00db\7^\2\2\u00da\u00d5")
        buf.write("\3\2\2\2\u00da\u00d6\3\2\2\2\u00da\u00d8\3\2\2\2\u00db")
        buf.write("\u00de\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd\3\2\2\2")
        buf.write("\u00dd\u00df\3\2\2\2\u00de\u00dc\3\2\2\2\u00df\u00e0\5")
        buf.write("K&\2\u00e0\20\3\2\2\2\u00e1\u00e4\5\61\31\2\u00e2\u00e4")
        buf.write("\5\63\32\2\u00e3\u00e1\3\2\2\2\u00e3\u00e2\3\2\2\2\u00e4")
        buf.write("\22\3\2\2\2\u00e5\u00e6\7v\2\2\u00e6\u00e7\5K&\2\u00e7")
        buf.write("\u00e8\t\3\2\2\u00e8\u00e9\t\3\2\2\u00e9\u00ea\t\3\2\2")
        buf.write("\u00ea\u00eb\t\3\2\2\u00eb\u00f0\5]/\2\u00ec\u00ed\7\62")
        buf.write("\2\2\u00ed\u00f1\t\2\2\2\u00ee\u00ef\7\63\2\2\u00ef\u00f1")
        buf.write("\t\5\2\2\u00f0\u00ec\3\2\2\2\u00f0\u00ee\3\2\2\2\u00f1")
        buf.write("\u00f2\3\2\2\2\u00f2\u00f9\5]/\2\u00f3\u00f4\7\62\2\2")
        buf.write("\u00f4\u00fa\t\2\2\2\u00f5\u00f6\t\6\2\2\u00f6\u00fa\t")
        buf.write("\3\2\2\u00f7\u00f8\7\65\2\2\u00f8\u00fa\t\7\2\2\u00f9")
        buf.write("\u00f3\3\2\2\2\u00f9\u00f5\3\2\2\2\u00f9\u00f7\3\2\2\2")
        buf.write("\u00fa\u00fb\3\2\2\2\u00fb\u0100\7V\2\2\u00fc\u00fd\t")
        buf.write("\7\2\2\u00fd\u0101\t\3\2\2\u00fe\u00ff\7\64\2\2\u00ff")
        buf.write("\u0101\t\b\2\2\u0100\u00fc\3\2\2\2\u0100\u00fe\3\2\2\2")
        buf.write("\u0101\u0102\3\2\2\2\u0102\u0103\5M\'\2\u0103\u0104\t")
        buf.write("\t\2\2\u0104\u0105\t\3\2\2\u0105\u010a\5M\'\2\u0106\u0107")
        buf.write("\t\t\2\2\u0107\u010b\t\3\2\2\u0108\u0109\78\2\2\u0109")
        buf.write("\u010b\7\62\2\2\u010a\u0106\3\2\2\2\u010a\u0108\3\2\2")
        buf.write("\2\u010b\u0112\3\2\2\2\u010c\u010e\5O(\2\u010d\u010f\t")
        buf.write("\3\2\2\u010e\u010d\3\2\2\2\u010f\u0110\3\2\2\2\u0110\u010e")
        buf.write("\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u0113\3\2\2\2\u0112")
        buf.write("\u010c\3\2\2\2\u0112\u0113\3\2\2\2\u0113\u0114\3\2\2\2")
        buf.write("\u0114\u0115\7\\\2\2\u0115\u0116\5K&\2\u0116\24\3\2\2")
        buf.write("\2\u0117\u0118\7C\2\2\u0118\u0119\7P\2\2\u0119\u011a\7")
        buf.write("F\2\2\u011a\26\3\2\2\2\u011b\u011c\7Q\2\2\u011c\u011d")
        buf.write("\7T\2\2\u011d\30\3\2\2\2\u011e\u011f\7P\2\2\u011f\u0120")
        buf.write("\7Q\2\2\u0120\u0121\7V\2\2\u0121\32\3\2\2\2\u0122\u0123")
        buf.write("\7H\2\2\u0123\u0124\7Q\2\2\u0124\u0125\7N\2\2\u0125\u0126")
        buf.write("\7N\2\2\u0126\u0127\7Q\2\2\u0127\u0128\7Y\2\2\u0128\u0129")
        buf.write("\7G\2\2\u0129\u012a\7F\2\2\u012a\u012b\7D\2\2\u012b\u012c")
        buf.write("\7[\2\2\u012c\34\3\2\2\2\u012d\u012e\7N\2\2\u012e\u012f")
        buf.write("\7K\2\2\u012f\u0130\7M\2\2\u0130\u0131\7G\2\2\u0131\36")
        buf.write("\3\2\2\2\u0132\u0133\7O\2\2\u0133\u0134\7C\2\2\u0134\u0135")
        buf.write("\7V\2\2\u0135\u0136\7E\2\2\u0136\u0137\7J\2\2\u0137\u0138")
        buf.write("\7G\2\2\u0138\u0139\7U\2\2\u0139 \3\2\2\2\u013a\u013b")
        buf.write("\7K\2\2\u013b\u013c\7U\2\2\u013c\u013d\7U\2\2\u013d\u013e")
        buf.write("\7W\2\2\u013e\u013f\7R\2\2\u013f\u0140\7G\2\2\u0140\u0141")
        buf.write("\7T\2\2\u0141\u0142\7U\2\2\u0142\u0143\7G\2\2\u0143\u0144")
        buf.write("\7V\2\2\u0144\"\3\2\2\2\u0145\u0146\7K\2\2\u0146\u0147")
        buf.write("\7U\2\2\u0147\u0148\7U\2\2\u0148\u0149\7W\2\2\u0149\u014a")
        buf.write("\7D\2\2\u014a\u014b\7U\2\2\u014b\u014c\7G\2\2\u014c\u014d")
        buf.write("\7V\2\2\u014d$\3\2\2\2\u014e\u014f\7G\2\2\u014f\u0150")
        buf.write("\7Z\2\2\u0150\u0151\7K\2\2\u0151\u0152\7U\2\2\u0152\u0153")
        buf.write("\7V\2\2\u0153\u0154\7U\2\2\u0154&\3\2\2\2\u0155\u0156")
        buf.write("\7N\2\2\u0156\u0157\7C\2\2\u0157\u0158\7U\2\2\u0158\u0159")
        buf.write("\7V\2\2\u0159(\3\2\2\2\u015a\u015b\7K\2\2\u015b\u015c")
        buf.write("\7P\2\2\u015c*\3\2\2\2\u015d\u015e\7U\2\2\u015e\u015f")
        buf.write("\7V\2\2\u015f\u0160\7C\2\2\u0160\u0161\7T\2\2\u0161\u0162")
        buf.write("\7V\2\2\u0162,\3\2\2\2\u0163\u0164\7U\2\2\u0164\u0165")
        buf.write("\7V\2\2\u0165\u0166\7Q\2\2\u0166\u0167\7R\2\2\u0167.\3")
        buf.write("\2\2\2\u0168\u0169\7U\2\2\u0169\u016a\7G\2\2\u016a\u016b")
        buf.write("\7E\2\2\u016b\u016c\7Q\2\2\u016c\u016d\7P\2\2\u016d\u016e")
        buf.write("\7F\2\2\u016e\u016f\7U\2\2\u016f\60\3\2\2\2\u0170\u0171")
        buf.write("\7v\2\2\u0171\u0172\7t\2\2\u0172\u0173\7w\2\2\u0173\u0174")
        buf.write("\7g\2\2\u0174\62\3\2\2\2\u0175\u0176\7h\2\2\u0176\u0177")
        buf.write("\7c\2\2\u0177\u0178\7n\2\2\u0178\u0179\7u\2\2\u0179\u017a")
        buf.write("\7g\2\2\u017a\64\3\2\2\2\u017b\u017c\7Y\2\2\u017c\u017d")
        buf.write("\7K\2\2\u017d\u017e\7V\2\2\u017e\u017f\7J\2\2\u017f\u0180")
        buf.write("\7K\2\2\u0180\u0181\7P\2\2\u0181\66\3\2\2\2\u0182\u0183")
        buf.write("\7T\2\2\u0183\u0184\7G\2\2\u0184\u0185\7R\2\2\u0185\u0186")
        buf.write("\7G\2\2\u0186\u0187\7C\2\2\u0187\u0188\7V\2\2\u0188\u0189")
        buf.write("\7U\2\2\u01898\3\2\2\2\u018a\u018b\7V\2\2\u018b\u018c")
        buf.write("\7K\2\2\u018c\u018d\7O\2\2\u018d\u018e\7G\2\2\u018e\u018f")
        buf.write("\7U\2\2\u018f:\3\2\2\2\u0190\u0194\t\n\2\2\u0191\u0193")
        buf.write("\t\13\2\2\u0192\u0191\3\2\2\2\u0193\u0196\3\2\2\2\u0194")
        buf.write("\u0192\3\2\2\2\u0194\u0195\3\2\2\2\u0195<\3\2\2\2\u0196")
        buf.write("\u0194\3\2\2\2\u0197\u019b\t\n\2\2\u0198\u019a\t\f\2\2")
        buf.write("\u0199\u0198\3\2\2\2\u019a\u019d\3\2\2\2\u019b\u0199\3")
        buf.write("\2\2\2\u019b\u019c\3\2\2\2\u019c>\3\2\2\2\u019d\u019b")
        buf.write("\3\2\2\2\u019e\u01a2\7?\2\2\u019f\u01a0\7?\2\2\u01a0\u01a2")
        buf.write("\7?\2\2\u01a1\u019e\3\2\2\2\u01a1\u019f\3\2\2\2\u01a2")
        buf.write("@\3\2\2\2\u01a3\u01a4\7#\2\2\u01a4\u01a8\7?\2\2\u01a5")
        buf.write("\u01a6\7>\2\2\u01a6\u01a8\7@\2\2\u01a7\u01a3\3\2\2\2\u01a7")
        buf.write("\u01a5\3\2\2\2\u01a8B\3\2\2\2\u01a9\u01aa\7>\2\2\u01aa")
        buf.write("D\3\2\2\2\u01ab\u01ac\7>\2\2\u01ac\u01ad\7?\2\2\u01ad")
        buf.write("F\3\2\2\2\u01ae\u01af\7@\2\2\u01afH\3\2\2\2\u01b0\u01b1")
        buf.write("\7@\2\2\u01b1\u01b2\7?\2\2\u01b2J\3\2\2\2\u01b3\u01b4")
        buf.write("\7)\2\2\u01b4L\3\2\2\2\u01b5\u01b6\7<\2\2\u01b6N\3\2\2")
        buf.write("\2\u01b7\u01b8\7\60\2\2\u01b8P\3\2\2\2\u01b9\u01ba\7.")
        buf.write("\2\2\u01baR\3\2\2\2\u01bb\u01bc\7+\2\2\u01bcT\3\2\2\2")
        buf.write("\u01bd\u01be\7*\2\2\u01beV\3\2\2\2\u01bf\u01c0\7_\2\2")
        buf.write("\u01c0X\3\2\2\2\u01c1\u01c2\7]\2\2\u01c2Z\3\2\2\2\u01c3")
        buf.write("\u01c4\7-\2\2\u01c4\\\3\2\2\2\u01c5\u01c6\5_\60\2\u01c6")
        buf.write("^\3\2\2\2\u01c7\u01c8\7/\2\2\u01c8`\3\2\2\2\u01c9\u01ca")
        buf.write("\7`\2\2\u01cab\3\2\2\2\u01cb\u01cc\7\61\2\2\u01ccd\3\2")
        buf.write("\2\2\u01cd\u01ce\7,\2\2\u01cef\3\2\2\2\u01cf\u01d0\t\r")
        buf.write("\2\2\u01d0h\3\2\2\2\u01d1\u01d2\5g\64\2\u01d2\u01d3\5")
        buf.write("g\64\2\u01d3j\3\2\2\2\u01d4\u01d5\t\16\2\2\u01d5l\3\2")
        buf.write("\2\2\u01d6\u01d8\t\17\2\2\u01d7\u01d6\3\2\2\2\u01d8\u01d9")
        buf.write("\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da\3\2\2\2\u01da")
        buf.write("\u01db\3\2\2\2\u01db\u01dc\b\67\2\2\u01dcn\3\2\2\2\u01dd")
        buf.write("\u01de\7\61\2\2\u01de\u01df\7,\2\2\u01df\u01e3\3\2\2\2")
        buf.write("\u01e0\u01e2\13\2\2\2\u01e1\u01e0\3\2\2\2\u01e2\u01e5")
        buf.write("\3\2\2\2\u01e3\u01e4\3\2\2\2\u01e3\u01e1\3\2\2\2\u01e4")
        buf.write("\u01e6\3\2\2\2\u01e5\u01e3\3\2\2\2\u01e6\u01e7\7,\2\2")
        buf.write("\u01e7\u01e8\7\61\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01ea")
        buf.write("\b8\2\2\u01eap\3\2\2\2\u01eb\u01ec\7\61\2\2\u01ec\u01ed")
        buf.write("\7\61\2\2\u01ed\u01f1\3\2\2\2\u01ee\u01f0\n\20\2\2\u01ef")
        buf.write("\u01ee\3\2\2\2\u01f0\u01f3\3\2\2\2\u01f1\u01ef\3\2\2\2")
        buf.write("\u01f1\u01f2\3\2\2\2\u01f2\u01f4\3\2\2\2\u01f3\u01f1\3")
        buf.write("\2\2\2\u01f4\u01f5\b9\2\2\u01f5r\3\2\2\2\u01f6\u01f7\13")
        buf.write("\2\2\2\u01f7t\3\2\2\2 \2{~\u0081\u0088\u008b\u0091\u0098")
        buf.write("\u009b\u00a0\u00a7\u00ae\u00bc\u00d0\u00da\u00dc\u00e3")
        buf.write("\u00f0\u00f9\u0100\u010a\u0110\u0112\u0194\u019b\u01a1")
        buf.write("\u01a7\u01d9\u01e3\u01f1\3\b\2\2")
        return buf.getvalue()
class STIXPatternLexer(Lexer):
    """Lexer for the STIX v2.1 pattern grammar.

    Generated from STIXPattern.g4 by ANTLR 4.9.2 — do not edit by hand.
    """

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants, one per token (generated).
    IntNegLiteral = 1
    IntPosLiteral = 2
    FloatNegLiteral = 3
    FloatPosLiteral = 4
    HexLiteral = 5
    BinaryLiteral = 6
    StringLiteral = 7
    BoolLiteral = 8
    TimestampLiteral = 9
    AND = 10
    OR = 11
    NOT = 12
    FOLLOWEDBY = 13
    LIKE = 14
    MATCHES = 15
    ISSUPERSET = 16
    ISSUBSET = 17
    EXISTS = 18
    LAST = 19
    IN = 20
    START = 21
    STOP = 22
    SECONDS = 23
    TRUE = 24
    FALSE = 25
    WITHIN = 26
    REPEATS = 27
    TIMES = 28
    IdentifierWithoutHyphen = 29
    IdentifierWithHyphen = 30
    EQ = 31
    NEQ = 32
    LT = 33
    LE = 34
    GT = 35
    GE = 36
    QUOTE = 37
    COLON = 38
    DOT = 39
    COMMA = 40
    RPAREN = 41
    LPAREN = 42
    RBRACK = 43
    LBRACK = 44
    PLUS = 45
    HYPHEN = 46
    MINUS = 47
    POWER_OP = 48
    DIVIDE = 49
    ASTERISK = 50
    WS = 51
    COMMENT = 52
    LINE_COMMENT = 53
    InvalidCharacter = 54

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # Literal text of fixed tokens, indexed by token type.
    literalNames = [ "<INVALID>",
            "'AND'", "'OR'", "'NOT'", "'FOLLOWEDBY'", "'LIKE'", "'MATCHES'",
            "'ISSUPERSET'", "'ISSUBSET'", "'EXISTS'", "'LAST'", "'IN'",
            "'START'", "'STOP'", "'SECONDS'", "'true'", "'false'", "'WITHIN'",
            "'REPEATS'", "'TIMES'", "'<'", "'<='", "'>'", "'>='", "'''",
            "':'", "'.'", "','", "')'", "'('", "']'", "'['", "'+'", "'-'",
            "'^'", "'/'", "'*'" ]

    # Symbolic token names, indexed by token type.
    symbolicNames = [ "<INVALID>",
            "IntNegLiteral", "IntPosLiteral", "FloatNegLiteral", "FloatPosLiteral",
            "HexLiteral", "BinaryLiteral", "StringLiteral", "BoolLiteral",
            "TimestampLiteral", "AND", "OR", "NOT", "FOLLOWEDBY", "LIKE",
            "MATCHES", "ISSUPERSET", "ISSUBSET", "EXISTS", "LAST", "IN",
            "START", "STOP", "SECONDS", "TRUE", "FALSE", "WITHIN", "REPEATS",
            "TIMES", "IdentifierWithoutHyphen", "IdentifierWithHyphen",
            "EQ", "NEQ", "LT", "LE", "GT", "GE", "QUOTE", "COLON", "DOT",
            "COMMA", "RPAREN", "LPAREN", "RBRACK", "LBRACK", "PLUS", "HYPHEN",
            "MINUS", "POWER_OP", "DIVIDE", "ASTERISK", "WS", "COMMENT",
            "LINE_COMMENT", "InvalidCharacter" ]

    # Lexer rule names; also includes the helper rules HexDigit,
    # TwoHexDigits and Base64Char, which have no token constant above.
    ruleNames = [ "IntNegLiteral", "IntPosLiteral", "FloatNegLiteral", "FloatPosLiteral",
                  "HexLiteral", "BinaryLiteral", "StringLiteral", "BoolLiteral",
                  "TimestampLiteral", "AND", "OR", "NOT", "FOLLOWEDBY",
                  "LIKE", "MATCHES", "ISSUPERSET", "ISSUBSET", "EXISTS",
                  "LAST", "IN", "START", "STOP", "SECONDS", "TRUE", "FALSE",
                  "WITHIN", "REPEATS", "TIMES", "IdentifierWithoutHyphen",
                  "IdentifierWithHyphen", "EQ", "NEQ", "LT", "LE", "GT",
                  "GE", "QUOTE", "COLON", "DOT", "COMMA", "RPAREN", "LPAREN",
                  "RBRACK", "LBRACK", "PLUS", "HYPHEN", "MINUS", "POWER_OP",
                  "DIVIDE", "ASTERISK", "HexDigit", "TwoHexDigits", "Base64Char",
                  "WS", "COMMENT", "LINE_COMMENT", "InvalidCharacter" ]

    grammarFileName = "STIXPattern.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.9.2")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| 60.677966
| 103
| 0.567458
| 4,824
| 21,480
| 2.521352
| 0.142206
| 0.130724
| 0.073255
| 0.081888
| 0.304777
| 0.193867
| 0.133931
| 0.12242
| 0.114034
| 0.108361
| 0
| 0.339594
| 0.149767
| 21,480
| 353
| 104
| 60.849858
| 0.326398
| 0.002048
| 0
| 0.00597
| 1
| 0.641791
| 0.631766
| 0.578434
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00597
| false
| 0
| 0.014925
| 0
| 0.21194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
abe9e4dceeeffc6d4c10e2ef0451ac7b212fceff
| 5,993
|
py
|
Python
|
tests/test_things.py
|
0x6f736f646f/sdk-py
|
9b9be4d512c94783d973ebe0910b81da636ff7ad
|
[
"Apache-2.0"
] | 4
|
2021-11-29T01:50:53.000Z
|
2021-12-28T15:18:54.000Z
|
tests/test_things.py
|
0x6f736f646f/sdk-py
|
9b9be4d512c94783d973ebe0910b81da636ff7ad
|
[
"Apache-2.0"
] | 25
|
2021-08-09T13:29:20.000Z
|
2022-01-21T20:33:42.000Z
|
tests/test_things.py
|
mainflux/sdk-py
|
53699a41aa39ff82bc7a3d2dc45724a9fd357845
|
[
"Apache-2.0"
] | 3
|
2021-07-22T09:17:10.000Z
|
2022-01-07T12:01:14.000Z
|
from lib import sdk
import json, requests_mock
# Shared fixtures: one SDK client plus canned ids/token used by every test.
s = sdk.SDK()
thing = {"thing_name": "thing"}
thing_id = "123-456-789"
thing_id1 = "123-223-333"
channel_id = "654-654-654"
# NOTE(review): channel_id1 duplicates channel_id — looks like a copy-paste;
# confirm whether a distinct id was intended.
channel_id1 = "654-654-654"
token = "9a8b7c6d5e4f3g21"
url = "http://localhost"
# Query parameters forwarded to get_by_channel (none here).
params = None
def test_create_thing(requests_mock):
    # 201 + Location header: the SDK reports success and extracts the new id.
    requests_mock.register_uri(
        "POST",
        f"{url}/things",
        headers={"location": f"/things/{thing_id}"},
        status_code=201,
    )
    res = s.things.create(thing, token)
    assert res.error.status == 0
    assert res.value == thing_id
def test_create_existing_thing(requests_mock):
    # A 409 conflict maps to an SDK error with a fixed message.
    requests_mock.register_uri(
        "POST",
        f"{url}/things",
        headers={"location": f"/things/{thing_id}"},
        status_code=409,
    )
    res = s.things.create(thing, token)
    assert res.error.status == 1
    assert res.error.message == "Entity already exist."
def test_create_bulk_things(requests_mock):
    # Bulk creation succeeds: the SDK returns the ids echoed by the server.
    requests_mock.register_uri(
        "POST",
        f"{url}/things/bulk",
        json=[thing_id, thing_id1],
        headers={"location": f"/things/{thing_id}"},
        status_code=201,
    )
    res = s.things.create_bulk(thing_id, token)
    assert res.error.status == 0
    assert res.value == [thing_id, thing_id1]
def test_create_bulk_things_missing_token(requests_mock):
    # 401 maps to the SDK's missing/invalid-token error.
    requests_mock.register_uri(
        "POST",
        f"{url}/things/bulk",
        json=[thing_id, thing_id1],
        headers={"location": f"/things/{thing_id}"},
        status_code=401,
    )
    res = s.things.create_bulk(thing_id, token)
    assert res.error.status == 1
    assert res.error.message == "Missing or invalid access token provided."
def test_get_thing(requests_mock):
    # 200 with a JSON body: the SDK returns the thing as-is.
    requests_mock.register_uri(
        "GET", f"{url}/things/{thing_id}", json=thing, status_code=200
    )
    res = s.things.get(thing_id, token)
    assert res.error.status == 0
    assert res.value == thing
def test_get_thing_malformed_query(requests_mock):
    # 400 maps to the SDK's malformed-query error.
    requests_mock.register_uri(
        "GET", f"{url}/things/{thing_id}", json=thing, status_code=400
    )
    res = s.things.get(thing_id, token)
    assert res.error.status == 1
    assert res.error.message == "Failed due to malformed query parameters."
def test_get_all_things(requests_mock):
    # Listing succeeds: the SDK returns the server's id list unchanged.
    requests_mock.register_uri(
        "GET", f"{url}/things", json=[thing_id, thing_id1], status_code=200
    )
    res = s.things.get_all(token)
    assert res.error.status == 0
    assert res.value == [thing_id, thing_id1]
def test_get_all_thing_does_not_exist(requests_mock):
    # 404 maps to the SDK's does-not-exist error.
    requests_mock.register_uri(
        "GET", f"{url}/things", json=[thing_id, thing_id1], status_code=404
    )
    res = s.things.get_all(token)
    assert res.error.status == 1
    assert res.error.message == "Thing does not exist."
def test_get_by_channel(requests_mock):
    # Listing a channel's things succeeds; the mocked body is echoed back.
    requests_mock.register_uri(
        "GET",
        f"{url}/channels/{channel_id}/things",
        json=channel_id,
        headers={"Authorization": f"/channels/{channel_id}/things"},
        status_code=200,
    )
    res = s.things.get_by_channel(channel_id, params, token)
    assert res.error.status == 0
    assert res.value == channel_id
def test_get_by_channel_missing_token(requests_mock):
    # 401 maps to the SDK's missing/invalid-token error.
    requests_mock.register_uri(
        "GET",
        f"{url}/channels/{channel_id}/things",
        json=channel_id,
        headers={"Authorization": f"/channels/{channel_id}/things"},
        status_code=401,
    )
    res = s.things.get_by_channel(channel_id, params, token)
    assert res.error.status == 1
    assert res.error.message == "Missing or invalid access token provided."
def test_update_thing(requests_mock):
    # Update succeeds on a 200 response.
    requests_mock.register_uri(
        "PUT", f"{url}/things/{thing_id}", json=json.dumps(thing), status_code=200
    )
    res = s.things.update(thing_id, token, thing)
    assert res.error.status == 0
def test_update_thing_bad_json(requests_mock):
    # 400 maps to the SDK's malformed-JSON error.
    requests_mock.register_uri(
        "PUT", f"{url}/things/{thing_id}", json=json.dumps(thing), status_code=400
    )
    res = s.things.update(thing_id, token, thing)
    assert res.error.status == 1
    assert res.error.message == "Failed due to malformed JSON."
def test_delete_thing(requests_mock):
    # Deletion succeeds on a 204 response.
    requests_mock.register_uri(
        "DELETE", f"{url}/things/{thing_id}", status_code=204
    )
    res = s.things.delete(thing_id, token)
    assert res.error.status == 0
def test_delete_bad_thing_id(requests_mock):
    # 400 maps to the SDK's malformed-id error.
    requests_mock.register_uri(
        "DELETE", f"{url}/things/{thing_id}", status_code=400
    )
    res = s.things.delete(thing_id, token)
    assert res.error.status == 1
    assert res.error.message == "Failed due to malformed thing's ID."
def test_connect_thing(requests_mock):
    # Connecting succeeds: the SDK returns the server's response body.
    requests_mock.register_uri(
        "POST", f"{url}/connect", json=[channel_id, thing_id], status_code=201
    )
    res = s.things.connect(channel_id, thing_id, token)
    assert res.error.status == 0
    assert res.value == [channel_id, thing_id]
def test_connect_non_existing_entity(requests_mock):
    # 404 maps to the SDK's non-existent-entity error.
    requests_mock.register_uri(
        "POST", f"{url}/connect", json=[channel_id, thing_id], status_code=404
    )
    res = s.things.connect(channel_id, thing_id, token)
    assert res.error.status == 1
    assert res.error.message == "A non-existent entity request."
def test_disconnect_thing(requests_mock):
    """Disconnecting a thing from a channel succeeds on 204."""
    endpoint = url + "/channels/" + channel_id + "/things/" + thing_id
    requests_mock.register_uri("DELETE", endpoint, status_code=204)
    res = s.things.disconnect(channel_id, thing_id, token)
    assert res.error.status == 0
def test_disconnect_thing_or_channel_does_not_exist(requests_mock):
    """A 404 on disconnect is reported as a missing channel or thing."""
    endpoint = url + "/channels/" + channel_id + "/things/" + thing_id
    requests_mock.register_uri("DELETE", endpoint, status_code=404)
    res = s.things.disconnect(channel_id, thing_id, token)
    assert res.error.status == 1
    assert res.error.message == "Channel or thing does not exist."
def test_disconnect_things(requests_mock):
    """Bulk-disconnect a list of things from a list of channels.

    NOTE(review): the mock registers a 200 response yet the assertion expects
    error.status == 1 — every other test here maps a 2xx status to
    error.status == 0. This looks inconsistent; confirm whether the SDK's
    disconnect_things really treats 200 as an error or whether the expected
    value should be 0.
    """
    requests_mock.register_uri("PUT", url + "/disconnect/", status_code=200)
    r = s.things.disconnect_things([channel_id], [thing_id, thing_id1], token)
    assert r.error.status == 1
def test_disconnect_things_bad_json(requests_mock):
    """A 400 on bulk disconnect is reported as a malformed-ID failure."""
    requests_mock.register_uri("PUT", url + "/disconnect/", status_code=400)
    res = s.things.disconnect_things([channel_id], [thing_id, thing_id1], token)
    assert res.error.status == 1
    assert res.error.message == "Failed due to malformed thing's ID."
| 40.221477
| 182
| 0.714667
| 895
| 5,993
| 4.529609
| 0.094972
| 0.121362
| 0.088801
| 0.118402
| 0.882338
| 0.858905
| 0.834484
| 0.821164
| 0.813024
| 0.790824
| 0
| 0.026388
| 0.146337
| 5,993
| 148
| 183
| 40.493243
| 0.766028
| 0
| 0
| 0.429907
| 0
| 0
| 0.138328
| 0
| 0
| 0
| 0
| 0
| 0.336449
| 1
| 0.186916
| false
| 0
| 0.018692
| 0
| 0.205607
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f9f5b2d72e89d6a7f0ba6d4a25c8232c216b6e28
| 7,551
|
py
|
Python
|
app/site/views/auth.py
|
najens/item_catalog
|
1d5a3d6d2edc1b65bfab72c6a3a6644729ecf79d
|
[
"MIT"
] | null | null | null |
app/site/views/auth.py
|
najens/item_catalog
|
1d5a3d6d2edc1b65bfab72c6a3a6644729ecf79d
|
[
"MIT"
] | null | null | null |
app/site/views/auth.py
|
najens/item_catalog
|
1d5a3d6d2edc1b65bfab72c6a3a6644729ecf79d
|
[
"MIT"
] | null | null | null |
from flask import redirect, url_for, flash, make_response
from flask_dance.contrib.google import make_google_blueprint
from flask_dance.contrib.facebook import make_facebook_blueprint
from flask_dance.consumer.backend.sqla import SQLAlchemyBackend
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_login import current_user, login_user
from app import app, db
from app.models import User, OAuth
from sqlalchemy.orm.exc import NoResultFound
from datetime import timedelta
from flask_jwt_extended import (
create_access_token, create_refresh_token,
set_access_cookies, set_refresh_cookies
)
# Initialize Flask-Dance blueprints for the two OAuth providers.
# Credentials come from app config; after a successful OAuth dance the
# provider redirects to the given redirect_url, which the signal handlers
# below intercept.
google_blueprint = make_google_blueprint(
    client_id=app.config.get('GOOGLE_CLIENT_ID'),
    client_secret=app.config.get('GOOGLE_CLIENT_SECRET'),
    scope=["profile", "email"],
    redirect_url="/token/google"
)
facebook_blueprint = make_facebook_blueprint(
    client_id=app.config.get('FACEBOOK_APP_ID'),
    client_secret=app.config.get('FACEBOOK_APP_SECRET'),
    scope=["public_profile", "email"],
    redirect_url="/token/facebook"
)
# Persist provider OAuth tokens in the database via the OAuth model,
# associating each stored token with the currently logged-in user.
google_blueprint.backend = SQLAlchemyBackend(
    OAuth,
    db.session,
    user=current_user
)
facebook_blueprint.backend = SQLAlchemyBackend(
    OAuth,
    db.session,
    user=current_user
)
@oauth_authorized.connect_via(google_blueprint)
def google_logged_in(blueprint, token):
    """Log in user on successful Google authorization.

    Runs when Flask-Dance completes the Google OAuth flow. Looks up (or
    creates) the local user linked to the Google account, logs them in with
    Flask-Login, and returns a redirect response carrying fresh JWT cookies.
    Returns None/False on failure so no token is processed further.
    """
    # If user doesn't have valid oauth token, flash error and bail out.
    if not token:
        flash("Failed to log in {name}".format(name=blueprint.name))
        return
    # Get user data from the provider's userinfo endpoint.
    resp = blueprint.session.get("/oauth2/v2/userinfo")
    # If failed to get data, flash error.
    if not resp.ok:
        msg = "Failed to fetch user info from {name}.".format(
            name=blueprint.name
        )
        flash(msg, category="error")
        return False
    # Look up the stored OAuth link for this provider + provider user id.
    user_info = resp.json()
    user_id = str(user_info["id"])
    query = OAuth.query.filter_by(
        provider=blueprint.name,
        provider_user_id=user_id,
    )
    try:
        oauth = query.one()
    # No link yet: build a fresh OAuth record holding the provider token.
    except NoResultFound:
        oauth = OAuth(
            provider=blueprint.name,
            provider_user_id=user_id,
            token=token,
        )
    # Existing link with a local account: just log that user in.
    if oauth.user:
        login_user(oauth.user)
        flash("Successfully signed in with {name}.".format(
            name=blueprint.name
        ))
    # Otherwise create a local account and attach it to the OAuth record.
    else:
        user = User(
            name=user_info['name'],
            email=user_info['email'],
            picture=user_info['picture']
        )
        user.generate_public_id()
        # Associate the new local user account with the OAuth token account.
        oauth.user = user
        # Save and commit both records together.
        db.session.add_all([user, oauth])
        db.session.commit()
        # Log in the new local user account.
        login_user(user)
        flash("Successfully signed in with {name}.".format(
            name=blueprint.name
        ))
    # Issue JWTs for the session.
    # NOTE(review): a 20-second access-token lifetime looks like a
    # refresh-flow test value, not production config — confirm.
    expires = timedelta(seconds=20)
    access_token = create_access_token(
        identity=current_user.public_id,
        expires_delta=expires
    )
    refresh_token = create_refresh_token(identity=current_user.public_id)
    # Set JWT cookies on the response and redirect to the home page.
    response = make_response(redirect(url_for('site.index')))
    set_access_cookies(response, access_token)
    set_refresh_cookies(response, refresh_token)
    response.set_cookie('public_id', current_user.public_id)
    return response
@oauth_error.connect_via(google_blueprint)
def google_error(blueprint, error, error_description=None,
                 error_uri=None):
    """Flash a descriptive message when Google OAuth reports an error."""
    template = (
        "OAuth error from {name}! "
        "error={error} description={description} uri={uri}"
    )
    flash(
        template.format(
            name=blueprint.name,
            error=error,
            description=error_description,
            uri=error_uri,
        ),
        category="error",
    )
@oauth_authorized.connect_via(facebook_blueprint)
def facebook_logged_in(blueprint, token):
    """Log in user on successful Facebook authorization.

    Mirrors google_logged_in: resolve (or create) the local user linked to
    the Facebook account, log them in, and return a redirect response with
    fresh JWT cookies. Returns None/False on failure.
    """
    # If user doesn't have valid oauth token, flash error and bail out.
    if not token:
        flash("Failed to log in {name}".format(name=blueprint.name))
        return
    # Get user data from the Graph API (explicit field list).
    resp = blueprint.session.get("/me?fields=id,name,email,picture")
    # If failed to get data, flash error.
    if not resp.ok:
        msg = "Failed to fetch user info from {name}.".format(
            name=blueprint.name
        )
        flash(msg, category="error")
        return False
    # Look up the stored OAuth link for this provider + provider user id.
    user_info = resp.json()
    user_id = str(user_info["id"])
    query = OAuth.query.filter_by(
        provider=blueprint.name,
        provider_user_id=user_id,
    )
    try:
        oauth = query.one()
    # No link yet: build a fresh OAuth record holding the provider token.
    except NoResultFound:
        oauth = OAuth(
            provider=blueprint.name,
            provider_user_id=user_id,
            token=token,
        )
    # Existing link with a local account: just log that user in.
    if oauth.user:
        login_user(oauth.user)
        flash("Successfully signed in with {name}.".format(
            name=blueprint.name
        ))
    # Otherwise create a local account and attach it to the OAuth record.
    else:
        # Facebook nests the avatar URL under picture.data.url,
        # unlike Google's flat 'picture' field.
        user = User(
            name=user_info['name'],
            email=user_info['email'],
            picture=user_info['picture']['data']['url']
        )
        user.generate_public_id()
        # Associate the new local user account with the OAuth token record.
        oauth.user = user
        # Save and commit both records together.
        db.session.add_all([user, oauth])
        db.session.commit()
        # Log in the new local user account.
        login_user(user)
        flash("Successfully signed in with {name}.".format(
            name=blueprint.name
        ))
    # Issue JWTs for the session.
    # NOTE(review): 8-second expiry here vs 20 seconds in the Google handler
    # — inconsistent and both look like test values; confirm intent.
    expires = timedelta(seconds=8)
    access_token = create_access_token(
        identity=current_user.public_id,
        expires_delta=expires
    )
    refresh_token = create_refresh_token(identity=current_user.public_id)
    # Set JWT cookies on the response and redirect to the home page.
    response = make_response(redirect(url_for('site.index')))
    set_access_cookies(response, access_token)
    set_refresh_cookies(response, refresh_token)
    response.set_cookie('public_id', current_user.public_id)
    return response
@oauth_error.connect_via(facebook_blueprint)
def facebook_error(blueprint, error, error_description=None, error_uri=None):
    """Flash a descriptive message when Facebook OAuth reports an error."""
    details = {
        "name": blueprint.name,
        "error": error,
        "description": error_description,
        "uri": error_uri,
    }
    msg = (
        "OAuth error from {name}! "
        "error={error} description={description} uri={uri}"
    ).format(**details)
    flash(msg, category="error")
| 30.325301
| 77
| 0.662296
| 954
| 7,551
| 5.07652
| 0.146751
| 0.03758
| 0.039232
| 0.047491
| 0.834607
| 0.813545
| 0.765228
| 0.765228
| 0.765228
| 0.721867
| 0
| 0.000881
| 0.248576
| 7,551
| 248
| 78
| 30.447581
| 0.852661
| 0.195471
| 0
| 0.67052
| 0
| 0
| 0.114727
| 0.013614
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023121
| false
| 0
| 0.063584
| 0
| 0.121387
| 0.17341
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e60d85ed4f663d629a3b7ad1b0df81f3380b7e9b
| 133
|
py
|
Python
|
analyzer/data/__init__.py
|
frommwonderland/EManalysis
|
2787f063e2e83521fd6439d06a07f5521e43dc94
|
[
"MIT"
] | null | null | null |
analyzer/data/__init__.py
|
frommwonderland/EManalysis
|
2787f063e2e83521fd6439d06a07f5521e43dc94
|
[
"MIT"
] | null | null | null |
analyzer/data/__init__.py
|
frommwonderland/EManalysis
|
2787f063e2e83521fd6439d06a07f5521e43dc94
|
[
"MIT"
] | null | null | null |
# Package facade: re-export the dataset classes so callers can import them
# directly from this package rather than from the individual modules.
from .dataset import *
from .ptc_dataset import *
from .pair_dataset import *

# Explicit public API of the package.
__all__ = ["Dataloader", "PtcDataset", "PairDataset"]
| 22.166667
| 53
| 0.736842
| 15
| 133
| 6.133333
| 0.6
| 0.423913
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135338
| 133
| 5
| 54
| 26.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.233083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5579b445c13f3e098ef535b3d2c31091d2aba805
| 4,516
|
py
|
Python
|
remote-app-configuration/azure-configuration/apply-azure-configurations.py
|
digital-ECMT/acuity-deployment-scripts
|
df779d37073b7967f562f664e45450f549cb702e
|
[
"Apache-2.0"
] | null | null | null |
remote-app-configuration/azure-configuration/apply-azure-configurations.py
|
digital-ECMT/acuity-deployment-scripts
|
df779d37073b7967f562f664e45450f549cb702e
|
[
"Apache-2.0"
] | null | null | null |
remote-app-configuration/azure-configuration/apply-azure-configurations.py
|
digital-ECMT/acuity-deployment-scripts
|
df779d37073b7967f562f664e45450f549cb702e
|
[
"Apache-2.0"
] | 1
|
2022-03-28T15:25:39.000Z
|
2022-03-28T15:25:39.000Z
|
#!/usr/local/bin/python
import yaml
import os
current_dir = os.getcwd()


def _merge_secrets(credentials_file, config_file, apply_secrets):
    """Merge a credentials YAML into a spring-config YAML in place.

    Reads ./azure-credentials/<credentials_file>, loads the existing
    ./acuity-docker/acuity-spring-configs/<config_file>, lets apply_secrets
    copy values across, and writes the updated config back.
    """
    creds_path = '{0}/azure-credentials/{1}'.format(current_dir, credentials_file)
    config_path = '{0}/acuity-docker/acuity-spring-configs/{1}'.format(
        current_dir, config_file)
    with open(creds_path, 'r') as f:
        secrets = yaml.safe_load(f)
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    with open(config_path, 'w') as f:
        apply_secrets(config, secrets)
        # yaml.dump returns None when given a stream; nothing to capture.
        yaml.dump(config, f, default_flow_style=False)


def _apply_sso_client(config, secrets):
    """Copy clientId/clientSecret into both the resource and client sections."""
    for section in ('resource', 'client'):
        config['azure'][section]['clientId'] = secrets['clientId']
        config['azure'][section]['clientSecret'] = secrets['clientSecret']


def _apply_vasecurity(config, secrets):
    """VaSecurity additionally needs the redirect URL in both sections."""
    _apply_sso_client(config, secrets)
    config['azure']['resource']['preEstablishedRedirectUri'] = secrets['redirectUrl']
    config['azure']['client']['registeredRedirectUri'] = secrets['redirectUrl']


def _apply_storage(config, secrets):
    """Copy the Azure Storage account name and key."""
    config['azure']['storage']['account'] = secrets['account']
    config['azure']['storage']['key'] = secrets['key']


def _apply_application(config, secrets):
    """Copy the shared OAuth endpoint settings used by every application."""
    for section in ('resource', 'client'):
        config['azure'][section]['accessTokenUri'] = secrets['accessTokenUri']
        config['azure'][section]['userAuthorizationUri'] = secrets['userAuthorizationUri']
    config['azure']['logoutUrl'] = secrets['logoutUrl']
    config['azure']['authorityUri'] = secrets['authorityUri']


print("Setting up credentials for VaSecurity...")
_merge_secrets('vasecurity-sso.yml', 'vasecurity-azure-sso.yml', _apply_vasecurity)

print("Setting up credentials for Admin Ui...")
_merge_secrets('admin-sso.yml', 'admin-azure-sso.yml', _apply_sso_client)

print("setting up credentials for Vahub...")
_merge_secrets('vahub-sso.yml', 'vahub-azure-sso.yml', _apply_sso_client)

print("setting up credentials for Azure Storage...")
_merge_secrets('admin-azure-storage.yml', 'admin-azure-storage.yml', _apply_storage)

print("Setting up common config for application...")
_merge_secrets('application-azure-sso.yml', 'application-azure-sso.yml', _apply_application)
| 64.514286
| 116
| 0.747786
| 579
| 4,516
| 5.671848
| 0.105354
| 0.169001
| 0.133983
| 0.086784
| 0.883678
| 0.852314
| 0.761267
| 0.757917
| 0.757917
| 0.747868
| 0
| 0.003593
| 0.075509
| 4,516
| 69
| 117
| 65.449275
| 0.782994
| 0.004872
| 0
| 0.45
| 0
| 0
| 0.398931
| 0.193232
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.033333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e9503b79fab5081ac6e8efc26aae94e65353f024
| 219
|
py
|
Python
|
ocr_preprocess.py
|
lgk1910/ocr_pipeline
|
2489a5cde98f7d2425e33e97bec0e0bfb5ed9bb1
|
[
"MIT"
] | null | null | null |
ocr_preprocess.py
|
lgk1910/ocr_pipeline
|
2489a5cde98f7d2425e33e97bec0e0bfb5ed9bb1
|
[
"MIT"
] | null | null | null |
ocr_preprocess.py
|
lgk1910/ocr_pipeline
|
2489a5cde98f7d2425e33e97bec0e0bfb5ed9bb1
|
[
"MIT"
] | 1
|
2020-09-11T12:12:52.000Z
|
2020-09-11T12:12:52.000Z
|
import os
import shlex
import subprocess
def preprocessing(file_name):
    """Enhance an image under ./imgtxtenh/ for OCR via the imgtxtenh binary.

    Strips write/execute permission from the input file, then runs the
    bundled imgtxtenh tool, writing the enhanced copy next to the original
    with a "pre_" prefix. Exit codes of the shell commands are ignored
    (best-effort), matching the original behavior.

    file_name -- name of the image file inside ./imgtxtenh/ (not a path).
    """
    # shlex.quote prevents shell injection / breakage on names with
    # spaces or metacharacters; plain names pass through unchanged.
    src = shlex.quote(f"./imgtxtenh/{file_name}")
    dst = shlex.quote(f"./imgtxtenh/pre_{file_name}")
    os.system(f"chmod -wx {src}")
    os.system(f"./imgtxtenh/imgtxtenh {src} -p {dst}")
| 27.375
| 94
| 0.739726
| 31
| 219
| 5.064516
| 0.483871
| 0.203822
| 0.127389
| 0.203822
| 0.216561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 219
| 7
| 95
| 31.285714
| 0.805128
| 0
| 0
| 0
| 0
| 0
| 0.497717
| 0.429224
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e9605edbd3faab327f7fdaf5e77f236f28020d64
| 28
|
py
|
Python
|
py/src/streamcorpus_filter/__init__.py
|
streamcorpus/streamcorpus-filter
|
7687aa25260d95b861efa63d7369e6dcd6bb20a4
|
[
"MIT"
] | 1
|
2015-11-13T15:48:23.000Z
|
2015-11-13T15:48:23.000Z
|
py/src/streamcorpus_filter/__init__.py
|
streamcorpus/streamcorpus-filter
|
7687aa25260d95b861efa63d7369e6dcd6bb20a4
|
[
"MIT"
] | null | null | null |
py/src/streamcorpus_filter/__init__.py
|
streamcorpus/streamcorpus-filter
|
7687aa25260d95b861efa63d7369e6dcd6bb20a4
|
[
"MIT"
] | null | null | null |
from _filter import Filter
| 9.333333
| 26
| 0.821429
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 2
| 27
| 14
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e9b0573ee2a5be6e7cf6234f5a56c05d3bedeaf6
| 115
|
py
|
Python
|
config.py
|
ponyatov/cp
|
a6b4f4fb3d086a53bf66f1ad8782a659eecbd8f2
|
[
"MIT"
] | null | null | null |
config.py
|
ponyatov/cp
|
a6b4f4fb3d086a53bf66f1ad8782a659eecbd8f2
|
[
"MIT"
] | null | null | null |
config.py
|
ponyatov/cp
|
a6b4f4fb3d086a53bf66f1ad8782a659eecbd8f2
|
[
"MIT"
] | null | null | null |
# Application signing secret (bytes).
# NOTE(review): a secret key committed to source control should be rotated
# and loaded from the environment instead — confirm before deploying.
SECRET_KEY = b'V\x1d\xd4\xec\xf2\xc1u\x1bij\x9d\xf2\xcf\\\x02\x9f_'
# Interface and port the server binds to (loopback only).
HOST = "127.0.0.1"
PORT = 12345
| 28.75
| 68
| 0.591304
| 22
| 115
| 3
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.208696
| 115
| 3
| 69
| 38.333333
| 0.494505
| 0
| 0
| 0
| 0
| 0.333333
| 0.521739
| 0.443478
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7594966acdc368a1fb8dc0f563d0f9217177c309
| 6,793
|
py
|
Python
|
tests/test_pllcalcs.py
|
bobbyjsmith11/pll
|
6a4e3b2573e5af341e83b09acbf7473299ad8328
|
[
"MIT"
] | null | null | null |
tests/test_pllcalcs.py
|
bobbyjsmith11/pll
|
6a4e3b2573e5af341e83b09acbf7473299ad8328
|
[
"MIT"
] | null | null | null |
tests/test_pllcalcs.py
|
bobbyjsmith11/pll
|
6a4e3b2573e5af341e83b09acbf7473299ad8328
|
[
"MIT"
] | 2
|
2019-07-18T08:23:14.000Z
|
2020-12-29T20:54:13.000Z
|
from unittest import TestCase
from pll.pll_calcs import *
class TestGeneralFunctions(TestCase):
    """Unit tests for the free functions in pll.pll_calcs."""

    def test_interp_linear_1(self):
        """ test the linear interpolator with a value within the x array
        """
        test_var = interp_linear([10,20], [1,2], 12)
        self.assertAlmostEqual(1.2, test_var[1])

    def test_interp_linear_2(self):
        """ test the linear interpolator with a value below the x array
        """
        test_var = interp_linear([1,2,3], [1,0,3], 1.5)
        self.assertAlmostEqual(0.5, test_var[1])

    # Renamed from test_interp_linear_2: the duplicate name silently
    # shadowed the previous test, so the below-range case never ran.
    def test_interp_linear_3(self):
        """ test the linear interpolator with a value above the x array
        """
        test_var = interp_linear([1,2,3], [1,2,3], 3.5)
        self.assertAlmostEqual(3.5, test_var[1])

    def test_freq_points_per_decade(self):
        """ tests that the get_freq_points_per_decade() function returns
        the correct array
        """
        f_good = list(range(10,100,10))
        f_good.extend(range(100,1000,100))
        f_good.extend(range(1000,11000,1000))
        [float(i) for i in f_good]
        f_test = get_freq_points_per_decade(10,10000,10)
        self.assertEqual( set(f_good), set(f_test))
class Test2ndOrderPassive(TestCase):
    """ The only real function of the class is to provide component values.
    Testing this function will indirectly test all underlying functions
    of the class.
    """
    def test_2nd_order_passive_phase_margin(self):
        """ Tests full operation of PllSecondOrderPassive.
        Instantiate the class with hard-coded design targets, derive real
        component values, re-simulate the loop from those components, and
        check that the achieved phase margin (pm) matches the target.
        """
        # Design targets: loop bandwidth fc [Hz] and phase margin pm [deg];
        # gamma, kphi, kvco units are as defined by the pll library.
        fc = 100e3
        pm = 45.0
        gamma = 1.024
        kphi = 4.69e-3
        kvco = 10e6
        # Simulation sweep range and resolution.
        fstart = 1
        fstop = 100e6
        ptsPerDec = 100
        N = 200
        R = 4
        pll = PllSecondOrderPassive( fc,
                                     pm,
                                     kphi,
                                     kvco,
                                     N,
                                     gamma=gamma )
        d_test = pll.calc_components()
        pm_test, fc_test = get_pm_fc_from_actual_filter_components(d_test, fstart, fstop, ptsPerDec, kphi, kvco, N, R)
        self.assertAlmostEqual(pm,pm_test)
    def test_2nd_order_passive_loop_bandwidth(self):
        """ Tests full operation of PllSecondOrderPassive.
        Same setup as the phase-margin test above, but checks that the
        achieved cutoff frequency (fc) matches the target.
        """
        fc = 100e3
        pm = 45.0
        gamma = 1.024
        kphi = 4.69e-3
        kvco = 10e6
        fstart = 1
        fstop = 100e6
        ptsPerDec = 100
        N = 200
        R = 4
        pll = PllSecondOrderPassive( fc,
                                     pm,
                                     kphi,
                                     kvco,
                                     N,
                                     gamma=gamma )
        d_test = pll.calc_components()
        pm_test, fc_test = get_pm_fc_from_actual_filter_components(d_test, fstart, fstop, ptsPerDec, kphi, kvco, N, R)
        self.assertAlmostEqual(fc,fc_test)
class Test3rdOrderPassive(TestCase):
    """ The only real function of the class is to provide component values.
    Testing this function will indirectly test all underlying functions
    of the class.
    """
    def test_3rd_order_passive_phase_margin(self):
        """ Tests full operation of PllThirdOrderPassive.
        Instantiate the class with hard-coded design targets, derive real
        component values, re-simulate the loop from those components, and
        check that the achieved phase margin (pm) matches the target.
        """
        # Design targets: loop bandwidth fc [Hz] and phase margin pm [deg];
        # kphi, kvco, gamma, t31 units are as defined by the pll library.
        fc = 100e3
        pm = 45.0
        kphi = 5e-3
        kvco = 10e6
        N = 200
        # Simulation sweep range and resolution.
        fstart = 1
        fstop = 100e6
        ptsPerDec = 100
        R = 1
        pll = PllThirdOrderPassive(fc,
                                   pm,
                                   kphi,
                                   kvco,
                                   N,
                                   gamma=1.024,
                                   t31=0.6)
        d_test = pll.calc_components()
        pm_test, fc_test = get_pm_fc_from_actual_filter_components(d_test, fstart, fstop, ptsPerDec, kphi, kvco, N, R)
        self.assertAlmostEqual(pm, pm_test)
    def test_3rd_order_passive_loop_bandwidth(self):
        """ Tests full operation of PllThirdOrderPassive.
        Same setup as the phase-margin test above, but checks that the
        achieved cutoff frequency (fc) matches the target.
        """
        fc = 100e3
        pm = 45.0
        kphi = 5e-3
        kvco = 10e6
        N = 200
        fstart = 1
        fstop = 100e6
        ptsPerDec = 100
        R = 1
        pll = PllThirdOrderPassive( fc,
                                    pm,
                                    kphi,
                                    kvco,
                                    N,
                                    gamma=1.024,
                                    t31=0.6)
        d_test = pll.calc_components()
        pm_test, fc_test = get_pm_fc_from_actual_filter_components(d_test, fstart, fstop, ptsPerDec, kphi, kvco, N, R)
        self.assertAlmostEqual(fc,fc_test)
############ Helper functions ############
def get_pm_fc_from_actual_filter_components(d, fstart, fstop, ptsPerDec, kphi, kvco, N, R):
    """Simulate the loop using actual filter component values.

    Parameters
        d (dict) - returned from a call to calc_components in a pll class
    Returns
        tuple(pm (float), fc (float))
    """
    # Pull the component values into the filter description expected
    # by simulatePll, marking the topology as passive.
    flt = {key: d[key] for key in ('c1', 'c2', 'c3', 'c4', 'r2', 'r3', 'r4')}
    flt['flt_type'] = "passive"
    f, g, p, fz, pz, ref_cl, vco_cl = simulatePll(
        fstart, fstop, ptsPerDec, kphi, kvco, N, R, filt=flt
    )
    # pz/fz are the phase margin and cutoff frequency of the simulated loop.
    return pz, fz
| 33.628713
| 118
| 0.504637
| 764
| 6,793
| 4.335079
| 0.198953
| 0.024155
| 0.027174
| 0.043478
| 0.788043
| 0.771135
| 0.766002
| 0.739432
| 0.717391
| 0.688406
| 0
| 0.055933
| 0.415722
| 6,793
| 201
| 119
| 33.79602
| 0.778534
| 0.259826
| 0
| 0.669421
| 0
| 0
| 0.009224
| 0
| 0
| 0
| 0
| 0
| 0.066116
| 1
| 0.07438
| false
| 0.090909
| 0.016529
| 0
| 0.123967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
75ad78373165893431c0a6fd9b8cc1f752849658
| 134
|
py
|
Python
|
backend/backend/mixins/__init__.py
|
ProjetoALES/ales-website
|
9dc5b460f5e780a1221d0ed5071043f088082395
|
[
"MIT"
] | null | null | null |
backend/backend/mixins/__init__.py
|
ProjetoALES/ales-website
|
9dc5b460f5e780a1221d0ed5071043f088082395
|
[
"MIT"
] | 19
|
2020-02-25T05:29:39.000Z
|
2021-09-22T18:38:26.000Z
|
backend/backend/mixins/__init__.py
|
ProjetoALES/ales-website
|
9dc5b460f5e780a1221d0ed5071043f088082395
|
[
"MIT"
] | null | null | null |
from .prefetch import PrefetchMixin, PrefetchQuerysetModelMixin
from .queryfields import QueryFieldsMixin, QueryFieldsPermissionMixin
| 44.666667
| 69
| 0.895522
| 10
| 134
| 12
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 134
| 2
| 70
| 67
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
75db0c60ff0f9a68f6eb9b271bcc01cb05dfb323
| 2,385
|
py
|
Python
|
tests/modules/auth/resources/test_getting_oauth2clients_info.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 1,420
|
2015-11-20T01:25:14.000Z
|
2022-03-22T03:51:33.000Z
|
tests/modules/auth/resources/test_getting_oauth2clients_info.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 151
|
2016-01-07T09:11:42.000Z
|
2020-11-17T08:37:07.000Z
|
tests/modules/auth/resources/test_getting_oauth2clients_info.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 389
|
2015-11-23T01:14:31.000Z
|
2022-02-07T08:23:11.000Z
|
# encoding: utf-8
# pylint: disable=missing-docstring
import pytest
@pytest.mark.parametrize('auth_scopes', (
    ['auth:read'],
    ['auth:read', 'auth:write'],
))
def test_getting_list_of_oauth2_clients_by_authorized_user(
        flask_app_client, regular_user, regular_user_oauth2_client, auth_scopes
):
    """Any scope set containing auth:read may list a user's OAuth2 clients."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
        response = flask_app_client.get(
            '/api/v1/auth/oauth2_clients/',
            query_string={'user_id': regular_user.id}
        )

    assert response.status_code == 200
    assert response.content_type == 'application/json'
    # The payload is a list of client records; the first one must be the
    # client created for regular_user by the fixture.
    assert isinstance(response.json, list)
    assert set(response.json[0].keys()) >= {'client_id'}
    assert response.json[0]['client_id'] == regular_user_oauth2_client.client_id
@pytest.mark.parametrize('auth_scopes', (
    [],
    ['users:read'],
    ['auth:write'],
))
def test_getting_list_of_oauth2_clients_by_unauthorized_user_must_fail(
        flask_app_client,
        regular_user,
        auth_scopes
):
    """Without the auth:read scope the listing endpoint must return 401."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
        response = flask_app_client.get('/api/v1/auth/oauth2_clients/')

    assert response.status_code == 401
    assert response.content_type == 'application/json'
    # Error responses carry at least a status and a message.
    assert set(response.json.keys()) >= {'status', 'message'}
def test_getting_list_of_oauth2_clients_should_fail_if_no_user_id(
        flask_app_client, regular_user
):
    """Omitting the mandatory user_id query parameter yields HTTP 422."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=['auth:read']):
        resp = flask_app_client.get('/api/v1/auth/oauth2_clients/')

    assert resp.status_code == 422
    assert resp.content_type == 'application/json'
    assert set(resp.json.keys()) >= {'status', 'message'}
def test_getting_list_of_oauth2_clients_should_fail_if_wrong_user_id(
        flask_app_client, regular_user
):
    """Passing a user_id that does not exist yields HTTP 422."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=['auth:read']):
        resp = flask_app_client.get(
            '/api/v1/auth/oauth2_clients/',
            query_string={'user_id': 100500}
        )

    assert resp.status_code == 422
    assert resp.content_type == 'application/json'
    assert set(resp.json.keys()) >= {'status', 'message'}
| 33.591549
| 80
| 0.704822
| 307
| 2,385
| 5.123779
| 0.208469
| 0.06103
| 0.106802
| 0.066751
| 0.806103
| 0.734901
| 0.734901
| 0.705658
| 0.705658
| 0.705658
| 0
| 0.017713
| 0.171488
| 2,385
| 70
| 81
| 34.071429
| 0.77834
| 0.069182
| 0
| 0.557692
| 0
| 0
| 0.151378
| 0.05061
| 0
| 0
| 0
| 0
| 0.269231
| 1
| 0.076923
| false
| 0
| 0.019231
| 0
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f95977b8471e5d5dba7affd0d35a53facc3ef5e7
| 23,110
|
py
|
Python
|
optimization/second_sdEta_mjj_optimization/lumi_and_kin_plots/four_cuts_lum1000/Output/Histos/MadAnalysis5job_0/selection_11.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
optimization/second_sdEta_mjj_optimization/lumi_and_kin_plots/four_cuts_lum1000/Output/Histos/MadAnalysis5job_0/selection_11.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
optimization/second_sdEta_mjj_optimization/lumi_and_kin_plots/four_cuts_lum1000/Output/Histos/MadAnalysis5job_0/selection_11.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
def selection_11():
    """Draw the stacked p_T(a_1) histogram for selection 11 and save it.

    Auto-generated by MadAnalysis5; rewritten to drive the 17 stacked
    ``hist`` calls from a table instead of 17 copy-pasted calls, and to use
    ``density=False`` instead of ``normed=False`` (``normed`` was removed in
    matplotlib 3.1, so the original crashes on modern matplotlib).
    Writes PNG/EPS images under ../../HTML, ../../PDF and ../../DVI.
    """
    # Library import (kept local: this file is a standalone generated script)
    import numpy
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    # Library version (captured for provenance, as in the generated original)
    matplotlib_version = matplotlib.__version__
    numpy_version = numpy.__version__
    # Histo binning: 80 bins of width 25 GeV over [0, 2000]
    xBinning = numpy.linspace(0.0,2000.0,81,endpoint=True)
    # Creating data sequence: middle of each bin
    xData = numpy.array([12.5,37.5,62.5,87.5,112.5,137.5,162.5,187.5,212.5,237.5,262.5,287.5,312.5,337.5,362.5,387.5,412.5,437.5,462.5,487.5,512.5,537.5,562.5,587.5,612.5,637.5,662.5,687.5,712.5,737.5,762.5,787.5,812.5,837.5,862.5,887.5,912.5,937.5,962.5,987.5,1012.5,1037.5,1062.5,1087.5,1112.5,1137.5,1162.5,1187.5,1212.5,1237.5,1262.5,1287.5,1312.5,1337.5,1362.5,1387.5,1412.5,1437.5,1462.5,1487.5,1512.5,1537.5,1562.5,1587.5,1612.5,1637.5,1662.5,1687.5,1712.5,1737.5,1762.5,1787.5,1812.5,1837.5,1862.5,1887.5,1912.5,1937.5,1962.5,1987.5])
    # Per-dataset bin weights (one array per MC sample), copied verbatim
    y12_PT_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,643.794754682,695.073111248,744.509169373,786.268834001,785.347634782,775.828942844,779.41123981,768.86894874,741.950371541,742.257471281,697.529609167,671.429731274,647.888751214,616.77377757,586.272803405,561.708324212,509.815768167,497.021779004,467.544403973,435.917630762,404.086057724,384.74157411,361.712293616,339.604312343,319.543229335,301.836344334,283.412959939,254.242584647,241.857995137,227.017007708,210.94772132,201.019529729,174.612652097,167.038658512,159.362265014,148.922273857,137.356483654,127.223692237,115.146102467,111.359105675,94.7780497195,88.0228054414,90.0698437075,80.0393422037,77.787594111,73.0793980991,61.1042082425,57.5218812769,54.553673791,49.7431178658,48.7195987327,47.798429513,41.1455451482,40.0196761019,32.7526722573,31.4220933843,29.9891645981,26.8162472857,25.588028326,22.5174609269,23.23393032,18.8327840479,18.2186745681,17.7069150016,17.3998552617,16.0692763887,14.2269379493,11.4634302901])
    y12_PT_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.607364400582,0.304383383345,0.0,0.0,0.0,0.0,0.303284616073,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,8.53572065053,8.53056076975,2.25913319784,2.00671637508,2.25873239028,1.50654573295,0.754409693061,1.00324786216,0.250852330374,0.753563761855,0.0,0.502787770858,0.502232528432,0.0,0.0,0.251741751266,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,34.5110230853,27.2248013435,20.9030252314,16.5031121208,12.3768237293,11.4137593757,8.6676909035,5.08668769453,3.84724317881,2.33779674693,1.65086778513,1.23721212964,1.09972639789,1.23797589412,0.687181450961,0.412702778045,0.550911343817,0.27479890555,0.137592679095,0.0,0.0,0.136980245608,0.137603648053,0.137360705947,0.0,0.137118576356,0.137399503557,0.0,0.137201554492,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.137855426264,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,13.7176595311,12.2357347582,12.6303909478,11.4235250249,10.7081856295,9.5237120162,8.48693886266,6.73550471667,6.01984766426,4.36632601138,3.8731320355,2.66436618336,2.36878388877,1.45526463698,1.06067258001,0.715792733009,0.764942479741,0.51805629371,0.271410304431,0.271613123882,0.1480486799,0.271360701848,0.0987090103881,0.0493020416942,0.0740122850194,0.0493496401326,0.0740834522005,0.0,0.0246914845301,0.0,0.0246054866777,0.0739378711238,0.0494008760939,0.0245977206167,0.0,0.0,0.0,0.0246357192017,0.0,0.0,0.0,0.0493758042427,0.0,0.0245977206167,0.0,0.0,0.0,0.0,0.0,0.0,0.024669859808,0.0,0.0,0.0,0.0246572637581,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84193524811,2.70369746826,2.54602123535,2.59016061777,2.65310527563,2.54582317979,2.4451559396,2.45754741579,2.36307191257,2.22466608559,2.30013925807,2.03588112574,1.82752967691,1.63215186757,1.4492695636,1.47455965823,1.11547692475,0.96433132387,0.731091092987,0.542030055693,0.321386558572,0.3276505158,0.207982845428,0.145000076872,0.094557276153,0.0756478314864,0.132360731156,0.0189014624273,0.0441186366013,0.0378127376074,0.0252356793652,0.0315288447869,0.0126085670814,0.00630327025643,0.0125964236748,0.00630896085281,0.00629427373263,0.0,0.00629036663658,0.0,0.00629036663658,0.00630205891661,0.0,0.0,0.00630215494355,0.00630044646427,0.0063046646476,0.0,0.0,0.0,0.0,0.0,0.0,0.0062989540456,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.637432524088,0.565760516298,0.493686624814,0.522603756063,0.551114504921,0.593824144554,0.529485514451,0.672474632741,0.622778515026,0.486165801375,0.665694095059,0.508465097856,0.572844216242,0.55104602474,0.508463098435,0.658463437645,0.522566516841,0.587143328014,0.479720416699,0.551088262516,0.515552296788,0.465353324602,0.493989287223,0.300672983852,0.393370906646,0.400709032958,0.300882673167,0.214734454859,0.257431997993,0.214773018698,0.193232352666,0.121568467525,0.10011177719,0.135912041429,0.0572050945819,0.0214477379258,0.02137370935,0.0214593595624,0.00711351131702,0.00715459442738,0.0,0.0215581334768,0.0,0.0,0.0,0.0214462783482,0.0,0.0,0.00715459442738,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0415780313724,0.0421169462291,0.0334803002566,0.0383323360771,0.025911551435,0.0334767588999,0.0302056140326,0.0307324065529,0.021598126627,0.0291584830619,0.027548705954,0.0296988228432,0.0264616351857,0.0210626693079,0.0269955313742,0.0253778752716,0.0215950253205,0.0199885800216,0.0259272046505,0.0210563933533,0.0215955491899,0.0318629375208,0.0210744773226,0.0269614170035,0.0259143069876,0.0243048127692,0.0248366134803,0.0269902612488,0.0243074949801,0.0248472689825,0.0318513809634,0.0366841488699,0.0329532772337,0.0361845137356,0.0329368591689,0.0425292837783,0.036724696356,0.0356355091556,0.0421160975608,0.0285474837855,0.0313200831668,0.0302469682774,0.0216024747424,0.023182621801,0.0199796847203,0.0129554875894,0.0113402727178,0.00864377373857,0.00752037368001,0.00594365901323,0.00647549010882,0.00216232514,0.00216090859733,0.00323955135586,0.000539568855134,0.0,0.0,0.00108051087655,0.000539834875979,0.0,0.0,0.0,0.000541879642746,0.000540552053091,0.0,0.0,0.0,0.0])
    y12_PT_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00284341803076,0.00212835953921,0.00141996951266,0.0,0.0021308693083,0.00212187593815,0.0007064333644,0.0,0.00142115776452,0.00284053704898,0.00284063616277,0.00282800899147,0.00212558026971,0.0,0.0021313604227,0.00354858630227,0.00284080432212,0.00142066702133,0.000708746761964,0.000708746761964,0.00142043501339,0.00141706180356,0.0,0.000708746761964,0.0,0.00141795865345,0.000710202286972,0.0,0.00142142503766,0.0,0.00141795865345,0.00141614193848,0.00283091076117,0.000710731636286,0.00142066702133,0.00213037002721,0.000709212262694,0.00282992036568,0.00212862681236,0.0028426214083,0.00212987891281,0.000709212262694,0.00284311252271,0.00491506406445,0.00142066702133,0.000709935013826,0.00351414852862,0.00283515557842,0.00142093429447,0.00284311252271,0.00709384876601,0.00425558984939,0.00355083882095,0.00213033995898,0.00141947839825,0.0028322237405,0.00354090591145,0.00426249069353,0.00426332220999,0.00354489199345,0.000711222750692,0.00351165435047,0.0035496245842,0.0,0.00141994389898,0.00139199232488,0.00283687800537,0.00211681185445])
    y12_PT_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,26.3028425168,52.6896670986,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,80.6379257084,46.0895111191,109.426537181,74.8403296421,69.0981549098,34.5510852654,11.5034668947,11.5112099343,17.2419046017,0.0,5.75321384735,5.75900479554,5.76682949247,0.0,0.0,0.0,0.0,0.0,0.0,0.0,5.76682949247,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,28.3784645814,20.7672556701,24.922020471,26.9955272656,22.8559379137,18.690267562,22.1553861053,22.1547994751,21.457738597,17.3107057739,18.0005059017,20.0795424069,12.453292683,7.61689249202,4.84232900233,2.07475025825,2.07571098535,5.54068719864,2.76601119137,1.38751303099,0.690953769718,0.691467119188,0.0,0.0,0.69174043114,0.693159883778,0.0,0.69326768909,0.692323503047,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.01657788107,4.78910221357,3.02177701266,2.77277163859,2.52037528834,3.02628921095,3.27657511472,2.52054218202,3.77788428891,4.28627430687,4.79246891431,4.78955434373,5.54400330366,5.54112514625,8.31917669406,4.53627649139,5.03975224902,5.29397229205,3.52929008019,2.52074700609,1.76374457282,1.00897426668,1.26080499907,1.00793224322,0.756762864977,0.251073945401,1.51337067055,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.252014163719,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.25243792195,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.41411689559,0.919996545983,0.920042715324,0.707379805646,0.777729280825,0.707556691933,0.424199479811,0.494673516024,0.353676484858,0.636344234823,0.848655007091,0.778346026271,0.848136275309,0.77798619397,0.990793094281,0.777840856732,0.566146829911,1.13081123923,1.34383369265,1.69771399592,1.3442001618,1.41441988189,1.97987599922,2.61882312683,2.19290518567,1.69761780979,1.2729050808,0.848584598846,1.06108783955,1.13093916678,0.989980321508,0.424215735266,0.21208694715,0.707055177468,0.424635299152,0.283099529489,0.353453525416,0.424492751312,0.0707530336935,0.0,0.0707744543439,0.141340511825,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.152521877674,0.0767407877806,0.0,0.0766834659241,0.0,0.0,0.0,0.0379318521405,0.0381124159885,0.0378163515543,0.0,0.0377124039815,0.0,0.076381314736,0.0,0.0762156427516,0.0384073871709,0.0,0.0380407341205,0.0,0.0378163515543,0.0,0.0,0.0,0.0765038475911,0.0381611100193,0.0377124039815,0.0386352563243,0.0,0.0380651697779,0.038023744395,0.0,0.0,0.0760645080629,0.0380407341205,0.0760112046458,0.114448907396,0.152329288055,0.0378163515543,0.11351506344,0.228594511258,0.0760944690745,0.113622793073,0.038023744395,0.11408500225,0.0377124039815,0.0,0.0383334006097,0.0383334006097,0.0764566013805,0.0377124039815,0.0384553720652,0.0382882818082,0.0,0.0377789150635,0.0384553720652,0.0,0.0,0.0,0.0,0.0,0.0759287675428,0.0,0.0,0.0,0.0,0.0,0.0])
    y12_PT_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00451382575002,0.00451258417556,0.0,0.0,0.00450009047166,0.00451565346312,0.0,0.0,0.00451565346312,0.0,0.0,0.0,0.00451565346312,0.0,0.0,0.0,0.00451835797802,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00452425593793,0.0,0.00449996727667,0.00451005097868,0.0,0.00903560238887,0.00450386428053,0.00451367849351,0.00451334548207,0.00904808646817,0.0135272617037,0.031617271041,0.00451642824408,0.00901014145034,0.0180492491215,0.0,0.00902776699534,0.0,0.00903285167585,0.0,0.00450507505625,0.00452425593793,0.0180287872045,0.0,0.0,0.0])
    # Creating a new Canvas
    fig = plt.figure(figsize=(12,6),dpi=80)
    frame = gridspec.GridSpec(1,1,right=0.7)
    pad = fig.add_subplot(frame[0])
    # Stack definition: dataset k in the stack of depth d sums weights 0..d-1.
    # The first hist call draws the full 17-sample stack, each following call
    # drops the last sample, down to the signal alone -- identical output to
    # the original 17 copy-pasted hist calls.
    weight_sets = [
        y12_PT_0_weights, y12_PT_1_weights, y12_PT_2_weights, y12_PT_3_weights,
        y12_PT_4_weights, y12_PT_5_weights, y12_PT_6_weights, y12_PT_7_weights,
        y12_PT_8_weights, y12_PT_9_weights, y12_PT_10_weights, y12_PT_11_weights,
        y12_PT_12_weights, y12_PT_13_weights, y12_PT_14_weights, y12_PT_15_weights,
        y12_PT_16_weights,
    ]
    # (label, edge color) for each stack depth, from 17 samples down to 1
    stack_styles = [
        (r"$bg\_dip\_1600\_inf$", "#e5e5e5"),
        (r"$bg\_dip\_1200\_1600$", "#f2f2f2"),
        (r"$bg\_dip\_800\_1200$", "#ccc6aa"),
        (r"$bg\_dip\_600\_800$", "#ccc6aa"),
        (r"$bg\_dip\_400\_600$", "#c1bfa8"),
        (r"$bg\_dip\_200\_400$", "#bab5a3"),
        (r"$bg\_dip\_100\_200$", "#b2a596"),
        (r"$bg\_dip\_0\_100$", "#b7a39b"),
        (r"$bg\_vbf\_1600\_inf$", "#ad998c"),
        (r"$bg\_vbf\_1200\_1600$", "#9b8e82"),
        (r"$bg\_vbf\_800\_1200$", "#876656"),
        (r"$bg\_vbf\_600\_800$", "#afcec6"),
        (r"$bg\_vbf\_400\_600$", "#84c1a3"),
        (r"$bg\_vbf\_200\_400$", "#89a8a0"),
        (r"$bg\_vbf\_100\_200$", "#829e8c"),
        (r"$bg\_vbf\_0\_100$", "#adbcc6"),
        (r"$signal$", "#7a8e99"),
    ]
    for depth, (label, edge) in zip(range(len(weight_sets), 0, -1), stack_styles):
        # density=False replaces the removed matplotlib `normed=False` kwarg
        pad.hist(x=xData, bins=xBinning, weights=sum(weight_sets[:depth]),
                 label=label, histtype="step", rwidth=1.0,
                 color=None, edgecolor=edge, linewidth=1, linestyle="solid",
                 bottom=None, cumulative=False, density=False,
                 align="mid", orientation="vertical")
    # Axis
    plt.rc('text',usetex=False)
    plt.xlabel(r"p_{T} [ a_{1} ] ",\
      fontsize=16,color="black")
    plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 1000.0\ \mathrm{fb}^{-1})$ ",\
      fontsize=16,color="black")
    # Boundary of y-axis: 10% headroom above the tallest bin of the full stack
    total = sum(weight_sets)
    ymax = total.max()*1.1
    ymin = 0  # linear scale
    #ymin=min([x for x in total if x])/100. # log scale
    plt.gca().set_ylim(ymin,ymax)
    # Log/Linear scale for X-axis
    plt.gca().set_xscale("linear")
    #plt.gca().set_xscale("log",nonposx="clip")
    # Log/Linear scale for Y-axis
    plt.gca().set_yscale("linear")
    #plt.gca().set_yscale("log",nonposy="clip")
    # Legend
    plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
    # Saving the image
    plt.savefig('../../HTML/MadAnalysis5job_0/selection_11.png')
    # NOTE(review): the generated script saves a .png into the PDF directory;
    # kept as-is to preserve behavior -- confirm whether .pdf was intended.
    plt.savefig('../../PDF/MadAnalysis5job_0/selection_11.png')
    plt.savefig('../../DVI/MadAnalysis5job_0/selection_11.eps')
# Entry point: regenerate the histogram image files when run as a script.
if __name__ == "__main__":
    selection_11()
| 119.123711
| 1,140
| 0.740545
| 4,668
| 23,110
| 3.50964
| 0.176949
| 0.199719
| 0.282549
| 0.354514
| 0.490692
| 0.475493
| 0.470366
| 0.461149
| 0.458829
| 0.456144
| 0
| 0.463747
| 0.071398
| 23,110
| 193
| 1,141
| 119.740933
| 0.299674
| 0.056988
| 0
| 0.185841
| 0
| 0.00885
| 0.047525
| 0.00933
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00885
| false
| 0
| 0.035398
| 0
| 0.044248
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f986ee0af33dbd2bf1058829bd2f036e2ffd6cff
| 47,756
|
py
|
Python
|
src/backend/marsha/core/tests/test_api_timed_text_track.py
|
marin-leonard/marsha
|
b5d6bf98fda27acd3a08577b82dd98bcd39bfd8d
|
[
"MIT"
] | null | null | null |
src/backend/marsha/core/tests/test_api_timed_text_track.py
|
marin-leonard/marsha
|
b5d6bf98fda27acd3a08577b82dd98bcd39bfd8d
|
[
"MIT"
] | null | null | null |
src/backend/marsha/core/tests/test_api_timed_text_track.py
|
marin-leonard/marsha
|
b5d6bf98fda27acd3a08577b82dd98bcd39bfd8d
|
[
"MIT"
] | null | null | null |
"""Tests for the TimedTextTrack API of the Marsha project."""
from datetime import datetime
import json
import random
from unittest import mock
from django.test import TestCase, override_settings
import pytz
from rest_framework_simplejwt.tokens import AccessToken
from ..api import timezone
from ..factories import TimedTextTrackFactory, UserFactory, VideoFactory
from ..models import TimedTextTrack
from .test_api_video import RSA_KEY_MOCK
# We don't enforce arguments documentation in tests
# pylint: disable=unused-argument,too-many-lines
class TimedTextTrackAPITest(TestCase):
"""Test the API of the timed text track object."""
maxDiff = None
@override_settings(ALL_LANGUAGES=(("af", "Afrikaans"), ("ast", "Asturian")))
def test_api_timed_text_track_options_as_instructor(self):
    """The details of choices fields should be available via http options for an instructor."""
    timed_text_track = TimedTextTrackFactory(language="af")
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]

    response = self.client.options(
        "/api/timedtexttracks/", HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}"
    )

    payload = json.loads(response.content)
    expected_modes = [
        {"value": "st", "display_name": "Subtitle"},
        {"value": "ts", "display_name": "Transcript"},
        {"value": "cc", "display_name": "Closed captioning"},
    ]
    expected_languages = [
        {"value": "af", "display_name": "Afrikaans"},
        {"value": "ast", "display_name": "Asturian"},
    ]
    self.assertEqual(payload["actions"]["POST"]["mode"]["choices"], expected_modes)
    self.assertEqual(
        payload["actions"]["POST"]["language"]["choices"], expected_languages
    )
@override_settings(ALL_LANGUAGES=(("af", "Afrikaans"), ("ast", "Asturian")))
def test_api_timed_text_track_options_as_student(self):
    """The details of choices fields should be available via http options for a student."""
    timed_text_track = TimedTextTrackFactory(language="af")
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = ["student"]

    response = self.client.options(
        "/api/timedtexttracks/", HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}"
    )

    payload = json.loads(response.content)
    expected_modes = [
        {"value": "st", "display_name": "Subtitle"},
        {"value": "ts", "display_name": "Transcript"},
        {"value": "cc", "display_name": "Closed captioning"},
    ]
    expected_languages = [
        {"value": "af", "display_name": "Afrikaans"},
        {"value": "ast", "display_name": "Asturian"},
    ]
    self.assertEqual(payload["actions"]["POST"]["mode"]["choices"], expected_modes)
    self.assertEqual(
        payload["actions"]["POST"]["language"]["choices"], expected_languages
    )
@override_settings(ALL_LANGUAGES=(("af", "Afrikaans"), ("ast", "Asturian")))
def test_api_timed_text_track_options_as_administrator(self):
    """The details of choices fields should be available via http options for an admin."""
    timed_text_track = TimedTextTrackFactory(language="af")
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = ["administrator"]

    response = self.client.options(
        "/api/timedtexttracks/", HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}"
    )

    payload = json.loads(response.content)
    expected_modes = [
        {"value": "st", "display_name": "Subtitle"},
        {"value": "ts", "display_name": "Transcript"},
        {"value": "cc", "display_name": "Closed captioning"},
    ]
    expected_languages = [
        {"value": "af", "display_name": "Afrikaans"},
        {"value": "ast", "display_name": "Asturian"},
    ]
    self.assertEqual(payload["actions"]["POST"]["mode"]["choices"], expected_modes)
    self.assertEqual(
        payload["actions"]["POST"]["language"]["choices"], expected_languages
    )
def test_api_timed_text_track_options_anonymous(self):
    """An OPTIONS request without credentials should be rejected with a 401."""
    self.assertEqual(self.client.options("/api/timedtexttracks/").status_code, 401)
def test_api_timed_text_track_read_detail_anonymous(self):
    """Anonymous users should not be allowed to read a timed text track detail."""
    timed_text_track = TimedTextTrackFactory()

    response = self.client.get(f"/api/timedtexttracks/{timed_text_track.id!s}/")

    self.assertEqual(response.status_code, 401)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_api_timed_text_track_read_detail_student(self):
    """Student users should not be allowed to read a timed text track detail."""
    timed_text_track = TimedTextTrackFactory()
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = ["student"]

    # Request the track detail while authenticated as a student.
    response = self.client.get(
        f"/api/timedtexttracks/{timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )

    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "You do not have permission to perform this action."},
    )
@override_settings(CLOUDFRONT_SIGNED_URLS_ACTIVE=False)
def test_api_timed_text_track_read_detail_token_user(self):
    """A token user associated to a video can read a timed text track related to this video."""
    timed_text_track = TimedTextTrackFactory(
        video__pk="b8d40ed7-95b8-4848-98c9-50728dfee25d",
        video__playlist__title="foo",
        mode="cc",
        language="fr",
        uploaded_on=datetime(2018, 8, 8, tzinfo=pytz.utc),
        upload_state="ready",
        extension="srt",
    )
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": True}

    # Fetch the track detail with the video-scoped JWT.
    response = self.client.get(
        f"/api/timedtexttracks/{timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )
    self.assertEqual(response.status_code, 200)

    expected = {
        "active_stamp": "1533686400",
        "is_ready_to_show": True,
        "id": str(timed_text_track.id),
        "mode": "cc",
        "language": "fr",
        "upload_state": "ready",
        "source_url": (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/"
            "timedtext/source/1533686400_fr_cc?response-content-disposition=a"
            "ttachment%3B+filename%3Dfoo_1533686400.srt"
        ),
        "url": (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/"
            "timedtext/1533686400_fr_cc.vtt"
        ),
        "video": str(timed_text_track.video.id),
    }
    self.assertEqual(json.loads(response.content), expected)

    # A token scoped to one video must not grant access to another track.
    other_timed_text_track = TimedTextTrackFactory()
    response = self.client.get(
        f"/api/timedtexttracks/{other_timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )
    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "You do not have permission to perform this action."},
    )
@override_settings(CLOUDFRONT_SIGNED_URLS_ACTIVE=False)
def test_api_timed_text_track_without_extension_read_detail_token_user(self):
    """A timed text track without extension should return empty source url."""
    # No "extension" on the factory call: the serializer has no filename to
    # build a source download URL from.
    timed_text_track = TimedTextTrackFactory(
        video__pk="b8d40ed7-95b8-4848-98c9-50728dfee25d",
        video__playlist__title="foo",
        mode="cc",
        language="fr",
        uploaded_on=datetime(2018, 8, 8, tzinfo=pytz.utc),
        upload_state="ready",
    )
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": True}

    # Fetch the track detail with the video-scoped JWT.
    response = self.client.get(
        f"/api/timedtexttracks/{timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )
    self.assertEqual(response.status_code, 200)

    expected = {
        "active_stamp": "1533686400",
        "is_ready_to_show": True,
        "id": str(timed_text_track.id),
        "mode": "cc",
        "language": "fr",
        "upload_state": "ready",
        "source_url": None,
        "url": (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/"
            "timedtext/1533686400_fr_cc.vtt"
        ),
        "video": str(timed_text_track.video.id),
    }
    self.assertEqual(json.loads(response.content), expected)

    # A token scoped to one video must not grant access to another track.
    other_timed_text_track = TimedTextTrackFactory()
    response = self.client.get(
        f"/api/timedtexttracks/{other_timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )
    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "You do not have permission to perform this action."},
    )
@override_settings(CLOUDFRONT_SIGNED_URLS_ACTIVE=False)
def test_api_timed_text_track_read_detail_admin_user(self):
    """Admin user associated to a video can read a timed text track related to this video."""
    timed_text_track = TimedTextTrackFactory(
        video__pk="b8d40ed7-95b8-4848-98c9-50728dfee25d",
        video__playlist__title="foo",
        mode="cc",
        language="fr",
        uploaded_on=datetime(2018, 8, 8, tzinfo=pytz.utc),
        upload_state="ready",
        extension="srt",
    )
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(timed_text_track.video.id)
    jwt_token.payload["roles"] = ["administrator"]
    jwt_token.payload["permissions"] = {"can_update": True}

    # Fetch the track detail with the video-scoped JWT.
    response = self.client.get(
        f"/api/timedtexttracks/{timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )
    self.assertEqual(response.status_code, 200)

    expected = {
        "active_stamp": "1533686400",
        "is_ready_to_show": True,
        "id": str(timed_text_track.id),
        "mode": "cc",
        "language": "fr",
        "upload_state": "ready",
        "source_url": (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/timedtext/"
            "source/1533686400_fr_cc?response-content-disposition=attachment%3B+filenam"
            "e%3Dfoo_1533686400.srt"
        ),
        "url": (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/timedtext/"
            "1533686400_fr_cc.vtt"
        ),
        "video": str(timed_text_track.video.id),
    }
    self.assertEqual(json.loads(response.content), expected)

    # A token scoped to one video must not grant access to another track.
    other_timed_text_track = TimedTextTrackFactory()
    response = self.client.get(
        f"/api/timedtexttracks/{other_timed_text_track.id!s}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token!s}",
    )
    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "You do not have permission to perform this action."},
    )
def test_api_timed_text_track_read_instructor_in_read_only(self):
    """Reading a timed text track is forbidden for instructors in read-only mode."""
    track = TimedTextTrackFactory()
    # Build a token whose "can_update" permission is off (read-only LTI launch)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": False}
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 403)
@override_settings(CLOUDFRONT_SIGNED_URLS_ACTIVE=False)
def test_api_timed_text_track_read_detail_token_user_no_active_stamp(self):
    """A timed text track with no active stamp should not fail.

    Its "url" field should simply be serialized as None.
    """
    track = TimedTextTrackFactory(uploaded_on=None)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Fetch the track detail while authenticated with the JWT token
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 200)
    # Check the raw payload as well as the decoded value
    self.assertIn('"url":null', resp.content.decode("utf-8"))
    self.assertIsNone(json.loads(resp.content)["url"])
@override_settings(CLOUDFRONT_SIGNED_URLS_ACTIVE=False)
def test_api_timed_text_track_read_detail_token_user_not_ready(self):
    """A track that was never successfully uploaded should serialize a null url."""
    track = TimedTextTrackFactory(
        uploaded_on=None, upload_state=random.choice(["pending", "error", "ready"])
    )
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Fetch the track linked to the JWT token
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 200)
    # Check the raw payload as well as the decoded value
    self.assertIn('"url":null', resp.content.decode("utf-8"))
    self.assertIsNone(json.loads(resp.content)["url"])
@override_settings(
    CLOUDFRONT_SIGNED_URLS_ACTIVE=True,
    CLOUDFRONT_ACCESS_KEY_ID="cloudfront-access-key-id",
)
@mock.patch("builtins.open", new_callable=mock.mock_open, read_data=RSA_KEY_MOCK)
def test_api_timed_text_track_read_detail_token_user_signed_urls(self, mock_open):
    """With signed urls on, Cloudfront query-string auth parameters are appended."""
    track = TimedTextTrackFactory(
        video__pk="b8d40ed7-95b8-4848-98c9-50728dfee25d",
        video__playlist__title="foo",
        mode="cc",
        language="fr",
        uploaded_on=datetime(2018, 8, 8, tzinfo=pytz.utc),
        upload_state="ready",
        extension="srt",
    )
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Freeze time so the url signature is deterministic and can be asserted
    frozen_now = datetime(2018, 8, 8, tzinfo=pytz.utc)
    with mock.patch.object(timezone, "now", return_value=frozen_now):
        resp = self.client.get(
            "/api/timedtexttracks/{!s}/".format(track.id),
            HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        )
    self.assertEqual(resp.status_code, 200)
    payload = json.loads(resp.content)
    # The display url carries Expires / Signature / Key-Pair-Id parameters
    self.assertEqual(
        payload["url"],
        (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/timedtext"
            "/1533686400_fr_cc.vtt?Expires=1533693600&Signature=CWr09YDiSe-j2sKML3f29n"
            "KfjCdF8nUMUeL1~yHPkMkQpxDXGc5mnKDKkelvzLyAhIUmEi1CtZgG18siFD4RzDVCNufOINx"
            "KCWzKYmVjN67PJAitNi2nUazFhOA-QODJ03gEpCPgea7ntwgJemOtqkd1uj7kgay~HeslK1L2"
            "HEIRHjbjaYEoCldCISC8l2FIh~fFryFv9Ptu9ajm4OfIrpc2~oDqe5QkGotQ7IrcZlq8MqMte"
            "1tbDaGkaQD-NpURCj7rmkt8vkqpWij-IkWxzNWyX38SL1bg2Co762Ab~YKpdiS8jf-WppVS31"
            "cCehf1bPdsqypBzSFMCqORZvEBtw__&Key-Pair-Id=cloudfront-access-key-id"
        ),
    )
    # So does the downloadable source url
    self.assertEqual(
        payload["source_url"],
        (
            "https://abc.cloudfront.net/b8d40ed7-95b8-4848-98c9-50728dfee25d/timedtext"
            "/source/1533686400_fr_cc?response-content-disposition=attachment%3B+filen"
            "ame%3Dfoo_1533686400.srt&Expires=1533693600&Signature=Fcb5y9wuTPBPQ2PETBZ"
            "qAnlMYKTHWkv9fCm5uItq4t28GMMtITGKjpjzlnnUmRvlP0DI6IUjDKXWkZEFN8mM70z4oSn9"
            "NSh9OLIOG0mAyXRq3XNPh4P0UG8RBkbq2JLSJHgzsDy~AS06LS6i14IQonXoTLsvXGoELNVuN"
            "sIImqHh2jeH0qaOo34pTWc~GXROYKwwYGEhkmuI1LhX5tJ14aFAEq9ggcm1YRu-aFabQj6yin"
            "ZkZAgfEqIOScVyG78h5NNDWdU4JbPoQgUr-r97uN91FuoZYn2nJDTxYS0wQQVAc5LGNFB4pjq"
            "57uxu-aKIRDzKaxOiTrOn75GztmV4OA__&Key-Pair-Id=cloudfront-access-key-id"
        ),
    )
def test_api_timed_text_track_read_detail_staff_or_user(self):
    """Session-authenticated users cannot read a timed text track detail."""
    for user in [UserFactory(), UserFactory(is_staff=True)]:
        self.client.login(username=user.username, password="test")
        track = TimedTextTrackFactory()
        resp = self.client.get(
            "/api/timedtexttracks/{!s}/".format(track.id)
        )
        # The API only accepts JWT bearer tokens, not session cookies
        self.assertEqual(resp.status_code, 401)
        self.assertEqual(
            json.loads(resp.content),
            {"detail": "Authentication credentials were not provided."},
        )
def test_api_timed_text_track_read_list_anonymous(self):
    """Anonymous requests on the timed text track list endpoint are rejected."""
    TimedTextTrackFactory()
    resp = self.client.get("/api/timedtexttracks/")
    self.assertEqual(resp.status_code, 401)
def test_api_timed_text_track_read_list_token_user(self):
    """A token user associated to a video is able to read a list of timed text tracks.

    The list must contain exactly the two tracks of the token's video and
    exclude tracks belonging to other videos.
    """
    # Make sure modes differ to avoid random failures as attempting to create a TTT with the
    # same language and mode as an existing one raises an exception.
    track_one = TimedTextTrackFactory(mode="st")
    track_two = TimedTextTrackFactory(mode="cc", video=track_one.video)
    # Add a timed text track for another video: it must not be listed
    TimedTextTrackFactory()
    token = AccessToken()
    token.payload["resource_id"] = str(track_one.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    resp = self.client.get(
        "/api/timedtexttracks/", HTTP_AUTHORIZATION="Bearer {!s}".format(token)
    )
    self.assertEqual(resp.status_code, 200)
    track_list = json.loads(resp.content)
    self.assertEqual(len(track_list["results"]), 2)
    self.assertEqual(track_list["count"], 2)
    # Use assertIn on a materialized list: unlike assertTrue(x in <generator>),
    # a failure reports the actual ids instead of "False is not true".
    listed_ids = [ttt["id"] for ttt in track_list["results"]]
    self.assertIn(str(track_one.id), listed_ids)
    self.assertIn(str(track_two.id), listed_ids)
def test_api_timed_text_track_read_list_staff_or_user(self):
    """Session-authenticated users cannot list timed text tracks."""
    for user in [UserFactory(), UserFactory(is_staff=True)]:
        self.client.login(username=user.username, password="test")
        TimedTextTrackFactory()
        resp = self.client.get("/api/timedtexttracks/")
        # Session cookies are not a valid credential on this API
        self.assertEqual(resp.status_code, 401)
def test_api_timed_text_track_create_anonymous(self):
    """Anonymous users cannot create timed text tracks."""
    resp = self.client.post("/api/timedtexttracks/")
    self.assertEqual(resp.status_code, 401)
    # Nothing must have been persisted
    self.assertFalse(TimedTextTrack.objects.exists())
def test_api_timed_text_track_create_token_user(self):
    """A token user can create a timed text track for an existing video."""
    video = VideoFactory(id="f8c30d0d-2bb4-440d-9e8d-f4b231511f1f")
    token = AccessToken()
    token.payload["resource_id"] = str(video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    resp = self.client.post(
        "/api/timedtexttracks/",
        {"language": "fr"},
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 201)
    self.assertEqual(TimedTextTrack.objects.count(), 1)
    # A freshly created track starts pending, with default mode and no urls
    self.assertEqual(
        json.loads(resp.content),
        {
            "id": str(TimedTextTrack.objects.first().id),
            "active_stamp": None,
            "is_ready_to_show": False,
            "mode": "st",
            "language": "fr",
            "upload_state": "pending",
            "source_url": None,
            "url": None,
            "video": "f8c30d0d-2bb4-440d-9e8d-f4b231511f1f",
        },
    )
def test_api_timed_text_track_create_instructor_in_read_only(self):
    """Creating a timed text track is forbidden for instructors in read-only mode."""
    track = TimedTextTrackFactory()
    # Build a token whose "can_update" permission is off (read-only LTI launch)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": False}
    resp = self.client.post(
        "/api/timedtexttracks/", HTTP_AUTHORIZATION="Bearer {!s}".format(token)
    )
    self.assertEqual(resp.status_code, 403)
def test_api_timed_text_track_create_staff_or_user(self):
    """Session-authenticated users cannot create timed text tracks."""
    for user in [UserFactory(), UserFactory(is_staff=True)]:
        self.client.login(username=user.username, password="test")
        resp = self.client.post("/api/timedtexttracks/")
        self.assertEqual(resp.status_code, 401)
    # Nothing must have been persisted for any of the users
    self.assertFalse(TimedTextTrack.objects.exists())
def test_api_timed_text_track_update_detail_anonymous(self):
    """Anonymous users cannot update a timed text track through the API."""
    track = TimedTextTrackFactory(language="fr")
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps({"language": "en"}),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 401)
    # The stored language must be untouched
    track.refresh_from_db()
    self.assertEqual(track.language, "fr")
def test_api_timed_text_track_update_detail_token_user_language(self):
    """Token users can change the language of their timed text track."""
    track = TimedTextTrackFactory(language="fr")
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps({"language": "en"}),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    # The new language must be persisted
    track.refresh_from_db()
    self.assertEqual(track.language, "en")
def test_api_timed_text_track_update_detail_token_user_closed_captioning(self):
    """Token users can change the mode of a timed text track through the API."""
    track = TimedTextTrackFactory(mode="cc")
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Read the current serialized representation, tweak the mode, and PUT it back
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    payload = json.loads(resp.content)
    payload["mode"] = "ts"
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps(payload),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    track.refresh_from_db()
    self.assertEqual(track.mode, "ts")
def test_api_timed_text_track_update_detail_token_user_active_stamp(self):
    """Attempts to set "active_stamp" through the API must be silently ignored."""
    track = TimedTextTrackFactory()
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Read the current serialized representation and try to force a stamp
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    payload = json.loads(resp.content)
    self.assertIsNone(payload["active_stamp"])
    payload["active_stamp"] = "1533686400"
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps(payload),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    # The read-only field must not have been written
    track.refresh_from_db()
    self.assertIsNone(track.uploaded_on)
def test_api_timed_text_track_update_detail_token_user_upload_state(self):
    """Attempts to set "upload_state" through the API must be silently ignored."""
    track = TimedTextTrackFactory()
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Read the current serialized representation and try to force the state
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    payload = json.loads(resp.content)
    self.assertEqual(payload["upload_state"], "pending")
    payload["upload_state"] = "ready"
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps(payload),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    # The read-only field must not have been written
    track.refresh_from_db()
    self.assertEqual(track.upload_state, "pending")
def test_api_timed_text_track_update_instructor_in_read_only(self):
    """Updating a timed text track is forbidden for instructors in read-only mode."""
    track = TimedTextTrackFactory()
    # Build a token whose "can_update" permission is off (read-only LTI launch)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": False}
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 403)
def test_api_timed_text_track_patch_detail_token_user_stamp_and_state(self):
    """PATCHing upload state and active stamp must have no effect.

    These two fields can only be updated by AWS via the separate
    update-state API endpoint.
    """
    track = TimedTextTrackFactory()
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Sanity-check the initial state before patching
    self.assertEqual(track.upload_state, "pending")
    self.assertIsNone(track.uploaded_on)
    resp = self.client.patch(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps({"active_stamp": "1533686400", "upload_state": "ready"}),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    # Both read-only fields must be unchanged
    track.refresh_from_db()
    self.assertIsNone(track.uploaded_on)
    self.assertEqual(track.upload_state, "pending")
def test_api_timed_text_track_update_detail_token_id(self):
    """Attempts to change a timed text track's ID must be silently ignored."""
    track = TimedTextTrackFactory()
    initial_id = track.id
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Read the current serialized representation and try to overwrite the id
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    payload = json.loads(resp.content)
    payload["id"] = "my new id"
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps(payload),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    # The primary key must be unchanged
    track.refresh_from_db()
    self.assertEqual(track.id, initial_id)
def test_api_timed_text_track_update_detail_token_video(self):
    """Attempts to change a track's related video must be silently ignored."""
    track = TimedTextTrackFactory()
    initial_video = track.video
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Read the current serialized representation and point it at another video
    resp = self.client.get(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    payload = json.loads(resp.content)
    payload["video"] = str(VideoFactory().id)
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(track.id),
        json.dumps(payload),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 200)
    # The relation must be unchanged
    track.refresh_from_db()
    self.assertEqual(track.video, initial_video)
def test_api_timed_text_track_update_detail_token_user_other_video(self):
    """Token users cannot update a track that belongs to another video."""
    other_video = VideoFactory()
    target_track = TimedTextTrackFactory(language="en")
    # The token is bound to a video the track does NOT belong to
    token = AccessToken()
    token.payload["resource_id"] = str(other_video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    resp = self.client.put(
        "/api/timedtexttracks/{!s}/".format(target_track.id),
        json.dumps({"language": "fr"}),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, 403)
    # The stored language must be untouched
    target_track.refresh_from_db()
    self.assertEqual(target_track.language, "en")
def test_api_timed_text_track_patch_instructor_in_read_only(self):
    """Patching a timed text track is forbidden for instructors in read-only mode."""
    track = TimedTextTrackFactory()
    # Build a token whose "can_update" permission is off (read-only LTI launch)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": False}
    resp = self.client.patch(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 403)
def test_api_timed_text_track_delete_detail_anonymous(self):
    """Anonymous users cannot delete a timed text track."""
    track = TimedTextTrackFactory()
    resp = self.client.delete(
        "/api/timedtexttracks/{!s}/".format(track.id)
    )
    self.assertEqual(resp.status_code, 401)
    self.assertEqual(
        json.loads(resp.content),
        {"detail": "Authentication credentials were not provided."},
    )
    # The record must still exist
    self.assertTrue(TimedTextTrack.objects.filter(id=track.id).exists())
def test_api_timed_text_track_delete_detail_token_user(self):
    """A token user linked to a video may delete that video's timed text tracks."""
    tracks = TimedTextTrackFactory.create_batch(2)
    # Delete each track with a token bound to its own video
    for track in tracks:
        token = AccessToken()
        token.payload["resource_id"] = str(track.video.id)
        token.payload["roles"] = [
            random.choice(["instructor", "administrator"])
        ]
        token.payload["permissions"] = {"can_update": True}
        resp = self.client.delete(
            "/api/timedtexttracks/{!s}/".format(track.id),
            HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        )
        self.assertEqual(resp.status_code, 204)
        # The record must be gone
        self.assertFalse(
            TimedTextTrack.objects.filter(id=track.id).exists()
        )
def test_api_timed_text_track_delete_detail_staff_or_user(self):
    """Session-authenticated users cannot delete a timed text track."""
    track = TimedTextTrackFactory()
    # Plain, staff and superuser accounts are all rejected alike
    for user in [
        UserFactory(),
        UserFactory(is_staff=True),
        UserFactory(is_superuser=True),
    ]:
        self.client.login(username=user.username, password="test")
        resp = self.client.delete(
            "/api/timedtexttracks/{!s}/".format(track.id)
        )
        self.assertEqual(resp.status_code, 401)
        self.assertEqual(
            json.loads(resp.content),
            {"detail": "Authentication credentials were not provided."},
        )
    # The record must still exist
    self.assertTrue(TimedTextTrack.objects.filter(id=track.id).exists())
def test_api_timed_text_track_delete_list_anonymous(self):
    """Anonymous users cannot delete on the timed text track list endpoint."""
    track = TimedTextTrackFactory()
    resp = self.client.delete("/api/timedtexttracks/")
    self.assertEqual(resp.status_code, 401)
    # The record must still exist
    self.assertTrue(TimedTextTrack.objects.filter(id=track.id).exists())
def test_api_timed_text_track_delete_list_token_user(self):
    """Token users cannot bulk-delete their timed text tracks."""
    track = TimedTextTrackFactory()
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # DELETE on the list endpoint is not allowed even with a valid token
    resp = self.client.delete(
        "/api/timedtexttracks/", HTTP_AUTHORIZATION="Bearer {!s}".format(token)
    )
    self.assertEqual(resp.status_code, 403)
    self.assertTrue(TimedTextTrack.objects.filter(id=track.id).exists())
def test_api_timed_text_track_delete_list_staff_or_user(self):
    """Session-authenticated users cannot delete on the list endpoint."""
    track = TimedTextTrackFactory()
    # Plain, staff and superuser accounts are all rejected alike
    for user in [
        UserFactory(),
        UserFactory(is_staff=True),
        UserFactory(is_superuser=True),
    ]:
        self.client.login(username=user.username, password="test")
        resp = self.client.delete("/api/timedtexttracks/")
        self.assertEqual(resp.status_code, 401)
    # The record must still exist
    self.assertTrue(TimedTextTrack.objects.filter(id=track.id).exists())
def test_api_timed_text_track_delete_instructor_in_read_only(self):
    """Deleting a timed text track is forbidden for instructors in read-only mode."""
    track = TimedTextTrackFactory()
    # Build a token whose "can_update" permission is off (read-only LTI launch)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": False}
    resp = self.client.delete(
        "/api/timedtexttracks/{!s}/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 403)
def test_api_timed_text_track_initiate_upload_anonymous_user(self):
    """Anonymous users cannot initiate an upload."""
    track = TimedTextTrackFactory()
    resp = self.client.post(
        "/api/timedtexttracks/{!s}/initiate-upload/".format(track.id)
    )
    self.assertEqual(resp.status_code, 401)
    self.assertEqual(
        json.loads(resp.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_api_timed_text_track_initiate_upload_token_user(self):
    """A token user can initiate an upload and receives a signed S3 policy."""
    track = TimedTextTrackFactory(
        id="5c019027-1e1f-4d8c-9f83-c5e20edaad2b",
        video__pk="b8d40ed7-95b8-4848-98c9-50728dfee25d",
        language="fr",
        upload_state=random.choice(["ready", "error"]),
        mode="cc",
    )
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": True}
    # Create other tracks to verify their upload state stays untouched.
    # A different language avoids the (video, language, mode) unicity constraint.
    sibling_track = TimedTextTrackFactory(
        video=track.video,
        language="en",
        upload_state=random.choice(["ready", "error"]),
    )
    unrelated_track = TimedTextTrackFactory(
        upload_state=random.choice(["ready", "error"])
    )
    # Ask for the upload policy; freeze both time sources so the generated key
    # and the signed policy are deterministic.
    frozen_now = datetime(2018, 8, 8, tzinfo=pytz.utc)
    with mock.patch.object(timezone, "now", return_value=frozen_now), mock.patch(
        "datetime.datetime"
    ) as mock_dt:
        mock_dt.utcnow = mock.Mock(return_value=frozen_now)
        resp = self.client.post(
            "/api/timedtexttracks/{!s}/initiate-upload/".format(
                track.id
            ),
            HTTP_AUTHORIZATION="Bearer {!s}".format(token),
        )
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.content),
            {
                "url": "https://test-marsha-source.s3.amazonaws.com/",
                "fields": {
                    "acl": "private",
                    "key": (
                        "b8d40ed7-95b8-4848-98c9-50728dfee25d/timedtexttrack/5c019027-1e1f-4d8c-"
                        "9f83-c5e20edaad2b/1533686400_fr_cc"
                    ),
                    "x-amz-algorithm": "AWS4-HMAC-SHA256",
                    "x-amz-credential": "aws-access-key-id/20180808/eu-west-1/s3/aws4_request",
                    "x-amz-date": "20180808T000000Z",
                    "policy": (
                        "eyJleHBpcmF0aW9uIjogIjIwMTgtMDgtMDlUMDA6MDA6MDBaIiwgImNvbmRpdGlvbnMiOiBbe"
                        "yJhY2wiOiAicHJpdmF0ZSJ9LCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMCwgMTA0ODU3Nl"
                        "0sIHsiYnVja2V0IjogInRlc3QtbWFyc2hhLXNvdXJjZSJ9LCB7ImtleSI6ICJiOGQ0MGVkNy0"
                        "5NWI4LTQ4NDgtOThjOS01MDcyOGRmZWUyNWQvdGltZWR0ZXh0dHJhY2svNWMwMTkwMjctMWUx"
                        "Zi00ZDhjLTlmODMtYzVlMjBlZGFhZDJiLzE1MzM2ODY0MDBfZnJfY2MifSwgeyJ4LWFtei1hb"
                        "Gdvcml0aG0iOiAiQVdTNC1ITUFDLVNIQTI1NiJ9LCB7IngtYW16LWNyZWRlbnRpYWwiOiAiYX"
                        "dzLWFjY2Vzcy1rZXktaWQvMjAxODA4MDgvZXUtd2VzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB"
                        "7IngtYW16LWRhdGUiOiAiMjAxODA4MDhUMDAwMDAwWiJ9XX0="
                    ),
                    "x-amz-signature": (
                        "bab90cecbb4db4a6bd7d4036a6be95a7c398b0f9eaa78b14c7f10e6bb3349558"
                    ),
                },
            },
        )
    # Initiating an upload resets the target track's upload state
    track.refresh_from_db()
    self.assertEqual(track.upload_state, "pending")
    # ... but must not touch any other track
    for other in [sibling_track, unrelated_track]:
        other.refresh_from_db()
        self.assertNotEqual(other.upload_state, "pending")
    # Initiating an upload on a track from another video must be denied
    resp = self.client.post(
        "/api/timedtexttracks/{!s}/initiate-upload/".format(
            unrelated_track.id
        ),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 403)
    self.assertEqual(
        json.loads(resp.content),
        {"detail": "You do not have permission to perform this action."},
    )
def test_api_timed_text_track_initiate_upload_staff_or_user(self):
    """Session-authenticated users cannot initiate an upload."""
    track = TimedTextTrackFactory()
    # Plain, staff and superuser accounts are all rejected alike
    for user in [
        UserFactory(),
        UserFactory(is_staff=True),
        UserFactory(is_superuser=True),
    ]:
        self.client.login(username=user.username, password="test")
        resp = self.client.post(
            "/api/timedtexttracks/{!s}/initiate-upload/".format(track.id)
        )
        self.assertEqual(resp.status_code, 401)
        self.assertEqual(
            json.loads(resp.content),
            {"detail": "Authentication credentials were not provided."},
        )
def test_api_timed_text_track_instructor_initiate_upload_in_read_only(self):
    """Initiating an upload is forbidden for instructors in read-only mode."""
    track = TimedTextTrackFactory()
    # Build a token whose "can_update" permission is off (read-only LTI launch)
    token = AccessToken()
    token.payload["resource_id"] = str(track.video.id)
    token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    token.payload["permissions"] = {"can_update": False}
    resp = self.client.post(
        "/api/timedtexttracks/{!s}/initiate-upload/".format(track.id),
        HTTP_AUTHORIZATION="Bearer {!s}".format(token),
    )
    self.assertEqual(resp.status_code, 403)
| 45.919231
| 99
| 0.633261
| 5,201
| 47,756
| 5.57662
| 0.074986
| 0.076024
| 0.111985
| 0.025927
| 0.843642
| 0.820508
| 0.804027
| 0.780375
| 0.758344
| 0.743242
| 0
| 0.026063
| 0.251194
| 47,756
| 1,039
| 100
| 45.963426
| 0.785011
| 0.101767
| 0
| 0.676887
| 0
| 0
| 0.201018
| 0.085819
| 0
| 0
| 0
| 0
| 0.119104
| 1
| 0.050708
| false
| 0.007075
| 0.012972
| 0
| 0.066038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f9d675d3386ba21507199a3c8634221352c949e9
| 33
|
py
|
Python
|
python/milleniumcohort/__init__.py
|
wadpac/millenniumcohort-acc
|
a731dbbc844082cc2d670835d2e9fa88d58a8854
|
[
"Apache-2.0"
] | 6
|
2018-02-07T03:13:04.000Z
|
2022-02-04T09:44:22.000Z
|
python/milleniumcohort/__init__.py
|
wadpac/millenniumcohort-acc
|
a731dbbc844082cc2d670835d2e9fa88d58a8854
|
[
"Apache-2.0"
] | 3
|
2017-05-11T13:00:44.000Z
|
2017-05-19T15:52:57.000Z
|
python/milleniumcohort/__init__.py
|
wadpac/milleniumcohort-acc
|
a731dbbc844082cc2d670835d2e9fa88d58a8854
|
[
"Apache-2.0"
] | 3
|
2018-06-22T17:49:42.000Z
|
2019-05-10T20:40:17.000Z
|
from .utils import create_config
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f9e8a7666bd119a6b235781a17202bc76b984491
| 835
|
py
|
Python
|
flickrsets/tests/__init__.py
|
gillesfabio/django-flickrsets
|
953481fde4029d4d613a5994bdbe987f731fe033
|
[
"BSD-3-Clause"
] | 1
|
2015-06-24T01:46:02.000Z
|
2015-06-24T01:46:02.000Z
|
flickrsets/tests/__init__.py
|
gillesfabio/django-flickrsets
|
953481fde4029d4d613a5994bdbe987f731fe033
|
[
"BSD-3-Clause"
] | null | null | null |
flickrsets/tests/__init__.py
|
gillesfabio/django-flickrsets
|
953481fde4029d4d613a5994bdbe987f731fe033
|
[
"BSD-3-Clause"
] | null | null | null |
from flickrsets.tests.client import FlickrClientTest
from flickrsets.tests.client import FakeClient
from flickrsets.tests.models import PersonTest
from flickrsets.tests.models import PersonManagerTest
from flickrsets.tests.models import PhotoTest
from flickrsets.tests.models import PhotoManagerTest
from flickrsets.tests.models import PhotosetTest
from flickrsets.tests.models import PhotosetManagerTest
from flickrsets.tests.models import TagTest
from flickrsets.tests.models import TagManagerTest
from flickrsets.tests.parsers import PersonParserTest
from flickrsets.tests.parsers import PhotoParserTest
from flickrsets.tests.parsers import PhotosetParserTest
from flickrsets.tests.parsers import PhotoTagsParserTest
from flickrsets.tests.templatetags import PhotoFlickrUrlsNodeTest
from flickrsets.tests.views import ViewsTest
| 39.761905
| 65
| 0.88024
| 96
| 835
| 7.65625
| 0.260417
| 0.304762
| 0.413605
| 0.272109
| 0.595918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081437
| 835
| 20
| 66
| 41.75
| 0.958279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb00950c4975f8fc15e40df2d199789083dbcce6
| 2,866
|
py
|
Python
|
app/src/main/assets/code/controller/color_sensor.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/assets/code/controller/color_sensor.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/assets/code/controller/color_sensor.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
import sys
import math
import random
import imp
import struct
class color_sensor:
def __init__(self,call):
self.call=call
def get_light_strength(self):
data = [0x28,0x02,0x04]
self.call.blewrite(data)
r=self.call.blewait(0x28)
if(r==None):
return 0
return r[4]
def is_white(self):
data = [0x20,0x01,0x01]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_red(self):
data = [0x20,0x01,0x02]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_yellow(self):
data = [0x20,0x01,0x03]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_green(self):
data = [0x20,0x01,0x04]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_blue(self):
data = [0x20,0x01,0x05]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_purple(self):
data = [0x20,0x01,0x06]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_black(self):
data = [0x20,0x01,0x07]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_bright(self):
data = [0x20,0x05,0x01]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
def is_dark(self):
data = [0x20,0x05,0x02]
self.call.blewrite(data)
r=self.call.blewait(0x20)
if(r==None):
return False
else:
if(r[4]>0):
return True
else:
return False
| 24.921739
| 33
| 0.456734
| 334
| 2,866
| 3.871257
| 0.140719
| 0.136118
| 0.123743
| 0.154679
| 0.717711
| 0.717711
| 0.717711
| 0.717711
| 0.717711
| 0.683681
| 0
| 0.086957
| 0.438241
| 2,866
| 115
| 34
| 24.921739
| 0.716149
| 0
| 0
| 0.72807
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055807
| 0
| 0
| 1
| 0.096491
| false
| 0
| 0.04386
| 0
| 0.403509
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb23f38c53283257c36d2cb82aa4dc2a92575091
| 308
|
py
|
Python
|
tridet/modeling/dd3d/__init__.py
|
flipson/dd3d
|
86d8660c29612b79836dad9b6c39972ac2ca1557
|
[
"MIT"
] | 227
|
2021-08-17T02:42:28.000Z
|
2022-03-31T22:35:06.000Z
|
tridet/modeling/dd3d/__init__.py
|
flipson/dd3d
|
86d8660c29612b79836dad9b6c39972ac2ca1557
|
[
"MIT"
] | 21
|
2021-08-20T06:51:59.000Z
|
2022-03-31T16:47:18.000Z
|
tridet/modeling/dd3d/__init__.py
|
flipson/dd3d
|
86d8660c29612b79836dad9b6c39972ac2ca1557
|
[
"MIT"
] | 35
|
2021-08-21T08:22:17.000Z
|
2022-03-30T05:32:45.000Z
|
# Copyright 2021 Toyota Research Institute. All rights reserved.
from tridet.modeling.dd3d.core import DD3D
from tridet.modeling.dd3d.nuscenes_dd3d import NuscenesDD3D
from tridet.modeling.dd3d.nuscenes_dd3d_tta import NuscenesDD3DWithTTA
from tridet.modeling.dd3d.test_time_augmentation import DD3DWithTTA
| 51.333333
| 70
| 0.866883
| 41
| 308
| 6.390244
| 0.536585
| 0.152672
| 0.274809
| 0.335878
| 0.259542
| 0.259542
| 0
| 0
| 0
| 0
| 0
| 0.049645
| 0.084416
| 308
| 5
| 71
| 61.6
| 0.879433
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
34b4ce946909ad9ba55a225f5c73f6e3474a0cf7
| 1,004
|
py
|
Python
|
sorting/bubbleSort.py
|
rp581998/data-structures
|
ff6d96f77fddb5387117b315d781571f19469261
|
[
"MIT"
] | null | null | null |
sorting/bubbleSort.py
|
rp581998/data-structures
|
ff6d96f77fddb5387117b315d781571f19469261
|
[
"MIT"
] | null | null | null |
sorting/bubbleSort.py
|
rp581998/data-structures
|
ff6d96f77fddb5387117b315d781571f19469261
|
[
"MIT"
] | null | null | null |
# Bubble sort that bubbles the minimum of the unsorted tail to the front
# on each pass (two earlier variants were kept as dead commented-out code
# and have been removed).
def bubbleSort(arr):
    """Sort `arr` in place in ascending order using bubble sort (O(n^2))."""
    n = len(arr)
    for i in range(n):
        # After pass i, arr[i] holds the minimum of arr[i:].
        for j in range(n - 1, i, -1):
            if arr[j] < arr[j - 1]:
                swap(arr, j, j - 1)

def swap(arr, x, y):
    """Exchange the elements at indices x and y of `arr` in place."""
    arr[x], arr[y] = arr[y], arr[x]

arr = [10, 9, 15, 1, 5, 2, 6, 3]
bubbleSort(arr)
print('Sorted array: ')
print(arr)
| 22.818182
| 50
| 0.447211
| 175
| 1,004
| 2.565714
| 0.182857
| 0.106904
| 0.077951
| 0.089087
| 0.85078
| 0.804009
| 0.804009
| 0.804009
| 0.804009
| 0.804009
| 0
| 0.052298
| 0.371514
| 1,004
| 44
| 51
| 22.818182
| 0.659271
| 0.427291
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34d4f5b5b721ef5149cdee91cebaa82ec642d645
| 354
|
py
|
Python
|
vo/douyin/SpiderDouyinVideoUrlByUserResponseVO.py
|
genji9071/dark_spider
|
8213fc92791506a3b2c99b696cd12ae330208d99
|
[
"MIT"
] | null | null | null |
vo/douyin/SpiderDouyinVideoUrlByUserResponseVO.py
|
genji9071/dark_spider
|
8213fc92791506a3b2c99b696cd12ae330208d99
|
[
"MIT"
] | null | null | null |
vo/douyin/SpiderDouyinVideoUrlByUserResponseVO.py
|
genji9071/dark_spider
|
8213fc92791506a3b2c99b696cd12ae330208d99
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
from vo.SpiderBaseGetVideoInfoBatchResponseVO import SpiderBaseGetVideoInfoBatchResponseVO
from vo.douyin.SpiderDouyinUserInfoVO import SpiderDouyinUserInfoVO
class SpiderDouyinVideoUrlByUserResponseVO(BaseModel):
    """Response payload for the "video URLs by Douyin user" spider endpoint."""
    # Profile info for the queried user; None until populated.
    user_info: SpiderDouyinUserInfoVO = None
    # Batch of video-info entries for that user; None until populated.
    video_list: SpiderBaseGetVideoInfoBatchResponseVO = None
| 35.4
| 90
| 0.878531
| 26
| 354
| 11.884615
| 0.576923
| 0.038835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09322
| 354
| 9
| 91
| 39.333333
| 0.962617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
34d9d2413c70921e229144e1a642c8f77d6ba29e
| 12,773
|
py
|
Python
|
src/load_data.py
|
microsoft/Akoustos
|
c88caa58a77606f19108fe598ef6f911ebe83956
|
[
"MIT"
] | null | null | null |
src/load_data.py
|
microsoft/Akoustos
|
c88caa58a77606f19108fe598ef6f911ebe83956
|
[
"MIT"
] | null | null | null |
src/load_data.py
|
microsoft/Akoustos
|
c88caa58a77606f19108fe598ef6f911ebe83956
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
'''
from datetime import datetime
import glob
import pandas as pd
import numpy as np
import os
from statistics import median
import cv2
import warnings
from math import ceil, floor
from joblib import Parallel, delayed
import multiprocessing
from multiprocessing import Pool
#from audio import Audio
from preprocessing import speechproc
from preprocessing import spectrogating
from copy import deepcopy
from scipy.signal import lfilter
class Load_Data:
    """Helpers for loading audio file lists, annotation labels, and spectrograms."""
    # NOTE(review): none of these methods takes `self`, so they only work when
    # called on the class itself (Load_Data.audio_filenames(...)); @staticmethod
    # would make that explicit — TODO confirm call sites before changing.
    def audio_filenames(directory):
        """Return the .flac and .wav paths directly under `directory`.

        `directory` is concatenated with a glob pattern, so it is assumed to
        end with a path separator — TODO confirm with callers.
        """
        begin = datetime.now()
        audio_filenames = glob.glob(directory + '*.flac') + glob.glob(directory + '*.wav')
        end = datetime.now()
        print('Number of audio files:', len(audio_filenames))
        print('Time spent to load audio files: ', (end - begin).total_seconds(), 'seconds')
        return audio_filenames
    def labeled_data(labeled_data_dir):
        """Load and concatenate all .xlsx/.csv/.txt annotation tables in `labeled_data_dir`.

        Returns a DataFrame sorted by ('Begin File', 'Begin Time (s)') with
        duplicates dropped, or None when no rows were found.  Prints a
        per-'Category' count/percentage summary as a side effect.
        """
        begin = datetime.now()
        filenames = glob.glob(labeled_data_dir + '*.xlsx') + glob.glob(labeled_data_dir + '*.csv') + glob.glob(labeled_data_dir + '*.txt')
        all_labeled_data = pd.DataFrame()
        for filename in filenames:
            if filename.endswith('xlsx'):
                labeled_data = pd.read_excel(filename)
            elif filename.endswith('csv'):
                labeled_data = pd.read_csv(filename)
            elif filename.endswith('txt'):
                labeled_data = pd.read_csv(filename, sep='\t')
            # NOTE(review): DataFrame.append is deprecated/removed in pandas >= 2.0;
            # pd.concat would be the forward-compatible replacement.
            all_labeled_data = all_labeled_data.append(labeled_data, ignore_index=True)
        if len(all_labeled_data) == 0:
            print("No data found in {0}".format(labeled_data_dir))
            return None
        all_labeled_data.drop_duplicates(inplace=True)
        all_labeled_data = all_labeled_data.sort_values(by=['Begin File', 'Begin Time (s)']).reset_index(drop=True)
        summary = all_labeled_data.groupby(['Category']).size().reset_index(name='Count')
        summary['Percentage'] = round(100 * summary['Count'] / summary['Count'].sum(), 2)
        print(summary)
        print(labeled_data_dir)
        end = datetime.now()
        print('Time spent to load labels: ', (end - begin).total_seconds(), 'seconds')
        return all_labeled_data
    def labeled_data_with_NoSoundEvent(labeled_data_dir, audio_dir):
        """Load annotations and append synthetic 'No Sound Event' rows.

        For each annotated audio file, a denoise + voice-activity-detection
        pipeline finds silent spans; the longest spans are appended as
        'No Sound Event' annotations (roughly one class worth of rows).

        NOTE(review): this method calls `Audio.load`, but the `from audio
        import Audio` import at module top is commented out, so calling this
        will raise NameError until that import is restored.
        NOTE(review): the glob here matches '*', but only xlsx/csv get parsed;
        any other file leaves `labeled_data` stale/unbound — verify inputs.
        """
        begin = datetime.now()
        filenames = glob.glob(labeled_data_dir + '*')
        all_labeled_data = pd.DataFrame()
        for filename in filenames:
            if filename.endswith('xlsx'):
                labeled_data = pd.read_excel(filename)
            elif filename.endswith('csv'):
                labeled_data = pd.read_csv(filename)
            all_labeled_data = all_labeled_data.append(labeled_data, ignore_index=True)
        all_labeled_data = all_labeled_data.sort_values(by=['Begin File', 'Begin Time (s)']).reset_index(drop=True)
        ### no_sound_event_data
        no_sound_event_data = pd.DataFrame(columns = list(all_labeled_data))
        annotation_base_audio_filenames = list(all_labeled_data['Begin File'].unique())
        audio_filenames = glob.glob(audio_dir + '*')
        for i, annotation_base_audio_filename in enumerate(annotation_base_audio_filenames):
            matching_audio_filename = [audio_filename for audio_filename in audio_filenames if os.path.basename(audio_filename) == annotation_base_audio_filename]
            audio = Audio.load(matching_audio_filename.pop())
            ## sound event detection
            # The first second of the recording is used as the noise profile.
            noise = audio.samples[0:1*audio.sample_rate]
            x_dn = spectrogating.removeNoise(audio_clip=audio.samples,
                                             noise_clip=noise,
                                             n_grad_freq=2,
                                             n_grad_time=4,
                                             n_fft=2048,
                                             win_length=2048,
                                             hop_length=512,
                                             n_std_thresh=2.5,
                                             prop_decrease=1.0,
                                             verbose=False,
                                             visual=False)
            winlen, ovrlen, pre_coef, nfilter, nftt = 0.025, 0.01, 0.97, 20, 2048
            ftThres = 0.4
            vadThres = 0.2
            opts = 1
            ft, flen, fsh10, nfr10 = speechproc.sflux(x_dn, audio.sample_rate, winlen, ovrlen, nftt)
            # --spectral flatness --
            pv01 = np.zeros(nfr10)
            pv01[np.less_equal(ft, ftThres)] = 1
            pitch = deepcopy(ft)
            pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts)
            # --filtering--
            ENERGYFLOOR = np.exp(-50)
            b = np.array([0.9770, -0.9770])
            a = np.array([0.3, -0.3])
            fdata = lfilter(b, a, x_dn, axis=0)
            vad_seg = speechproc.snre_vad(fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres)
            # Frames where VAD flips 1->0 start a silent span; 0->1 ends one.
            no_events_starttime = [0] + [i / len(vad_seg) * audio.duration() for i in range(len(vad_seg)) if vad_seg[i] == 0 and vad_seg[i-1] == 1]
            no_events_endtime = [i / len(vad_seg) * audio.duration() for i in range(len(vad_seg)) if vad_seg[i] == 1 and vad_seg[i-1] == 0] + [audio.duration()]
            for start, end in zip(no_events_starttime, no_events_endtime):
                new_row = {'Begin Time (s)': start,
                           'End Time (s)': end,
                           'Low Freq (Hz)': 0,
                           'High Freq (Hz)': 0,
                           'Begin File': annotation_base_audio_filename,
                           'Category': 'No Sound Event'}
                no_sound_event_data = no_sound_event_data.append(new_row, ignore_index=True)
        # Keep only the longest silent spans (about one class worth of rows).
        no_sound_event_data['duration'] = no_sound_event_data['End Time (s)'] - no_sound_event_data['Begin Time (s)']
        no_sound_event_data = no_sound_event_data.sort_values(by='duration',ascending=False)[: len(all_labeled_data) // len(all_labeled_data.Category.unique())]
        no_sound_event_data = no_sound_event_data.drop(['duration'], axis = 1)
        all_labeled_data = all_labeled_data.append(no_sound_event_data, ignore_index=True)
        summary = all_labeled_data.groupby(['Category']).size().reset_index(name='Count')
        summary['Percentage'] = round(100 * summary['Count'] / summary['Count'].sum(), 2)
        print(summary)
        end = datetime.now()
        print('Time spent to preprocess data: ', (end - begin).total_seconds(), 'seconds')
        return all_labeled_data
    def labeled_data_with_NoSoundEvent_parallel(labeled_data_dir, audio_dir):
        """Parallel variant of labeled_data_with_NoSoundEvent using a process pool.

        NOTE(review): Pool.map with a function defined inside this method will
        fail to pickle on most platforms, and `Audio` is not imported (import
        commented out at module top) — presumably why this is marked TODO.
        """
        ###################
        ### TODO: fix
        ###################
        begin = datetime.now()
        filenames = glob.glob(labeled_data_dir + '*')
        all_labeled_data = pd.DataFrame()
        for filename in filenames:
            if filename.endswith('xlsx'):
                labeled_data = pd.read_excel(filename)
            elif filename.endswith('csv'):
                labeled_data = pd.read_csv(filename)
            all_labeled_data = all_labeled_data.append(labeled_data, ignore_index=True)
        all_labeled_data = all_labeled_data.sort_values(by=['Begin File', 'Begin Time (s)']).reset_index(drop=True)
        ### no_sound_event_data
        annotation_base_audio_filenames = list(all_labeled_data['Begin File'].unique())
        audio_filenames = glob.glob(audio_dir + '*')
        def sound_event_detection_for_single_audio_file(annotation_base_audio_filename):
            # Per-file worker: returns a DataFrame of 'No Sound Event' rows.
            df = pd.DataFrame(columns = list(all_labeled_data))
            matching_audio_filename = [audio_filename for audio_filename in audio_filenames if os.path.basename(audio_filename) == annotation_base_audio_filename]
            audio = Audio.load(matching_audio_filename.pop())
            ## sound event detection
            noise = audio.samples[0:1*audio.sample_rate]
            x_dn = spectrogating.removeNoise(audio_clip=audio.samples,
                                             noise_clip=noise,
                                             n_grad_freq=2,
                                             n_grad_time=4,
                                             n_fft=2048,
                                             win_length=2048,
                                             hop_length=512,
                                             n_std_thresh=2.5,
                                             prop_decrease=1.0,
                                             verbose=False,
                                             visual=False)
            winlen, ovrlen, pre_coef, nfilter, nftt = 0.025, 0.01, 0.97, 20, 2048
            ftThres = 0.4
            vadThres = 0.2
            opts = 1
            ft, flen, fsh10, nfr10 = speechproc.sflux(x_dn, audio.sample_rate, winlen, ovrlen, nftt)
            # --spectral flatness --
            pv01 = np.zeros(nfr10)
            pv01[np.less_equal(ft, ftThres)] = 1
            pitch = deepcopy(ft)
            pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts)
            # --filtering--
            ENERGYFLOOR = np.exp(-50)
            b = np.array([0.9770, -0.9770])
            a = np.array([0.3, -0.3])
            fdata = lfilter(b, a, x_dn, axis=0)
            vad_seg = speechproc.snre_vad(fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres)
            no_events_starttime = [0] + [i / len(vad_seg) * audio.duration() for i in range(len(vad_seg)) if vad_seg[i] == 0 and vad_seg[i-1] == 1]
            no_events_endtime = [i / len(vad_seg) * audio.duration() for i in range(len(vad_seg)) if vad_seg[i] == 1 and vad_seg[i-1] == 0] + [audio.duration()]
            for start, end in zip(no_events_starttime, no_events_endtime):
                new_row = {'Begin Time (s)': start,
                           'End Time (s)': end,
                           'Low Freq (Hz)': 0,
                           'High Freq (Hz)': 0,
                           'Begin File': annotation_base_audio_filename,
                           'Category': 'No Sound Event'}
                df = df.append(new_row, ignore_index=True)
            return df
        num_cores = multiprocessing.cpu_count()
        with Pool(processes=num_cores) as pool:
            df_list = pool.map(sound_event_detection_for_single_audio_file, annotation_base_audio_filenames)
        no_sound_event_data = pd.concat(df_list, ignore_index=True)
        no_sound_event_data['duration'] = no_sound_event_data['End Time (s)'] - no_sound_event_data['Begin Time (s)']
        no_sound_event_data = no_sound_event_data.sort_values(by='duration',ascending=False)[: len(all_labeled_data) // len(all_labeled_data.Category.unique())]
        no_sound_event_data = no_sound_event_data.drop(['duration'], axis = 1)
        all_labeled_data = all_labeled_data.append(no_sound_event_data, ignore_index=True)
        summary = all_labeled_data.groupby(['Category']).size().reset_index(name='Count')
        summary['Percentage'] = round(100 * summary['Count'] / summary['Count'].sum(), 2)
        print(summary)
        end = datetime.now()
        print('Time spent to preprocess data: ', (end - begin).total_seconds(), 'seconds')
        return all_labeled_data
    def load_spectrograms(directory, shape=(224, 224)):
        """Load .png spectrograms under `directory` into one numpy array.

        Files smaller than 80% of the median file size are skipped as likely
        truncated/invalid images.
        Args:
            directory: folder to glob for '*.png' (assumed to end with a separator)
            shape: tuple of (nrow, ncol) each image is resized to
        Returns:
            (np.ndarray of resized images scaled to [0, 1], list of all .png filenames)
        """
        begin = datetime.now()
        spectrogram_filenames = glob.glob(directory + '*.png')
        # Median file size is a cheap proxy for a valid, fully-written image.
        spectrogram_median_file_size = median([os.path.getsize(filename) for filename in spectrogram_filenames])
        spectrogram_vector = []
        for filename in spectrogram_filenames:
            if os.path.getsize(filename) >= spectrogram_median_file_size * 0.8:
                img = cv2.imread(filename)
                # Resize and scale pixel values into [0, 1].
                img = cv2.resize(img, shape) / 255.0
                spectrogram_vector.append(img)
        end = datetime.now()
        print('number of valid spectrograms:', len(spectrogram_filenames))
        print('shape of vector for valid spectrograms:', spectrogram_vector[0].shape)
        print('Time spent to load spectrograms as array: ', (end - begin).total_seconds(), 'seconds')
        return np.asarray(spectrogram_vector), spectrogram_filenames
| 48.751908
| 164
| 0.573554
| 1,489
| 12,773
| 4.668905
| 0.163868
| 0.091772
| 0.070483
| 0.050633
| 0.780207
| 0.748418
| 0.721519
| 0.707422
| 0.700518
| 0.700518
| 0
| 0.023834
| 0.32005
| 12,773
| 262
| 165
| 48.751908
| 0.776626
| 0.031942
| 0
| 0.701031
| 0
| 0
| 0.067815
| 0
| 0
| 0
| 0
| 0.003817
| 0
| 1
| 0.030928
| false
| 0
| 0.082474
| 0
| 0.154639
| 0.06701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5510155018e42ea41653a3ee9aea3bf27bcfbe24
| 46
|
py
|
Python
|
module/color.py
|
solitary-s/python-learn
|
05d3aa5704f74daa91ec9129bba86a77aea3724b
|
[
"Apache-2.0"
] | 1
|
2020-08-08T02:18:24.000Z
|
2020-08-08T02:18:24.000Z
|
module/color.py
|
solitary-s/python-learn
|
05d3aa5704f74daa91ec9129bba86a77aea3724b
|
[
"Apache-2.0"
] | null | null | null |
module/color.py
|
solitary-s/python-learn
|
05d3aa5704f74daa91ec9129bba86a77aea3724b
|
[
"Apache-2.0"
] | null | null | null |
def printColor():
    """Write the fixed message "color is green" to standard output."""
    message = "color is green"
    print(message)
| 15.333333
| 27
| 0.652174
| 6
| 46
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195652
| 46
| 2
| 28
| 23
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
9b4f33d2156dcf4824efcf28e67521c5e42f81dd
| 23
|
py
|
Python
|
yanrin.py
|
jaideep2/yanrin
|
ba34f7f1c9d7f43f4800ea5cc25a333553ce9186
|
[
"Apache-2.0"
] | null | null | null |
yanrin.py
|
jaideep2/yanrin
|
ba34f7f1c9d7f43f4800ea5cc25a333553ce9186
|
[
"Apache-2.0"
] | null | null | null |
yanrin.py
|
jaideep2/yanrin
|
ba34f7f1c9d7f43f4800ea5cc25a333553ce9186
|
[
"Apache-2.0"
] | null | null | null |
from yanrin import app
| 11.5
| 22
| 0.826087
| 4
| 23
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9b95d9631dfddf995ea31a12805742d8ca0bfaca
| 166
|
py
|
Python
|
botlistbot/lib/__init__.py
|
anandpskerala/BotListBot
|
4ac1b1f7c4f4d251c80a24306542001f40b85216
|
[
"MIT"
] | 66
|
2017-07-21T07:16:14.000Z
|
2022-02-13T03:52:52.000Z
|
botlistbot/lib/__init__.py
|
anandpskerala/BotListBot
|
4ac1b1f7c4f4d251c80a24306542001f40b85216
|
[
"MIT"
] | 10
|
2017-10-20T00:51:43.000Z
|
2021-06-02T00:07:32.000Z
|
botlistbot/lib/__init__.py
|
anandpskerala/BotListBot
|
4ac1b1f7c4f4d251c80a24306542001f40b85216
|
[
"MIT"
] | 44
|
2018-01-05T15:01:47.000Z
|
2022-02-10T20:32:41.000Z
|
from .inlinecallbackbutton import InlineCallbackButton
from .inlinecallbackhandler import InlineCallbackHandler
from .inlineactionhandler import InlineActionHandler
| 33.2
| 56
| 0.903614
| 12
| 166
| 12.5
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078313
| 166
| 4
| 57
| 41.5
| 0.980392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9bbd66fe68bd8fc1ddead077eb18bf9bcd9d1b73
| 9,815
|
py
|
Python
|
src/model.py
|
mmin0/SigDFP
|
e2a93faa658741d693b8070bcc7038d2fb7c3e74
|
[
"MIT"
] | null | null | null |
src/model.py
|
mmin0/SigDFP
|
e2a93faa658741d693b8070bcc7038d2fb7c3e74
|
[
"MIT"
] | null | null | null |
src/model.py
|
mmin0/SigDFP
|
e2a93faa658741d693b8070bcc7038d2fb7c3e74
|
[
"MIT"
] | 1
|
2022-02-28T23:26:23.000Z
|
2022-02-28T23:26:23.000Z
|
import torch
import torch.nn as nn
class Action(nn.Module):
    """Feed-forward policy network that also simulates the controlled SDE path."""
    def __init__(self, args, mode):
        """
        input:
            args.in_dim -- the dimension of (t, X_t)
            args.neurons -- the size of hidden layers
            args.out_dim -- the dimension of alpha
        """
        super(Action, self).__init__()
        # +1 because the conditional distribution m_t is concatenated in one_step.
        indim = args.in_dim + 1
        self.linear = nn.ModuleList([nn.Linear(indim, args.neurons[0])])
        for i in range(len(args.neurons)-1):
            self.linear.append(nn.Linear(args.neurons[i], args.neurons[i+1]))
        self.linear.append(nn.Linear(args.neurons[-1], args.out_dim))
        # `mode` supplies one_step_simulation for the model-specific dynamics.
        self.mode = mode
    def forward(self, bm, cn, m, initial):
        """
        input:
            bm -- tensor(batch, N, dim), brownian increments
            cn -- tensor(batch, N+1, dim), common noise
            m -- tensor(batch, N+1, dim), mu from previous step
            initial -- starting point
        return:
            tensor(batch, N+1, dim), generate paths of controlled SDE
        """
        device = bm.device
        # Per-step controls are recorded for the loss computed afterwards.
        self.strategy = []
        batch, N, _ = bm.size()
        # X holds augmented states (t, X_t): channel 0 is time, channel 1 the state.
        X = torch.zeros(batch, N+1, 2, device=device)
        X[:, 0, 1:] = initial #torch.randn(batch, 1, device=device)
        for i in range(1, N+1):
            X[:, i, 0] = i/N
            # Control computed from the previous augmented state and the current m.
            self.strategy.append(self.one_step(X[:, i-1, :].clone(), m[:, i]))
            X[:, i, 1:] = self.mode.one_step_simulation(X[:, i-1, 1:], m[:, i],
                                                        self.strategy[-1], bm[:, i-1],
                                                        cn[:, i]-cn[:, i-1])
        return X
    def one_step(self, x, mt):
        """
        input:
            x -- the augmented data (t, X_t)
            mt -- conditional distribution
        return:
            alpha -- torch.tensor(batch, dim), control
        """
        # MLP with ReLU hidden activations and a linear output layer.
        x = torch.cat([x, mt], dim=1)
        alpha = torch.relu(self.linear[0](x))
        for i in range(1, len(self.linear)-1):
            alpha = torch.relu(self.linear[i](alpha))
        alpha = self.linear[-1](alpha)
        return alpha
## search for nonconstant EQ
class Action1(nn.Module):
    """Policy network conditioned on a per-agent type vector (heterogeneous agents)."""
    def __init__(self, args, mode):
        """
        input:
            args.in_dim -- the dimension of typeVec
            args.neurons -- the size of hidden layers
            args.out_dim -- the dimension of alpha
        """
        super(Action1, self).__init__()
        indim = args.in_dim + 3 # type vector dimensions +(t,x,m)
        self.linear = nn.ModuleList([nn.Linear(indim, args.neurons[0])])
        # Batch-normalise the type vector before concatenation in one_step.
        self.bn = nn.BatchNorm1d(args.in_dim)
        for i in range(len(args.neurons)-1):
            self.linear.append(nn.Linear(args.neurons[i], args.neurons[i+1]))
        self.linear.append(nn.Linear(args.neurons[-1], args.out_dim))
        self.mode = mode
    def forward(self, bm, cn, typeVec, m, initial):
        """
        input:
            typeVec -- type vector
            bm -- tensor(batch, N, dim), brownian increments
            cn -- tensor(batch, N+1, dim), common noise
            m -- tensor(batch, N+1, dim), mu from previous step
            initial -- starting point
        return:
            tensor(batch, N+1, dim), generate paths of controlled SDE
        """
        device = bm.device
        self.strategy = []
        batch, N, _ = bm.size()
        X = torch.zeros(batch, N+1, 2, device=device)
        X[:, 0, 1:] = initial #torch.randn(batch, 1, device=device)
        self.mode.initialize(typeVec)
        for i in range(1, N+1):
            # Unlike Action, time is scaled by the mode's horizon T.
            X[:, i, 0] = i/N*self.mode.T
            # NOTE(review): passes m[:, i-1] here while Action passes m[:, i] — confirm intended.
            self.strategy.append(self.one_step(typeVec, X[:, i-1].clone(), m[:, i-1]))
            X[:, i, 1:] = self.mode.one_step_simulation(X[:, i-1, 1:], m[:, i],
                                                        self.strategy[-1], bm[:, i-1],
                                                        cn[:, i]-cn[:, i-1])
        return X
    def one_step(self, typeVec, x, m):
        """
        input:
            typeVec -- type vector
            x -- the augmented data (t, X_t)
            m -- conditional distribution value
        return:
            alpha -- torch.tensor(batch, dim), control
        """
        x = torch.cat([self.bn(typeVec), x, m], dim=1)
        alpha = torch.relu(self.linear[0](x))
        for i in range(1, len(self.linear)-1):
            alpha = torch.relu(self.linear[i](alpha))
        alpha = self.linear[-1](alpha)
        return alpha
class Action2(nn.Module):
    """Policy network for the invest/consumption model: two heads output (pi, c)."""
    def __init__(self, args, mode):
        """
        used for invest consumption model, since this model produce strategy (pi, c)
        input:
            args.in_dim -- the dimension of typeVec
            args.neurons -- the size of hidden layers
            args.out_dim -- the dimension of alpha
        """
        super(Action2, self).__init__()
        # type vector + (t, x, mx, mc) are concatenated as the network input.
        indim = args.in_dim + 4
        self.bn = nn.BatchNorm1d(args.in_dim)
        self.bnc = nn.BatchNorm1d(args.in_dim)
        # Investment head.
        self.linear = nn.ModuleList([nn.Linear(indim, args.neurons[0])])
        for i in range(len(args.neurons)-1):
            self.linear.append(nn.Linear(args.neurons[i], args.neurons[i+1]))
        self.linear.append(nn.Linear(args.neurons[-1], 1))
        # Consumption head.
        self.linearc = nn.ModuleList([nn.Linear(indim, args.neurons[0])])
        for i in range(len(args.neurons)-1):
            self.linearc.append(nn.Linear(args.neurons[i], args.neurons[i+1]))
        self.linearc.append(nn.Linear(args.neurons[-1], 1))
        self.mode = mode
    def forward(self, bm, cn, typeVec, mx, mc, initial):
        """
        input:
            bm -- tensor(batch, N, dim), brownian increments
            cn -- tensor(batch, N+1, dim), common noise
            typeVec -- type vector
            mx -- tensor(batch, N+1, dim), from previous step
            mc -- tensor(batch, N, dim), from previous step
            initial -- starting point
        return:
            tensor(batch, N+1, dim), generate paths of controlled SDE
        """
        device = bm.device
        self.strategy = []
        batch, N, _ = bm.size()
        X = torch.zeros(batch, N+1, 2, device=device)
        X[:, 0, 1:] = initial #torch.randn(batch, 1, device=device)
        self.mode.initialize(typeVec)
        for i in range(1, N+1):
            X[:, i, 0] = i/N*self.mode.T
            pi = self.one_step_pi(typeVec, X[:, i-1, :].clone(), mx[:, i-1], mc[:, i-1])
            c = self.one_step_c(typeVec, X[:, i-1, :].clone(), mx[:, i-1], mc[:, i-1])
            self.strategy.append(torch.cat([pi, c],
                                           dim=1))
            # relu shift keeps the simulated state at or above 0.0001.
            X[:, i, 1:] = torch.relu(self.mode.one_step_simulation(X[:, i-1, 1:].clone(),
                                                                   pi, c, bm[:, i-1],
                                                                   cn[:, i]-cn[:, i-1])-0.0001)+0.0001
        return X
    def one_step_pi(self, typeVec, x, mt, ct):
        """
        input:
            x -- the augmented data (t, X_t)
            mt -- conditional averaged state
            ct -- conditional averaged consumption
        return:
            alpha -- torch.tensor(batch, dim), control
        """
        pi = torch.cat([self.bn(typeVec), x, mt, ct], dim=1)
        #pi = (pi-torch.mean(pi, dim=0))/torch.std(pi, dim=0)
        for i in range(len(self.linear)-1):
            pi = torch.relu(self.linear[i](pi))
        pi = self.linear[-1](pi)
        return pi
    def one_step_c(self, typeVec, x, mt, ct):
        """Consumption head: exp of the network output keeps consumption positive."""
        c = torch.cat([self.bnc(typeVec), x, mt, ct], dim=1)
        for i in range(len(self.linearc)-1):
            c = torch.relu(self.linearc[i](c))
        c = self.linearc[-1](c)
        return torch.exp(c) #torch.relu(c-0.00001)+0.00001
class LossTotal(nn.Module):
    """Total expected control cost: terminal cost plus a Riemann sum of running costs."""
    def __init__(self, mode, depth, dim=1):
        """
        input:
            mode -- which example we are running (supplies terminal/running/T)
            depth -- signature depth (stored, not used here)
            dim -- state dimension (stored, not used here)
        """
        super(LossTotal, self).__init__()
        self.mode = mode
        self.dim = dim
        self.depth = depth
    def forward(self, X, m, strategy):
        """Return the batch mean of terminal(X_T, m_T) + sum_i running(...) * T/N.

        X -- augmented path; m -- distribution interaction process from the
        previous round of simulation; strategy -- list of N per-step controls.
        """
        num_steps = len(strategy)
        total = self.mode.terminal(X[:, -1, 1:], m[:, -1])
        for step in range(num_steps):
            running = self.mode.running(X[:, step, 1:], m[:, step], strategy[step])
            total = total + running / num_steps * self.mode.T
        return torch.mean(total)
class LossTotal2(nn.Module):
    """Variant of LossTotal whose running cost also takes the averaged consumption mc."""
    def __init__(self, mode, depth, dim=1):
        """
        input:
            mode -- which example we are running (supplies terminal/running/T)
            depth -- signature depth (stored, not used here)
            dim -- state dimension (stored, not used here)
        """
        super(LossTotal2, self).__init__()
        self.mode = mode
        self.dim = dim
        self.depth = depth
    def forward(self, X, m, strategy, mc):
        """Return the batch mean of terminal(X_T, m_T) + sum_i running(...) * T/N.

        X -- augmented path; m -- distribution interaction process from the
        previous round; strategy -- list of N per-step controls; mc -- the
        conditional averaged consumption process.
        """
        num_steps = len(strategy)
        total = self.mode.terminal(X[:, -1, 1:], m[:, -1])
        for step in range(num_steps):
            running = self.mode.running(X[:, step, 1:], m[:, step], strategy[step], mc[:, step])
            total = total + running / num_steps * self.mode.T
        return torch.mean(total)
| 34.804965
| 106
| 0.490474
| 1,259
| 9,815
| 3.753773
| 0.104051
| 0.011849
| 0.038087
| 0.030258
| 0.851037
| 0.83157
| 0.77592
| 0.750529
| 0.738468
| 0.723022
| 0
| 0.022961
| 0.365461
| 9,815
| 281
| 107
| 34.928826
| 0.73587
| 0.268161
| 0
| 0.592308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107692
| false
| 0
| 0.015385
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fd3f0d71cadcad3c77f308fa5aea0eddc32dbcab
| 1,903
|
py
|
Python
|
music_trees/utils/train.py
|
hugofloresgarcia/music-trees
|
51c47f1fc924936c25b84bf35edff52d88e9cccd
|
[
"MIT"
] | 25
|
2021-07-16T10:24:35.000Z
|
2022-03-25T04:46:25.000Z
|
music_trees/utils/train.py
|
loretoparisi/music-trees
|
0ea3f49c2bafa97efd7352728ca687d7fee9e025
|
[
"MIT"
] | 4
|
2021-11-12T10:25:29.000Z
|
2021-12-01T16:11:13.000Z
|
music_trees/utils/train.py
|
loretoparisi/music-trees
|
0ea3f49c2bafa97efd7352728ca687d7fee9e025
|
[
"MIT"
] | 3
|
2021-11-09T02:40:03.000Z
|
2022-03-04T19:26:39.000Z
|
""" utils for training """
import torch
def batch_detach_cpu(x):
    """Detach every tensor in `x` from the autograd graph, then move it to CPU."""
    detached = batch_detach(x)
    return batch_cpu(detached)
def batch_detach(nested_collection):
    """Recursively detach every tensor in a nested dict/list, in place.

    Values that are neither tensors nor nested dicts/lists are left alone;
    the same collection object is returned.
    """
    if isinstance(nested_collection, dict):
        for key, value in nested_collection.items():
            if isinstance(value, torch.Tensor):
                nested_collection[key] = value.detach()
            elif isinstance(value, (dict, list)):
                nested_collection[key] = batch_detach(value)
    if isinstance(nested_collection, list):
        for index, value in enumerate(nested_collection):
            if isinstance(value, torch.Tensor):
                nested_collection[index] = value.detach()
            elif isinstance(value, (dict, list)):
                nested_collection[index] = batch_detach(value)
    return nested_collection
def batch_cpu(nested_collection):
    """Recursively move every tensor in a nested dict/list structure to CPU.

    Mutates *nested_collection* in place and returns it: every
    torch.Tensor found inside nested dicts/lists is replaced by its
    ``.cpu()`` counterpart.  Values of other types are left untouched;
    tensors already on the CPU are returned as-is by ``.cpu()``.

    Fix: the dict branch used a plain ``if isinstance(v, dict)`` after the
    tensor check instead of ``elif`` (harmless, since a Tensor is never a
    dict, but inconsistent with the list branch); the branches are now
    uniform.
    """
    if isinstance(nested_collection, dict):
        for k, v in nested_collection.items():
            if isinstance(v, torch.Tensor):
                nested_collection[k] = v.cpu()
            elif isinstance(v, dict):
                nested_collection[k] = batch_cpu(v)
            elif isinstance(v, list):
                nested_collection[k] = batch_cpu(v)
    if isinstance(nested_collection, list):
        for i, v in enumerate(nested_collection):
            if isinstance(v, torch.Tensor):
                nested_collection[i] = v.cpu()
            elif isinstance(v, dict):
                nested_collection[i] = batch_cpu(v)
            elif isinstance(v, list):
                nested_collection[i] = batch_cpu(v)
    return nested_collection
| 35.240741
| 54
| 0.591172
| 230
| 1,903
| 4.726087
| 0.152174
| 0.353266
| 0.071757
| 0.103036
| 0.827967
| 0.827967
| 0.769089
| 0.769089
| 0.574057
| 0.384545
| 0
| 0
| 0.309511
| 1,903
| 53
| 55
| 35.90566
| 0.827245
| 0.086705
| 0
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.025641
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fd4b3e880c378a41980d826d953a114b91893fbc
| 24
|
py
|
Python
|
src/yui/doc/__init__.py
|
dinoboff/yuidoc
|
f1f607876c24a09c6a9bf649f25bbe4c896901a5
|
[
"BSD-3-Clause"
] | 2
|
2015-12-18T11:06:32.000Z
|
2016-05-08T18:52:57.000Z
|
src/yui/doc/__init__.py
|
dinoboff/yuidoc
|
f1f607876c24a09c6a9bf649f25bbe4c896901a5
|
[
"BSD-3-Clause"
] | null | null | null |
src/yui/doc/__init__.py
|
dinoboff/yuidoc
|
f1f607876c24a09c6a9bf649f25bbe4c896901a5
|
[
"BSD-3-Clause"
] | null | null | null |
from yuidoc import main
| 12
| 23
| 0.833333
| 4
| 24
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bd14725a9549c9f72568bb567d206925b36c36ff
| 1,994
|
py
|
Python
|
tests/test_json_to_mako.py
|
melexis/json-to-mako
|
244f99e1ba11001ad74fa76a00f7c5acfcddddde
|
[
"Apache-2.0"
] | null | null | null |
tests/test_json_to_mako.py
|
melexis/json-to-mako
|
244f99e1ba11001ad74fa76a00f7c5acfcddddde
|
[
"Apache-2.0"
] | null | null | null |
tests/test_json_to_mako.py
|
melexis/json-to-mako
|
244f99e1ba11001ad74fa76a00f7c5acfcddddde
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from mlx.json_to_mako import json_to_mako_wrapper
class TestJsonToMako(TestCase):
    """End-to-end tests for the json_to_mako_wrapper CLI entry point."""

    def _run_expecting_exit(self, arguments, expected_code):
        """Invoke the wrapper and verify it raises SystemExit with *expected_code*."""
        with self.assertRaises(SystemExit) as context:
            json_to_mako_wrapper(arguments)
        self.assertEqual(expected_code, context.exception.code)

    def test_help(self):
        self._run_expecting_exit(['--help'], 0)

    def test_version(self):
        self._run_expecting_exit(['--version'], 0)

    def test_example_single_input(self):
        # Two --input flags plus template and output: should run to completion.
        arguments = ['--input', 'example/family.json',
                     '--input', 'example/work.json',
                     '--template', 'example/address-book.mako',
                     '--output', 'tests/address-book.html']
        json_to_mako_wrapper(arguments)

    def test_example_dual_input(self):
        arguments = ['--input', 'example/family.json',
                     '--template', 'example/address-book.mako',
                     '--output', 'tests/address-book.html']
        json_to_mako_wrapper(arguments)

    def test_example_no_input(self):
        # Missing --input: argparse exits with status 2.
        self._run_expecting_exit(['--template', 'example/address-book.mako',
                                  '--output', 'tests/address-book.html'], 2)

    def test_example_no_template(self):
        self._run_expecting_exit(['--input', 'example/family.json',
                                  '--input', 'example/work.json',
                                  '--output', 'tests/address-book.html'], 2)

    def test_example_no_output(self):
        self._run_expecting_exit(['--input', 'example/family.json',
                                  '--input', 'example/work.json',
                                  '--template', 'example/address-book.mako'], 2)
| 42.425532
| 77
| 0.567202
| 213
| 1,994
| 5.107981
| 0.183099
| 0.049632
| 0.082721
| 0.125
| 0.859375
| 0.859375
| 0.824449
| 0.824449
| 0.754596
| 0.754596
| 0
| 0.003584
| 0.300401
| 1,994
| 46
| 78
| 43.347826
| 0.776344
| 0
| 0
| 0.621622
| 0
| 0
| 0.228185
| 0.096289
| 0
| 0
| 0
| 0
| 0.27027
| 1
| 0.189189
| false
| 0
| 0.054054
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd4251fe9aad607af485a9071a3bf71ec82ec90b
| 115
|
py
|
Python
|
testing/fibonacci/fibonacci/tests/test_fibonacci.py
|
asteroidshappen/school2021
|
7533c8524fa53127c4da1ffe0ad83b09eb59107a
|
[
"MIT"
] | 252
|
2021-05-18T11:58:17.000Z
|
2022-03-12T06:48:52.000Z
|
testing/fibonacci/fibonacci/tests/test_fibonacci.py
|
asteroidshappen/school2021
|
7533c8524fa53127c4da1ffe0ad83b09eb59107a
|
[
"MIT"
] | 44
|
2021-05-21T14:28:34.000Z
|
2021-07-12T22:36:06.000Z
|
testing/fibonacci/fibonacci/tests/test_fibonacci.py
|
asteroidshappen/school2021
|
7533c8524fa53127c4da1ffe0ad83b09eb59107a
|
[
"MIT"
] | 128
|
2021-05-24T18:32:54.000Z
|
2022-03-26T11:24:16.000Z
|
def test_initial():
    """The Fibonacci base cases: F(0) == 0 and F(1) == 1."""
    from fibonacci import fibonacci
    for argument, expected in ((0, 0), (1, 1)):
        assert fibonacci(argument) == expected
| 19.166667
| 35
| 0.669565
| 15
| 115
| 5.066667
| 0.6
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.234783
| 115
| 5
| 36
| 23
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1ff218d8aa593f1feed74d0c894e9603dd861e8a
| 23
|
py
|
Python
|
examples/example_module/__init__.py
|
lukefx/stardust
|
4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad
|
[
"MIT"
] | 2
|
2020-11-27T10:30:38.000Z
|
2020-12-22T16:48:49.000Z
|
examples/example_module/__init__.py
|
lukefx/stardust
|
4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad
|
[
"MIT"
] | null | null | null |
examples/example_module/__init__.py
|
lukefx/stardust
|
4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad
|
[
"MIT"
] | null | null | null |
from .app import serve
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1f2884dc7bba54aeefba262b33c0b7e8421bb006
| 29
|
py
|
Python
|
GitMarco/torch/__init__.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
GitMarco/torch/__init__.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
GitMarco/torch/__init__.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
from GitMarco.torch import *
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1f2d5dcd807ddff5d5933eb12aaa869d74409ece
| 24
|
py
|
Python
|
src/lib/StringIO.py
|
blockpy-edu/skulpt
|
dc70288aedcd7670605ef28f8525546440b39f93
|
[
"MIT"
] | 4
|
2020-01-19T01:42:06.000Z
|
2021-05-13T09:51:38.000Z
|
src/lib/StringIO.py
|
blockpy-edu/skulpt
|
dc70288aedcd7670605ef28f8525546440b39f93
|
[
"MIT"
] | null | null | null |
src/lib/StringIO.py
|
blockpy-edu/skulpt
|
dc70288aedcd7670605ef28f8525546440b39f93
|
[
"MIT"
] | 4
|
2019-10-16T21:50:53.000Z
|
2021-01-11T06:25:57.000Z
|
from io import StringIO
| 12
| 23
| 0.833333
| 4
| 24
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1f3570987845d9f9176610a0f0264f965d756eb1
| 2,395
|
py
|
Python
|
original_program/test_bowling_program.py
|
pau13-loop/bowling-Game
|
b82ab1b67ead5feafed095fcc7649382781fb9ea
|
[
"MIT"
] | null | null | null |
original_program/test_bowling_program.py
|
pau13-loop/bowling-Game
|
b82ab1b67ead5feafed095fcc7649382781fb9ea
|
[
"MIT"
] | null | null | null |
original_program/test_bowling_program.py
|
pau13-loop/bowling-Game
|
b82ab1b67ead5feafed095fcc7649382781fb9ea
|
[
"MIT"
] | null | null | null |
from original_program.bowling_program import bowling_game
import pytest
# INTEGRES tests
def test_integres():
    """Games given as plain integers (no strike/spare/miss markers)."""
    for game, expected in ((12345123451234512345, 60), (32611661144527814225, 71)):
        assert bowling_game(game) == expected
# SPARES tests
def test_spare():
    """Games containing spares ('/')."""
    for game, expected in (
        ('5/5/5/5/5/5/5/5/5/5/5', 150),
        ('5/324/5/343152424152', 82),
        ('3/4/5/3/1/421/8/2/6/7', 136),
    ):
        assert bowling_game(game) == expected
# NULLS tests
def test_null():
    """Games containing misses ('-')."""
    for game, expected in (('9-9-9-9-9-9-9-9-9-9-', 90), ('2-452763----4245326-', 55)):
        assert bowling_game(game) == expected
# STRIKES tests
# First throw is a strike and the following next two throws the following examples
#!First throw after a strike is an integer
#INT - INT
def test_strike_int_int():
    """Strike followed by two integer throws."""
    for game, expected in (
        ('X24X17332542143517', 88),
        ('42X4225X5224524536', 90),
        ('3518X54X24X71X31', 111),
    ):
        assert bowling_game(game) == expected
#INT - NULL
def test_strike_int_null():
    """Strike followed by an integer and then a miss."""
    observed = bowling_game('X6-52X7-4245722662')
    assert observed == 93
#INT - SPARE
def test_strike_int_spare():
    """Strike followed by an integer and then a spare."""
    observed = bowling_game('X5/35X2/4235712116')
    assert observed == 107
#! First throw after a strike is a null
#NULL -INT
def test_strike_null_integer():
    """Strike followed by a miss and then an integer."""
    observed = bowling_game('X-471X-84215724571')
    assert observed == 90
#NULL - SPARE
def test_strike_null_spare():
    """Strike followed by a miss and then a spare."""
    observed = bowling_game('X-/42X-/5215423681')
    assert observed == 112
#NULL -NULL
def test_strike_null_null():
    """Strike followed by two misses."""
    observed = bowling_game('X--42X--5234411836')
    assert observed == 63
#!First throw after strike is another strike
#STRIKE - INT
def test_strike_strike_integer():
    """Strike followed by another strike, then an integer throw."""
    assert bowling_game('XX6272X6235721662') == 119
    assert bowling_game('XX5326XX52523651') == 130
#STRIKE - STRIKE
def test_strike_strike_strike():
    """A perfect game: twelve strikes score 300.

    Fix: the original `def` line was commented out, which silently
    attached this assert to test_strike_strike_integer above; restoring
    the definition gives the perfect-game case its own test.
    """
    assert bowling_game('XXXXXXXXXXXX') == 300
#STRIKE - NULL
def test_strike_strike_null():
    """Strike followed by another strike, then a miss."""
    assert bowling_game('XX-625XX-5136235') == 109
# MIXED random test cases
def test_mixed():
    """Assorted games mixing strikes, spares, misses and integer throws."""
    for game, expected in (
        ('625/6353X436/2441-5', 93),
        ('26X3/4281X422/5/2/5', 121),
        ('5/3/X9---2/4/XXX4/', 169),
        ('XX4/4/3/XX2-1-XX9', 157),
        ('317/4/-79/532/X4/XXX', 148),
        ('X7/326/XX5/435/XXX', 174),
        ('13635/6/8/X6/545/X7/', 151),
        ('4/6/XX9/X8/XXXXX', 235),
        ('4/X-/4/-/XX7/4/7/X', 182),
        ('2/6/X639/6/-4XXXXX', 184),
    ):
        assert bowling_game(game) == expected
| 26.318681
| 82
| 0.698121
| 369
| 2,395
| 4.360434
| 0.341463
| 0.205096
| 0.306401
| 0.019888
| 0.068987
| 0.042884
| 0.013052
| 0.013052
| 0.013052
| 0.006837
| 0
| 0.203859
| 0.156159
| 2,395
| 90
| 83
| 26.611111
| 0.592281
| 0.17286
| 0
| 0
| 0
| 0.023256
| 0.248853
| 0.021418
| 0
| 0
| 0
| 0
| 0.674419
| 1
| 0.27907
| true
| 0
| 0.046512
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1f665108ab190f053cff94aa42d8e13643b1d6f6
| 171
|
py
|
Python
|
backend/local_basket/location/admin.py
|
localbasket/local_basket
|
8c34cc25de95c9c0d9431b86546e94dd1c97280f
|
[
"MIT"
] | null | null | null |
backend/local_basket/location/admin.py
|
localbasket/local_basket
|
8c34cc25de95c9c0d9431b86546e94dd1c97280f
|
[
"MIT"
] | null | null | null |
backend/local_basket/location/admin.py
|
localbasket/local_basket
|
8c34cc25de95c9c0d9431b86546e94dd1c97280f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Pin
from .models import Location
# Register your models here.
# Same registrations as before, expressed as a loop (order preserved).
for model in (Pin, Location):
    admin.site.register(model)
| 21.375
| 32
| 0.807018
| 25
| 171
| 5.52
| 0.48
| 0.144928
| 0.231884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116959
| 171
| 7
| 33
| 24.428571
| 0.913907
| 0.152047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f1eec2dc3017a1e2e9cb413ad5311f2bae450d0
| 186
|
py
|
Python
|
app/tests/refs/args_dict.py
|
superphy/spfy
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 2
|
2019-05-22T14:29:37.000Z
|
2020-02-13T11:30:46.000Z
|
app/tests/refs/args_dict.py
|
superphy/backend
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 88
|
2017-04-07T21:52:10.000Z
|
2018-03-10T23:12:47.000Z
|
app/tests/refs/args_dict.py
|
superphy/backend
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 2
|
2017-02-10T21:30:13.000Z
|
2017-06-05T22:30:17.000Z
|
# Reference argument dictionary used by the tests (keys mirror CLI flags).
args_dict = {
    'i': '/home/kevin/dev/fresh/backend/app/tests/ecoli/GCA_001894495.1_ASM189449v1_genomic.fna',
    'disable_vf': False,
    'pi': 90,
    'disable_amr': False,
    'disable_serotype': False,
}
| 186
| 186
| 0.752688
| 28
| 186
| 4.75
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108571
| 0.05914
| 186
| 1
| 186
| 186
| 0.651429
| 0
| 0
| 0
| 0
| 1
| 0.668449
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2f6f9bfcd066c5e9bd90c5e63d28d02fd4053b73
| 34
|
py
|
Python
|
tflib/losses/__init__.py
|
yaojia1/AttGAN-final
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 581
|
2018-05-06T05:15:05.000Z
|
2022-03-29T08:13:54.000Z
|
tflib/losses/__init__.py
|
yaojia1/darknet_my
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 52
|
2018-05-11T09:33:30.000Z
|
2022-03-24T04:27:07.000Z
|
tflib/losses/__init__.py
|
yaojia1/darknet_my
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 137
|
2018-05-08T14:30:03.000Z
|
2022-02-24T01:50:37.000Z
|
from tflib.losses.losses import *
| 17
| 33
| 0.794118
| 5
| 34
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f89c5479d8e470d54e1827f781036a8576e75d7
| 97
|
py
|
Python
|
luffy/models/layers/__init__.py
|
Fei-Wang/dl-pytorch
|
a7672603e2de7824d0ff7e97b69dedad3fd9d476
|
[
"MIT"
] | null | null | null |
luffy/models/layers/__init__.py
|
Fei-Wang/dl-pytorch
|
a7672603e2de7824d0ff7e97b69dedad3fd9d476
|
[
"MIT"
] | null | null | null |
luffy/models/layers/__init__.py
|
Fei-Wang/dl-pytorch
|
a7672603e2de7824d0ff7e97b69dedad3fd9d476
|
[
"MIT"
] | null | null | null |
from .activation import *
from .attention import *
from .mlp import *
from .transformer import *
| 19.4
| 26
| 0.752577
| 12
| 97
| 6.083333
| 0.5
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164948
| 97
| 4
| 27
| 24.25
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
85df6888fe72b10e9504f6ce2afc6bc2c72cc9d7
| 5,327
|
py
|
Python
|
posts/tests.py
|
nurettinabaci/Tower-Django-Blog
|
d45b7efac0d3c8fdf5ad0ec1cf2253f6c6d912a0
|
[
"MIT"
] | null | null | null |
posts/tests.py
|
nurettinabaci/Tower-Django-Blog
|
d45b7efac0d3c8fdf5ad0ec1cf2253f6c6d912a0
|
[
"MIT"
] | null | null | null |
posts/tests.py
|
nurettinabaci/Tower-Django-Blog
|
d45b7efac0d3c8fdf5ad0ec1cf2253f6c6d912a0
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Create your tests here.
# Path to the chromedriver binary passed to webdriver.Chrome in AccountTestCase.
# NOTE(review): machine-specific Windows path — assumes a local Chrome install;
# confirm before running these tests elsewhere.
WEBDRIVER_PATH = "C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe"
class AccountTestCase(LiveServerTestCase):
    """Browser-level tests for login and registration via Selenium.

    Requires a chromedriver binary at WEBDRIVER_PATH and an application
    server reachable at the hard-coded http://127.0.0.1:8000/ URLs.

    Refactor: the four test methods repeated identical WebDriverWait
    stanzas; they now share the private _wait_for_fields helper
    (behavior unchanged).
    """

    def setUp(self):
        # Fresh browser session for every test.
        self.driver = webdriver.Chrome(WEBDRIVER_PATH)
        super(AccountTestCase, self).setUp()

    def tearDown(self):
        self.driver.close()
        super(AccountTestCase, self).tearDown()

    def _wait_for_fields(self, locate_by, field_names):
        """Wait up to 20s for each named form field to appear.

        Mirrors the original inline waits: any exception is printed and
        swallowed, so the following find_element call surfaces the failure.
        """
        try:
            for field_name in field_names:
                WebDriverWait(self.driver, 20).until(
                    EC.presence_of_element_located((locate_by, field_name)))
        except Exception as e:
            print(e)

    def test_existing_user_login(self):
        """Logging in with the configured credentials shows a Logout link."""
        self.driver.get('http://127.0.0.1:8000/login/')
        self._wait_for_fields(By.ID, ('username', 'password'))
        self.driver.find_element_by_id('username').send_keys(settings.LOGIN_USERNAME)
        self.driver.find_element_by_id('password').send_keys(settings.LOGIN_PASSWD)
        self.driver.find_element_by_id('loginButton').click()
        page = self.driver.page_source
        self.assertIn("Logout", page)

    def test_non_existing_user_login(self):
        """Logging in with unknown credentials shows the error message."""
        self.driver.get('http://127.0.0.1:8000/login/')
        self._wait_for_fields(By.ID, ('username', 'password'))
        self.driver.find_element_by_id('username').send_keys("fadsfadsf")
        self.driver.find_element_by_id('password').send_keys("fadsfadsf")
        self.driver.find_element_by_id('loginButton').click()
        page = self.driver.page_source
        self.assertIn("Username or password incorrect!", page)

    def test_existing_user_register(self):
        """Registering an existing username is rejected with a duplicate error."""
        self.driver.get('http://127.0.0.1:8000/register/')
        self._wait_for_fields(By.NAME, ('username', 'email', 'password1', 'password2'))
        self.driver.find_element_by_name("username").send_keys(settings.LOGIN_USERNAME)
        self.driver.find_element_by_name("email").send_keys(settings.LOGIN_USERNAME + "@hotmail.com")
        self.driver.find_element_by_name("password1").send_keys("password11")
        self.driver.find_element_by_name("password2").send_keys("password11")
        self.driver.find_element_by_id('registerButton').click()
        # Rejection leaves the registration form on screen.
        self._wait_for_fields(By.NAME, ('username', 'email', 'password1', 'password2'))
        page = self.driver.page_source
        self.assertIn("A user with that username already exists.", page)

    def test_new_user_register(self):
        """Registering a new username succeeds and redirects to the login form."""
        register_username = "adsfadsf"
        self.driver.get('http://127.0.0.1:8000/register/')
        self._wait_for_fields(By.NAME, ('username', 'email', 'password1', 'password2'))
        self.driver.find_element_by_name("username").send_keys(register_username)
        self.driver.find_element_by_name("email").send_keys(register_username + "@hotmail.com")
        self.driver.find_element_by_name("password1").send_keys("password21*")
        self.driver.find_element_by_name("password2").send_keys("password21*")
        self.driver.find_element_by_id('registerButton').click()
        # Success redirects to the login page (ID-based fields).
        self._wait_for_fields(By.ID, ('username', 'password'))
        page = self.driver.page_source
        self.assertIn(f"You successfuly created an account for {register_username}.", page)
| 45.144068
| 101
| 0.651399
| 641
| 5,327
| 5.191888
| 0.159126
| 0.132212
| 0.124399
| 0.135216
| 0.770433
| 0.766526
| 0.766526
| 0.766526
| 0.754507
| 0.704928
| 0
| 0.023278
| 0.225831
| 5,327
| 117
| 102
| 45.529915
| 0.783705
| 0.004318
| 0
| 0.653465
| 0
| 0
| 0.13146
| 0.009053
| 0
| 0
| 0
| 0
| 0.039604
| 1
| 0.059406
| false
| 0.158416
| 0.059406
| 0
| 0.128713
| 0.059406
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c80ae21a079fe5404d1c58281eca0f4882f01e5d
| 114
|
py
|
Python
|
reapy/tools/__init__.py
|
tomas1808/reapy
|
f24a53bf226dbfa82c8dd83f6be88477ab1636e9
|
[
"MIT"
] | null | null | null |
reapy/tools/__init__.py
|
tomas1808/reapy
|
f24a53bf226dbfa82c8dd83f6be88477ab1636e9
|
[
"MIT"
] | null | null | null |
reapy/tools/__init__.py
|
tomas1808/reapy
|
f24a53bf226dbfa82c8dd83f6be88477ab1636e9
|
[
"MIT"
] | null | null | null |
"""Define tools such as Program and custom json module."""
import reapy
from .inside_reaper import inside_reaper
| 22.8
| 58
| 0.789474
| 17
| 114
| 5.176471
| 0.823529
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 114
| 4
| 59
| 28.5
| 0.897959
| 0.45614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c80edbf1998847dc4a3eafbd0a951f3051d16e89
| 35
|
py
|
Python
|
appear/commands/test.py
|
zacpez/appear
|
766b7989da82dcf5bcbad718e8cf43b74e21e249
|
[
"Apache-2.0"
] | null | null | null |
appear/commands/test.py
|
zacpez/appear
|
766b7989da82dcf5bcbad718e8cf43b74e21e249
|
[
"Apache-2.0"
] | 5
|
2021-07-21T00:24:15.000Z
|
2022-02-28T00:41:21.000Z
|
appear/commands/test.py
|
zacpez/appear
|
766b7989da82dcf5bcbad718e8cf43b74e21e249
|
[
"Apache-2.0"
] | null | null | null |
def run_tests():
    """Placeholder test command: prints the literal 'test'."""
    message = 'test'
    print(message)
| 11.666667
| 17
| 0.6
| 5
| 35
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 2
| 18
| 17.5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c85a5beb9a55a2642660f5029b021e1e233daa75
| 54
|
py
|
Python
|
colosseumrl/envs/poker/__init__.py
|
carletonz/colosseumrl
|
878f0459731511d716672aee8a5adafcb96cf0a7
|
[
"MIT"
] | 8
|
2019-06-04T00:22:30.000Z
|
2022-02-14T15:27:17.000Z
|
colosseumrl/envs/poker/__init__.py
|
carletonz/colosseumrl
|
878f0459731511d716672aee8a5adafcb96cf0a7
|
[
"MIT"
] | 1
|
2019-07-23T03:32:59.000Z
|
2019-07-23T06:16:35.000Z
|
colosseumrl/envs/poker/__init__.py
|
carletonz/colosseumrl
|
878f0459731511d716672aee8a5adafcb96cf0a7
|
[
"MIT"
] | 3
|
2020-01-13T08:09:27.000Z
|
2021-11-14T01:30:25.000Z
|
from .KuhnPokerEnvironment import KuhnPokerEnvironment
| 54
| 54
| 0.925926
| 4
| 54
| 12.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 54
| 1
| 54
| 54
| 0.980392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c075a4753066ee3d54ce801336b1d6f2ec6a4fa0
| 2,455
|
py
|
Python
|
maml_rl_taewoo/modifyant.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
maml_rl_taewoo/modifyant.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
maml_rl_taewoo/modifyant.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as elemTree
def modifystr(s, length):
    """Scale every space-separated number in *s* by *length*.

    *s* is a MuJoCo attribute string such as a 3-number ``pos`` or a
    6-number ``fromto``; each component is parsed as a float, multiplied
    by *length*, and the results are re-joined with single spaces.

    Generalized from the original 3-or-6-only branches (which were
    copy-pasted and silently returned None for any other length): the
    output is byte-identical for 3 and 6 components, and any other
    component count now also works.
    """
    return " ".join(str(float(part) * length) for part in s.split(" "))
def modify(reset_args):
    """Rescale the four ant legs in ant.xml and write modified.xml.

    reset_args: sequence of four scale factors, one per leg body
    ("aux_1" .. "aux_4").  Reads vendor/mujoco_models/ant.xml; for each
    aux body, scales its geom "fromto", its child body's "pos", and the
    child's geom "fromto" via modifystr; writes the result to
    vendor/mujoco_models/modified.xml.

    Refactor: the original repeated the same three-attribute update four
    times (once per aux_N); a name->scale mapping drives one loop instead.
    Behavior is unchanged.
    """
    leg_scales = {"aux_" + str(i + 1): reset_args[i] for i in range(4)}
    tree = elemTree.parse("vendor/mujoco_models/ant.xml")
    for body in tree.iter("body"):
        length = leg_scales.get(body.attrib.get("name"))
        if length is None:
            continue
        geom = body.find("geom")
        geom.attrib["fromto"] = modifystr(geom.attrib["fromto"], length)
        body2 = body.find("body")
        body2.attrib["pos"] = modifystr(body2.attrib["pos"], length)
        geom = body2.find("geom")
        geom.attrib["fromto"] = modifystr(geom.attrib["fromto"], length)
    tree.write("vendor/mujoco_models/modified.xml")
# Regenerate modified.xml with the identity scale (1.0) for all four legs.
# NOTE(review): runs at import time — importing this module performs file I/O.
modify([1.0, 1.0, 1.0, 1.0])
| 52.234043
| 228
| 0.535234
| 299
| 2,455
| 4.331104
| 0.160535
| 0.123552
| 0.197683
| 0.111197
| 0.773745
| 0.715058
| 0.70888
| 0.70888
| 0.70888
| 0.70888
| 0
| 0.028652
| 0.274949
| 2,455
| 46
| 229
| 53.369565
| 0.698876
| 0
| 0
| 0.487805
| 0
| 0
| 0.11446
| 0.024847
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.02439
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c07c520c650ee2f0a2705f1e2b40cc7473f3a507
| 42,209
|
py
|
Python
|
cogent/src/codes/tc_code_globalvar.py
|
Lcrypto/CGO2019-AE
|
cba7598b42f10eab655a8907a6db71094c1f558d
|
[
"BSD-4-Clause"
] | 4
|
2019-12-03T16:08:14.000Z
|
2020-08-26T16:38:54.000Z
|
cogent/src/codes/tc_code_globalvar.py
|
Lcrypto/CGO2019-AE
|
cba7598b42f10eab655a8907a6db71094c1f558d
|
[
"BSD-4-Clause"
] | null | null | null |
cogent/src/codes/tc_code_globalvar.py
|
Lcrypto/CGO2019-AE
|
cba7598b42f10eab655a8907a6db71094c1f558d
|
[
"BSD-4-Clause"
] | 1
|
2020-03-03T20:31:37.000Z
|
2020-03-03T20:31:37.000Z
|
import src.generators.tc_helper as tc_helper
#
def tc_gen_global_variables_common(f):
    """Write the global variables shared by all inner-groups to *f*.

    Emits a C comment header plus two ``int`` declarations via
    tc_gen_code_helper_varible: ``size_internal`` and ``size_T3``
    (the latter is for the output, per the original comment).
    """
    #
    # This is for Common Global Variables among Inner-Groups
    #
    f.write("// Common Global Variables\n")
    #
    # To-Do: The Size of "Unit" for Internal Indices might be different among Inner-Groups as well as Tensor Contractions in an Inner-Group.
    #
    tc_gen_code_helper_varible(f, "int", "size_internal")
    # for output
    tc_gen_code_helper_varible(f, "int", "size_T3")
#
#
#
def tc_gen_variables(kernel_number, l_interface_info,
                     l_input_tensors, l_external_idx, l_internal_idx,
                     l_t3_d_decl_var, l_t3_parameters,
                     l_t2_d_decl_var, l_t2_parameters,
                     l_v2_d_decl_var, l_v2_parameters,
                     l_input_strides,
                     l_cuda_malloc, l_device_dynamic,
                     l_var_thread_blocks, l_var_outputs, l_var_outputs_helpers, l_var_input_left, l_var_input_right, l_var_internal,
                     opt_data_type):
    """Collect variable declarations for kernel *kernel_number* into the l_var_* lists.

    Appends the thread-block counter, then delegates to
    tc_gen_variables_outputs / tc_gen_variables_outputs_helpers and, for
    each (left, right) input tensor pair, to the left/right input helpers.
    With more than one internal index (|K| > 1) it records internal offset
    variables; with exactly one (|K| == 1) it instead builds string stride
    expressions ("size_<idx> * ..." or "1" when the internal index comes
    first) for both inputs and appends them to l_input_strides.

    Each element of l_input_tensors is expected to be a pair
    ((name, indices), (name, indices)) — see the data-structure comment in
    tc_gen_global_variables.  The l_* arguments are output accumulators
    filled by this function and its helpers.
    """
    #
    #
    # 1. # of Thread-Blocks
    l_var_thread_blocks.append(["int", "num_thread_blocks_kernel_" + str(kernel_number)])
    # 2. Outputs
    tc_gen_variables_outputs(kernel_number, l_interface_info, l_external_idx, l_t3_d_decl_var, l_t3_parameters, l_cuda_malloc, l_device_dynamic, l_var_outputs, opt_data_type)
    # 3. Outputs-Helpers
    tc_gen_variables_outputs_helpers(kernel_number, l_t3_d_decl_var, l_t3_parameters, l_cuda_malloc, l_device_dynamic, l_var_outputs_helpers)
    # 4. Inputs
    for each_input in l_input_tensors:
        #print ("each_input:", each_input)
        #
        # Left
        #
        tc_gen_variables_input_left(kernel_number, each_input, l_external_idx, l_t2_d_decl_var, l_t2_parameters, l_cuda_malloc, l_device_dynamic, l_var_input_left, opt_data_type)
        #
        # Right
        #
        tc_gen_variables_input_right(kernel_number, each_input, l_external_idx, l_v2_d_decl_var, l_v2_parameters, l_cuda_malloc, l_device_dynamic, l_var_input_right, opt_data_type)
        #
        # |K| > 1
        #
        if len(l_internal_idx) > 1:
            tc_gen_variables_input_internal(l_internal_idx, l_var_internal)
        #
        # |K| == 1
        #
        elif len(l_internal_idx) == 1:
            #
            #
            #
            tmp_input_left = each_input[0]
            tmp_input_right = each_input[1]
            str_stride_left = ""
            str_stride_right = ""
            # Stride of the left input: product of the index sizes that
            # precede the single internal index in its index list.
            idx_count = 0
            for each_idx in tmp_input_left[1]:
                if each_idx == l_internal_idx[0]:
                    break
                else:
                    if idx_count == 0:
                        str_stride_left = "size_" + each_idx
                    else:
                        str_stride_left = str_stride_left + " * size_" + each_idx
                idx_count = idx_count + 1
            # Internal index comes first: stride is 1.
            if idx_count == 0:
                str_stride_left = "1"
            # Same computation for the right input.
            idx_count = 0
            for each_idx in tmp_input_right[1]:
                if each_idx == l_internal_idx[0]:
                    break
                else:
                    if idx_count == 0:
                        str_stride_right = "size_" + each_idx
                    else:
                        str_stride_right = str_stride_right + " * size_" + each_idx
                idx_count = idx_count + 1
            #
            if idx_count == 0:
                str_stride_right = "1"
            #
            # Assumption: Inputs' name are different.
            #
            l_input_strides.append(["stride_int_" + tmp_input_left[0], str_stride_left, "stride_int_" + tmp_input_right[0], str_stride_right])
# To-Do: Need to differentiate all parameters as inputs and outputs.
def tc_gen_global_variables(f, l_input_tensors, l_external_idx, l_internal_idx,
                            l_t3_d_decl_var, l_t3_parameters,
                            l_t3_parameters_nf, l_t2_parameters_nf, l_v2_parameters_nf,
                            l_t3_parameters_f, l_t2_parameters_f, l_v2_parameters_f,
                            l_device_dynamic, l_t2_d_decl_var, l_v2_d_decl_var, l_t2_parameters, l_v2_parameters,
                            l_cuda_malloc, possible_diff, kernel_number):
    """Write all file-scope (global) variable declarations for one kernel to *f*.

    Delegates to the tc_gen_global_variables_* helpers for sizes, the
    output tensor, output-related arrays and, per input tensor pair, the
    left/right inputs; with more than one internal index it also emits the
    internal-index offset globals.  The l_* arguments are accumulators
    passed through to those helpers.

    NOTE(review): str_size_T3_blk / str_size_T3_all are assembled below
    but never used in this function — possibly leftover; confirm before
    removing.
    """
    #
    str_size_T3_all = ""
    str_size_T3_blk = ""
    idx_count = 0
    for each_idx in l_external_idx:
        if idx_count != 0:
            str_size_T3_blk = str_size_T3_blk + " * "
            str_size_T3_all = str_size_T3_all + " * "
        str_size_T3_blk = str_size_T3_blk + "SIZE_SLICE_" + str(kernel_number) + "_" + each_idx.capitalize()
        str_size_T3_all = str_size_T3_all + "SIZE_IDX_" + each_idx.capitalize()
        idx_count = idx_count + 1
    # Global - Variables
    f.write("\n")
    f.write("// created by tc_gen_global_variables()\n")
    #
    # (Global) Variables for Sizes
    #
    tc_gen_global_variables_sizes(f, l_input_tensors, possible_diff, kernel_number)
    #
    # (Global) Variables for Output Inself
    #
    tc_gen_global_variables_outputs(f, possible_diff, kernel_number, # Input
                                    l_t3_d_decl_var, # Outputs
                                    l_t3_parameters, l_t3_parameters_nf, l_t3_parameters_f, # Outputs
                                    l_cuda_malloc, l_device_dynamic) # Outputs
    #
    # (Global) Variables for Arrays related to Output
    #
    tc_gen_global_variables_outputs_helpers(f, possible_diff, kernel_number, # Input
                                            l_t3_parameters, l_t3_parameters_nf, l_t3_parameters_f, # Outputs
                                            l_cuda_malloc, l_device_dynamic, l_t3_d_decl_var) # Outputs
    # >>>>>>>>>>>>> To-Do: Inner-Group
    #. For Each Tensor Contraction
    #. Data Structure: l_input_tensors.append(((("t2_1"), ("p4","p7","h1","h2")), (("v2_1"), ("p6","p7","h3","p5"))))
    for each_input in l_input_tensors:
        #
        # (Global) Variables For Left
        #
        tc_gen_global_variables_outputs_input_left(f, each_input, l_external_idx, possible_diff,
                                                   l_t2_d_decl_var, l_t2_parameters, l_t2_parameters_nf, l_t2_parameters_f,
                                                   l_cuda_malloc, l_device_dynamic, kernel_number)
        #
        # (Global) Variables For Right
        #
        tc_gen_global_variables_outputs_input_right(f, each_input, l_external_idx, possible_diff,
                                                    l_v2_d_decl_var, l_v2_parameters, l_v2_parameters_nf, l_v2_parameters_f,
                                                    l_cuda_malloc, l_device_dynamic, kernel_number)
    #
    #
    #
    if len(l_internal_idx) > 1:
        tc_gen_global_variables_outputs_input_internal(f, l_internal_idx)
#
def tc_gen_global_variables_outputs_input_internal(f, l_internal_idx):
    """Emit the file-scope variables that back the internal (contracted) indices.

    Writes to f, via tc_gen_code_helper_varible():
      * device/host offset pointers for the t2_1 and v2_1 inputs, and
      * two __constant__ int offset tables, each sized by the product of
        SIZE_IDX_<Idx> over all internal indices.
    """
    f.write("// Global Variables for Internal Indices\n")
    for tensor_tag in ("t2_1", "v2_1"):
        tc_gen_code_helper_varible(f, "int*", "d_internal_" + tensor_tag + "_offset")
        tc_gen_code_helper_varible(f, "int*", "h_internal_" + tensor_tag + "_offset")
    # Constant-memory tables: one slot per combination of internal indices.
    str_size_internal = " * ".join("SIZE_IDX_" + idx.capitalize() for idx in l_internal_idx)
    f.write("\n")
    tc_gen_code_helper_varible(f, "__constant__ int", "const_internal_t2_1_offset[" + str_size_internal + "]")
    tc_gen_code_helper_varible(f, "__constant__ int", "const_internal_v2_1_offset[" + str_size_internal + "]")
#
def tc_gen_variables_input_internal(l_internal_idx, l_var_internal):
    """Register host-side offset-pointer variables for the internal indices.

    Appends ["int*", name] entries for the left/right internal offset arrays
    to l_var_internal (mutated in place).

    Note: this function previously also built the " * "-joined product of
    SIZE_IDX_<Idx> over l_internal_idx, but the __constant__ declarations that
    would consume it are still commented out as To-Do, so the dead computation
    has been removed.  l_internal_idx stays in the signature for when that
    To-Do lands (and for call-site compatibility).
    """
    l_var_internal.append(["int*", "host_internal_left_offset"])
    l_var_internal.append(["int*", "host_internal_right_offset"])
    # >> To-Do: emit __constant__ offset tables sized by the product of
    # SIZE_IDX_<Idx> over l_internal_idx
    # (see tc_gen_global_variables_outputs_input_internal for the intended shape).
#
def tc_gen_variables_input_left(kernel_number, each_input, l_external_idx, l_t2_d_decl_var, l_t2_parameters, l_cuda_malloc, l_device_dynamic, l_var_input_left, opt_data_type):
    """Register all variables and allocations for the left ("t2") input tensor.

    each_input is ((name, indices), (name, indices)); slot [0] is the left
    tensor.  All l_* accumulator lists are mutated in place.
    """
    tensor_name, tensor_indices = each_input[0]
    d_input_name = "dev_" + tensor_name
    h_input_name = "host_" + tensor_name
    knum = str(kernel_number)
    # Full size multiplies size_<idx> over every index; slice size multiplies
    # SIZE_SLICE_* over the external indices only.  Both products list the
    # most recently visited index first (hence the reversed joins).
    full_terms = ["size_" + idx for idx in tensor_indices]
    slice_terms = ["SIZE_SLICE_" + knum + "_" + idx.capitalize()
                   for idx in tensor_indices
                   if tc_helper.tc_gen_helper_find_1d(l_external_idx, idx) != -1]
    input_f_size = " * ".join(reversed(full_terms))
    input_s_size = " * ".join(reversed(slice_terms))
    c_type = "double" if opt_data_type == "DOUBLE" else "float"
    addr_size = input_s_size + " * num_thread_blocks_kernel_" + knum
    offset_size = "SIZE_TB_" + knum + "_X * SIZE_TB_" + knum + "_Y"
    # Host/device variable names.
    l_var_input_left.append([c_type + "*", d_input_name])
    l_var_input_left.append(["int*", d_input_name + "_addr"])
    l_var_input_left.append(["int*", h_input_name + "_addr"])
    l_var_input_left.append(["int*", d_input_name + "_offset"])
    l_var_input_left.append(["int*", h_input_name + "_offset"])
    # Kernel-parameter declarations.
    l_t2_d_decl_var.append(c_type + "* " + d_input_name)
    l_t2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_addr")
    l_t2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_offset")
    # cudaMalloc requests.
    l_cuda_malloc.append([d_input_name, c_type, input_f_size])
    l_cuda_malloc.append([d_input_name + "_addr", "int", addr_size])
    l_cuda_malloc.append([d_input_name + "_offset", "int", offset_size])
    # Kernel-launch arguments.
    l_t2_parameters.append([d_input_name, c_type, input_f_size])
    l_t2_parameters.append([d_input_name + "_addr", "int", addr_size])
    l_t2_parameters.append([d_input_name + "_offset", "int", offset_size])
    # Host-to-device copies.
    l_device_dynamic.append([c_type, d_input_name, h_input_name, input_f_size])
    l_device_dynamic.append(["int", d_input_name + "_addr", h_input_name + "_addr", addr_size])
    l_device_dynamic.append(["int", d_input_name + "_offset", h_input_name + "_offset", offset_size])
#
def tc_gen_global_variables_outputs_input_left(f, each_input, l_external_idx, possible_diff,
                                               l_t2_d_decl_var, l_t2_parameters, l_t2_parameters_nf, l_t2_parameters_f,
                                               l_cuda_malloc, l_device_dynamic, kernel_number,
                                               opt_data_type):
    """Emit global variables and bookkeeping entries for the left ("t2") input.

    Writes the d_/h_ pointer declarations to f and appends matching entries
    to the declaration / parameter / cudaMalloc / host-to-device accumulators.
    When possible_diff == 1, the address arrays are additionally split into
    full / non-full thread-block variants.
    """
    f.write("// Global Variables for Left Input\n")
    # Left input tensor: each_input[0] = (name, indices).
    d_input_name = "d_" + each_input[0][0]
    h_input_name = "h_" + each_input[0][0]
    input_f_size = ""
    input_s_size = ""
    # input_f_size: product of SIZE_IDX_* over all indices (newest term first).
    # input_s_size: product of SIZE_SLICE_* over the external indices only.
    idx_s_count = 0
    idx_f_count = 0
    for each_index in each_input[0][1]:
        if tc_helper.tc_gen_helper_find_1d(l_external_idx, each_index) != -1:
            if idx_f_count == 0:
                input_f_size = "SIZE_IDX_" + each_index.capitalize()
            else:
                input_f_size = "SIZE_IDX_" + each_index.capitalize() + " * " + input_f_size
            if idx_s_count == 0:
                input_s_size = "SIZE_SLICE_" + str(kernel_number) + "_" + each_index.capitalize()
            else:
                input_s_size = "SIZE_SLICE_" + str(kernel_number) + "_" + each_index.capitalize() + " * " + input_s_size
            idx_s_count = idx_s_count + 1
            idx_f_count = idx_f_count + 1
        else:
            if idx_f_count == 0:
                input_f_size = "SIZE_IDX_" + each_index.capitalize()
            else:
                input_f_size = "SIZE_IDX_" + each_index.capitalize() + " * " + input_f_size
            idx_f_count = idx_f_count + 1
    # Pointer declarations (device + host).
    if opt_data_type == "DOUBLE":
        tc_gen_code_helper_varible(f, "double*", d_input_name)
        tc_gen_code_helper_varible(f, "double*", h_input_name)
    else:
        tc_gen_code_helper_varible(f, "float*", d_input_name)
        tc_gen_code_helper_varible(f, "float*", h_input_name)
    tc_gen_code_helper_varible(f, "int*", d_input_name + "_addr")
    tc_gen_code_helper_varible(f, "int*", h_input_name + "_addr")
    tc_gen_code_helper_varible(f, "int*", d_input_name + "_offset")
    tc_gen_code_helper_varible(f, "int*", h_input_name + "_offset")
    # Kernel-parameter declarations.
    if opt_data_type == "DOUBLE":
        l_t2_d_decl_var.append("double* " + d_input_name)
    else:
        l_t2_d_decl_var.append("float* " + d_input_name)
    l_t2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_addr")
    l_t2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_offset")
    # cudaMalloc requests.
    if opt_data_type == "DOUBLE":
        l_cuda_malloc.append((d_input_name, "double", input_f_size))
    else:
        l_cuda_malloc.append((d_input_name, "float", input_f_size))
    l_cuda_malloc.append((d_input_name + "_addr", "int", input_s_size + " * n_blks_" + str(kernel_number)))
    l_cuda_malloc.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    # Kernel-launch arguments.
    if opt_data_type == "DOUBLE":
        l_t2_parameters.append((d_input_name, "double", input_f_size))
    else:
        l_t2_parameters.append((d_input_name, "float", input_f_size))
    # FIX: the _addr parameter size previously said "n_blks" (no kernel suffix),
    # disagreeing with the matching cudaMalloc/copy entries above and with the
    # right-input twin, which all use the per-kernel "n_blks_<kernel_number>".
    l_t2_parameters.append((d_input_name + "_addr", "int", input_s_size + " * n_blks_" + str(kernel_number)))
    l_t2_parameters.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    # Host-to-device copies.
    if opt_data_type == "DOUBLE":
        l_device_dynamic.append(("double", d_input_name, h_input_name, input_f_size))
    else:
        l_device_dynamic.append(("float", d_input_name, h_input_name, input_f_size))
    l_device_dynamic.append(("int", d_input_name + "_addr", h_input_name + "_addr", input_s_size + " * n_blks_" + str(kernel_number)))
    l_device_dynamic.append(("int", d_input_name + "_offset", h_input_name + "_offset", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    # Full / non-full split (only when thread-block shapes can differ).
    if possible_diff == 1:
        tc_gen_code_helper_varible(f, "int*", d_input_name + "_addr_full")
        tc_gen_code_helper_varible(f, "int*", h_input_name + "_addr_full")
        tc_gen_code_helper_varible(f, "int*", d_input_name + "_addr_non_full")
        tc_gen_code_helper_varible(f, "int*", h_input_name + "_addr_non_full")
        #
        if opt_data_type == "DOUBLE":
            l_t2_parameters_nf.append((d_input_name, "double", input_f_size))
        else:
            l_t2_parameters_nf.append((d_input_name, "float", input_f_size))
        l_t2_parameters_nf.append((d_input_name + "_addr_non_full", "int", input_s_size + " * num_blk_non_full_" + str(kernel_number)))
        l_t2_parameters_nf.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
        #
        if opt_data_type == "DOUBLE":
            l_t2_parameters_f.append((d_input_name, "double", input_f_size))
        else:
            l_t2_parameters_f.append((d_input_name, "float", input_f_size))
        l_t2_parameters_f.append((d_input_name + "_addr_full", "int", input_s_size + " * num_blk_full_" + str(kernel_number)))
        l_t2_parameters_f.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
        l_device_dynamic.append(("int", d_input_name + "_addr_full", h_input_name + "_addr_full", input_s_size + " * num_blk_full_" + str(kernel_number)))
        l_device_dynamic.append(("int", d_input_name + "_addr_non_full", h_input_name + "_addr_non_full", input_s_size + " * num_blk_non_full_" + str(kernel_number)))
        l_cuda_malloc.append((d_input_name + "_addr_full", "int", input_s_size + " * num_blk_full_" + str(kernel_number)))
        l_cuda_malloc.append((d_input_name + "_addr_non_full", "int", input_s_size + " * num_blk_non_full_" + str(kernel_number)))
    f.write("\n")
#
def tc_gen_variables_input_right(kernel_number, each_input, l_external_idx, l_v2_d_decl_var, l_v2_parameters, l_cuda_malloc, l_device_dynamic, l_var_input_right, opt_data_type):
    """Register all variables and allocations for the right ("v2") input tensor.

    Mirror of tc_gen_variables_input_left, reading each_input[1].
    All l_* accumulator lists are mutated in place.
    """
    tensor_name, tensor_indices = each_input[1]
    d_input_name = "dev_" + tensor_name
    h_input_name = "host_" + tensor_name
    knum = str(kernel_number)
    # Full size multiplies size_<idx> over every index; slice size multiplies
    # SIZE_SLICE_* over the external indices only.  Both products list the
    # most recently visited index first (hence the reversed joins).
    full_terms = ["size_" + idx for idx in tensor_indices]
    slice_terms = ["SIZE_SLICE_" + knum + "_" + idx.capitalize()
                   for idx in tensor_indices
                   if tc_helper.tc_gen_helper_find_1d(l_external_idx, idx) != -1]
    input_f_size = " * ".join(reversed(full_terms))
    input_s_size = " * ".join(reversed(slice_terms))
    c_type = "double" if opt_data_type == "DOUBLE" else "float"
    addr_size = input_s_size + " * num_thread_blocks_kernel_" + knum
    offset_size = "SIZE_TB_" + knum + "_X * SIZE_TB_" + knum + "_Y"
    # Host/device variable names.
    l_var_input_right.append([c_type + "*", d_input_name])
    l_var_input_right.append(["int*", d_input_name + "_addr"])
    l_var_input_right.append(["int*", h_input_name + "_addr"])
    l_var_input_right.append(["int*", d_input_name + "_offset"])
    l_var_input_right.append(["int*", h_input_name + "_offset"])
    # Kernel-parameter declarations.
    l_v2_d_decl_var.append(c_type + "* " + d_input_name)
    l_v2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_addr")
    l_v2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_offset")
    # cudaMalloc requests.
    l_cuda_malloc.append([d_input_name, c_type, input_f_size])
    l_cuda_malloc.append([d_input_name + "_addr", "int", addr_size])
    l_cuda_malloc.append([d_input_name + "_offset", "int", offset_size])
    # Kernel-launch arguments.
    l_v2_parameters.append([d_input_name, c_type, input_f_size])
    l_v2_parameters.append([d_input_name + "_addr", "int", addr_size])
    l_v2_parameters.append([d_input_name + "_offset", "int", offset_size])
    # Host-to-device copies.
    l_device_dynamic.append([c_type, d_input_name, h_input_name, input_f_size])
    l_device_dynamic.append(["int", d_input_name + "_addr", h_input_name + "_addr", addr_size])
    l_device_dynamic.append(["int", d_input_name + "_offset", h_input_name + "_offset", offset_size])
#
def tc_gen_global_variables_outputs_input_right(f, each_input, l_external_idx, possible_diff,
                                                l_v2_d_decl_var, l_v2_parameters, l_v2_parameters_nf, l_v2_parameters_f,
                                                l_cuda_malloc, l_device_dynamic, kernel_number,
                                                opt_data_type):
    """Emit global variables and bookkeeping entries for the right ("v2") input.

    Mirror of tc_gen_global_variables_outputs_input_left, reading
    each_input[1].  Writes pointer declarations to f and appends matching
    entries to the declaration / parameter / cudaMalloc / host-to-device
    accumulators; when possible_diff == 1 the address arrays additionally get
    full / non-full thread-block variants.
    """
    f.write("// Global Variables for Right Input\n")
    # Right input tensor: each_input[1] = (name, indices).
    d_input_name = "d_" + each_input[1][0]
    h_input_name = "h_" + each_input[1][0]
    input_f_size = ""
    input_s_size = ""
    # input_f_size: product of SIZE_IDX_* over all indices (newest term first).
    # input_s_size: product of SIZE_SLICE_* over the external indices only.
    idx_f_count = 0
    idx_s_count = 0
    for each_index in each_input[1][1]:
        if tc_helper.tc_gen_helper_find_1d(l_external_idx, each_index) != -1:
            if idx_f_count == 0:
                input_f_size = "SIZE_IDX_" + each_index.capitalize()
            else:
                input_f_size = "SIZE_IDX_" + each_index.capitalize() + " * " + input_f_size
            if idx_s_count == 0:
                input_s_size = "SIZE_SLICE_" + str(kernel_number) + "_" + each_index.capitalize()
            else:
                input_s_size = "SIZE_SLICE_" + str(kernel_number) + "_" + each_index.capitalize() + " * " + input_s_size
            idx_f_count = idx_f_count + 1
            idx_s_count = idx_s_count + 1
        else:
            if idx_f_count == 0:
                input_f_size = "SIZE_IDX_" + each_index.capitalize()
            else:
                input_f_size = "SIZE_IDX_" + each_index.capitalize() + " * " + input_f_size
            idx_f_count = idx_f_count + 1
    # Pointer declarations (device + host).
    if opt_data_type == "DOUBLE":
        tc_gen_code_helper_varible(f, "double*", d_input_name)
        tc_gen_code_helper_varible(f, "double*", h_input_name)
    else:
        tc_gen_code_helper_varible(f, "float*", d_input_name)
        tc_gen_code_helper_varible(f, "float*", h_input_name)
    tc_gen_code_helper_varible(f, "int*", d_input_name + "_addr")
    tc_gen_code_helper_varible(f, "int*", h_input_name + "_addr")
    tc_gen_code_helper_varible(f, "int*", d_input_name + "_offset")
    tc_gen_code_helper_varible(f, "int*", h_input_name + "_offset")
    # Kernel-parameter declarations.
    if opt_data_type == "DOUBLE":
        l_v2_d_decl_var.append("double* " + d_input_name)
    else:
        l_v2_d_decl_var.append("float* " + d_input_name)
    l_v2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_addr")
    l_v2_d_decl_var.append("const int* __restrict__ " + d_input_name + "_offset")
    # Kernel-launch arguments.
    if opt_data_type == "DOUBLE":
        l_v2_parameters.append((d_input_name, "double", input_f_size))
    else:
        l_v2_parameters.append((d_input_name, "float", input_f_size))
    l_v2_parameters.append((d_input_name + "_addr", "int", input_s_size + " * n_blks_" + str(kernel_number)))
    l_v2_parameters.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    # cudaMalloc requests.
    if opt_data_type == "DOUBLE":
        l_cuda_malloc.append((d_input_name, "double", input_f_size))
    else:
        l_cuda_malloc.append((d_input_name, "float", input_f_size))
    l_cuda_malloc.append((d_input_name + "_addr", "int", input_s_size + " * n_blks_" + str(kernel_number)))
    l_cuda_malloc.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    # Host-to-device copies.
    if opt_data_type == "DOUBLE":
        l_device_dynamic.append(("double", d_input_name, h_input_name, input_f_size))
    else:
        l_device_dynamic.append(("float", d_input_name, h_input_name, input_f_size))
    l_device_dynamic.append(("int", d_input_name + "_addr", h_input_name + "_addr", input_s_size + " * n_blks_" + str(kernel_number)))
    l_device_dynamic.append(("int", d_input_name + "_offset", h_input_name + "_offset", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    # Full / non-full split (only when thread-block shapes can differ).
    if possible_diff == 1:
        tc_gen_code_helper_varible(f, "int*", d_input_name + "_addr_full")
        tc_gen_code_helper_varible(f, "int*", h_input_name + "_addr_full")
        tc_gen_code_helper_varible(f, "int*", d_input_name + "_addr_non_full")
        tc_gen_code_helper_varible(f, "int*", h_input_name + "_addr_non_full")
        #
        if opt_data_type == "DOUBLE":
            l_v2_parameters_nf.append((d_input_name, "double", input_f_size))
        else:
            l_v2_parameters_nf.append((d_input_name, "float", input_f_size))
        l_v2_parameters_nf.append((d_input_name + "_addr_non_full", "int", input_s_size + " * num_blk_non_full_" + str(kernel_number)))
        l_v2_parameters_nf.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
        #
        if opt_data_type == "DOUBLE":
            l_v2_parameters_f.append((d_input_name, "double", input_f_size))
        else:
            l_v2_parameters_f.append((d_input_name, "float", input_f_size))
        # FIX: the _addr_full parameter size previously said "num_blk_full" with no
        # kernel suffix, disagreeing with the matching cudaMalloc/copy entries below
        # and with the left-input twin, which use "num_blk_full_<kernel_number>".
        l_v2_parameters_f.append((d_input_name + "_addr_full", "int", input_s_size + " * num_blk_full_" + str(kernel_number)))
        l_v2_parameters_f.append((d_input_name + "_offset", "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
        l_device_dynamic.append(("int", d_input_name + "_addr_full", h_input_name + "_addr_full", input_s_size + " * num_blk_full_" + str(kernel_number)))
        l_device_dynamic.append(("int", d_input_name + "_addr_non_full", h_input_name + "_addr_non_full", input_s_size + " * num_blk_non_full_" + str(kernel_number)))
        l_cuda_malloc.append((d_input_name + "_addr_full", "int", input_s_size + " * num_blk_full_" + str(kernel_number)))
        l_cuda_malloc.append((d_input_name + "_addr_non_full", "int", input_s_size + " * num_blk_non_full_" + str(kernel_number)))
    f.write("\n")
#
def tc_gen_variables_outputs_helpers(kernel_number, l_t3_d_decl_var, l_t3_parameters, l_cuda_malloc, l_device_dynamic, l_var_outputs_helpers):
    """Register the per-thread-block helper arrays for the output tensor.

    Registers, in order: the (host-only) block-index array, then the
    block-range, output-base and output-offset arrays, each with its variable
    names, kernel-parameter declaration, cudaMalloc request and host-to-device
    copy entry.  All lists are mutated in place.
    """
    knum = str(kernel_number)
    # 1. Block index: host-side only (the device pointer is intentionally disabled).
    l_var_outputs_helpers.append(["int*", "host_t3_block_index_" + knum])
    # 2-4. (name, element-count) for each dev/host helper-array pair.
    helper_arrays = [
        ("t3_block_range_" + knum, "num_thread_blocks_kernel_" + knum + " * NUM_INDEX"),
        ("t3_output_base_" + knum, "num_thread_blocks_kernel_" + knum),
        ("t3_output_offset_" + knum, "SIZE_TB_" + knum + "_X * SIZE_TB_" + knum + "_Y"),
    ]
    for arr_name, arr_size in helper_arrays:
        l_var_outputs_helpers.append(["int*", "dev_" + arr_name])
        l_var_outputs_helpers.append(["int*", "host_" + arr_name])
        l_t3_d_decl_var.append("const int* __restrict__ dev_" + arr_name)
        l_t3_parameters.append(["dev_" + arr_name, "int", arr_size])
        l_cuda_malloc.append(["dev_" + arr_name, "int", arr_size])
        l_device_dynamic.append(["int", "dev_" + arr_name, "host_" + arr_name, arr_size])
#
def tc_gen_global_variables_outputs_helpers(f, possible_diff, kernel_number,                         # Input
                                            l_t3_parameters, l_t3_parameters_nf, l_t3_parameters_f,  # Outputs
                                            l_cuda_malloc, l_device_dynamic, l_t3_d_decl_var):       # Outputs
    """Emit global variables for the per-thread-block output helper arrays.

    Covers block-index (split only), block-range, output-base and
    output-offset arrays for kernel `kernel_number`.  Pointer declarations
    are written to `f`; parameter / cudaMalloc / host-to-device bookkeeping
    entries are appended to the output lists in place.  When
    possible_diff == 1, full / non-full thread-block variants are added.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    the nesting of the possible_diff sections is assumed — confirm against
    the original file.
    """
    #
    # Depends on # of Fused Kernel.
    #
    # 1. Block-index arrays (emitted only for the full/non-full split;
    #    no device allocation is registered for these here).
    if possible_diff == 1:
        tc_gen_code_helper_varible(f, "int*", "t3_blk_idx_full_" + str(kernel_number))
        tc_gen_code_helper_varible(f, "int*", "t3_blk_idx_non_full_" + str(kernel_number))
    f.write("\n")
    # 2. Block-range arrays: one [NUM_INDEX]-wide row of index ranges per block.
    tc_gen_code_helper_varible(f, "int*", "d_t3_blk_rng_" + str(kernel_number))
    tc_gen_code_helper_varible(f, "int*", "h_t3_blk_idx_" + str(kernel_number)) # only for host
    tc_gen_code_helper_varible(f, "int*", "h_t3_blk_rng_" + str(kernel_number))
    if possible_diff == 1:
        # Non-full variant of the block-range array; the full-block parameter
        # list reuses the plain d_t3_blk_rng_<k> array.
        tc_gen_code_helper_varible(f, "int*", "d_t3_blk_rng_nf_" + str(kernel_number))
        tc_gen_code_helper_varible(f, "int*", "h_t3_blk_rng_nf_" + str(kernel_number))
        l_t3_parameters_nf.append(( "d_t3_blk_rng_nf_" + str(kernel_number), "int", "num_blk_non_full_" + str(kernel_number) + " * NUM_INDEX"))
        l_t3_parameters_f.append(( "d_t3_blk_rng_" + str(kernel_number), "int", "n_blks_" + str(kernel_number) + " * NUM_INDEX"))
        l_cuda_malloc.append(( "d_t3_blk_rng_nf_" + str(kernel_number), "int", "num_blk_non_full_" + str(kernel_number) + " * NUM_INDEX"))
        l_device_dynamic.append(("int", "d_t3_blk_rng_nf_" + str(kernel_number), "h_t3_blk_rng_nf_" + str(kernel_number), "num_blk_non_full_" + str(kernel_number) + " * NUM_INDEX"))
    # Kernel-side declaration drops the d_ prefix (device parameter name).
    l_t3_d_decl_var.append("const int* __restrict__ t3_blk_rng_" + str(kernel_number))
    l_t3_parameters.append( ("d_t3_blk_rng_" + str(kernel_number), "int", "n_blks_" + str(kernel_number) + " * NUM_INDEX"))
    l_cuda_malloc.append( ("d_t3_blk_rng_" + str(kernel_number), "int", "n_blks_" + str(kernel_number) + " * NUM_INDEX"))
    l_device_dynamic.append(("int", "d_t3_blk_rng_" + str(kernel_number), "h_t3_blk_rng_" + str(kernel_number), "n_blks_" + str(kernel_number) + " * NUM_INDEX"))
    f.write("\n")
    # 3. Output-base arrays: one base value per thread block.
    tc_gen_code_helper_varible(f, "int*", "d_t3_output_base_" + str(kernel_number))
    tc_gen_code_helper_varible(f, "int*", "h_t3_output_base_" + str(kernel_number))
    l_t3_d_decl_var.append("const int* __restrict__ t3_output_base_" + str(kernel_number))
    l_t3_parameters.append( ("d_t3_output_base_" + str(kernel_number), "int", "n_blks_" + str(kernel_number)))
    l_cuda_malloc.append( ("d_t3_output_base_" + str(kernel_number), "int", "n_blks_" + str(kernel_number)))
    l_device_dynamic.append(("int", "d_t3_output_base_" + str(kernel_number), "h_t3_output_base_" + str(kernel_number), "n_blks_" + str(kernel_number)))
    if possible_diff == 1:
        # Full / non-full variants of the output-base array.
        tc_gen_code_helper_varible(f, "int*", "d_t3_output_base_full_" + str(kernel_number))
        tc_gen_code_helper_varible(f, "int*", "d_t3_output_base_non_full_" + str(kernel_number))
        tc_gen_code_helper_varible(f, "int*", "h_t3_output_base_full_" + str(kernel_number))
        tc_gen_code_helper_varible(f, "int*", "h_t3_output_base_non_full_" + str(kernel_number))
        l_t3_parameters_f.append( ("d_t3_output_base_full_" + str(kernel_number), "int", "num_blk_full_" + str(kernel_number)))
        l_t3_parameters_nf.append( ("d_t3_output_base_non_full_" + str(kernel_number), "int", "num_blk_non_full_" + str(kernel_number)))
        l_device_dynamic.append(("int", "d_t3_output_base_full_" + str(kernel_number), "h_t3_output_base_full_" + str(kernel_number), "num_blk_full_" + str(kernel_number)))
        l_device_dynamic.append(("int", "d_t3_output_base_non_full_" + str(kernel_number), "h_t3_output_base_non_full_" + str(kernel_number), "num_blk_non_full_" + str(kernel_number)))
        l_cuda_malloc.append(("d_t3_output_base_full_" + str(kernel_number), "int", "num_blk_full_" + str(kernel_number)))
        l_cuda_malloc.append(("d_t3_output_base_non_full_" + str(kernel_number), "int", "num_blk_non_full_" + str(kernel_number)))
    f.write("\n")
    # 4. Output-offset arrays: one offset per thread of a SIZE_TB_X x SIZE_TB_Y block.
    tc_gen_code_helper_varible(f, "int*", "d_t3_output_offset_" + str(kernel_number))
    tc_gen_code_helper_varible(f, "int*", "h_t3_output_offset_" + str(kernel_number))
    l_t3_d_decl_var.append("const int* __restrict__ t3_output_offset_" + str(kernel_number))
    l_cuda_malloc.append( ("d_t3_output_offset_" + str(kernel_number), "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    l_t3_parameters.append( ("d_t3_output_offset_" + str(kernel_number), "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    l_device_dynamic.append(("int", "d_t3_output_offset_" + str(kernel_number), "h_t3_output_offset_" + str(kernel_number), "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
    f.write("\n") # this should be stored in a table to be used in future.
    # The offset array is shared by both split parameter lists.
    if possible_diff == 1:
        l_t3_parameters_nf.append( ("d_t3_output_offset_" + str(kernel_number), "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
        l_t3_parameters_f.append( ("d_t3_output_offset_" + str(kernel_number), "int", "SIZE_TB_" + str(kernel_number) + "_X * SIZE_TB_" + str(kernel_number) + "_Y"))
#
def tc_gen_global_variables_outputs(f, possible_diff, kernel_number,                       # Input
                                    l_t3_d_decl_var,                                       # output
                                    l_t3_parameters, l_t3_parameters_nf, l_t3_parameters_f, # output
                                    l_cuda_malloc, l_device_dynamic,                        # output
                                    opt_data_type):
    """Emit the global variables for the output tensor itself (d_t3 / h_t3 / h_t3_chk).

    The output is shared across fused kernels, so only kernel 1 emits anything.
    To-Do: should support multiple outputs.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    all statements assumed kernel-1-only — confirm against the original file.
    """
    if kernel_number != 1:
        return
    c_type = "double" if opt_data_type == "DOUBLE" else "float"
    f.write("\n")
    f.write("// Depends on # of Fused Kernels\n")
    # Device output, its host mirror, and the host check buffer
    # (used by pre_SD2_Functions()).
    for var_name in ("d_t3", "h_t3", "h_t3_chk"):
        tc_gen_code_helper_varible(f, c_type + "*", var_name)
    # Device allocation and host-to-device pairing for the output.
    l_cuda_malloc.append(("d_t3", c_type, "size_T3"))
    l_device_dynamic.append((c_type, "d_t3", "h_t3", "size_T3"))
    # With differing block shapes the output goes on both split parameter
    # lists; otherwise on the single one.
    if possible_diff == 1:
        l_t3_parameters_nf.append(("d_t3", c_type, "size_T3"))
        l_t3_parameters_f.append(("d_t3", c_type, "size_T3"))
    else:
        l_t3_parameters.append(("d_t3", c_type, "size_T3"))
    # Kernel-side declaration uses the bare name "t3".
    l_t3_d_decl_var.append(c_type + "* t3")
    f.write("\n")
#
def tc_gen_variables_outputs(kernel_number, l_interface_info, l_external_idx,
                             l_t3_d_decl_var, l_t3_parameters, l_cuda_malloc, l_device_dynamic,
                             l_var_outputs, opt_data_type):
    """Register the output tensor (dev_t3) and its allocation/parameter entries.

    The output is common among inner groups, so registration happens only for
    kernel 1.  All l_* accumulator lists are mutated in place.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    all registrations assumed kernel-1-only — confirm against the original file.
    """
    # Full output size: product of size_<idx> over the external indices, in order.
    str_t3_size = " * ".join("size_" + idx for idx in l_external_idx)
    c_type = "double" if opt_data_type == "DOUBLE" else "float"
    if kernel_number == 1:
        l_var_outputs.append([c_type + "*", "dev_t3"])
        l_cuda_malloc.append(["dev_t3", c_type, str_t3_size])
        # l_interface_info[0][1] names the host-side buffer paired with dev_t3.
        l_device_dynamic.append([c_type, "dev_t3", l_interface_info[0][1], str_t3_size])
        l_t3_parameters.append(["dev_t3", c_type, str_t3_size])
        l_t3_d_decl_var.append(c_type + "* dev_t3")
#
def tc_gen_global_variables_sizes(f, l_input_tensors, possible_diff, kernel_number):
    """Emit the global block-count and per-tensor size variables for one kernel.

    To-Do: Inner-Groups.
    """
    # Thread-block count is always emitted; the full / non-full split counts
    # are added only when block shapes can differ.
    tc_gen_code_helper_varible(f, "int", "n_blks_" + str(kernel_number))
    if possible_diff == 1:
        tc_gen_code_helper_varible(f, "int", "num_blk_full_" + str(kernel_number))
        tc_gen_code_helper_varible(f, "int", "num_blk_non_full_" + str(kernel_number))
    # One element-count variable per input tensor (left and right of each
    # tensor contraction).
    f.write("// Each Input Tensor Size\n")
    for left_tensor, right_tensor in l_input_tensors:
        tc_gen_code_helper_varible(f, "int", "size_" + left_tensor[0].capitalize())
        tc_gen_code_helper_varible(f, "int", "size_" + right_tensor[0].capitalize())
    f.write("\n")
#
def tc_gen_code_helper_varible(f, type, name):
    """Write a single C variable declaration, e.g. "int* d_t3;", to f."""
    # NOTE(review): parameter name `type` shadows the builtin but is kept for
    # call-site compatibility.
    f.write(type + " " + name + ";\n")
| 50.010664
| 203
| 0.603876
| 5,773
| 42,209
| 3.849991
| 0.029447
| 0.093944
| 0.102583
| 0.043193
| 0.925763
| 0.912265
| 0.886484
| 0.855395
| 0.819941
| 0.779672
| 0
| 0.013684
| 0.278045
| 42,209
| 843
| 204
| 50.069988
| 0.715683
| 0.054301
| 0
| 0.60644
| 0
| 0
| 0.141429
| 0.01732
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026834
| false
| 0
| 0.001789
| 0
| 0.028623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
23900874399adf250079e9988bc86d9770bf14db
| 173
|
py
|
Python
|
rllib/agents/ppo/__init__.py
|
qsays/ray
|
2fb53396ad1dc7a5b7bd0f6135b48c4f40c5adf6
|
[
"Apache-2.0"
] | 4
|
2019-10-18T17:44:58.000Z
|
2021-04-14T14:37:21.000Z
|
rllib/agents/ppo/__init__.py
|
Eric2Hamel/ray
|
bfaee49880611a65d16a4561c94c60851573b6f2
|
[
"Apache-2.0"
] | 1
|
2022-03-30T17:52:44.000Z
|
2022-03-30T17:52:44.000Z
|
rllib/agents/ppo/__init__.py
|
Eric2Hamel/ray
|
bfaee49880611a65d16a4561c94c60851573b6f2
|
[
"Apache-2.0"
] | 1
|
2020-06-26T07:54:25.000Z
|
2020-06-26T07:54:25.000Z
|
from ray.rllib.agents.ppo.ppo import PPOTrainer, DEFAULT_CONFIG
from ray.rllib.agents.ppo.appo import APPOTrainer
__all__ = ["APPOTrainer", "PPOTrainer", "DEFAULT_CONFIG"]
| 34.6
| 63
| 0.797688
| 23
| 173
| 5.73913
| 0.521739
| 0.106061
| 0.181818
| 0.272727
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086705
| 173
| 4
| 64
| 43.25
| 0.835443
| 0
| 0
| 0
| 0
| 0
| 0.202312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
23bd98eeb8dd99cd6289ed1168d179f9f99b0192
| 40,439
|
py
|
Python
|
tests/test_selectors.py
|
PayneLab/covid19pandas
|
681722c1def6592c3f3801ec19fa9d8a171584c9
|
[
"Apache-2.0"
] | 7
|
2020-04-08T11:52:11.000Z
|
2021-02-25T21:14:28.000Z
|
tests/test_selectors.py
|
PayneLab/covid19pandas
|
681722c1def6592c3f3801ec19fa9d8a171584c9
|
[
"Apache-2.0"
] | 1
|
2020-04-01T17:04:41.000Z
|
2020-04-02T02:37:55.000Z
|
tests/test_selectors.py
|
PayneLab/covid19pandas
|
681722c1def6592c3f3801ec19fa9d8a171584c9
|
[
"Apache-2.0"
] | 3
|
2020-04-02T18:41:41.000Z
|
2020-11-19T06:27:02.000Z
|
# Copyright 2018 Samuel Payne sam_payne@byu.edu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import covid19pandas as cod
import covid19pandas.exceptions as codex
from test_getters import _check_gotten
import pandas as pd
import numpy as np
import datetime
import pytest
import math
# Option matrices swept by the test methods below.
# Table layouts produced by the getters.
formats = ["wide", "long"]
# Data-type / region options exercised against the JHU getter.
jhu_data_types = ["all", "cases", "deaths", "recovered"]
jhu_regions = ["global", "us"]
# Data-type options for the NYT getter; county-level toggle values.
nyt_data_types = ["all", "cases", "deaths"]
nyt_county_options = [True, False]
@pytest.mark.filterwarnings("ignore::covid19pandas.exceptions.FileNotUpdatedWarning")
class TestSelectors:
    """Integration tests for the covid19pandas selector/calculator functions.

    Each test_* method downloads real JHU or NYT tables (via get_data_jhu /
    get_data_nyt with update=False, relying on setup_class having refreshed the
    local files once) and delegates the actual assertions to the _check_*
    helper methods below.

    NOTE(review): several helpers reference a bare `ParameterError`, but the
    module only imports `covid19pandas.exceptions as codex` — those `raise`
    statements would themselves fail with NameError if ever reached. Presumably
    they were meant to be `codex.ParameterError`; confirm and fix upstream.
    """

    @classmethod
    def setup_class(cls):
        """Ensures that all data tables have been recently downloaded, so we can skip the update in all our tests to improve speed."""
        cod.get_data_jhu(data_type="all", region="global", update=True)
        cod.get_data_jhu(data_type="all", region="us", update=True)
        cod.get_data_nyt(data_type="all", counties=False, update=True)
        cod.get_data_nyt(data_type="all", counties=True, update=True)

    # -------------------------------------------------------------------------------------------------------------
    # Tests for select_top_x_regions
    # -------------------------------------------------------------------------------------------------------------
    def test_select_top_x_jhu(self):
        """Run _check_select_top_x over every valid JHU format/data_type/region combination."""
        for format in formats:
            for data_type in jhu_data_types:
                for region in jhu_regions:
                    if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
                        if data_type == "all":
                            # With an "all" table we can rank by any one of the contained data columns.
                            compare_by_types = set(jhu_data_types)
                            compare_by_types.remove("all")
                            if region == "us":
                                compare_by_types.remove("recovered")  # JHU US table has no "recovered" column
                            for compare_by_type in compare_by_types:
                                self._check_select_top_x(df, format, compare_by_type, num_regions=1) # Don't keep others
                                self._check_select_top_x(df, format, compare_by_type, num_regions=1, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
                                self._check_select_top_x(df, format, compare_by_type, num_regions=2) # Don't keep others
                                self._check_select_top_x(df, format, compare_by_type, num_regions=2, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
                        else:
                            self._check_select_top_x(df, format, data_type, num_regions=1)
                            self._check_select_top_x(df, format, data_type, num_regions=2)

    def test_select_top_x_nyt(self):
        """Run _check_select_top_x over every valid NYT format/data_type/counties combination."""
        for format in formats:
            for data_type in nyt_data_types:
                for county_option in nyt_county_options:
                    if (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
                        if data_type == "all":
                            compare_by_types = set(nyt_data_types)
                            compare_by_types.remove("all")
                            for compare_by_type in compare_by_types:
                                self._check_select_top_x(df, format, compare_by_type, num_regions=1) # Don't keep others
                                self._check_select_top_x(df, format, compare_by_type, num_regions=1, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
                                # It only will work to do more than 1 grouping col if we're using the states and counties table, because the just states table only has one grouping col
                                if county_option:
                                    self._check_select_top_x(df, format, compare_by_type, num_regions=2) # Don't keep others
                                    self._check_select_top_x(df, format, compare_by_type, num_regions=2, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
                        else:
                            self._check_select_top_x(df, format, data_type, num_regions=1)
                            # It only will work to do more than 1 grouping col if we're using the states and counties table, because the just states table only has one grouping col
                            if county_option:
                                self._check_select_top_x(df, format, data_type, num_regions=2)

    # -------------------------------------------------------------------------------------------------------------
    # Tests for select_regions
    # -------------------------------------------------------------------------------------------------------------
    def test_select_regions_jhu(self):
        """Run _check_select_regions over every valid JHU format/data_type/region combination."""
        for format in formats:
            for data_type in jhu_data_types:
                for region in jhu_regions:
                    if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
                        if data_type == "all":
                            cols_to_keep = {"cases", "deaths", "recovered"}
                            if region == "us":
                                cols_to_keep.remove("recovered")
                            cols_to_keep = sorted(cols_to_keep) # Convert it back to a list
                        else:
                            cols_to_keep = [data_type]
                        self._check_select_regions(df, format, cols_kept=cols_to_keep)

    def test_select_regions_nyt(self):
        """Run _check_select_regions over every valid NYT format/data_type/counties combination."""
        for format in formats:
            for data_type in nyt_data_types:
                for county_option in nyt_county_options:
                    if (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
                        if data_type == "all":
                            cols_to_keep = ["cases", "deaths"]
                        else:
                            cols_to_keep = [data_type]
                        self._check_select_regions(df, format, cols_kept=cols_to_keep)

    # -------------------------------------------------------------------------------------------------------------
    # Tests for calc_x_day_rolling_mean
    # -------------------------------------------------------------------------------------------------------------
    def test_calc_x_day_rolling_mean_jhu(self):
        """Run _check_calc_x_day_rolling_mean over every valid JHU parameter combination."""
        for format in formats:
            for data_type in jhu_data_types:
                for region in jhu_regions:
                    if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
                        if data_type == "all":
                            input_data_types = set(jhu_data_types)
                            input_data_types.remove("all")
                            if region == "us":
                                input_data_types.remove("recovered")
                            for input_data_type in input_data_types:
                                self._check_calc_x_day_rolling_mean(df, format, data_type=input_data_type, other_input_data_types=[col for col in input_data_types if col != input_data_type])
                        # Note that we still also perform this test if data_type == "all" because we can also calculate the x day mean for all columns.
                        self._check_calc_x_day_rolling_mean(df, format, data_type)

    def test_calc_x_day_rolling_mean_nyt(self):
        """Run _check_calc_x_day_rolling_mean over every valid NYT parameter combination."""
        for format in formats:
            for data_type in nyt_data_types:
                for county_option in nyt_county_options:
                    if (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
                        if data_type == "all":
                            input_data_types = set(nyt_data_types)
                            input_data_types.remove("all")
                            for input_data_type in input_data_types:
                                self._check_calc_x_day_rolling_mean(df, format, data_type=input_data_type, other_input_data_types=[col for col in input_data_types if col != input_data_type])
                        # Note that we still also perform this test if data_type == "all" because we can also calculate the x day mean for all columns.
                        self._check_calc_x_day_rolling_mean(df, format, data_type)

    # -------------------------------------------------------------------------------------------------------------
    # Tests for calc_daily_change
    # -------------------------------------------------------------------------------------------------------------
    def test_calc_daily_change_jhu(self):
        """Run _check_daily_change over every valid JHU parameter combination."""
        for format in formats:
            for data_type in jhu_data_types:
                for region in jhu_regions:
                    if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
                        if data_type == "all":
                            input_data_types = set(jhu_data_types)
                            input_data_types.remove("all")
                            if region == "us":
                                input_data_types.remove("recovered")
                            for input_data_type in input_data_types:
                                self._check_daily_change(df, format=format, data_type=input_data_type, other_data_types=[col for col in input_data_types if col != input_data_type])
                        # Note that we still also perform this test if data_type == "all" because we can also calculate daily change for all columns.
                        self._check_daily_change(df, format=format, data_type=data_type)

    def test_calc_daily_change_long_nyt(self):
        """Run _check_daily_change over every valid NYT parameter combination.

        NOTE(review): the name says "long" but the loop covers both formats —
        presumably a leftover from an older test split; confirm before renaming.
        """
        for format in formats:
            for data_type in nyt_data_types:
                for county_option in nyt_county_options:
                    if format == "wide" and data_type == "all":
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
                        if data_type == "all":
                            input_data_types = set(nyt_data_types)
                            input_data_types.remove("all")
                            for input_data_type in input_data_types:
                                self._check_daily_change(df, format=format, data_type=input_data_type, other_data_types=[col for col in input_data_types if col != input_data_type])
                        # Note that we still also perform this test if data_type == "all" because we can also calculate daily change for all columns.
                        self._check_daily_change(df, format=format, data_type=data_type)

    # -------------------------------------------------------------------------------------------------------------
    # Tests for calc_days_since_min_count
    # -------------------------------------------------------------------------------------------------------------
    def test_calc_days_since_min_count_jhu(self):
        """Run _check_days_since over every valid JHU parameter combination."""
        for format in formats:
            for data_type in jhu_data_types:
                for region in jhu_regions:
                    if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
                        if data_type == "all":
                            count_by_types = set(jhu_data_types)
                            count_by_types.remove("all")
                            if region == "us":
                                count_by_types.remove("recovered")
                            for count_by_type in count_by_types:
                                self._check_days_since(df, format, count_by_type)
                        else:
                            self._check_days_since(df, format, data_type)

    def test_calc_days_since_min_count_nyt(self):
        """Run _check_days_since over every valid NYT parameter combination."""
        for format in formats:
            for data_type in nyt_data_types:
                for county_option in nyt_county_options:
                    if (format == "wide" and data_type == "all"):
                        pass # Invalid table parameter combination
                    else:
                        df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
                        if data_type == "all":
                            # NOTE: `type` here shadows the builtin within this comprehension.
                            for count_by_type in [type for type in nyt_data_types if type != "all"]:
                                self._check_days_since(df, format, count_by_type)
                        else:
                            self._check_days_since(df, format, data_type)

    # -------------------------------------------------------------------------------------------------------------
    # Helper methods
    # -------------------------------------------------------------------------------------------------------------
    @staticmethod
    def _check_select_top_x(df, format, data_type, num_regions, other_to_keep=[]):
        """Verify select_top_x_regions on df, ranking by data_type.

        df (pandas.DataFrame): A table from the package.
        format (str): "wide" or "long".
        data_type (str): The data column to rank regions by.
        num_regions (int): How many region grouping columns to select by (1 or 2).
        other_to_keep (list of str, optional): Other data columns that must survive the selection unchanged.
            NOTE(review): mutable default argument — safe only because it is never mutated here.

        Raises:
            Exception: For num_regions values other than 1 or 2, or the NYT states-only table with num_regions=2.
        """
        if num_regions == 1:
            # Search for defined region cols (based on data source)
            if {"Province/State", "Country/Region"}.issubset(df.columns): # JHU global table
                region_col = "Country/Region"
                exclude = ["US", "China"]
            elif {"Combined_Key"}.issubset(df.columns): # JHU USA table
                region_col = "Province_State"
                exclude = ["New York", "Illinois"]
            elif {"state"}.issubset(df.columns): # NYT USA state only or states and counties table.
                region_col = "state"
                exclude = ["Washington", "Illinois"]
            else:
                # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
                raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Province/State', 'Country/Region'}\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
            if format == "wide":
                group_cols = [region_col]
            else: # format == "long"
                group_cols = ["date", region_col]
            num_top = 10
            # Call the function
            outs = {
                "top_others_kept": cod.select_top_x_regions(df, region_cols=region_col, data_col=data_type, x=num_top, combine_subregions=True, other_data_cols=other_to_keep),
                "top_uncombined": cod.select_top_x_regions(df, region_cols=region_col, data_col=data_type, x=num_top, combine_subregions=False, other_data_cols=other_to_keep),
                "top_with_exclusions": cod.select_top_x_regions(df, region_cols=region_col, data_col=data_type, x=num_top, combine_subregions=True, other_data_cols=other_to_keep, exclude=exclude),
            }
            # Run basic table checks
            for name, out in outs.items():
                if name == "top_uncombined" and {"Admin2"}.issubset(df.columns):
                    _check_gotten(out, format, group_cols=group_cols + ["Admin2"]) # If it's the JHU U.S. table, we need to add "Admin2" as a group col, but only for the uncombined table.
                elif name == "top_uncombined" and {"Province/State"}.issubset(df.columns):
                    _check_gotten(out, format, group_cols=group_cols + ["Province/State"]) # If it's the JHU global table, we need to add "Province/State" as a group col, but only for the uncombined table.
                elif name == "top_uncombined" and {"county"}.issubset(df.columns):
                    _check_gotten(out, format, group_cols=group_cols + ["county"]) # If it's the NYT county table, we need to add "county" as a group col, but only for the uncombined table.
                else:
                    _check_gotten(out, format, group_cols=group_cols)
            # Make sure that the data values weren't changed, if we didn't aggregate
            if format == "wide":
                for name, out in outs.items():
                    # Date columns are actual datetime.date-derived objects in wide tables.
                    df_dates = df.columns[df.columns.map(lambda col: issubclass(type(col), datetime.date))]
                    out_dates = out.columns[out.columns.map(lambda col: issubclass(type(col), datetime.date))]
                    assert df_dates.equals(out_dates)
                    if name == "top_uncombined":
                        for date in df_dates:
                            for region in out[region_col].unique():
                                assert out.loc[out[region_col] == region, date].equals(df.loc[df[region_col] == region, date])
            else:
                for name, out in outs.items():
                    assert data_type in out.columns
                    if name == "top_uncombined":
                        for region in out[region_col].unique():
                            assert out.loc[out[region_col] == region, data_type].equals(df.loc[df[region_col] == region, data_type])
            # If we had other cols to keep, make sure they were kept, and are equal to their original values.
            for keep in other_to_keep:
                for name, out in outs.items():
                    assert keep in out.columns
                    if name == "top_uncombined":
                        for region in out[region_col].unique():
                            assert out.loc[out[region_col] == region, keep].equals(df.loc[df[region_col] == region, keep])
            # Check that the excluded countries aren't in the list
            assert not outs["top_with_exclusions"][region_col].isin(exclude).any()
            # Check that length of combined table is x * len(unique(dates))
            if format == "wide":
                for name, out in outs.items():
                    if name != "top_uncombined":
                        assert out.shape[0] == num_top
                        assert out.shape[0] == out[region_col].unique().size
            else:
                for name, out in outs.items():
                    if name == "top_uncombined":
                        assert out.shape[0] == df[region_col].isin(out[region_col]).sum()
                    else:
                        assert out.shape[0] <= num_top * out["date"].unique().size # We check <= because some of the regions may not have counts for all days at the beginning
                        assert num_top == out[region_col].unique().size
        elif num_regions == 2:
            # Search for defined region cols (based on data source)
            if {"Province/State", "Country/Region"}.issubset(df.columns): # JHU global table
                region_cols = ["Country/Region", "Province/State"]
                exclude = ["US", "China"]
            elif {"Combined_Key"}.issubset(df.columns): # JHU USA table
                region_cols = ["Province_State", "Admin2"]
                exclude = ["New York", "Illinois"]
            elif {"county", "state"}.issubset(df.columns): # NYT USA state and county table
                region_cols = ["county", "state"]
                exclude = ["Washington", "Illinois"]
            elif {"state"}.issubset(df.columns): # NYT USA state only table. Note that this column also exists in the state/county table, so we do the check after we've determined it's not that table.
                raise Exception("Can't do more than one region col for NYT states only table.")
            else:
                # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
                raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Province/State', 'Country/Region'}\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
            num_top = 10
            # Call the function
            outs = {
                "top_others_kept": cod.select_top_x_regions(df, region_cols=region_cols, data_col=data_type, x=num_top, combine_subregions=True, other_data_cols=other_to_keep),
                "top_uncombined": cod.select_top_x_regions(df, region_cols=region_cols, data_col=data_type, x=num_top, combine_subregions=False, other_data_cols=other_to_keep),
                "top_with_exclusions": cod.select_top_x_regions(df, region_cols=region_cols, data_col=data_type, x=num_top, combine_subregions=True, other_data_cols=other_to_keep, exclude=exclude),
            }
            # Run basic table checks
            if format == "wide":
                group_cols = region_cols
            else: # format == "long"
                group_cols = ["date"] + region_cols
            for name, out in outs.items():
                _check_gotten(out, format, group_cols=group_cols)
            # For these tests, we need any NaNs in the region cols to be filled with strings so they can compare equal.
            for name in outs.keys():
                out = outs[name]
                for region_col in region_cols:
                    out[region_col] = out[region_col].fillna("n/a")
                outs[name] = out
            for region_col in region_cols:
                # NOTE(review): this mutates the caller's df in place (fills NaNs); callers fetch a fresh df each time, so it doesn't leak between checks.
                df[region_col] = df[region_col].fillna("n/a")
            # Make sure that the data values weren't changed, if we didn't aggregate
            if format == "wide":
                for name, out in outs.items():
                    df_dates = df.columns[df.columns.map(lambda col: issubclass(type(col), datetime.date))]
                    out_dates = out.columns[out.columns.map(lambda col: issubclass(type(col), datetime.date))]
                    assert df_dates.equals(out_dates)
                    if name == "top_uncombined":
                        for date in df_dates:
                            # Compare every (region1, region2) pair's column against the original table.
                            region_combos = out[region_cols].drop_duplicates(keep="first")
                            rcol1 = region_cols[0]
                            rcol2 = region_cols[1]
                            for i in range(0, region_combos.index.size):
                                rval1 = region_combos.iloc[i, 0]
                                rval2 = region_combos.iloc[i, 1]
                                assert out.loc[(out[rcol1] == rval1) & (out[rcol2] == rval2), date].equals(df.loc[(df[rcol1] == rval1) & (df[rcol2] == rval2), date])
            else:
                for name, out in outs.items():
                    assert data_type in out.columns
                    if name == "top_uncombined":
                        region_combos = out[region_cols].drop_duplicates(keep="first")
                        rcol1 = region_cols[0]
                        rcol2 = region_cols[1]
                        for i in range(0, region_combos.index.size):
                            rval1 = region_combos.iloc[i, 0]
                            rval2 = region_combos.iloc[i, 1]
                            assert out.loc[(out[rcol1] == rval1) & (out[rcol2] == rval2), data_type].equals(df.loc[(df[rcol1] == rval1) & (df[rcol2] == rval2), data_type])
            # If we had other cols to keep, make sure they were kept, and are equal to their original values.
            for keep in other_to_keep:
                for name, out in outs.items():
                    assert keep in out.columns
                    if name == "top_uncombined":
                        region_combos = out[region_cols].drop_duplicates(keep="first")
                        rcol1 = region_cols[0]
                        rcol2 = region_cols[1]
                        for i in range(0, region_combos.index.size):
                            rval1 = region_combos.iloc[i, 0]
                            rval2 = region_combos.iloc[i, 1]
                            assert out.loc[(out[rcol1] == rval1) & (out[rcol2] == rval2), keep].equals(df.loc[(df[rcol1] == rval1) & (df[rcol2] == rval2), keep])
            # Check that the excluded countries aren't in the list
            for region_col in region_cols:
                assert not outs["top_with_exclusions"][region_col].isin(exclude).any()
            # Check that length of combined table is x * len(unique(dates))
            if format == "wide":
                for name, out in outs.items():
                    if name != "top_uncombined":
                        assert out.shape[0] == num_top
                        assert out.shape[0] == out[region_cols].drop_duplicates(keep="first").index.size
            else:
                for name, out in outs.items():
                    if name == "top_uncombined":
                        rcol1 = region_cols[0]
                        rcol2 = region_cols[1]
                        assert out.shape[0] == df[region_cols].isin({rcol1: out[rcol1], rcol2: out[rcol2]}).all(axis="columns").sum()
                    else:
                        assert out.shape[0] <= num_top * out["date"].unique().size # We check <= because some of the regions may not have counts for all days at the beginning
                        assert num_top == out[region_cols].drop_duplicates(keep="first").index.size
        else:
            raise Exception("Test doesn't support that number of regions. Options are 1 or 2.")

    @staticmethod
    def _check_select_regions(df, format, cols_kept):
        """Verify select_regions on df.

        df (pandas.DataFrame): A table from the package.
        format (str): "wide" or "long".
        cols_kept (list of str): Data columns that must survive the selection.
        """
        # Search for defined region cols (based on data source)
        if {"Province/State", "Country/Region"}.issubset(df.columns): # JHU global table
            region_col = "Country/Region"
            regions = ["US", "China", "Turkey"]
        elif {"Combined_Key"}.issubset(df.columns): # JHU USA table
            region_col = "Province_State"
            regions = ["Washington", "New York", "Arizona"]
        elif {"state"}.issubset(df.columns): # NYT USA state only or states and counties table.
            region_col = "state"
            regions = ["Washington", "New York", "Arizona"]
        else:
            # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
            raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Province/State', 'Country/Region'}\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
        # Call the function
        dfs = {
            "selected": cod.select_regions(df, region_col=region_col, regions=regions, combine_subregions=True, data_cols=cols_kept),
            "selected_uncombined": cod.select_regions(df, region_col=region_col, regions=regions, combine_subregions=False, data_cols=cols_kept),
        }
        # Run basic table checks
        for name, out in dfs.items():
            if name == "selected":
                if format == "long":
                    _check_gotten(out, format, group_cols=["date", region_col])
                else:
                    _check_gotten(out, format, group_cols=[region_col])
            else: # name == "selected_uncombined"
                _check_gotten(out, format)
        # Make sure that only the regions we specified exist in the region col
        for out in dfs.values():
            assert out[region_col].isin(regions).all()
        # Make sure cols_kept were kept
        for name, out in dfs.items():
            if format == "wide":
                df_dates = df.columns[df.columns.map(lambda col: issubclass(type(col), datetime.date))]
                out_dates = out.columns[out.columns.map(lambda col: issubclass(type(col), datetime.date))]
                assert df_dates.equals(out_dates)
                if name == "selected_uncombined":
                    for date in df_dates:
                        assert out[date].equals(df.loc[df[region_col].isin(regions), date])
            else: # format == "long"
                for col in cols_kept:
                    assert col in out.columns
                    if name == "selected_uncombined":
                        assert out[col].equals(df.loc[df[region_col].isin(regions), col])

    @staticmethod
    def _check_calc_x_day_rolling_mean(df, format, data_type, other_input_data_types=[]):
        """Verify calc_x_day_rolling_mean on df for data_type.

        df (pandas.DataFrame): A table from the package.
        format (str): "wide" or "long".
        data_type (str): "cases", "deaths", "recovered", or "all".
        other_input_data_types (list of str, optional): Columns that must be left unchanged.
            NOTE(review): mutable default argument — safe only because it is never mutated here.
        """
        # Process the data_type parameter
        if data_type == "all":
            data_types = ["cases", "deaths"]
            if "recovered" in df.columns:
                data_types.append("recovered")
        elif data_type in ["cases", "deaths", "recovered"]:
            data_types = [data_type]
        else:
            # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
            raise ParameterError(f"{data_type} is not a valid data type. Pass 'cases', 'deaths', 'recovered', or 'all'.")
        # Decide on our region_cols
        # Search for defined region cols (based on data source)
        if {"Province/State", "Country/Region"}.issubset(df.columns): # JHU global table
            region_cols = ["Country/Region", "Province/State"]
        elif {"Combined_Key"}.issubset(df.columns): # JHU USA table
            region_cols = ["Province_State", "Admin2"]
        elif {"county", "state"}.issubset(df.columns): # NYT USA state and county table
            region_cols = ["county", "state"]
        elif {"state"}.issubset(df.columns): # NYT USA state only table. Note that this column also exists in the state/county table, so we do the check after we've determined it's not that table.
            region_cols = ["state"]
        else:
            # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
            raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Province/State', 'Country/Region'}\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
        mean_range = 3
        dfs = {
            "centered": cod.calc_x_day_rolling_mean(df, data_cols=data_types, region_cols=region_cols, x=mean_range, center=True),
            "not_centered": cod.calc_x_day_rolling_mean(df, data_cols=data_types, region_cols=region_cols, x=mean_range, center=False),
        }
        # Run basic table checks
        if format == "long":
            unique_cols = ["date"] + region_cols
        else: # format == "wide"
            unique_cols = region_cols
        for name, out in dfs.items():
            _check_gotten(out, format, group_cols=unique_cols)
        # Check that data_types got averaged
        for out in dfs.values():
            # NOTE: this loop variable shadows the data_type parameter, which is not used again afterward.
            for data_type in data_types:
                if format == "long":
                    assert f"mean_{data_type}" in out.columns
                else: # format == "wide"
                    assert not df.equals(out)
        # Make sure other_input_data_types are unchanged
        if format == "long":
            for other_input in other_input_data_types:
                for out in dfs.values():
                    assert out[other_input].equals(df[other_input])

    @staticmethod
    def _check_daily_change(df, format, data_type, other_data_types=[]):
        """Verifies that when df is passed to calc_daily_change, the daily count columns generated are correct.

        df (pandas.DataFrame): A dataframe from the package.
        format (str): The format of the table. Either "wide" or "long".
        data_type (str): The data type the table is for. Either "cases", "deaths", "recovered", or "all".
        other_data_types (list of str, optional): Other data types for which the daily change isn't calculated, and which should be unchanged by the function.
            NOTE(review): mutable default argument — safe only because it is never mutated here.

        Returns:
        None
        """
        # Search for defined grouping cols (based on data source and region)
        if {"Combined_Key"}.issubset(df.columns): # JHU table
            group_cols = ["Combined_Key"]
        elif {"county", "state"}.issubset(df.columns): # NYT USA state and county table
            group_cols = ["county", "state"]
        elif {"state"}.issubset(df.columns): # NYT USA state only table. Note that this column also exists in the state/county table, so we do the check after we've determined it's not that table.
            group_cols = ["state"]
        else:
            # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
            raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
        if format == "long":
            if data_type == "all":
                data_types = ["cases", "deaths"]
                if "recovered" in df.columns:
                    data_types.append("recovered")
            elif data_type in ["cases", "deaths", "recovered"]:
                data_types = [data_type]
            else:
                # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
                raise ParameterError(f"{data_type} is not a valid data type. Pass 'cases', 'deaths', or 'recovered'.")
            daily = cod.calc_daily_change(df, data_types, region_cols=group_cols)
            for other_data_type in other_data_types:
                assert daily[other_data_type].equals(df[other_data_type])
            # Run basic table checks
            _check_gotten(daily, format, allow_negs=True)
            # Check that no columns were lost
            assert df.columns.isin(daily.columns).all()
            # Check daily change calculations against original cumulative columns
            for iter_data_type in data_types:
                if len(group_cols) == 1:
                    group_col = group_cols[0]
                    for group in df[group_col].drop_duplicates():
                        row_filter = df[group_col] == group
                        group_df = df[row_filter]
                        group_daily = daily[row_filter]
                        # np.insert(..., 0, 0) shifts the cumulative series down one day (first day's change == first day's count).
                        assert group_daily["daily_" + iter_data_type].equals(pd.Series(group_daily[iter_data_type] - np.insert(group_daily[iter_data_type].values[:-1], 0, 0))) # Check the daily calculation against the cumulative col in the same df
                        assert group_daily[iter_data_type].equals(group_df[iter_data_type]) # Check the cumulative col against the one in the original df
                elif len(group_cols) == 2:
                    group_col1 = group_cols[0]
                    group_col2 = group_cols[1]
                    existing_groups = df[group_cols].drop_duplicates(keep="first")
                    for i in range(0, existing_groups.index.size):
                        group1 = existing_groups.iloc[i, 0]
                        group2 = existing_groups.iloc[i, 1]
                        df_filter = (df[group_col1] == group1) & (df[group_col2] == group2)
                        if df_filter.any():
                            group_df = df[df_filter]
                            group_daily = daily[df_filter]
                            assert group_daily["daily_" + iter_data_type].equals(pd.Series(group_daily[iter_data_type] - np.insert(group_daily[iter_data_type].values[:-1], 0, 0))) # Check the daily calculation against the cumulative col in the same df
                            assert group_daily[iter_data_type].equals(group_df[iter_data_type]) # Check the cumulative col against the one in the original df
                        else:
                            # Every combo came from df itself, so an empty filter should be impossible.
                            raise Exception("That was unexpected.")
                else:
                    raise Exception(f"Unexpected length of group_cols: '{len(group_cols)}'. group_cols:\n{group_cols}")
        elif format == "wide":
            daily = cod.calc_daily_change(df, data_type, region_cols=group_cols)
            # Run basic table checks
            _check_gotten(daily, format, allow_negs=True)
            date_cols = [col for col in df.columns if issubclass(type(col), datetime.date)]
            # The first day should be the same in both the daily and original dfs, since all cases/deaths/recovered were "new"
            assert np.equal(df[date_cols[0]], daily[date_cols[0]]).all()
            # Check that each daily change equals that day's columns minus the previous day's column, element-wise, in the original df
            for i in range(1, len(date_cols)):
                day = date_cols[i]
                prev_day = date_cols[i - 1]
                assert np.equal(daily[day].values, (df[day] - df[prev_day]).values).all()
        else:
            raise Exception(f"Invalid format '{format}'")

    @staticmethod
    def _check_days_since(df, format, data_type):
        """Verifies that when df is passed to calc_days_since_min_count, the functions works.

        df (pandas.DataFrame): A dataframe from the package.
        format (str): The format of the table. Either "wide" or "long".
        data_type (str): The data type the table is for. Either "cases", "deaths", "recovered", or "all".

        Returns:
        None
        """
        # Search for defined grouping cols (based on data source and region)
        if {"Combined_Key"}.issubset(df.columns): # JHU table
            group_cols = ["Combined_Key"]
        elif {"county", "state"}.issubset(df.columns): # NYT USA state and county table
            group_cols = ["county", "state"]
        elif {"state"}.issubset(df.columns): # NYT USA state only table. Note that this column also exists in the state/county table, so we do the check after we've determined it's not that table.
            group_cols = ["state"]
        else:
            # NOTE(review): bare ParameterError would itself raise NameError here; presumably codex.ParameterError was intended.
            raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
        # Call the function
        min_count = 100
        ct = cod.calc_days_since_min_count(df, data_type, region_cols=group_cols, min_count=min_count)
        # Run basic table checks
        _check_gotten(ct, format="long") # The calc_days_since_min_count function only outputs table in long format, even if given wide format as input
        # Check that all values in data type col are >= min count
        assert (ct[data_type] >= min_count).all()
        # Check that all values in days_since_{min_count}_{data_type} are <= number of days in original df
        if format == "long":
            num_days = df["date"].unique().size
        else: # format == "wide"
            num_days = df.columns.map(lambda col: issubclass(type(col), datetime.date)).to_series().astype(bool).sum()
        assert (ct[f"days_since_{min_count}_{data_type}"] <= num_days).all()
| 57.19802
| 315
| 0.55852
| 4,953
| 40,439
| 4.348476
| 0.076721
| 0.05423
| 0.015322
| 0.009657
| 0.809685
| 0.773517
| 0.749373
| 0.716873
| 0.701597
| 0.696722
| 0
| 0.005011
| 0.318974
| 40,439
| 706
| 316
| 57.279037
| 0.777052
| 0.204011
| 0
| 0.686117
| 0
| 0.016097
| 0.117251
| 0.018503
| 0
| 0
| 0
| 0
| 0.084507
| 1
| 0.032193
| false
| 0.036217
| 0.016097
| 0
| 0.050302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
23e23d429453ae875995a68128a163484746c5b2
| 133
|
py
|
Python
|
readthedocs/projects/exceptions.py
|
attakei/readthedocs-oauth
|
1c80736fed654dc93c26ea6109a34eebd5eff08b
|
[
"MIT"
] | 1
|
2021-04-27T05:55:34.000Z
|
2021-04-27T05:55:34.000Z
|
readthedocs/projects/exceptions.py
|
attakei/readthedocs-oauth
|
1c80736fed654dc93c26ea6109a34eebd5eff08b
|
[
"MIT"
] | null | null | null |
readthedocs/projects/exceptions.py
|
attakei/readthedocs-oauth
|
1c80736fed654dc93c26ea6109a34eebd5eff08b
|
[
"MIT"
] | 1
|
2016-03-06T08:43:53.000Z
|
2016-03-06T08:43:53.000Z
|
"""Project exceptions"""
class ProjectImportError(Exception):
    """Raised when a project cannot be imported from its repository."""
| 14.777778
| 56
| 0.684211
| 14
| 133
| 6.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195489
| 133
| 8
| 57
| 16.625
| 0.850467
| 0.488722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
9b16d7c9b692991dec0a51fa5cd7769aefa89044
| 37
|
py
|
Python
|
osu_map_gen/preprocess/__init__.py
|
Syps/osu_beatmap_generator
|
684b5356bbf79ba847b3ab20e2b6b3a73d7721ad
|
[
"MIT"
] | 15
|
2018-11-28T12:00:53.000Z
|
2022-02-04T05:56:45.000Z
|
osu_map_gen/preprocess/__init__.py
|
Syps/osu_beatmap_generator
|
684b5356bbf79ba847b3ab20e2b6b3a73d7721ad
|
[
"MIT"
] | 3
|
2020-07-16T11:40:33.000Z
|
2021-06-15T15:13:25.000Z
|
osu_map_gen/preprocess/__init__.py
|
Syps/osu_beatmap_generator
|
684b5356bbf79ba847b3ab20e2b6b3a73d7721ad
|
[
"MIT"
] | 4
|
2020-11-12T09:12:39.000Z
|
2021-12-26T16:35:11.000Z
|
from ..aisu_circles import markov
| 7.4
| 33
| 0.756757
| 5
| 37
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 37
| 4
| 34
| 9.25
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1a2d9a4aeb131dfb3b92325355c97a1b06571fe
| 87
|
py
|
Python
|
src/services/f12019/__init__.py
|
jordansilva/raspberry-f1-dashboard
|
96446a348d036a75f4699bab4459eabec16705f8
|
[
"Apache-2.0"
] | null | null | null |
src/services/f12019/__init__.py
|
jordansilva/raspberry-f1-dashboard
|
96446a348d036a75f4699bab4459eabec16705f8
|
[
"Apache-2.0"
] | null | null | null |
src/services/f12019/__init__.py
|
jordansilva/raspberry-f1-dashboard
|
96446a348d036a75f4699bab4459eabec16705f8
|
[
"Apache-2.0"
] | null | null | null |
from .utils.formatHelper import *
from .utils.enums import *
from .driver import Driver
| 29
| 33
| 0.793103
| 12
| 87
| 5.75
| 0.5
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126437
| 87
| 3
| 34
| 29
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1aa2ae5ffc56633475f6fbc164e89e44d291674
| 157
|
py
|
Python
|
tests/__init__.py
|
amitkpandey-in/business-rules
|
32ee05acc25d1c7626420019eaede7c046d185c6
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
amitkpandey-in/business-rules
|
32ee05acc25d1c7626420019eaede7c046d185c6
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
amitkpandey-in/business-rules
|
32ee05acc25d1c7626420019eaede7c046d185c6
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
try:
from unittest2 import TestCase
except ImportError:
from unittest import TestCase
assert TestCase
| 19.625
| 39
| 0.77707
| 18
| 157
| 6.5
| 0.611111
| 0.239316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 0.210191
| 157
| 7
| 40
| 22.428571
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1beef180ea9de2efe0081fe02523f90b89c2a67
| 45
|
py
|
Python
|
GNN/model/__init__.py
|
HamletWantToCode/alchemy
|
4af98323e7ae64e1c4b7901c544dddb48b4780b6
|
[
"MIT"
] | 2
|
2019-07-23T03:47:09.000Z
|
2019-07-24T09:20:15.000Z
|
GNN/model/__init__.py
|
HamletWantToCode/alchemy
|
4af98323e7ae64e1c4b7901c544dddb48b4780b6
|
[
"MIT"
] | null | null | null |
GNN/model/__init__.py
|
HamletWantToCode/alchemy
|
4af98323e7ae64e1c4b7901c544dddb48b4780b6
|
[
"MIT"
] | null | null | null |
from .AGCN import AGCN
from .MPNN import MPNN
| 22.5
| 22
| 0.8
| 8
| 45
| 4.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 23
| 22.5
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7b106ea51ec220880949cef051c07849165298a7
| 711
|
py
|
Python
|
pyntel4004/src/hardware/suboperations/rom.py
|
alshapton/Pyntel4004
|
865a7fc5264d24f1281bee44c40a51e7e42598a0
|
[
"MIT"
] | 6
|
2021-02-12T21:37:53.000Z
|
2022-02-24T23:09:37.000Z
|
pyntel4004/src/hardware/suboperations/rom.py
|
alshapton/Pyntel4004
|
865a7fc5264d24f1281bee44c40a51e7e42598a0
|
[
"MIT"
] | 43
|
2021-04-23T09:32:24.000Z
|
2022-02-01T15:17:09.000Z
|
pyntel4004/src/hardware/suboperations/rom.py
|
alshapton/Pyntel4004
|
865a7fc5264d24f1281bee44c40a51e7e42598a0
|
[
"MIT"
] | 2
|
2021-06-11T01:12:44.000Z
|
2021-09-14T22:44:11.000Z
|
"""ROM methods."""
def read_all_rom(self) -> list:
"""
Return the values of all the locations of ROM.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
ROM
The values of all the locations of ROM
"""
return self.ROM
def read_all_rom_ports(self) -> list:
"""
Return the values of all the ROM ports.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
ROM_PORT
The values of all the ROM ports
"""
return self.ROM_PORT
| 18.710526
| 79
| 0.601969
| 87
| 711
| 4.83908
| 0.264368
| 0.085511
| 0.104513
| 0.133017
| 0.821853
| 0.821853
| 0.821853
| 0.743468
| 0.489311
| 0.489311
| 0
| 0
| 0.295359
| 711
| 37
| 80
| 19.216216
| 0.840319
| 0.673699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7b327cc3b83f11c99c306aecc0daa6d323980e1e
| 11,885
|
py
|
Python
|
forms/main_forms.py
|
chucktilbury/Accounting
|
15c467dac4405e872f3820e3ff35a53240335631
|
[
"MIT"
] | null | null | null |
forms/main_forms.py
|
chucktilbury/Accounting
|
15c467dac4405e872f3820e3ff35a53240335631
|
[
"MIT"
] | null | null | null |
forms/main_forms.py
|
chucktilbury/Accounting
|
15c467dac4405e872f3820e3ff35a53240335631
|
[
"MIT"
] | null | null | null |
from system.forms import Forms
from system.logger import *
from dialogs.edit_dialogs import *
from dialogs.select_dialog import *
#
# TODO: A customer cannot be deleted if a committed sale exists. If a customer is
# deleted, then all uncommitted sales are also deleted.
#
# Show total committed and uncommitted sales for customer.
#
@class_wrapper
class CustomersForm(Forms):
def __init__(self, notebook):
self.logger.set_level(Logger.DEBUG)
index = notebook.get_tab_index('Customers')
self.logger.debug('tab index = %d'%(index))
super().__init__(notebook.frame_list[index]['frame'], 'Customer')
notebook.frame_list[index]['show_cb'] = self.load_form
width1 = 70
width2 = 28
self.add_title('Browse Customers')
self.add_label('Date:')
self.add_dynamic_label('date_created', 1, bg='white', width=width2, anchor='w')
self.add_spacer(2)
self.add_label('Name:')
self.add_dynamic_label('name', 3, bg='white', width=width1, anchor='w')
self.add_label('Address1:')
self.add_dynamic_label('address1', 3, bg='white', width=width1, anchor='w')
self.add_label('Address2:')
self.add_dynamic_label('address2', 3, bg='white', width=width1, anchor='w')
self.add_label('City:')
self.add_dynamic_label('city', 1, bg='white', width=width2, anchor='w')
self.add_label('State:')
self.add_dynamic_label('state', 1, bg='white', width=width2, anchor='w')
self.add_label('Zip Code:')
self.add_dynamic_label('zip', 1, bg='white', width=width2, anchor='w')
self.add_label('Country:')
self.add_indirect_label('country_ID', 1, 'Country', 'name', bg='white', width=width2, anchor='w')
self.add_label('Email:')
self.add_dynamic_label('email_address', 1, bg='white', width=width2, anchor='w')
self.add_label('Email Status:')
self.add_indirect_label('email_status_ID', 1, 'EmailStatus', 'name', bg='white', width=width2, anchor='w')
self.add_label('Phone:')
self.add_dynamic_label('phone_number', 1, bg='white', width=width2, anchor='w')
self.add_label('Phone Status:')
self.add_indirect_label('phone_status_ID', 1, 'PhoneStatus', 'name', bg='white', width=width2, anchor='w')
self.add_label('Web Site:')
self.add_dynamic_label('web_site', 1, bg='white', width=width2, anchor='w')
self.add_label('Class:')
self.add_indirect_label('class_ID', 1, 'ContactClass', 'name', bg='white', width=width2, anchor='w')
self.add_label('Description:')
self.add_dynamic_label('description', 3, bg='white', width=width1, anchor='w')
self.add_label('Notes:')
self.add_text('notes', 3, state='disabled', width=77, height=10)
self.add_ctl_button('Prev')
self.add_ctl_button('Next')
self.add_btn_spacer()
self.add_select_button(SelectDialog, owner=self.owner,
table=self.table, column='name')
self.add_btn_spacer()
self.add_ctl_button('Delete')
self.add_custom_button('Edit', EditCustomerDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
self.add_custom_button('New', NewCustomerDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
#
# TODO: A vendor cannot be deleted if a committed purchase exists. If a vendor is
# deleted, then all uncommitted purchases are also deleted.
#
# Associate a vendor with an account. When a purchase is made, then that
# account is debit when the purchase is committed.
#
# Show total committed and uncommitted purchases for vendor
#
@class_wrapper
class VendorsForm(Forms):
def __init__(self, notebook):
self.logger.set_level(Logger.DEBUG)
index = notebook.get_tab_index('Vendors')
super().__init__(notebook.frame_list[index]['frame'], 'Vendor')
notebook.frame_list[index]['show_cb'] = self.load_form
width1 = 70
width2 = 28
self.add_title('Browse Vendors')
self.add_label('Date:')
self.add_dynamic_label('date_created', 1, bg='white', width=width2, anchor='w')
self.add_spacer(2)
self.add_label('Name:')
self.add_dynamic_label('name', 3, bg='white', width=width1, anchor='w')
self.add_label('Contact Name:')
self.add_dynamic_label('contact_name', 3, bg='white', width=width1, anchor='w')
self.add_label('Address1:')
self.add_dynamic_label('address1', 3, bg='white', width=width1, anchor='w')
self.add_label('Address2:')
self.add_dynamic_label('address2', 3, bg='white', width=width1, anchor='w')
self.add_label('City:')
self.add_dynamic_label('city', 1, bg='white', width=width2, anchor='w')
self.add_label('State:')
self.add_dynamic_label('state', 1, bg='white', width=width2, anchor='w')
self.add_label('Zip Code:')
self.add_dynamic_label('zip', 1, bg='white', width=width2, anchor='w')
self.add_label('Country:')
self.add_indirect_label('country_ID', 1, 'Country', 'name', bg='white', width=width2, anchor='w')
self.add_label('Email:')
self.add_dynamic_label('email_address', 1, bg='white', width=width2, anchor='w')
self.add_label('Email Status:')
self.add_indirect_label('email_status_ID', 1, 'EmailStatus', 'name', bg='white', width=width2, anchor='w')
self.add_label('Phone:')
self.add_dynamic_label('phone_number', 1, bg='white', width=width2, anchor='w')
self.add_label('Phone Status:')
self.add_indirect_label('phone_status_ID', 1, 'PhoneStatus', 'name', bg='white', width=width2, anchor='w')
self.add_label('Web Site:')
self.add_dynamic_label('web_site', 1, bg='white', width=width2, anchor='w')
self.add_label('Type:')
self.add_indirect_label('type_ID', 1, 'ContactClass', 'name', bg='white', width=width2, anchor='w')
self.add_label('Description:')
self.add_dynamic_label('description', 3, bg='white', width=width1, anchor='w')
self.add_label('Notes:')
self.add_text('notes', 3, state='disabled', width=77, height=10)
self.add_ctl_button('Prev')
self.add_ctl_button('Next')
self.add_btn_spacer()
#self.add_ctl_button('Select', 'name')
self.add_select_button(SelectDialog, owner=self.owner,
table=self.table, column='name')
self.add_btn_spacer()
self.add_ctl_button('Delete')
self.add_custom_button('Edit', EditVendorDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
self.add_custom_button('New', NewVendorDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
#
# TODO: Modify these forms so that a new sale can be entered and committed sales
# cannot be modified. (sales and products)
#
# Need to select sales based on customer name and pull up all sales associated
# a customer for selections.
#
# Find a way to prevent duplicate sales from being imported.
#
# Make product widget simpler. This form only displays the products. Products
# for this sale are edited in a different dialog that is activated by a button.
# If the sale is committed, then the button is disabled.
#
# Sales and purchases need to show if they have been committed. When the commit
# button is pressed, then the accounts are debited.
#
@class_wrapper
class SalesForm(Forms):
def __init__(self, notebook):
index = notebook.get_tab_index('Sales')
super().__init__(notebook.frame_list[index]['frame'], 'SaleRecord')
notebook.frame_list[index]['show_cb'] = self.load_form
width2 = 25
self.add_title('Browse Sales')
self.add_label('Date:')
self.add_dynamic_label('date', 1, bg='white', width=width2, anchor='w')
self.add_spacer(2)
self.add_label('Customer:')
self.add_indirect_label('customer_ID', 1, 'Customer', 'name', bg='white', width=width2, anchor='w')
self.add_label('Gross:')
self.add_dynamic_label('gross', 1, bg='white', width=width2, anchor='w')
self.add_label('Fees:')
self.add_dynamic_label('fees', 1, bg='white', width=width2, anchor='w')
self.add_label('Shipping:')
self.add_dynamic_label('shipping', 1, bg='white', width=width2, anchor='w')
self.add_label('Status:')
self.add_indirect_label('status_ID', 1, 'SaleStatus', 'name', bg='white', width=width2, anchor='w')
#self.add_products_widget()
self.add_label('Committed:')
self.add_checkbox('committed', state='disabled')
self.add_label('Notes:')
self.add_text('notes', 3, state='disabled', width=77, height=10)
self.add_ctl_button('Prev')
self.add_ctl_button('Next')
self.add_btn_spacer()
self.add_select_button(IndirectSelectDialog, owner=self.owner,
loc_tab=self.table, loc_col='customer_ID',
for_tab='Customer', for_col='name')
self.add_btn_spacer()
self.add_ctl_button('Delete')
self.add_custom_button('Edit', EditSaleDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
self.add_custom_button('New', NewSaleDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
@class_wrapper
class PurchaseForm(Forms):
def __init__(self, notebook):
index = notebook.get_tab_index('Purchases')
super().__init__(notebook.frame_list[index]['frame'], 'PurchaseRecord')
notebook.frame_list[index]['show_cb'] = self.load_form
width2 = 28
self.add_title('Browse Purchases')
self.add_label('Date:')
self.add_dynamic_label('date', 1, bg='white', width=width2, anchor='w')
self.add_spacer(2)
self.add_label('Vendor:')
self.add_indirect_label('vendor_ID', 1, 'Vendor', 'name', bg='white', width=width2, anchor='w')
self.add_label('Gross:')
self.add_dynamic_label('gross', 1, bg='white', width=width2, anchor='w')
self.add_label('Tax:')
self.add_dynamic_label('tax', 1, bg='white', width=width2, anchor='w')
self.add_label('Shipping:')
self.add_dynamic_label('shipping', 1, bg='white', width=width2, anchor='w')
self.add_label('Type:')
self.add_indirect_label('type_ID', 1, 'PurchaseType', 'name', bg='white', width=width2, anchor='w')
self.add_label('Status:')
self.add_indirect_label('status_ID', 1, 'PurchaseStatus', 'name', bg='white', width=width2, anchor='w')
self.add_label('Committed:')
self.add_checkbox('committed', state='disabled')
self.add_spacer(2)
self.add_label('Notes:')
self.add_text('notes', 3, state='disabled', width=77, height=10)
self.add_ctl_button('Prev')
self.add_ctl_button('Next')
self.add_btn_spacer()
self.add_select_button(IndirectSelectDialog, owner=self.owner,
loc_tab=self.table, loc_col='vendor_ID',
for_tab='Vendor', for_col='name')
self.add_btn_spacer()
self.add_ctl_button('Delete')
self.add_custom_button('Edit', EditPurchaseDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
self.add_custom_button('New', NewPurchaseDialog, owner=self.owner,
table=self.table, row_index=self.row_index)
| 41.701754
| 114
| 0.630963
| 1,561
| 11,885
| 4.588085
| 0.122357
| 0.139765
| 0.083775
| 0.086009
| 0.788886
| 0.769199
| 0.758866
| 0.737783
| 0.736526
| 0.7315
| 0
| 0.015079
| 0.224401
| 11,885
| 284
| 115
| 41.848592
| 0.761879
| 0.107615
| 0
| 0.739583
| 0
| 0
| 0.152005
| 0
| 0
| 0
| 0
| 0.003521
| 0
| 1
| 0.020833
| false
| 0
| 0.020833
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9e56fe95a6e01f57f06f2207b17cd50393f51015
| 67
|
py
|
Python
|
backend/api/minhash/__init__.py
|
Carbonara-Project/Carbonara-Web
|
be1fac46ed3e4f590f3d885e646970a29ec43cb5
|
[
"MIT"
] | 2
|
2020-01-07T12:35:11.000Z
|
2021-09-18T10:30:57.000Z
|
backend/api/minhash/__init__.py
|
Carbonara-Project/Carbonara-Web
|
be1fac46ed3e4f590f3d885e646970a29ec43cb5
|
[
"MIT"
] | null | null | null |
backend/api/minhash/__init__.py
|
Carbonara-Project/Carbonara-Web
|
be1fac46ed3e4f590f3d885e646970a29ec43cb5
|
[
"MIT"
] | null | null | null |
from .lean_minhash import LeanMinHash
from .minhash import MinHash
| 22.333333
| 37
| 0.850746
| 9
| 67
| 6.222222
| 0.555556
| 0.464286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 67
| 2
| 38
| 33.5
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9e5dd2eebb954fc5094f30f45639779d9dc6cafc
| 121
|
py
|
Python
|
Pandas/5-Loading-data-plain-text-files.py
|
Pythobit/Python-libraries
|
cd94b61ee65e989dd29827edbd02567bd06afacb
|
[
"MIT"
] | 1
|
2021-08-13T16:13:25.000Z
|
2021-08-13T16:13:25.000Z
|
Pandas/5-Loading-data-plain-text-files.py
|
Pythobit/Python-libraries
|
cd94b61ee65e989dd29827edbd02567bd06afacb
|
[
"MIT"
] | null | null | null |
Pandas/5-Loading-data-plain-text-files.py
|
Pythobit/Python-libraries
|
cd94b61ee65e989dd29827edbd02567bd06afacb
|
[
"MIT"
] | null | null | null |
df4 = pandas.read_csv('supermarkets-commas.txt')
df4
df5 = pandas.read_csv('supermarkets-semi-colons.txt',sep=';')
df5
| 17.285714
| 61
| 0.735537
| 18
| 121
| 4.833333
| 0.611111
| 0.229885
| 0.298851
| 0.574713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 0.082645
| 121
| 6
| 62
| 20.166667
| 0.747748
| 0
| 0
| 0
| 0
| 0
| 0.429752
| 0.421488
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9e60a21d3f63f5361f33973e8e89172b1839c420
| 398
|
py
|
Python
|
ucate/application/workflows/__init__.py
|
HadarRosenwald/ucate
|
f57e47b1baec802c955adc7b07317d8014e50d91
|
[
"Apache-2.0"
] | 25
|
2020-10-27T21:30:19.000Z
|
2022-01-04T08:12:12.000Z
|
ucate/application/workflows/__init__.py
|
HadarRosenwald/ucate
|
f57e47b1baec802c955adc7b07317d8014e50d91
|
[
"Apache-2.0"
] | 5
|
2020-11-06T21:30:29.000Z
|
2021-05-20T10:03:57.000Z
|
ucate/application/workflows/__init__.py
|
HadarRosenwald/ucate
|
f57e47b1baec802c955adc7b07317d8014e50d91
|
[
"Apache-2.0"
] | 8
|
2020-12-01T05:44:31.000Z
|
2022-03-31T18:18:36.000Z
|
from ucate.application.workflows.tarnet import train as train_tarnet
from ucate.application.workflows.tlearner import train as train_tlearner
from ucate.application.workflows.cevae import train as train_cevae
from ucate.application.workflows.evaluation import evaluate
from ucate.application.workflows.evaluation import summarize
from ucate.application.workflows.evaluation import build_summary
| 39.8
| 72
| 0.871859
| 52
| 398
| 6.596154
| 0.288462
| 0.157434
| 0.349854
| 0.507289
| 0.393586
| 0.393586
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082915
| 398
| 9
| 73
| 44.222222
| 0.939726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9e69489946c8bdb27bb22d15512c7067e6aa42ef
| 46
|
py
|
Python
|
binance/__init__.py
|
cottonmalone/binance-dex
|
2b33e5d0dd0e24e78eea58e857d9872e5adc8c5f
|
[
"MIT"
] | null | null | null |
binance/__init__.py
|
cottonmalone/binance-dex
|
2b33e5d0dd0e24e78eea58e857d9872e5adc8c5f
|
[
"MIT"
] | null | null | null |
binance/__init__.py
|
cottonmalone/binance-dex
|
2b33e5d0dd0e24e78eea58e857d9872e5adc8c5f
|
[
"MIT"
] | null | null | null |
from .constants import *
from .client import *
| 23
| 24
| 0.76087
| 6
| 46
| 5.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 2
| 25
| 23
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9e74d5a30684aba4039a9a25863d4e1cfc8444a9
| 137
|
py
|
Python
|
django/www/MeteoGaliciaDB/faq/views.py
|
hugo-lorenzo-mato/Meteo-Galicia-DB
|
3dd52534c16216de5f25cd40877d2facc7cffe24
|
[
"MIT"
] | null | null | null |
django/www/MeteoGaliciaDB/faq/views.py
|
hugo-lorenzo-mato/Meteo-Galicia-DB
|
3dd52534c16216de5f25cd40877d2facc7cffe24
|
[
"MIT"
] | null | null | null |
django/www/MeteoGaliciaDB/faq/views.py
|
hugo-lorenzo-mato/Meteo-Galicia-DB
|
3dd52534c16216de5f25cd40877d2facc7cffe24
|
[
"MIT"
] | 1
|
2021-04-27T18:37:41.000Z
|
2021-04-27T18:37:41.000Z
|
from django.shortcuts import render
# Create your views here.
def faq(request):
return render(request, 'faq/form/fandq.html', None)
| 22.833333
| 55
| 0.744526
| 20
| 137
| 5.1
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145985
| 137
| 6
| 55
| 22.833333
| 0.871795
| 0.167883
| 0
| 0
| 0
| 0
| 0.168142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
9e993a2e9c9cbbb6ee923eb8112390250e485279
| 310
|
py
|
Python
|
python/rosie/__init__.py
|
amininger/rosie
|
1cd80ba6f7548bc2e8077c38c2c497e31d38c9b8
|
[
"BSD-3-Clause"
] | null | null | null |
python/rosie/__init__.py
|
amininger/rosie
|
1cd80ba6f7548bc2e8077c38c2c497e31d38c9b8
|
[
"BSD-3-Clause"
] | null | null | null |
python/rosie/__init__.py
|
amininger/rosie
|
1cd80ba6f7548bc2e8077c38c2c497e31d38c9b8
|
[
"BSD-3-Clause"
] | null | null | null |
__all__ = [ "ActionStackConnector", "CommandConnector", "InternalCommandConnector", "RosieClient", "RosieGUI" ]
from .ActionStackConnector import ActionStackConnector
from .CommandConnector import CommandConnector, InternalCommandConnector
from .RosieClient import RosieClient
from .RosieGUI import RosieGUI
| 38.75
| 111
| 0.83871
| 23
| 310
| 11.130435
| 0.347826
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090323
| 310
| 7
| 112
| 44.285714
| 0.907801
| 0
| 0
| 0
| 0
| 0
| 0.255663
| 0.07767
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9ec02ec1567456e5d0070fdfaf0fdc321a76165f
| 160
|
py
|
Python
|
sec.py
|
nathanielmathew/BERT-QuestionAns-API
|
3c831a1f8037b50357cd6f66c19301beca0f2c00
|
[
"MIT"
] | 3
|
2021-04-13T07:13:24.000Z
|
2021-04-17T19:47:39.000Z
|
sec.py
|
nathanielmathew/BERT-QuestionAns-API
|
3c831a1f8037b50357cd6f66c19301beca0f2c00
|
[
"MIT"
] | null | null | null |
sec.py
|
nathanielmathew/BERT-QuestionAns-API
|
3c831a1f8037b50357cd6f66c19301beca0f2c00
|
[
"MIT"
] | 1
|
2021-06-09T06:19:35.000Z
|
2021-06-09T06:19:35.000Z
|
import string
import random
N = 32
def generate_token():
return ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=N))
| 22.857143
| 101
| 0.76875
| 23
| 160
| 5.217391
| 0.695652
| 0.183333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.1125
| 160
| 7
| 101
| 22.857143
| 0.830986
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7bc0ae3a27aef469d1085ed19ec81af910ca55c5
| 28
|
py
|
Python
|
modules/__init__.py
|
OpenVoiceOS/ovos_api_service
|
58b5e2cc91958ebacf212d10ba31ea59fda8452f
|
[
"Apache-2.0"
] | null | null | null |
modules/__init__.py
|
OpenVoiceOS/ovos_api_service
|
58b5e2cc91958ebacf212d10ba31ea59fda8452f
|
[
"Apache-2.0"
] | 1
|
2020-09-01T06:14:48.000Z
|
2020-09-01T06:14:48.000Z
|
modules/__init__.py
|
OpenVoiceOS/ovos_api_service
|
58b5e2cc91958ebacf212d10ba31ea59fda8452f
|
[
"Apache-2.0"
] | 2
|
2021-01-08T20:54:00.000Z
|
2021-01-08T21:21:32.000Z
|
from .storage import Storage
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7bca80b1cee476ff337d8ad6e017929b27483c80
| 199
|
py
|
Python
|
ics2000/TrustEnconder.py
|
trdvangraft/ics2000
|
9d93e64a43c9f94e3264f994f8ccfaeaad2c9485
|
[
"MIT"
] | null | null | null |
ics2000/TrustEnconder.py
|
trdvangraft/ics2000
|
9d93e64a43c9f94e3264f994f8ccfaeaad2c9485
|
[
"MIT"
] | null | null | null |
ics2000/TrustEnconder.py
|
trdvangraft/ics2000
|
9d93e64a43c9f94e3264f994f8ccfaeaad2c9485
|
[
"MIT"
] | null | null | null |
from json import JSONEncoder
class TrustEncodable():
def toJson(self) -> dict:
pass
class TrustEnconder(JSONEncoder):
def default(self, o: TrustEncodable):
return o.toJson()
| 22.111111
| 41
| 0.683417
| 22
| 199
| 6.181818
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.221106
| 199
| 9
| 42
| 22.111111
| 0.877419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.142857
| 0.142857
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
c894ccf6bd035b01a29db1be3b5672e45b82476e
| 207
|
py
|
Python
|
practiceset1.py
|
shivam123-dev/PythonWithShivam
|
5b902c27df2cf21c6131fcbc2765187d52ca4e92
|
[
"MIT"
] | 1
|
2021-04-01T06:37:14.000Z
|
2021-04-01T06:37:14.000Z
|
practiceset1.py
|
shivam123-dev/pythonwithshivam
|
5b902c27df2cf21c6131fcbc2765187d52ca4e92
|
[
"MIT"
] | null | null | null |
practiceset1.py
|
shivam123-dev/pythonwithshivam
|
5b902c27df2cf21c6131fcbc2765187d52ca4e92
|
[
"MIT"
] | 1
|
2021-05-09T16:50:45.000Z
|
2021-05-09T16:50:45.000Z
|
# Importing the module of "Python" named as "OS"
import os
# Getting the current working directory
cwd = os.getcwd()
# Printing the current working directory
print("Current working directory before:", cwd)
| 29.571429
| 49
| 0.763285
| 29
| 207
| 5.448276
| 0.62069
| 0.265823
| 0.436709
| 0.329114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154589
| 207
| 6
| 50
| 34.5
| 0.902857
| 0.599034
| 0
| 0
| 0
| 0
| 0.417722
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c8e096ad292368c72537124fb0f321c10f486b61
| 404
|
py
|
Python
|
python/dupeFilesFinder_conf.py
|
bison--/singlePurposeScripts
|
b061e118e367898f8415bcdb915e10bc38cd6a3c
|
[
"MIT"
] | null | null | null |
python/dupeFilesFinder_conf.py
|
bison--/singlePurposeScripts
|
b061e118e367898f8415bcdb915e10bc38cd6a3c
|
[
"MIT"
] | null | null | null |
python/dupeFilesFinder_conf.py
|
bison--/singlePurposeScripts
|
b061e118e367898f8415bcdb915e10bc38cd6a3c
|
[
"MIT"
] | 1
|
2021-06-04T11:09:02.000Z
|
2021-06-04T11:09:02.000Z
|
# directories where to look for duplicates
dir_list = [
r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\catalog\product",
r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\import\multishopifystoremageconnect",
r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\import\mpmultishopifystoremageconnect",
]
| 50.5
| 122
| 0.789604
| 56
| 404
| 5.678571
| 0.464286
| 0.018868
| 0.066038
| 0.113208
| 0.641509
| 0.641509
| 0.641509
| 0.641509
| 0.641509
| 0.641509
| 0
| 0
| 0.066832
| 404
| 7
| 123
| 57.714286
| 0.843501
| 0.09901
| 0
| 0
| 0
| 0.6
| 0.880886
| 0.880886
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cde943d859d8c382062329e85f867ca532564883
| 46
|
py
|
Python
|
tiktok_marketing/__init__.py
|
GearPlug/tiktok-marketing-python
|
910a01ab8bbca6ecafba02e202de33fc156b0c13
|
[
"MIT"
] | 1
|
2022-02-19T06:02:24.000Z
|
2022-02-19T06:02:24.000Z
|
tiktok_marketing/__init__.py
|
GearPlug/tiktok-marketing-python
|
910a01ab8bbca6ecafba02e202de33fc156b0c13
|
[
"MIT"
] | null | null | null |
tiktok_marketing/__init__.py
|
GearPlug/tiktok-marketing-python
|
910a01ab8bbca6ecafba02e202de33fc156b0c13
|
[
"MIT"
] | null | null | null |
from tiktok_marketing.api import TikTokClient
| 23
| 45
| 0.891304
| 6
| 46
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cdfbea0913cd86af28c0f40ba58b5dc5fedaa92c
| 31
|
py
|
Python
|
tofu/tests/tests06_mesh/__init__.py
|
WinstonLHS/tofu
|
c95b2eb6aedcf4bac5676752b9635b78f31af6ca
|
[
"MIT"
] | 6
|
2016-09-15T17:01:19.000Z
|
2017-03-06T22:53:10.000Z
|
tofu/tests/tests06_mesh/__init__.py
|
WinstonLHS/tofu
|
c95b2eb6aedcf4bac5676752b9635b78f31af6ca
|
[
"MIT"
] | 9
|
2016-09-14T17:23:52.000Z
|
2017-04-13T07:30:07.000Z
|
tofu/tests/tests06_mesh/__init__.py
|
Didou09/tofu
|
4a4e1f058bab8e7556ed9d518f90807cec605476
|
[
"MIT"
] | null | null | null |
from . import test_01_checks
| 7.75
| 28
| 0.774194
| 5
| 31
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.193548
| 31
| 3
| 29
| 10.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a82ca2ecc12dbc467ddaa2a39df07cdfd5fd64f1
| 27
|
py
|
Python
|
addons14/sale_timesheet_rounded/tests/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/sale_timesheet_rounded/tests/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/sale_timesheet_rounded/tests/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
from . import test_rounded
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b555799226335e173dedc424b8a3d711137108a1
| 40
|
py
|
Python
|
up/tasks/multitask/models/wrappers/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
up/tasks/multitask/models/wrappers/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
up/tasks/multitask/models/wrappers/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
from .multitask_wrapper import * # noqa
| 40
| 40
| 0.775
| 5
| 40
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 40
| 1
| 40
| 40
| 0.882353
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b586379e2443e6fcac53fb6088a84dcb91ad7ed6
| 3,600
|
py
|
Python
|
tests/test_ls7_gap_mask.py
|
Oceancolour-RG/wagl
|
f002a1c0a373d21758d44d2a808bdfd755d90226
|
[
"Apache-2.0"
] | 22
|
2018-05-30T23:42:10.000Z
|
2021-12-25T14:21:46.000Z
|
tests/test_ls7_gap_mask.py
|
Oceancolour-RG/wagl
|
f002a1c0a373d21758d44d2a808bdfd755d90226
|
[
"Apache-2.0"
] | 52
|
2018-02-20T05:31:55.000Z
|
2021-11-23T23:38:15.000Z
|
tests/test_ls7_gap_mask.py
|
Oceancolour-RG/wagl
|
f002a1c0a373d21758d44d2a808bdfd755d90226
|
[
"Apache-2.0"
] | 8
|
2018-02-20T05:08:38.000Z
|
2021-08-12T23:16:41.000Z
|
#!/usr/bin/env python
import unittest
from wagl.acquisition import acquisitions
from .data import LS7_GAP_MASK, LS7_NO_GAP_MASK
class GapMaskRadianceTest(unittest.TestCase):
"""
Test that the SLC gap mask loads correctly.
The values to check against were derived by manually selecting
the band and the corresponding gap mask, and creating a null mask.
"""
def setUp(self):
self.acqs = acquisitions(LS7_GAP_MASK).get_all_acquisitions()
def test_band8(self):
acq = self.acqs[0]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 259512)
def test_band1(self):
acq = self.acqs[1]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64746)
def test_band2(self):
acq = self.acqs[2]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64766)
def test_band3(self):
acq = self.acqs[3]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64761)
def test_band4(self):
acq = self.acqs[4]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64770)
def test_band5(self):
acq = self.acqs[5]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64769)
def test_band61(self):
acq = self.acqs[6]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64862)
def test_band62(self):
acq = self.acqs[7]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64898)
def test_band7(self):
acq = self.acqs[8]
mask = acq.radiance_data() == -999
count = mask.sum()
self.assertEqual(count, 64747)
class NoGapMaskRadianceTest(unittest.TestCase):
"""
Test that the abscence of a gap mask has no effect on loading
the data.
"""
def setUp(self):
self.acqs = acquisitions(LS7_NO_GAP_MASK).get_all_acquisitions()
def test_band8(self):
acq = self.acqs[0]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band1(self):
acq = self.acqs[1]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band2(self):
acq = self.acqs[2]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band3(self):
acq = self.acqs[3]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band4(self):
acq = self.acqs[4]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band5(self):
acq = self.acqs[5]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band61(self):
acq = self.acqs[6]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band62(self):
acq = self.acqs[7]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
def test_band7(self):
acq = self.acqs[8]
_ = acq.radiance_data()
count = acq._gap_mask.sum()
self.assertEqual(count, 0)
| 26.086957
| 72
| 0.585
| 459
| 3,600
| 4.420479
| 0.185185
| 0.078857
| 0.097585
| 0.13307
| 0.81518
| 0.788566
| 0.788566
| 0.754066
| 0.558896
| 0.558896
| 0
| 0.049685
| 0.295556
| 3,600
| 137
| 73
| 26.277372
| 0.750394
| 0.073889
| 0
| 0.838384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.20202
| false
| 0
| 0.030303
| 0
| 0.252525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b588a6fcf9715da9c197bafdc4ab8b935a29b459
| 48
|
py
|
Python
|
server/src/police_lineups/controllers/recommendations/__init__.py
|
vabalcar/police-lineups
|
9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7
|
[
"MIT"
] | null | null | null |
server/src/police_lineups/controllers/recommendations/__init__.py
|
vabalcar/police-lineups
|
9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7
|
[
"MIT"
] | 2
|
2021-09-24T11:43:58.000Z
|
2021-09-24T12:00:21.000Z
|
server/src/police_lineups/controllers/recommendations/__init__.py
|
vabalcar/police-lineups
|
9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7
|
[
"MIT"
] | null | null | null |
from .queries import get_lineup_recommendations
| 24
| 47
| 0.895833
| 6
| 48
| 6.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a91350056005fb98b63ecedb46296629b20cd5bd
| 78
|
py
|
Python
|
boilerplate_app/tasks.py
|
taher-systango/DjangoUnboxed
|
808ab771a44564458b897b6ec854c08f43cccf2a
|
[
"MIT"
] | 68
|
2018-05-04T13:00:59.000Z
|
2022-03-25T09:28:28.000Z
|
boilerplate_app/tasks.py
|
taher-systango/DjangoUnboxed
|
808ab771a44564458b897b6ec854c08f43cccf2a
|
[
"MIT"
] | 38
|
2020-01-06T07:39:20.000Z
|
2022-01-07T07:49:38.000Z
|
qzzzme_app/tasks.py
|
aboudzein/Qzzz.me-API
|
b5ee8e63fb7cf58d26fb5b6e4c9f22c04e90df08
|
[
"MIT"
] | 27
|
2018-10-17T17:35:42.000Z
|
2022-03-25T09:28:33.000Z
|
from celery import shared_task
@shared_task
def add(a, b):
return (a+b)
| 11.142857
| 30
| 0.692308
| 14
| 78
| 3.714286
| 0.714286
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 78
| 6
| 31
| 13
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
a95a0c77f19b52bb300b0ca1aec9676982976a6f
| 32
|
py
|
Python
|
training/soundnet/net/__init__.py
|
chigur/pose
|
3e8ecebbc24ea59a1cb217b15a9b2a1a1de09085
|
[
"MIT"
] | null | null | null |
training/soundnet/net/__init__.py
|
chigur/pose
|
3e8ecebbc24ea59a1cb217b15a9b2a1a1de09085
|
[
"MIT"
] | null | null | null |
training/soundnet/net/__init__.py
|
chigur/pose
|
3e8ecebbc24ea59a1cb217b15a9b2a1a1de09085
|
[
"MIT"
] | null | null | null |
from . import Module as SoundNet
| 32
| 32
| 0.8125
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d3641a7376677a811a55b8ad68c4bb3cf2acfea
| 184
|
py
|
Python
|
keepachangelog/__init__.py
|
Colin-b/keepachangelog
|
128a920183ace5bb90e977b35171d81a76666da0
|
[
"MIT"
] | 20
|
2020-02-19T20:22:06.000Z
|
2022-01-28T22:20:37.000Z
|
keepachangelog/__init__.py
|
Colin-b/keepachangelog
|
128a920183ace5bb90e977b35171d81a76666da0
|
[
"MIT"
] | 29
|
2020-02-19T20:27:39.000Z
|
2022-02-05T17:26:14.000Z
|
keepachangelog/__init__.py
|
Colin-b/keepachangelog
|
128a920183ace5bb90e977b35171d81a76666da0
|
[
"MIT"
] | 6
|
2020-02-24T16:37:37.000Z
|
2022-01-28T22:38:39.000Z
|
from keepachangelog.version import __version__
from keepachangelog._changelog import to_dict, to_raw_dict, release, from_dict
from keepachangelog._versioning import to_sorted_semantic
| 46
| 78
| 0.88587
| 24
| 184
| 6.291667
| 0.5
| 0.357616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081522
| 184
| 3
| 79
| 61.333333
| 0.893491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d6672ece9c205b02681f6c29c29050bf9bd925a
| 32
|
py
|
Python
|
bazaar_bundle/__init__.py
|
applauncher-team/bazaar_bundle
|
95f51d418ab3563f150268a638717e409c80485f
|
[
"Apache-2.0"
] | null | null | null |
bazaar_bundle/__init__.py
|
applauncher-team/bazaar_bundle
|
95f51d418ab3563f150268a638717e409c80485f
|
[
"Apache-2.0"
] | null | null | null |
bazaar_bundle/__init__.py
|
applauncher-team/bazaar_bundle
|
95f51d418ab3563f150268a638717e409c80485f
|
[
"Apache-2.0"
] | null | null | null |
from .bundle import BazaarBundle
| 32
| 32
| 0.875
| 4
| 32
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d6d03a390d4f46b1b1ea85902b04bdf49ff6729
| 3,383
|
py
|
Python
|
sources/praline/client/project/pipeline/stages/load_clang_format_test.py
|
dansandu/praline
|
f1e87c8048787480262b330e6cc6d92d473eb50c
|
[
"MIT"
] | null | null | null |
sources/praline/client/project/pipeline/stages/load_clang_format_test.py
|
dansandu/praline
|
f1e87c8048787480262b330e6cc6d92d473eb50c
|
[
"MIT"
] | null | null | null |
sources/praline/client/project/pipeline/stages/load_clang_format_test.py
|
dansandu/praline
|
f1e87c8048787480262b330e6cc6d92d473eb50c
|
[
"MIT"
] | null | null | null |
from os.path import normpath
from praline.client.project.pipeline.stages.load_clang_format import clang_format_style_file_contents, ClangFormatConfigurationError, load_clang_format
from praline.common.testing.file_system_mock import FileSystemMock
from unittest import TestCase
class LoadClangFormatStageTest(TestCase):
def test_load_clang_format_stage_with_client_configuration(self):
normalized_executable_path = normpath('path/to/clang_format_executable')
normalized_style_file_path = normpath('my/project/.clang-format')
file_system = FileSystemMock({'path/to', 'my/project'}, {normalized_executable_path: b''})
resources = {'project_directory': 'my/project'}
configuration = {'clang-format-executable-path': normalized_executable_path}
load_clang_format(file_system, resources, None, None, configuration, None)
self.assertEqual(resources['clang_format_executable'], normalized_executable_path)
self.assertEqual(normpath(resources['clang_format_style_file']), normalized_style_file_path)
self.assertEqual(file_system.files[normalized_style_file_path].decode('utf-8'), clang_format_style_file_contents)
def test_load_clang_format_stage_with_file_configuration(self):
normalized_executable_path = normpath('path/to/clang_format_executable')
normalized_style_file_path = normpath('my/project/.clang-format')
file_system = FileSystemMock({'path/to', 'my/project'}, {normalized_executable_path: b''}, on_which=lambda t: normalized_executable_path if t == 'clang-format' else None)
resources = {'project_directory': 'my/project'}
configuration = {}
load_clang_format(file_system, resources, None, None, configuration, None)
self.assertEqual(resources['clang_format_executable'], normalized_executable_path)
self.assertEqual(normpath(resources['clang_format_style_file']), normalized_style_file_path)
self.assertEqual(file_system.files[normalized_style_file_path].decode('utf-8'), clang_format_style_file_contents)
def test_load_clang_format_stage_with_user_supplied_style_file(self):
normalized_executable_path = normpath('path/to/clang_format_executable')
normalized_style_file_path = normpath('my/project/.clang-format')
file_system = FileSystemMock({'path/to', 'my/project'}, {normalized_executable_path: b'', normalized_style_file_path: b'IndentWidth: 8'})
resources = {'project_directory': 'my/project'}
configuration = {'clang-format-executable-path': normalized_executable_path}
load_clang_format(file_system, resources, None, None, configuration, None)
self.assertEqual(normpath(resources['clang_format_executable']), normalized_executable_path)
self.assertEqual(normpath(resources['clang_format_style_file']), normalized_style_file_path)
self.assertEqual(file_system.files[normalized_style_file_path], b'IndentWidth: 8')
def test_load_clang_format_stage_with_no_configuration(self):
file_system = FileSystemMock({'my/project'})
resources = {
'project_directory': 'my/project'
}
configuration = {}
self.assertRaises(ClangFormatConfigurationError, load_clang_format, file_system, resources, None, None, configuration, None)
| 52.046154
| 180
| 0.751108
| 385
| 3,383
| 6.218182
| 0.145455
| 0.128655
| 0.120301
| 0.096074
| 0.817878
| 0.806182
| 0.763576
| 0.71721
| 0.71721
| 0.71721
| 0
| 0.001392
| 0.150754
| 3,383
| 64
| 181
| 52.859375
| 0.831883
| 0
| 0
| 0.547619
| 0
| 0
| 0.170854
| 0.106119
| 0
| 0
| 0
| 0
| 0.238095
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8d978746fa090aa2999b7df714ff2af3e4749942
| 6,434
|
py
|
Python
|
tests/test_login.py
|
Kcode17/CS684_ST_QA
|
6d7aef29744f51ff436fbf3081c2dac30addf9bf
|
[
"MIT"
] | null | null | null |
tests/test_login.py
|
Kcode17/CS684_ST_QA
|
6d7aef29744f51ff436fbf3081c2dac30addf9bf
|
[
"MIT"
] | 1
|
2022-03-22T00:55:37.000Z
|
2022-03-22T00:55:37.000Z
|
tests/test_login.py
|
Kcode17/CS684_ST_QA
|
6d7aef29744f51ff436fbf3081c2dac30addf9bf
|
[
"MIT"
] | 1
|
2022-03-22T21:44:36.000Z
|
2022-03-22T21:44:36.000Z
|
from multiprocessing.connection import wait
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
import unittest
import pytest
class TestSample():
@pytest.fixture()
def test_setup(self):
global driver
ser = Service("C:/Program Files (x86)/chromedriver.exe")
op = webdriver.ChromeOptions()
driver = webdriver.Chrome(service=ser, options=op)
driver.maximize_window()
driver.implicitly_wait(10)
yield
driver.close()
driver.quit()
def test_Register(self, test_setup):
driver.get("http://127.0.0.1:3000")
register_Button = driver.find_element(by=By.LINK_TEXT, value="Register")
register_Button.click()
driver.find_element(by=By.NAME, value="name").send_keys("Krishna1_Abc")
driver.find_element(by=By.NAME, value="email").send_keys("krishna12@gmail.com")
driver.find_element(by=By.NAME, value="password").send_keys("Sri@12345")
driver.find_element(by=By.NAME, value="password2").send_keys("Sri@12345")
register_link = driver.find_element(by=By.ID, value="register_button")
register_link.click()
bodyText = driver.find_element(by=By.ID, value="success_msg").text
assert bodyText == "You have now registered!"
def test_user_login(self, test_setup):
driver.get("http://127.0.0.1:3000")
login_button = driver.find_element(by=By.LINK_TEXT, value="Log In")
login_button.click()
driver.find_element(by=By.NAME, value="email").send_keys("krishna12@gmail.com")
driver.find_element(by=By.NAME, value="password").send_keys("Sri@12345")
login_link = driver.find_element(by=By.ID, value="Login_Button")
login_link.click()
bodyText = driver.find_element(by=By.ID, value="dashboard").text
assert bodyText == "My Dashboard"
def test_user_search_basic(self, test_setup):
driver.get("http://127.0.0.1:3000")
login_button = driver.find_element(by=By.LINK_TEXT, value="Log In")
login_button.click()
driver.find_element(by=By.NAME, value="email").send_keys("krishna12@gmail.com")
driver.find_element(by=By.NAME, value="password").send_keys("Sri@12345")
login_link = driver.find_element(by=By.ID, value="Login_Button")
login_link.click()
#Apple Facebook Nintendo
searchTerm = "nintendo"
driver.find_element(by=By.ID, value="search_bar").send_keys(searchTerm)
search_link = driver.find_element(by=By.ID, value="search_button")
search_link.click()
#articlesText = driver.find_element(by=By.CLASS_NAME, value="news").text
articlesText = driver.page_source
count = articlesText.count(searchTerm)
assert count > 10
def test_user_search_AND(self, test_setup):
driver.get("http://127.0.0.1:3000")
login_button = driver.find_element(by=By.LINK_TEXT, value="Log In")
login_button.click()
driver.find_element(by=By.NAME, value="email").send_keys("krishna12@gmail.com")
driver.find_element(by=By.NAME, value="password").send_keys("Sri@12345")
login_link = driver.find_element(by=By.ID, value="Login_Button")
login_link.click()
#Porsche AND Audi
searchTerm1 = "Porsche"
searchTerm2 = "Audi"
searchTerm = searchTerm1 + " AND " + searchTerm2
driver.find_element(by=By.ID, value="search_bar").send_keys(searchTerm)
search_link = driver.find_element(by=By.ID, value="search_button")
search_link.click()
#articlesText = driver.find_element(by=By.CLASS_NAME, value="news").text
articlesText = driver.page_source
count1 = articlesText.count(searchTerm1)
count2 = articlesText.count(searchTerm2)
assert (count1 > 10) & (count2 > 10)
def test_user_search_OR(self, test_setup):
driver.get("http://127.0.0.1:3000")
login_button = driver.find_element(by=By.LINK_TEXT, value="Log In")
login_button.click()
driver.find_element(by=By.NAME, value="email").send_keys("krishna12@gmail.com")
driver.find_element(by=By.NAME, value="password").send_keys("Sri@12345")
login_link = driver.find_element(by=By.ID, value="Login_Button")
login_link.click()
#ethereum OR litecoin
searchTerm1 = "Audi"
searchTerm2 = "Volvo"
searchTerm = searchTerm1 + " OR " + searchTerm2
driver.find_element(by=By.ID, value="search_bar").send_keys(searchTerm)
search_link = driver.find_element(by=By.ID, value="search_button")
search_link.click()
#articlesText = driver.find_element(by=By.CLASS_NAME, value="news").text
articlesText = driver.page_source
count1 = articlesText.count(searchTerm1)
count2 = articlesText.count(searchTerm2)
assert count1 > 10 | count2 > 10
def test_user_search_Complex(self, test_setup):
driver.get("http://127.0.0.1:3000")
login_button = driver.find_element(by=By.LINK_TEXT, value="Log In")
login_button.click()
driver.find_element(by=By.NAME, value="email").send_keys("krishna12@gmail.com")
driver.find_element(by=By.NAME, value="password").send_keys("Sri@12345")
login_link = driver.find_element(by=By.ID, value="Login_Button")
login_link.click()
# Cars AND (Tesla OR Rivian) NOT Volvo
searchTerm1 = "Tesla"
searchTerm2 = "Rivian"
searchTerm3 = "Volvo"
searchTerm = "Cars" + " AND " + "(" + searchTerm1 + " OR " + searchTerm2 + ")" + " NOT " + searchTerm3
driver.find_element(by=By.ID, value="search_bar").send_keys(searchTerm)
search_link = driver.find_element(by=By.ID, value="search_button")
search_link.click()
#articlesText = driver.find_element(by=By.CLASS_NAME, value="news").text
articlesText = driver.page_source
count1 = articlesText.count(searchTerm1)
count2 = articlesText.count(searchTerm2)
count3 = articlesText.count(searchTerm3)
countcars = articlesText.count("Cars")
assert (countcars > 10 & (count1 > 10 | count2 >10 ) & count3 == 0)
| 36.977011
| 112
| 0.654803
| 817
| 6,434
| 4.980416
| 0.138311
| 0.098304
| 0.167117
| 0.186778
| 0.71246
| 0.707791
| 0.707791
| 0.700418
| 0.692553
| 0.672647
| 0
| 0.032706
| 0.215884
| 6,434
| 174
| 113
| 36.977011
| 0.773835
| 0.059372
| 0
| 0.526316
| 0
| 0
| 0.129529
| 0.003639
| 0.008772
| 0
| 0
| 0
| 0.052632
| 1
| 0.061404
| false
| 0.061404
| 0.070175
| 0
| 0.140351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a5c5ccec5c23dc9cff1835166c17c5b4af92a340
| 596
|
py
|
Python
|
bip_utils/monero/mnemonic/__init__.py
|
MIPPLTeam/bip_utils
|
c66446e7ac3879d2cf6308c5b8eb7f7705292660
|
[
"MIT"
] | 149
|
2020-05-15T08:11:43.000Z
|
2022-03-29T16:34:42.000Z
|
bip_utils/monero/mnemonic/__init__.py
|
MIPPLTeam/bip_utils
|
c66446e7ac3879d2cf6308c5b8eb7f7705292660
|
[
"MIT"
] | 41
|
2020-04-03T15:57:56.000Z
|
2022-03-31T08:25:11.000Z
|
bip_utils/monero/mnemonic/__init__.py
|
MIPPLTeam/bip_utils
|
c66446e7ac3879d2cf6308c5b8eb7f7705292660
|
[
"MIT"
] | 55
|
2020-04-03T17:05:15.000Z
|
2022-03-24T12:43:42.000Z
|
from bip_utils.monero.mnemonic.monero_mnemonic_ex import MoneroChecksumError
from bip_utils.monero.mnemonic.monero_mnemonic import (
MoneroLanguages, MoneroWordsNum, MoneroMnemonic, MoneroMnemonicDecoder, MoneroMnemonicEncoder
)
from bip_utils.monero.mnemonic.monero_entropy_generator import MoneroEntropyBitLen, MoneroEntropyGenerator
from bip_utils.monero.mnemonic.monero_mnemonic_generator import MoneroMnemonicGenerator
from bip_utils.monero.mnemonic.monero_mnemonic_validator import MoneroMnemonicValidator
from bip_utils.monero.mnemonic.monero_seed_generator import MoneroSeedGenerator
| 66.222222
| 106
| 0.899329
| 64
| 596
| 8.109375
| 0.34375
| 0.26975
| 0.138728
| 0.208092
| 0.431599
| 0.431599
| 0.308285
| 0
| 0
| 0
| 0
| 0
| 0.058725
| 596
| 8
| 107
| 74.5
| 0.925134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9391b2c4b9d13629fd20d67f6b5fae6b2b856187
| 1,125
|
py
|
Python
|
day6.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | 2
|
2021-12-04T21:15:14.000Z
|
2021-12-12T09:28:28.000Z
|
day6.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | null | null | null |
day6.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], 'r') as f:
file_lines = [l for l in f.read().strip().split('\n')]
in_nums = [1,1,1,1,2,1,1,4,1,4,3,1,1,1,1,1,1,1,1,4,1,3,1,1,1,5,1,3,1,4,1,2,1,1,5,1,1,1,1,1,1,1,1,1,1,3,4,1,5,1,1,1,1,1,1,1,1,1,3,1,4,1,1,1,1,3,5,1,1,2,1,1,1,1,4,4,1,1,1,4,1,1,4,2,4,4,5,1,1,1,1,2,3,1,1,4,1,5,1,1,1,3,1,1,1,1,5,5,1,2,2,2,2,1,1,2,1,1,1,1,1,3,1,1,1,2,3,1,5,1,1,1,2,2,1,1,1,1,1,3,2,1,1,1,4,3,1,1,4,1,5,4,1,4,1,1,1,1,1,1,1,1,1,1,2,2,4,5,1,1,1,1,5,4,1,3,1,1,1,1,4,3,3,3,1,2,3,1,1,1,1,1,1,1,1,2,1,1,1,5,1,3,1,4,3,1,3,1,5,1,1,1,1,3,1,5,1,2,4,1,1,4,1,4,4,2,1,2,1,3,3,1,4,4,1,1,3,4,1,1,1,2,5,2,5,1,1,1,4,1,1,1,1,1,1,3,1,5,1,2,1,1,1,1,1,4,4,1,1,1,5,1,1,5,1,2,1,5,1,1,1,1,1,1,1,1,1,1,1,1,3,2,4,1,1,2,1,1,3,2]
#in_nums = [3,4,3,1,2]
total = 0
result = 0
other = 0
for day in range(80):
add_fs = []
for fi, count in enumerate(in_nums):
count -= 1
if count < 0:
count = 6
add_fs += [8]
in_nums[fi] = count
in_nums += add_fs
print('Day', day+1, ':', in_nums)
print(len(in_nums))
| 36.290323
| 611
| 0.517333
| 392
| 1,125
| 1.456633
| 0.127551
| 0.434326
| 0.441331
| 0.392294
| 0.469352
| 0.394046
| 0.294221
| 0.225919
| 0.185639
| 0.087566
| 0
| 0.326804
| 0.137778
| 1,125
| 30
| 612
| 37.5
| 0.261856
| 0.037333
| 0
| 0
| 0
| 0
| 0.006475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0.05
| 0.15
| 0.1
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
93ae5aa950b3817fcf7f1d427b75ab1a305f61ee
| 138
|
py
|
Python
|
models/__init__.py
|
edubadges/django_ims_toolbox
|
b1c5a79e62eac951780e89924c098b00402b5591
|
[
"MIT"
] | 1
|
2019-01-20T23:00:40.000Z
|
2019-01-20T23:00:40.000Z
|
models/__init__.py
|
edubadges/django_ims_toolbox
|
b1c5a79e62eac951780e89924c098b00402b5591
|
[
"MIT"
] | 1
|
2018-12-19T06:51:07.000Z
|
2018-12-19T06:51:07.000Z
|
models/__init__.py
|
edubadges/django_ims_toolbox
|
b1c5a79e62eac951780e89924c098b00402b5591
|
[
"MIT"
] | 2
|
2018-12-22T20:19:13.000Z
|
2020-09-02T07:32:21.000Z
|
from ims.models.content import IMSArchive, CommonCartridge, ContentPackage
from ims.models.lti import LTIApp, LTITenant, LTIPrivacyLevels
| 46
| 74
| 0.855072
| 16
| 138
| 7.375
| 0.75
| 0.118644
| 0.220339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 138
| 2
| 75
| 69
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f548f901177c31226a92c992869f1941701ddc37
| 1,901
|
py
|
Python
|
tests/test_health.py
|
LandRegistry/feeder-utilities
|
580f2ba09a8aa62c52103e46c4c105210c05fcbb
|
[
"MIT"
] | null | null | null |
tests/test_health.py
|
LandRegistry/feeder-utilities
|
580f2ba09a8aa62c52103e46c4c105210c05fcbb
|
[
"MIT"
] | null | null | null |
tests/test_health.py
|
LandRegistry/feeder-utilities
|
580f2ba09a8aa62c52103e46c4c105210c05fcbb
|
[
"MIT"
] | 1
|
2021-04-11T05:25:09.000Z
|
2021-04-11T05:25:09.000Z
|
from unittest import TestCase
from unittest.mock import patch
from feeder_utilities import health
from amqp.exceptions import NotFound
class TestFeederHealth(TestCase):
@patch('feeder_utilities.health.rabbitmq')
def test_no_error_queue(self, mock_rabbit):
feeder_health = health.FeederHealth("none", "of", "this", "matters", "much")
mock_rabbit.get_queue_count.side_effect = [1, 1, NotFound()]
response = feeder_health.generate_health_msg()
self.assertEqual(response, {'app': 'none',
'error_queue_size': None,
'queue_size': 1,
'rpc_queue_size': 1,
'status': 'OK'})
@patch('feeder_utilities.health.rabbitmq')
def test_empty_error_queue(self, mock_rabbit):
feeder_health = health.FeederHealth("none", "of", "this", "matters", "much")
mock_rabbit.get_queue_count.side_effect = [1, 1, 0]
response = feeder_health.generate_health_msg()
self.assertEqual(response, {'app': 'none',
'error_queue_size': 0,
'queue_size': 1,
'rpc_queue_size': 1,
'status': 'OK'})
@patch('feeder_utilities.health.rabbitmq')
def test_not_empty_error_queue(self, mock_rabbit):
feeder_health = health.FeederHealth("none", "of", "this", "matters", "much")
mock_rabbit.get_queue_count.side_effect = [1, 1, 1]
response = feeder_health.generate_health_msg()
self.assertEqual(response, {'app': 'none',
'error_queue_size': 1,
'queue_size': 1,
'rpc_queue_size': 1,
'status': 'BAD'})
| 46.365854
| 84
| 0.537612
| 191
| 1,901
| 5.057592
| 0.240838
| 0.083851
| 0.072464
| 0.080745
| 0.825052
| 0.825052
| 0.825052
| 0.782609
| 0.752588
| 0.752588
| 0
| 0.012945
| 0.349816
| 1,901
| 40
| 85
| 47.525
| 0.768608
| 0
| 0
| 0.571429
| 0
| 0
| 0.170963
| 0.0505
| 0
| 0
| 0
| 0
| 0.085714
| 1
| 0.085714
| false
| 0
| 0.114286
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f5491c52e85785192ce8073f5dfa557a28d18941
| 24
|
py
|
Python
|
djangular_serve/__init__.py
|
forafekt/djangular-serve
|
356797dd6fe78db5da815f0d3ad3601c6ddf739c
|
[
"Unlicense",
"MIT"
] | null | null | null |
djangular_serve/__init__.py
|
forafekt/djangular-serve
|
356797dd6fe78db5da815f0d3ad3601c6ddf739c
|
[
"Unlicense",
"MIT"
] | null | null | null |
djangular_serve/__init__.py
|
forafekt/djangular-serve
|
356797dd6fe78db5da815f0d3ad3601c6ddf739c
|
[
"Unlicense",
"MIT"
] | null | null | null |
from .serve import main
| 12
| 23
| 0.791667
| 4
| 24
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f55a4808e587596d69a4cfc08c14b5fbcc808180
| 152
|
py
|
Python
|
clocwalk/__init__.py
|
ksiswhite/clocwalk
|
884b5c3efe61d005a003749bcf4bae079fac8e70
|
[
"Apache-2.0"
] | 11
|
2018-07-18T05:14:42.000Z
|
2019-05-14T01:11:07.000Z
|
clocwalk/__init__.py
|
ksiswhite/clocwalk
|
884b5c3efe61d005a003749bcf4bae079fac8e70
|
[
"Apache-2.0"
] | 5
|
2019-10-15T13:10:35.000Z
|
2020-03-06T05:36:00.000Z
|
clocwalk/__init__.py
|
ksiswhite/clocwalk
|
884b5c3efe61d005a003749bcf4bae079fac8e70
|
[
"Apache-2.0"
] | 7
|
2019-10-08T08:04:55.000Z
|
2021-04-02T05:32:02.000Z
|
# coding:utf-8
__version__ = '2.0.1'
from clocwalk.cli import ClocDetector
from clocwalk.cli import query_cve
__all_ = ['ClocDetector', 'query_cve']
| 16.888889
| 38
| 0.75
| 22
| 152
| 4.772727
| 0.681818
| 0.228571
| 0.285714
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.131579
| 152
| 8
| 39
| 19
| 0.765152
| 0.078947
| 0
| 0
| 0
| 0
| 0.188406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f56f3085ebc8bab8c5b1d6488b6c77ec82e98d31
| 59,721
|
py
|
Python
|
qgs/toolbox/lyapunov.py
|
Climdyn/qgs
|
33d79b1fa360de22b7ae595c142dbe9b6a8fb53a
|
[
"MIT"
] | 25
|
2020-03-19T14:35:47.000Z
|
2022-03-17T06:56:12.000Z
|
qgs/toolbox/lyapunov.py
|
Climdyn/qgs
|
33d79b1fa360de22b7ae595c142dbe9b6a8fb53a
|
[
"MIT"
] | 9
|
2020-11-06T23:03:42.000Z
|
2021-09-28T08:05:44.000Z
|
qgs/toolbox/lyapunov.py
|
Climdyn/qgs
|
33d79b1fa360de22b7ae595c142dbe9b6a8fb53a
|
[
"MIT"
] | 7
|
2020-11-30T03:23:14.000Z
|
2022-01-25T04:36:45.000Z
|
"""
Lyapunov module
=================
Module with the classes of multi-thread the computation of the various
`Lyapunov vectors`_ and `exponents`_. Integrate using the `Runge-Kutta method`_
defined in the :mod:`~.integrators.integrate` module.
See :cite:`lyap-KP2012` for more details on the Lyapunov vectors theoretical framework.
Module classes
--------------
* :class:`LyapunovsEstimator` to estimate the Backward and Forward Lyapunov Vectors (BLVs and FLVs) along a trajectory
* :class:`CovariantLyapunovsEstimator` to estimate the Covariant Lyapunov Vectors (CLVs) along a trajectory
.. _Lyapunov vectors: https://en.wikipedia.org/wiki/Lyapunov_vector
.. _exponents: https://en.wikipedia.org/wiki/Lyapunov_exponent
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
.. _Numba: https://numba.pydata.org/
References
----------
.. bibliography:: ../model/ref.bib
:labelprefix: LYAP-
:keyprefix: lyap-
"""
from numba import njit
import numpy as np
import qgs.integrators.integrate as integrate
from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse
import multiprocessing
class LyapunovsEstimator(object):
    """Class to compute the Forward and Backward `Lyapunov vectors`_ and `exponents`_ along a
    trajectory of a dynamical system

    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})

    with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
    The tangent linear model must also be provided. I.e. one must provide the linearized ODEs

    .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}

    where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
    Jacobian matrix of :math:`\\boldsymbol{f}`.
    The method used to compute the Lyapunov vectors is the one introduced by
    Benettin et al. :cite:`lyap-BGGS1980`.

    Parameters
    ----------
    num_threads: None or int, optional
        Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of
        machine's cores available. Default to `None`.
    b: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    c: None or ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    a: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    number_of_dimensions: None or int, optional
        Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from
        the callable :attr:`func`. Default to `None`.

    Attributes
    ----------
    num_threads: int
        Number of :class:`LyapProcess` workers (threads) to use.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    n_dim: int
        Dynamical system dimension.
    n_vec: int
        The number of Lyapunov vectors to compute.
    n_traj: int
        The number of trajectories (initial conditions) computed at the last estimation
        performed by the estimator.
    n_records: int
        The number of saved states of the last estimation performed by the estimator.
    ic: ~numpy.ndarray
        Store the estimator initial conditions.
    func: callable
        Last function :math:`\\boldsymbol{f}` used by the estimator.
    func_jac: callable
        Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
    """

    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None):
        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads
        # Default is RK4 (classic Butcher tableau) when no coefficients are provided.
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c
        self.ic = None
        self._time = None
        self._pretime = None
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0
        # Integration mode flags: _forward is -1 for BLVs, 1 for FLVs;
        # _inverse multiplies the Jacobian usage (see compute_lyapunovs).
        self._adjoint = False
        self._forward = -1
        self._inverse = 1.
        self.func = None
        self.func_jac = None
        # Worker communication channels, created in start().
        self._ics_queue = None
        self._lyap_queue = None
        self._processes_list = list()

    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""
        for process in self._processes_list:
            process.terminate()
            process.join()

    def start(self):
        """Start or restart the workers (threads) of the estimator.

        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """
        self.terminate()
        self._processes_list = list()
        self._ics_queue = multiprocessing.JoinableQueue()
        self._lyap_queue = multiprocessing.Queue()
        for i in range(self.num_threads):
            self._processes_list.append(LyapProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                    self._ics_queue, self._lyap_queue))
        for process in self._processes_list:
            # Daemonize the workers so they do not block interpreter shutdown.
            process.daemon = True
            process.start()

    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.

        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        # The workers hold copies of the coefficients, so they must be restarted.
        self.start()

    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.

        .. _Numba: https://numba.pydata.org/

        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.

        Warnings
        --------
        This function restarts the estimator!
        """
        self.func = f
        self.func_jac = fjac
        self.start()

    def compute_lyapunovs(self, t0, tw, t, dt, mdt, ic=None, write_steps=1, n_vec=None, forward=False, adjoint=False,
                          inverse=False):
        """Estimate the Lyapunov vectors using the Benettin algorithm along a given trajectory,
        always integrating the said trajectory forward in time from `ic` at `t0` to time `t`.
        The result of the estimation can be obtained afterward by calling :meth:`get_lyapunovs`.
        If `forward` is `True`, it yields the Forward Lyapunov Vectors (FLVs) between `t0` and `tw`,
        otherwise, returns the Backward Lyapunov Vectors (BLVs) between `tw` and `t`.

        Parameters
        ----------
        t0: float
            Initial time of the time integration. Corresponds to the initial condition's `ic` time.
        tw: float
            Time at which the algorithm start to store the Lyapunov vectors. Define thus also the
            transient before the which the Lyapunov vectors are considered as having not yet
            converged. Must be between `t0` and `t`.
        t: float
            Final time of the time integration. Corresponds to the final condition.
        dt: float
            Timestep of the integration.
        mdt: float
            Micro-timestep to integrate the tangent linear equation between the nonlinear system
            `dt` timesteps. Should be smaller or equal to `dt`.
        ic: None or ~numpy.ndarray(float), optional
            Initial conditions of the system. Can be a 1D or a 2D array:

            * 1D: Provide a single initial condition.
              Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
            * 2D: Provide an ensemble of initial condition.
              Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
              and where `n_traj` is the number of initial conditions.

            If `None`, use the initial conditions stored in :attr:`ic`.
            If then :attr:`ic` is `None`, use a zero initial condition. Default to `None`.
        forward: bool, optional
            If `True`, yield the `Forward Lyapunov Vectors` (FLVs) between `t0` and `tw`.
            If `False`, yield the `Backward Lyapunov Vectors` (BLVs) between `tw` and `t`.
            Default to `False`, i.e. Backward Lyapunov Vectors estimation.
        adjoint: bool, optional
            If true, integrate the tangent :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}` ,
            else, integrate the adjoint linear model :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}^T(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}`.
            Integrate the tangent model by default.
        inverse: bool, optional
            Whether or not to invert the Jacobian matrix
            :math:`\\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\rightarrow \\boldsymbol{\\mathrm{J}}^{-1}(t, \\boldsymbol{x})`.
            `False` by default.
        write_steps: int, optional
            Save the state of the integration in memory every `write_steps` steps. The other
            intermediary steps are lost. It determines the size of the returned objects.
            Default is 1. Set to 0 to return only the final state.
        n_vec: int, optional
            The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
        """
        if self.func is None or self.func_jac is None:
            print('No function to integrate defined!')
            return 0
        if ic is None:
            # Probe the system dimension: try zero vectors of increasing size until
            # the (jitted) function accepts one without raising.
            i = 1
            while True:
                self.ic = np.zeros(i)
                try:
                    self.func(0., self.ic)
                except Exception:
                    # Any failure is interpreted as "wrong input dimension": try the next size.
                    i += 1
                else:
                    break
            # The actual dimension is given by the length of the function output.
            i = len(self.func(0., self.ic))
            self.ic = np.zeros(i)
        else:
            # Coerce to ndarray so that shape-based handling below always works.
            self.ic = np.asarray(ic)
        if self.ic.ndim == 1:
            # Promote a single initial condition to an ensemble of one.
            self.ic = self.ic.reshape((1, -1))
        self.n_traj = self.ic.shape[0]
        self.n_dim = self.ic.shape[1]
        if n_vec is not None:
            self.n_vec = n_vec
        else:
            self.n_vec = self.n_dim
        # Transient span [t0, tw] and recording span [tw, t]; the final times are
        # appended explicitly since np.arange excludes the endpoint.
        self._pretime = np.concatenate((np.arange(t0, tw, dt), np.full((1,), tw)))
        self._time = np.concatenate((np.arange(tw, t, dt), np.full((1,), t)))
        self.write_steps = write_steps
        self._forward = 1 if forward else -1
        self._adjoint = adjoint
        self._inverse = -1. if inverse else 1.
        if write_steps == 0:
            self.n_records = 1
        else:
            # FLVs are recorded over the pre-transient span, BLVs over the main span.
            if not forward:
                tot = self._time[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._time[-1]:
                    self.n_records += 1
            else:
                tot = self._pretime[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._pretime[-1]:
                    self.n_records += 1
        self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
        self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
        self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))
        # Dispatch one job per trajectory to the worker pool and wait for completion.
        for i in range(self.n_traj):
            self._ics_queue.put((i, self._pretime, self._time, mdt, self.ic[i], self.n_vec, self.write_steps,
                                 self._forward, self._adjoint, self._inverse))
        self._ics_queue.join()
        # Results arrive in arbitrary order; args[0] carries the trajectory index.
        for i in range(self.n_traj):
            args = self._lyap_queue.get()
            self._recorded_traj[args[0]] = args[1]
            self._recorded_exp[args[0]] = args[2]
            self._recorded_vec[args[0]] = args[3]

    def get_lyapunovs(self):
        """Returns the result of the previous Lyapunov vectors estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory.
              3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.
        """
        # FLVs were recorded over the pre-transient span, BLVs over the main span.
        if self._forward == -1:
            tt = self._time
        else:
            tt = self._pretime
        if self.write_steps > 0:
            if tt[::self.write_steps][-1] == tt[-1]:
                return tt[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_vec)
            else:
                # The final time was recorded separately; append it to the subsampled axis.
                return np.concatenate((tt[::self.write_steps], np.full((1,), tt[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
        else:
            return tt[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_vec)
class LyapProcess(multiprocessing.Process):
    """Worker process of :class:`LyapunovsEstimator`, enabling multi-threaded
    Lyapunov vectors estimation.

    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue from which the worker fetches initial conditions and parameters.
    lyap_queue: multiprocessing.Queue
        Queue on which the worker puts the estimation results.

    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    """

    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, lyap_queue):
        super().__init__()
        # Worker identity and communication channels.
        self.processID = processID
        self._ics_queue = ics_queue
        self._lyap_queue = lyap_queue
        # Model and tangent linear model functions.
        self.func = func
        self.func_jac = func_jac
        # Runge-Kutta coefficients.
        self.b = b
        self.c = c
        self.a = a

    def run(self):
        """Main worker loop: fetch a job, estimate the requested Lyapunov vectors,
        and push the (squeezed) results back on the output queue."""
        while True:
            job = self._ics_queue.get()
            (idx, pretime, time, mdt, ic, n_vec,
             write_steps, forward, adjoint, inverse) = job
            # A forward flag of -1 requests the Backward Lyapunov Vectors,
            # any other value the Forward ones.
            if forward == -1:
                estimator = _compute_backward_lyap_jit
            else:
                estimator = _compute_forward_lyap_jit
            traj, exps, vecs = estimator(self.func, self.func_jac, pretime, time, mdt,
                                         ic[np.newaxis, :], n_vec, write_steps,
                                         adjoint, inverse, self.b, self.c, self.a)
            self._lyap_queue.put((idx, np.squeeze(traj), np.squeeze(exps), np.squeeze(vecs)))
            self._ics_queue.task_done()
@njit
def _compute_forward_lyap_jit(f, fjac, time, posttime, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Integrate the nonlinear trajectory over the full time span and estimate the
    Forward Lyapunov Vectors along it (see :func:`_compute_forward_lyap_traj_jit`)."""
    # time and posttime share their junction point, hence time[:-1] when concatenating.
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((time[:-1], posttime)), ic, 1, 1, b, c, a)
    recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt,
                                                                               n_vec, write_steps, adjoint, inverse, b, c, a)
    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Benettin-style QR iteration performed backward in time along a precomputed
    trajectory ``ttraj``, recording the Forward Lyapunov Vectors and local exponents
    over the ``time`` span (the ``posttime`` span serves as convergence transient)."""
    # Split the full trajectory at the time/posttime junction (shared sample).
    traj = ttraj[:, :, :len(time)]
    posttraj = ttraj[:, :, len(time)-1:]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of recorded snapshots; an extra slot is added when the subsampled
    # time axis does not land exactly on the final time.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    # The iteration runs backward in time, hence the reversed time arrays.
    rposttime = reverse(posttime)
    rtime = reverse(time)
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        # Random orthonormal initial basis for the QR iteration.
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        # NOTE(review): sized n_dim but overwritten with n_vec-length values in the
        # loops below -- verify n_vec == n_dim is not silently assumed here.
        m_exp = np.zeros((n_dim))
        # Transient phase over posttime: converge the basis, nothing recorded.
        for ti, (tt, dt) in enumerate(zip(rposttime[:-1], np.diff(rposttime))):
            y[0] = posttraj[i_traj, :, -1-ti]
            # Micro-integration of the tangent model over one macro step (dt < 0 here).
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Propagate the basis through the tangent propagator and re-orthonormalize.
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Recording phase over time, filling the record arrays from the end (iw = -1).
        iw = -1
        for ti, (tt, dt) in enumerate(zip(rtime[:-1], np.diff(rtime))):
            y[0] = traj[i_traj, :, -1-ti]
            # Local exponents from the stretching factors (diagonal of R) of the
            # previous step; dt is negative along the reversed time axis.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw -= 1
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # The first record slot holds the state at the beginning of the time span.
        recorded_exp[i_traj, :, 0] = m_exp
        recorded_traj[i_traj, :, 0] = y[0]
        recorded_vec[i_traj, :, :, 0] = q
    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_jit(f, fjac, pretime, time, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Integrate the nonlinear trajectory over the full time span and estimate the
    Backward Lyapunov Vectors along it (see :func:`_compute_backward_lyap_traj_jit`)."""
    # pretime and time share their junction point, hence pretime[:-1] when concatenating.
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((pretime[:-1], time)), ic, 1, 1, b, c, a)
    recorded_traj, recorded_exp, recorded_vec = _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt,
                                                                                n_vec, write_steps, adjoint, inverse, b, c, a)
    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Benettin-style QR iteration performed forward in time along a precomputed
    trajectory ``ttraj``, recording the Backward Lyapunov Vectors and local exponents
    over the ``time`` span (the ``pretime`` span serves as convergence transient)."""
    # Split the full trajectory at the pretime/time junction (shared sample).
    pretraj = ttraj[:, :, :len(pretime)]
    traj = ttraj[:, :, (len(pretime)-1):]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of recorded snapshots; an extra slot is added when the subsampled
    # time axis does not land exactly on the final time.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        y[0] = pretraj[i_traj, :, 0]
        # Random orthonormal initial basis for the QR iteration.
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        # NOTE(review): sized n_dim but overwritten with n_vec-length values in the
        # loops below -- verify n_vec == n_dim is not silently assumed here.
        m_exp = np.zeros((n_dim))
        # Transient phase over pretime: converge the basis, nothing recorded.
        for ti, (tt, dt) in enumerate(zip(pretime[:-1], np.diff(pretime))):
            # Micro-integration of the tangent model over one macro step.
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Advance the state from the precomputed trajectory (not from y_new).
            y[0] = pretraj[i_traj, :, ti+1]
            # Propagate the basis through the tangent propagator and re-orthonormalize.
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Recording phase over time, filling the record arrays from the start (iw = 0).
        iw = 0
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            # Local exponents from the stretching factors (diagonal of R) of the previous step.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw += 1
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            y[0] = traj[i_traj, :, ti+1]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # The last record slot holds the state at the end of the time span.
        recorded_exp[i_traj, :, -1] = m_exp
        recorded_traj[i_traj, :, -1] = y[0]
        recorded_vec[i_traj, :, :, -1] = q
    return recorded_traj, recorded_exp, recorded_vec
class CovariantLyapunovsEstimator(object):
"""Class to compute the Covariant `Lyapunov vectors`_ (CLVs) and `exponents`_ along a trajectory of a dynamical system
.. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})
with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
The tangent linear model must also be provided. I.e. one must provide the linearized ODEs
.. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}
where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
Jacobian matrix of :math:`\\boldsymbol{f}`.
Parameters
----------
num_threads: None or int, optional
Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
cores available. Default to `None`.
b: None or ~numpy.ndarray, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
c: None or ~numpy.ndarray, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
a: None or ~numpy.ndarray, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
number_of_dimensions: None or int, optional
Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
callable :attr:`func`. Default to `None`.
method: int, optional
Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
Default to `0`, i.e. Ginelli et al. algorithm.
noise_pert: float, optional
Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
near tangencies (see :cite:`lyap-KP2012`). Default to 0 (no perturbation).
Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
Attributes
----------
num_threads: int
Number of :class:`LyapProcess` workers (threads) to use.
b: ~numpy.ndarray
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~numpy.ndarray
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~numpy.ndarray
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
n_dim: int
Dynamical system dimension.
n_vec: int
The number of Lyapunov vectors to compute.
n_traj: int
The number of trajectories (initial conditions) computed at the last estimation
performed by the estimator.
n_records: int
The number of saved states of the last estimation performed by the estimator.
ic: ~numpy.ndarray
Store the estimator initial conditions.
func: callable
Last function :math:`\\boldsymbol{f}` used by the estimator.
func_jac: callable
Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
method: int
Select the method used to compute the CLVs:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspaces spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
noise_pert: float
Noise perturbation parameter of the diagonal of the matrix resulting from the backpropagation during the Ginelli step.
Mainly done to avoid ill-conditioned matrices near tangencies (see :cite:`lyap-KP2012`).
Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
"""
    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None, noise_pert=0., method=0):
        """Build the estimator; defaults to the classic RK4 coefficients when none are given."""
        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads
        # Default is RK4
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c
        self.noise_pert = noise_pert
        self.ic = None
        # Time arrays of the three integration spans (set by compute_clvs).
        self._time = None
        self._pretime = None
        self._aftertime = None
        # Storage for the results of the last estimation.
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self._recorded_bvec = None
        self._recorded_fvec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0
        self.method = method
        self.func = None
        self.func_jac = None
        # Worker communication channels (created in start()).
        self._ics_queue = None
        self._clv_queue = None
        self._processes_list = list()
    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""
        # Forcefully stop each worker, then join to reap the process before reuse.
        for process in self._processes_list:
            process.terminate()
            process.join()
    def set_noise_pert(self, noise_pert):
        """Set the noise perturbation :attr:`noise_pert` parameter.

        Parameters
        ----------
        noise_pert: float, optional
            Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
            near tangencies (see :cite:`lyap-KP2012`).
            Only apply if using the Ginelli et al. algorithm, i.e. if :attr:`method` is 0.

        Warnings
        --------
        This method restarts the workers so they pick up the new parameter.
        """
        self.noise_pert = noise_pert
        self.start()
    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.

        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """
        # Only overwrite the coefficients that were explicitly provided.
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        # The workers hold copies of the coefficients, so they must be restarted.
        self.start()
    def start(self):
        """Start or restart the workers (threads) of the estimator.

        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """
        self.terminate()
        self._processes_list = list()
        # Fresh queues: jobs in (joinable, to wait for completion), results out.
        self._ics_queue = multiprocessing.JoinableQueue()
        self._clv_queue = multiprocessing.Queue()
        for i in range(self.num_threads):
            self._processes_list.append(ClvProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                   self._ics_queue, self._clv_queue, self.noise_pert))
        for process in self._processes_list:
            # Daemonize the workers so they do not block interpreter shutdown.
            process.daemon = True
            process.start()
    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.

        .. _Numba: https://numba.pydata.org/

        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.

        Warnings
        --------
        This function restarts the estimator!
        """
        self.func = f
        self.func_jac = fjac
        # Restart so the workers pick up the new functions.
        self.start()
def compute_clvs(self, t0, ta, tb, tc, dt, mdt, ic=None, write_steps=1, n_vec=None, method=None, backward_vectors=False, forward_vectors=False):
"""Estimate the Covariant Lyapunov Vectors (CLVs) along a given trajectory, always integrating the said trajectory
forward in time from `ic` at `t0` to time `tc`. Return the CLVs between `ta` and `tb`.
The result of the estimation can be obtained afterward by calling :meth:`get_clvs`.
Parameters
----------
t0: float
Initial time of the time integration. Corresponds to the initial condition's `ic` time.
ta: float
Define the time span between `t0` and `ta` of the first part of the algorithm, which obtain the convergence to the Backward Lyapunov vectors
(initialization of the Benettin algorithm).
tb: float
Define the time span between `ta` and `tb` where the Covariant Lyapunov Vectors are computed.
tc: float
Final time of the time integration algorithm. Define the time span between `tb` and `tc` where, depending on the value of :attr:`method`,
the convergence to the Forward Lyapunov Vectors or to the Covariant Lyapunov Vectors (thanks to the Ginelli steps) is obtained.
dt: float
Timestep of the integration.
mdt: float
Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
ic: None or ~numpy.ndarray(float), optional
Initial conditions of the system. Can be a 1D or a 2D array:
* 1D: Provide a single initial condition.
Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
* 2D: Provide an ensemble of initial condition.
Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
and where `n_traj` is the number of initial conditions.
If `None`, use the initial conditions stored in :attr:`ic`.
If then :attr:`ic` is `None`, use a zero initial condition.
Default to `None`.
write_steps: int, optional
Save the state of the integration in memory every `write_steps` steps. The other intermediary
steps are lost. It determines the size of the returned objects. Default is 1.
Set to 0 to return only the final state.
n_vec: int, optional
The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
method: int, optional
Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
Use the Ginelli et al. algorithm if not provided.
backward_vectors: bool, optional
Store also the computed Backward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
Does not store the BLVs if not provided.
forward_vectors: bool, optional
Store also the computed Forward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
Does not store the FLVs if not provided.
"""
if self.func is None or self.func_jac is None:
print('No function to integrate defined!')
return 0
if ic is None:
i = 1
while True:
self.ic = np.zeros(i)
try:
x = self.func(0., self.ic)
except:
i += 1
else:
break
i = len(self.func(0., self.ic))
self.ic = np.zeros(i)
else:
self.ic = ic
if len(self.ic.shape) == 1:
self.ic = self.ic.reshape((1, -1))
self.n_traj = self.ic.shape[0]
self.n_dim = self.ic.shape[1]
if n_vec is not None:
self.n_vec = n_vec
else:
self.n_vec = self.n_dim
if method is not None:
self.method = method
self._pretime = np.concatenate((np.arange(t0, ta, dt), np.full((1,), ta)))
self._time = np.concatenate((np.arange(ta, tb, dt), np.full((1,), tb)))
self._aftertime = np.concatenate((np.arange(tb, tc, dt), np.full((1,), tc)))
self.write_steps = write_steps
if write_steps == 0:
self.n_records = 1
else:
tot = self._time[::self.write_steps]
self.n_records = len(tot)
if tot[-1] != self._time[-1]:
self.n_records += 1
self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))
if self.method == 1:
if forward_vectors:
self._recorded_fvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
if backward_vectors:
self._recorded_bvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
for i in range(self.n_traj):
self._ics_queue.put((i, self._pretime, self._time, self._aftertime, mdt, self.ic[i], self.n_vec,
self.write_steps, self.method))
self._ics_queue.join()
for i in range(self.n_traj):
args = self._clv_queue.get()
self._recorded_traj[args[0]] = args[1]
self._recorded_exp[args[0]] = args[2]
self._recorded_vec[args[0]] = args[3]
if self.method == 1:
if forward_vectors:
self._recorded_fvec[args[0]] = args[5]
if backward_vectors:
self._recorded_bvec[args[0]] = args[4]
def get_clvs(self):
    """Return the result of the previous CLVs estimation.

    Returns
    -------
    time, traj, exponents, vectors: ~numpy.ndarray
        The result of the estimation:

        * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
        * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
          If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
        * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape
          (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`). If :attr:`n_traj` = 1, a 2D array of shape
          (:attr:`n_vec`, :attr:`n_records`) is returned instead.
        * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
          Depending on the input initial conditions, it is maximum a 4D array of shape
          (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
          If one of the dimension is 1, it is squeezed.
    """
    # Reconstruct the time coordinate of the saved records first, then
    # return it together with the squeezed recorded arrays.
    if self.write_steps > 0:
        tt = self._time[::self.write_steps]
        if tt[-1] != self._time[-1]:
            # The final time point was recorded separately; append it.
            tt = np.concatenate((tt, np.full((1,), self._time[-1])))
    else:
        # Only the last state was recorded.
        tt = self._time[-1]
    return tt, np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
def get_blvs(self):
    """Return the BLVs obtained during the previous CLVs estimation.

    Returns
    -------
    time, traj, exponents, vectors: ~numpy.ndarray
        The result of the estimation:

        * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
        * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
          If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
        * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape
          (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`). If :attr:`n_traj` = 1, a 2D array of shape
          (:attr:`n_vec`, :attr:`n_records`) is returned instead.
        * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
          Depending on the input initial conditions, it is maximum a 4D array of shape
          (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
          If one of the dimension is 1, it is squeezed.

    Warnings
    --------
    The BLVs are only available if :attr:`method` is set to 1.
    """
    # Guard clause: BLVs were only stored if method 1 was requested.
    if self._recorded_bvec is None:
        return None
    # Reconstruct the time coordinate of the saved records, then return
    # it with the squeezed recorded arrays.
    if self.write_steps > 0:
        tt = self._time[::self.write_steps]
        if tt[-1] != self._time[-1]:
            # The final time point was recorded separately; append it.
            tt = np.concatenate((tt, np.full((1,), self._time[-1])))
    else:
        # Only the last state was recorded.
        tt = self._time[-1]
    return tt, np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_bvec)
def get_flvs(self):
    """Return the FLVs obtained during the previous CLVs estimation.

    Returns
    -------
    time, traj, exponents, vectors: ~numpy.ndarray
        The result of the estimation:

        * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
        * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
          If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
        * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape
          (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`). If :attr:`n_traj` = 1, a 2D array of shape
          (:attr:`n_vec`, :attr:`n_records`) is returned instead.
        * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
          Depending on the input initial conditions, it is maximum a 4D array of shape
          (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
          If one of the dimension is 1, it is squeezed.

    Warnings
    --------
    The FLVs are only available if :attr:`method` is set to 1.
    """
    # Guard clause: FLVs were only stored if method 1 was requested.
    if self._recorded_fvec is None:
        return None
    # Reconstruct the time coordinate of the saved records, then return
    # it with the squeezed recorded arrays.
    if self.write_steps > 0:
        tt = self._time[::self.write_steps]
        if tt[-1] != self._time[-1]:
            # The final time point was recorded separately; append it.
            tt = np.concatenate((tt, np.full((1,), self._time[-1])))
    else:
        # Only the last state was recorded.
        tt = self._time[-1]
    return tt, np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_fvec)
class ClvProcess(multiprocessing.Process):
    """:class:`CovariantLyapunovsEstimator`'s worker class. Each worker fetches a job
    (initial condition plus estimation parameters) from a joinable queue, runs the CLVs
    estimation, and puts the results on an output queue, so that several trajectories
    can be processed in parallel.

    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue from which the worker fetches the initial conditions and parameters.
    clv_queue: multiprocessing.Queue
        Queue to which the worker returns the estimation results.
    noise_pert: float
        Amplitude of the diagonal noise perturbation used by the backward Ginelli
        steps (forwarded to the method-0 computation routine).

    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    noise_pert: float
        Amplitude of the diagonal noise perturbation (see Parameters).
    """

    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, clv_queue, noise_pert):
        super().__init__()
        self.processID = processID
        self.func = func
        self.func_jac = func_jac
        self.a = a
        self.b = b
        self.c = c
        self.noise_pert = noise_pert
        self._ics_queue = ics_queue
        self._clv_queue = clv_queue

    def run(self):
        """Main worker computing routine. Perform the estimation with the fetched
        initial conditions and parameters, then post the results back."""
        while True:
            # Job layout matches what the estimator enqueues:
            # (index, pretime, time, aftertime, mdt, ic, n_vec, write_steps, method)
            job = self._ics_queue.get()
            idx, pretime, tme, aftertime, mdt, ic0, n_vec, write_steps, method = job
            ic_batch = ic0[np.newaxis, :]
            if method == 0:
                # Ginelli et al. backward/forward method.
                traj, exps, vecs = _compute_clv_gin_jit(self.func, self.func_jac, pretime, tme,
                                                        aftertime, mdt, ic_batch, n_vec,
                                                        write_steps, self.b, self.c, self.a,
                                                        self.noise_pert)
                self._clv_queue.put((idx, np.squeeze(traj), np.squeeze(exps), np.squeeze(vecs)))
            else:
                # Subspace intersection method: also returns the BLVs and FLVs.
                traj, exps, vecs, bvecs, fvecs = _compute_clv_sub_jit(self.func, self.func_jac,
                                                                      pretime, tme, aftertime,
                                                                      mdt, ic_batch, write_steps,
                                                                      self.b, self.c, self.a)
                self._clv_queue.put((idx, np.squeeze(traj), np.squeeze(exps), np.squeeze(vecs),
                                     np.squeeze(bvecs), np.squeeze(fvecs)))
            self._ics_queue.task_done()
# Ginelli et al. method
@njit
def _compute_clv_gin_jit(f, fjac, pretime, time, aftertime, mdt, ic, n_vec, write_steps, b, c, a, noise_pert):
    """Estimate the Covariant Lyapunov Vectors with the Ginelli et al. algorithm.

    Parameters
    ----------
    f, fjac: callable
        Numba-jitted tendencies function and Jacobian matrix function.
    pretime: ~numpy.ndarray
        Time grid from `t0` to `ta`, used to converge the backward vectors.
    time: ~numpy.ndarray
        Time grid from `ta` to `tb`, over which the CLVs are stored.
    aftertime: ~numpy.ndarray
        Time grid from `tb` to `tc`, used to converge the CLV coefficients backward.
    mdt: float
        Micro time step used to integrate inside each grid interval.
    ic: ~numpy.ndarray
        Initial conditions, 2D array of shape (n_traj, n_dim).
    n_vec: int
        Number of vectors to estimate.
    write_steps: int
        Save the results every `write_steps` grid points (0 means only the first record).
    b, c, a: ~numpy.ndarray
        Runge-Kutta method coefficients.
    noise_pert: float
        Amplitude of the random noise added to the diagonal of the triangular
        matrices during the backward (Ginelli) steps, regularizing the
        triangular solves.

    Returns
    -------
    recorded_traj, recorded_exp, recorded_vec: ~numpy.ndarray
        Trajectories (n_traj, n_dim, n_records), local exponents
        (n_traj, n_vec, n_records) and CLVs (n_traj, n_dim, n_vec, n_records)
        at the saved times.
    """
    n_traj = ic.shape[0]
    n_dim = ic.shape[1]
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of records saved along `time`, mirroring the estimator's bookkeeping.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        # First part: make the backward vectors converge between t0 and ta
        # (initialization of the Benettin algorithm).
        y = np.zeros((1, n_dim))
        y[0] = ic[i_traj]
        qr = np.linalg.qr(np.random.randn(n_dim, n_vec))
        q = qr[0]
        for tt, dt in zip(pretime[:-1], np.diff(pretime)):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            # Propagate the orthonormal frame and re-orthonormalize it.
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
        # Second part: Benettin steps between ta and tb, storing the backward
        # vectors, the trajectory, and the R matrices of the QR decompositions.
        tw = len(time) - 1
        tew = len(time) + len(aftertime) - 2
        tmp_traj = np.zeros((tw + 1, n_dim))
        tmp_vec = np.zeros((tw + 1, n_dim, n_vec))
        tmp_R = np.zeros((tew, n_vec, n_vec))
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            tmp_vec[ti] = q.copy()
            tmp_traj[ti] = y[0].copy()
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            tmp_R[ti] = qr[1].copy()
        tmp_vec[-1] = q.copy()
        tmp_traj[-1] = y[0].copy()
        # Third part: Benettin steps between tb and tc, storing only the R matrices.
        for ti, (tt, dt) in enumerate(zip(aftertime[:-1], np.diff(aftertime))):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            tmp_R[ti + tw] = qr[1].copy()
        # Fourth part: go backward from tc until tb (Ginelli steps), letting the
        # coefficient matrix converge to the CLV expansion in the BLV basis.
        qr = np.linalg.qr(np.random.randn(n_dim, n_vec))
        am, norm = normalize_matrix_columns(qr[1])
        for ti in range(tew - 1, tw, -1):
            am_new = solve_triangular_matrix(tmp_R[ti], am)
            # Bugfix: `am_new` is n_vec x n_vec, so the diagonal noise must be
            # sized and looped with n_vec (the original mixed n_dim and n_vec,
            # going out of bounds whenever n_vec < n_dim).
            noise = np.random.randn(n_vec)
            for i in range(n_vec):
                am_new[i, i] += noise[i] * noise_pert
            am, norm = normalize_matrix_columns(am_new)
        # Fifth and last part: go backward from tb to ta (Ginelli steps),
        # saving the trajectory, the exponents and the CLVs.
        dte = np.concatenate((np.diff(time), np.full((1,), aftertime[1] - aftertime[0])))
        iw = 1
        for ti in range(tw, -1, -1):
            am_new = solve_triangular_matrix(tmp_R[ti], am)
            # Same n_vec-sized diagonal noise as in the fourth part (bugfix:
            # the original drew randn(n_vec) but looped over range(n_dim)).
            noise = np.random.randn(n_vec)
            for i in range(n_vec):
                am_new[i, i] += noise[i] * noise_pert
            am, mloc_exp = normalize_matrix_columns(am_new)
            if write_steps > 0 and np.mod(tw - ti, write_steps) == 0:
                recorded_traj[i_traj, :, -iw] = tmp_traj[ti]
                # The backward column norms give the local exponents (sign flipped
                # because we are iterating backward in time).
                recorded_exp[i_traj, :, -iw] = -np.log(np.abs(mloc_exp)) / dte[ti]
                # Rotate the stored BLVs by the converged coefficients to get the CLVs.
                recorded_vec[i_traj, :, :, -iw] = tmp_vec[ti] @ am
                iw += 1
        # Always record the state at ta in the first slot.
        recorded_traj[i_traj, :, 0] = tmp_traj[0]
        recorded_exp[i_traj, :, 0] = -np.log(np.abs(mloc_exp)) / dte[0]
        recorded_vec[i_traj, :, :, 0] = tmp_vec[0] @ am
    return recorded_traj, recorded_exp, recorded_vec
# Subspace intersection method
@njit
def _compute_clv_sub_jit(f, fjac, pretime, time, aftertime, mdt, ic, write_steps, b, c, a):
    """Estimate the Covariant Lyapunov Vectors by intersecting the backward and
    forward Lyapunov vector subspaces along the trajectory (method 1).

    The full trajectory is integrated once over pretime+time+aftertime, then the
    forward vectors are computed over [ta, tc] and the backward vectors over
    [t0, tb]; the j-th CLV is taken as the leading direction of the intersection
    of the first j+1 backward vectors with the first n_dim-j forward vectors.
    The local exponents are then estimated by propagating the CLVs over one
    micro time step mdt. Returns the trajectory, exponents, CLVs, and also the
    BLVs and FLVs (for optional storage by the caller).
    """
    n_traj = ic.shape[0]
    n_dim = ic.shape[1]
    lp = len(pretime)
    la = len(aftertime)
    # Integrate the whole trajectory over the concatenated time grids
    # (dropping the duplicated junction points at ta and tb).
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((pretime[:-1], time[:-1], aftertime)), ic, 1, 1, b, c, a)
    # Forward vectors over [ta, tc]: the slice lp-1: starts the stored
    # trajectory at ta.
    traj, exp, fvec = _compute_forward_lyap_traj_jit(f, fjac, time, aftertime, ttraj[:, :, lp-1:], mdt,
                                                     n_dim, write_steps, False, 1, b, c, a)
    # Backward vectors over [t0, tb]: the slice :-la+1 ends the stored
    # trajectory at tb.
    traj, exp, bvec = _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj[:, :, :-la+1], mdt,
                                                      n_dim, write_steps, False, 1, b, c, a)
    recorded_traj = traj
    recorded_exp = np.zeros_like(traj)
    n_records = traj.shape[-1]
    recorded_vec = np.zeros((n_traj, n_dim, n_dim, n_records))
    # One micro step, used below to estimate the local exponents.
    subtime = np.array([0., mdt])
    y = np.zeros((1, n_dim))
    vec = np.zeros((1, n_dim, n_dim))
    for i_traj in range(n_traj):
        for ti in range(n_records):
            for j in range(n_dim):
                # SVD of the overlap between the first j+1 backward vectors and
                # the first n_dim-j forward vectors; the leading left singular
                # vector gives the coefficients of the intersection direction
                # in the backward basis.
                u, z, w = np.linalg.svd(bvec[i_traj, :, :j+1, ti].T @ fvec[i_traj, :, :n_dim-j, ti])
                basis = bvec[i_traj, :, :j+1, ti] @ u
                recorded_vec[i_traj, :, j, ti] = basis[:, 0]
            # Propagate the CLVs over one micro step with the tangent linear
            # model and read the local exponents off the column growth rates.
            y[0] = recorded_traj[i_traj, :, ti]
            vec[0] = recorded_vec[i_traj, :, :, ti]
            y_new, sol = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, vec, 1, 0, b, c, a,
                                                                   False, 1, integrate._zeros_func)
            soln, mloc_exp = normalize_matrix_columns(sol[0, :, :, 0])
            recorded_exp[i_traj, :, ti] = np.log(np.abs(mloc_exp))/mdt
    return recorded_traj, recorded_exp, recorded_vec, bvec, fvec
if __name__ == "__main__":
    # --- Lorenz '84 model (alternative test system, currently unused) ---
    a = 0.25
    F = 8.
    G = 1.
    b = 4.

    @njit
    def fL84(t, x):
        """Lorenz '84 tendencies."""
        dx = -x[1] ** 2 - x[2] ** 2 - a * x[0] + a * F
        dy = x[0] * x[1] - b * x[0] * x[2] - x[1] + G
        dz = b * x[0] * x[1] + x[0] * x[2] - x[2]
        return np.array([dx, dy, dz])

    @njit
    def DfL84(t, x):
        """Lorenz '84 Jacobian matrix."""
        return np.array([[ -a , -2. * x[1], -2. * x[2]],
                         [x[1] - b * x[2], -1. + x[0], -b * x[0]],
                         [b * x[1] + x[2], b * x[0], -1. + x[0]]])

    # --- Lorenz '63 model (used below) ---
    sigma = 10.
    r = 28.
    bb = 8. / 3.

    @njit
    def fL63(t, x):
        """Lorenz '63 tendencies."""
        dx = sigma * (x[1] - x[0])
        dy = r * x[0] - x[1] - x[0] * x[2]
        dz = x[0] * x[1] - bb * x[2]
        return np.array([dx, dy, dz])

    @njit
    def DfL63(t, x):
        """Lorenz '63 Jacobian matrix."""
        return np.array([[-sigma, sigma, 0.],
                         [r - x[2], -1., - x[0]],
                         [x[1], x[0], -bb]])

    # Spin up an initial condition on the attractor.
    ic = np.random.random(3)
    # tt, ic_L84 = integrate.integrate_runge_kutta(fL84, 0., 10000., 0.01, ic=ic, write_steps=0)
    tt, ic = integrate.integrate_runge_kutta(fL63, 0., 10000., 0.01, ic=ic, write_steps=0)

    print('Computing Backward Lyapunovs')
    lyapint = LyapunovsEstimator()
    # lyapint.set_func(fL84, DfL84)
    lyapint.set_func(fL63, DfL63)
    lyapint.compute_lyapunovs(0., 10000., 30000., 0.01, 0.01, ic, write_steps=1)  # , n_vec=2)
    btl, btraj, bexp, bvec = lyapint.get_lyapunovs()

    print('Computing Forward Lyapunovs')
    # lyapint.set_func(fL84, DfL84)
    lyapint.set_func(fL63, DfL63)
    lyapint.compute_lyapunovs(0., 20000., 30000., 0.01, 0.01, ic, write_steps=1, forward=True, adjoint=False, inverse=False)  # , n_vec=2)
    ftl, ftraj, fexp, fvec = lyapint.get_lyapunovs()

    print('Computing Covariant Lyapunovs')
    clvint = CovariantLyapunovsEstimator()
    # clvint.set_func(fL84, DfL84)
    clvint.set_func(fL63, DfL63)
    # Method 0 (Ginelli) with every step saved...
    clvint.compute_clvs(0., 10000., 20000., 30000., 0.01, 0.01, ic, write_steps=1)  # , n_vec=2)
    ctl, ctraj, cexp, cvec = clvint.get_clvs()
    # ...then method 1 (subspace intersection), also keeping the BLVs.
    clvint.compute_clvs(0., 10000., 20000., 30000., 0.01, 0.01, ic, write_steps=10, method=1, backward_vectors=True)  # , n_vec=2)
    ctl2, ctraj2, cexp2, cvec2 = clvint.get_clvs()

    # Shut down the worker processes.
    lyapint.terminate()
    clvint.terminate()
| 42.688349
| 177
| 0.574923
| 8,086
| 59,721
| 4.101534
| 0.061835
| 0.014021
| 0.018333
| 0.022795
| 0.856053
| 0.828524
| 0.806935
| 0.788723
| 0.778652
| 0.763395
| 0
| 0.017979
| 0.305203
| 59,721
| 1,399
| 178
| 42.688349
| 0.781294
| 0.424591
| 0
| 0.678354
| 0
| 0
| 0.005021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047256
| false
| 0
| 0.007622
| 0.003049
| 0.10061
| 0.007622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.