hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
d0ec67e18fcced834b36dc99b5855ab17c2cf278
1,291
py
Python
Fast_style_transfer/drawLoss.py
HunterJ-Lin/Lightweight-Image-Style-Transfer-Model
4afb49b7d3d3d9c75d265e9e997b32b5747bd4fa
[ "MIT" ]
1
2022-03-08T06:37:24.000Z
2022-03-08T06:37:24.000Z
Fast_style_transfer/drawLoss.py
HunterJ-Lin/Lightweight-Image-Style-Transfer-Model
4afb49b7d3d3d9c75d265e9e997b32b5747bd4fa
[ "MIT" ]
null
null
null
Fast_style_transfer/drawLoss.py
HunterJ-Lin/Lightweight-Image-Style-Transfer-Model
4afb49b7d3d3d9c75d265e9e997b32b5747bd4fa
[ "MIT" ]
1
2021-06-10T06:43:57.000Z
2021-06-10T06:43:57.000Z
import matplotlib.pyplot as plt import numpy as np file = open("c:/users/hunterj/desktop/实验数据/starry_night/styleloss.txt", "r") x = [] for line in file.readlines(): x.append(float(line.strip('\n'))) print(min(x)) plt.title("Loss") plt.xlabel("update times/100") plt.ylabel("variance updated every 100") line1, = plt.plot(range(1, len(x) + 1), x, color='r', linestyle='--') file.close() file = open("c:/users/hunterj/desktop/实验数据/Fire/starry_night/styleloss.txt", "r") x = [] for line in file.readlines(): x.append(float(line.strip('\n'))) print(min(x)) line2, = plt.plot(range(1, len(x) + 1), x, color='b') file.close() plt.legend([line1, line2], ["Normal", "Fire"], loc='upper left')#添加图例 # file = open("c:/users/hunterj/desktop/实验数据/starry_night/styleloss.txt", "r") # x = [] # for line in file.readlines(): # x.append(float(line.strip('\n'))) # print(min(x)) # plt.figure(3) # plt.subplot(312) # plt.title("styleloss") # plt.plot(range(1, len(x) + 1), x) # file.close() # # file = open("c:/users/hunterj/desktop/实验数据/starry_night/totalloss.txt", "r") # x = [] # for line in file.readlines(): # x.append(float(line.strip('\n'))) # print(min(x)) # plt.figure(3) # plt.subplot(313) # plt.title("totalloss") # plt.plot(range(1, len(x) + 1), x) # file.close() plt.show()
26.346939
81
0.644462
209
1,291
3.961722
0.30622
0.038647
0.043478
0.067633
0.708937
0.708937
0.708937
0.708937
0.708937
0.549517
0
0.022867
0.119287
1,291
48
82
26.895833
0.705365
0.416731
0
0.47619
0
0
0.264022
0.160055
0
0
0
0
0
1
0
false
0
0.095238
0
0.095238
0.095238
0
0
0
null
0
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
efc32ed5f957b0274df10b0d0a33af23d40c8ec1
242
py
Python
car_sim_gen/quantities.py
svenlr/car-physics-pacejka
bef64a7c3c813419a76f55c2b0553b5fe82f0808
[ "BSD-2-Clause" ]
null
null
null
car_sim_gen/quantities.py
svenlr/car-physics-pacejka
bef64a7c3c813419a76f55c2b0553b5fe82f0808
[ "BSD-2-Clause" ]
null
null
null
car_sim_gen/quantities.py
svenlr/car-physics-pacejka
bef64a7c3c813419a76f55c2b0553b5fe82f0808
[ "BSD-2-Clause" ]
null
null
null
from casadi import MX class CarPhysicalQuantities: def __init__(self, n_wheels): self.wheel_quantities = [WheelPhysicalQuantities() for _ in range(n_wheels)] class WheelPhysicalQuantities: def __init__(self): pass
20.166667
84
0.727273
26
242
6.307692
0.692308
0.085366
0.134146
0
0
0
0
0
0
0
0
0
0.202479
242
11
85
22
0.849741
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0.142857
0.142857
0
0.714286
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
efcd801d140d15270f4c1cec683ccf428d4a8c74
1,640
py
Python
tikzqlmap.py
shipcod3/tikzqlmap
6377e76b702975ea5183a4bde948f4d55c796ac9
[ "Apache-2.0" ]
1
2022-02-18T02:06:08.000Z
2022-02-18T02:06:08.000Z
tikzqlmap.py
shipcod3/tikzqlmap
6377e76b702975ea5183a4bde948f4d55c796ac9
[ "Apache-2.0" ]
null
null
null
tikzqlmap.py
shipcod3/tikzqlmap
6377e76b702975ea5183a4bde948f4d55c796ac9
[ "Apache-2.0" ]
null
null
null
tikz = ''' /~\ |oo ) _\=/_ ___ / _ \ /() \ //|/.\|\\ _|_____|_ \\ \_/ || | | === | | \|\ /||| |_| O |_| # _ _/# || O || | | | ||__*__|| | | | |~ \___/ ~| []|[] /=\ /=\ /=\ | | | ________________[_]_[_]_[_]________/_]_[_\_________________________ _ _ _ _ | | (_) | | | | |_ _| | __ ______ _| |_ __ ___ __ _ _ __ | __| | |/ /|_ / _` | | '_ ` _ \ / _` | '_ \ | |_| | < / / (_| | | | | | | | (_| | |_) | \__|_|_|\_\/___\__, |_|_| |_| |_|\__,_| .__/ | | | | |_| |_| -= automatic pwet grabber exploitation and fingerprinting tool =- ''' arguments = ''' -h show help -wafnuke try to bypass web application firewall --grab-pwet downloads ass pics -u URL of the website -finger fingerprints the website --crawl crawls the website recursively based on the hyperlinks ''' print tikz print arguments
44.324324
70
0.246951
49
1,640
5.346939
0.795918
0.114504
0
0
0
0
0
0
0
0
0
0
0.625
1,640
36
71
45.555556
0.426016
0
0
0.058824
0
0.117647
0.962195
0.040854
0
0
0
0
0
0
null
null
0.029412
0
null
null
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
1
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
ef0d0922136f9c74db39d2857149365c6b91c7b5
18
py
Python
python/testData/formatter/spaceAroundKeywords.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/formatter/spaceAroundKeywords.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/formatter/spaceAroundKeywords.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
True and False
9
17
0.666667
3
18
4
1
0
0
0
0
0
0
0
0
0
0
0
0.333333
18
1
18
18
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
ef2deb86eed2b2677efad7846572bc7f5ccbc42a
121
py
Python
dissect/formats/pdf.py
AKOU0/dissect
b521153d86fe94dddc04846eb7ba3b6196917ee7
[ "Apache-2.0" ]
19
2015-07-08T18:51:40.000Z
2020-03-08T16:06:16.000Z
dissect/formats/pdf.py
AKOU0/dissect
b521153d86fe94dddc04846eb7ba3b6196917ee7
[ "Apache-2.0" ]
5
2016-02-24T15:23:13.000Z
2019-11-09T11:23:47.000Z
dissect/formats/pdf.py
AKOU0/dissect
b521153d86fe94dddc04846eb7ba3b6196917ee7
[ "Apache-2.0" ]
11
2015-10-22T00:32:20.000Z
2017-07-14T01:45:14.000Z
import os import sys from binascii import unhexlify as xeh from vstruct2.types import * from dissect.filelab import *
13.444444
37
0.793388
18
121
5.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0.01
0.173554
121
8
38
15.125
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
ef314a18194cf2e00ea5c4f4b193afea4b8dc75a
4,589
py
Python
app/migrations/0001_initial.py
raptor419/SIH2020_AN314_AMRTrack
8bbdfce967ff8f52a6ee2aae5664047ce953d8a8
[ "MIT" ]
null
null
null
app/migrations/0001_initial.py
raptor419/SIH2020_AN314_AMRTrack
8bbdfce967ff8f52a6ee2aae5664047ce953d8a8
[ "MIT" ]
null
null
null
app/migrations/0001_initial.py
raptor419/SIH2020_AN314_AMRTrack
8bbdfce967ff8f52a6ee2aae5664047ce953d8a8
[ "MIT" ]
null
null
null
# Generated by Django 2.1.1 on 2020-08-02 23:20 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Hospital', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hospitalid', models.CharField(max_length=25)), ('name', models.CharField(max_length=50)), ('state', models.CharField(max_length=50)), ('district', models.CharField(max_length=50)), ('hospital', models.CharField(max_length=50)), ('address', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='PathTest', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('testid', models.CharField(max_length=25)), ('patientid', models.CharField(max_length=25)), ('date', models.DateField(null=True)), ('year', models.IntegerField()), ('month', models.IntegerField()), ('sampletype', models.CharField(max_length=50)), ('organism', models.CharField(max_length=50)), ('hospital', models.CharField(max_length=50)), ('collsite', models.CharField(max_length=50)), ('amikacin', models.IntegerField(default=-1)), ('amoxicillin_clavulanicacid', models.IntegerField(default=-1)), ('ampicillin', models.IntegerField(default=-1)), ('ampicillin_sulbactum', models.IntegerField(default=-1)), ('cefaperazone_sulbactum', models.IntegerField(default=-1)), ('cefexime', models.IntegerField(default=-1)), ('cefotaxime', models.IntegerField(default=-1)), ('cefoxitin', models.IntegerField(default=-1)), ('ceftazidime', models.IntegerField(default=-1)), ('ceftazidime_clavalunicacid', models.IntegerField(default=-1)), ('ceftriaxone', models.IntegerField(default=-1)), ('chloramphenicol', models.IntegerField(default=-1)), ('ciprofloxacin', models.IntegerField(default=-1)), ('colistin', models.IntegerField(default=-1)), ('cotrimoxazole', models.IntegerField(default=-1)), ('ertapenem', models.IntegerField(default=-1)), 
('erythromycin', models.IntegerField(default=-1)), ('gentamicin_highlevel', models.IntegerField(default=-1)), ('imipenem', models.IntegerField(default=-1)), ('levofloxacin', models.IntegerField(default=-1)), ('linezolid', models.IntegerField(default=-1)), ('meropenem', models.IntegerField(default=-1)), ('netilmicin', models.IntegerField(default=-1)), ('nitrofurantoin', models.IntegerField(default=-1)), ('penicillin', models.IntegerField(default=-1)), ('piperacillin_tazobactum', models.IntegerField(default=-1)), ('rifampicin', models.IntegerField(default=-1)), ('teicoplanin', models.IntegerField(default=-1)), ('tetracycline', models.IntegerField(default=-1)), ('ticarcillin_clavulanicacid', models.IntegerField(default=-1)), ('tigecycline', models.IntegerField(default=-1)), ('vancomycin', models.IntegerField(default=-1)), ], ), migrations.CreateModel( name='Patient', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('patientid', models.CharField(max_length=25)), ('testid', models.CharField(max_length=25)), ('state', models.CharField(max_length=50)), ('district', models.CharField(max_length=50)), ('hospital', models.CharField(max_length=50)), ('symptoms', models.CharField(max_length=50)), ('diagnosis', models.CharField(max_length=50)), ('test', models.CharField(max_length=50)), ('prescription', models.CharField(max_length=50)), ('allergy', models.IntegerField(default=0)), ], ), ]
50.988889
114
0.559599
392
4,589
6.456633
0.252551
0.248913
0.325958
0.328724
0.447649
0.248518
0.195575
0.195575
0.195575
0.195575
0
0.027464
0.285901
4,589
89
115
51.561798
0.744889
0.009806
0
0.317073
1
0
0.141127
0.027081
0
0
0
0
0
1
0
false
0
0.012195
0
0.060976
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
ef3c1ce12ecd236763b55074ef1090bf71ffdbe7
51
py
Python
inst/python/install.py
BDSI-Utwente/ExtractAnonymizeChop
1379a7a6770190540cdf0a881f5d0c9968e19bd5
[ "MIT" ]
null
null
null
inst/python/install.py
BDSI-Utwente/ExtractAnonymizeChop
1379a7a6770190540cdf0a881f5d0c9968e19bd5
[ "MIT" ]
null
null
null
inst/python/install.py
BDSI-Utwente/ExtractAnonymizeChop
1379a7a6770190540cdf0a881f5d0c9968e19bd5
[ "MIT" ]
1
2021-09-22T20:49:48.000Z
2021-09-22T20:49:48.000Z
import spacy spacy.cli.download("en_core_web_md")
12.75
36
0.803922
9
51
4.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.078431
51
3
37
17
0.808511
0
0
0
0
0
0.27451
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
ef41a7c152b636de5e791672af62e8b5f0ceb7b3
127
py
Python
examples/docs_snippets/docs_snippets/guides/dagster/versioning_memoization/memoization_enabled_job.py
rpatil524/dagster
6f918d94cbd543ab752ab484a65e3a40fd441716
[ "Apache-2.0" ]
1
2021-01-31T19:16:29.000Z
2021-01-31T19:16:29.000Z
examples/docs_snippets/docs_snippets/guides/dagster/versioning_memoization/memoization_enabled_job.py
rpatil524/dagster
6f918d94cbd543ab752ab484a65e3a40fd441716
[ "Apache-2.0" ]
null
null
null
examples/docs_snippets/docs_snippets/guides/dagster/versioning_memoization/memoization_enabled_job.py
rpatil524/dagster
6f918d94cbd543ab752ab484a65e3a40fd441716
[ "Apache-2.0" ]
1
2019-09-11T03:02:27.000Z
2019-09-11T03:02:27.000Z
from dagster import SourceHashVersionStrategy, job @job(version_strategy=SourceHashVersionStrategy()) def the_job(): ...
18.142857
50
0.779528
12
127
8.083333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.11811
127
6
51
21.166667
0.866071
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
4
3234f9432a4e1d95b3440b26f5f530a38c481117
109
py
Python
src/airflow_docker/views/__init__.py
Jwan622/airflow-docker
55310bc730f94bc1a293ba6e27ecf5bb663052ba
[ "Apache-2.0" ]
17
2019-11-16T13:25:59.000Z
2022-03-31T02:50:59.000Z
src/airflow_docker/views/__init__.py
Jwan622/airflow-docker
55310bc730f94bc1a293ba6e27ecf5bb663052ba
[ "Apache-2.0" ]
14
2019-09-13T20:02:15.000Z
2022-03-16T19:23:13.000Z
src/airflow_docker/views/__init__.py
Jwan622/airflow-docker
55310bc730f94bc1a293ba6e27ecf5bb663052ba
[ "Apache-2.0" ]
2
2020-02-16T10:46:51.000Z
2022-03-14T18:52:04.000Z
import pkg_resources template_folder = pkg_resources.resource_filename("airflow_docker", "views/templates")
27.25
86
0.844037
13
109
6.692308
0.846154
0.275862
0
0
0
0
0
0
0
0
0
0
0.06422
109
3
87
36.333333
0.852941
0
0
0
0
0
0.266055
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
324c3d75b6d3f9125f663c5da34186cea470b784
99
py
Python
pyshader/_coreutils.py
pygfx/pyshader
804f2a63221b40434ebcbeb4a01eeebe0d361a90
[ "BSD-2-Clause" ]
48
2020-07-19T15:55:08.000Z
2022-03-21T15:02:45.000Z
pyshader/_coreutils.py
almarklein/python-shader
804f2a63221b40434ebcbeb4a01eeebe0d361a90
[ "BSD-2-Clause" ]
22
2019-12-31T16:01:28.000Z
2020-06-15T20:03:58.000Z
pyshader/_coreutils.py
almarklein/spirv-py
804f2a63221b40434ebcbeb4a01eeebe0d361a90
[ "BSD-2-Clause" ]
2
2020-10-12T09:42:28.000Z
2021-03-04T08:20:19.000Z
class ShaderError(Exception): """Error raised when the user shader code cannot be compiled."""
33
68
0.737374
13
99
5.615385
1
0
0
0
0
0
0
0
0
0
0
0
0.161616
99
2
69
49.5
0.879518
0.585859
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
3264c13b328b7fbf7bfcb024a7be66d4da37afe3
58
py
Python
flask_value_checker/restrictions/parsing/__init__.py
cbcoutinho/flask-value-checker
cbdff51c401486dea7d49eda30eae1211d392ee7
[ "MIT" ]
null
null
null
flask_value_checker/restrictions/parsing/__init__.py
cbcoutinho/flask-value-checker
cbdff51c401486dea7d49eda30eae1211d392ee7
[ "MIT" ]
null
null
null
flask_value_checker/restrictions/parsing/__init__.py
cbcoutinho/flask-value-checker
cbdff51c401486dea7d49eda30eae1211d392ee7
[ "MIT" ]
null
null
null
from .parsing import make_restrictions, RestrictionParser
29
57
0.87931
6
58
8.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.086207
58
1
58
58
0.943396
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
3277671f2a9aea4c149b705c8cad9510b4b5b14c
156
py
Python
logic/emails/send_blast.py
q82cap/company-website
72ec56e1d7965c6ffbba61f567599224a68e4533
[ "MIT" ]
null
null
null
logic/emails/send_blast.py
q82cap/company-website
72ec56e1d7965c6ffbba61f567599224a68e4533
[ "MIT" ]
null
null
null
logic/emails/send_blast.py
q82cap/company-website
72ec56e1d7965c6ffbba61f567599224a68e4533
[ "MIT" ]
null
null
null
from logic.emails import mailing_list from tools import db_utils with db_utils.request_context(): mailing_list.send_one_off("may_2018_newsletter")
26
52
0.807692
24
156
4.875
0.75
0.188034
0
0
0
0
0
0
0
0
0
0.029412
0.128205
156
6
53
26
0.830882
0
0
0
0
0
0.121019
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
328c3c8b8d7133f481c99d1ad6bc62ef4f9fd60b
89
py
Python
bridges/apps.py
vitale232/InspectionPlanner
4d9c9b494e6b3587eb182e9c34ea3d6aee5546e8
[ "MIT" ]
1
2020-01-30T12:32:38.000Z
2020-01-30T12:32:38.000Z
bridges/apps.py
vitale232/InspectionPlanner
4d9c9b494e6b3587eb182e9c34ea3d6aee5546e8
[ "MIT" ]
45
2019-07-27T02:12:11.000Z
2022-03-02T04:59:15.000Z
bridges/apps.py
vitale232/InspectionPlanner
4d9c9b494e6b3587eb182e9c34ea3d6aee5546e8
[ "MIT" ]
null
null
null
from django.apps import AppConfig class BridgesConfig(AppConfig): name = 'bridges'
14.833333
33
0.752809
10
89
6.7
0.9
0
0
0
0
0
0
0
0
0
0
0
0.168539
89
5
34
17.8
0.905405
0
0
0
0
0
0.078652
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
329175f4e32f38f66cb295a6b73fad817389e4c1
783
py
Python
boundaries/tests/test_titlecase.py
MinnPost/represent-boundaries
17f65d34a6ed761e72dbdf13ea78b64fdeaa356d
[ "MIT" ]
20
2015-03-17T09:10:39.000Z
2020-06-30T06:08:08.000Z
boundaries/tests/test_titlecase.py
rhymeswithcycle/represent-boundaries
f487f9b18d6c1b8fe3e7f47171fc7741c14be4b3
[ "MIT" ]
14
2015-04-24T17:22:00.000Z
2021-06-22T16:50:24.000Z
boundaries/tests/test_titlecase.py
rhymeswithcycle/represent-boundaries
f487f9b18d6c1b8fe3e7f47171fc7741c14be4b3
[ "MIT" ]
16
2015-04-27T23:32:46.000Z
2020-07-05T11:18:04.000Z
# coding: utf-8 from __future__ import unicode_literals from django.test import TestCase from boundaries.titlecase import titlecase class TitlecaseTestCase(TestCase): def test_uc_initials(self): self.assertEqual(titlecase('X.Y.Z. INC.'), 'X.Y.Z. Inc.') def test_apos_second(self): self.assertEqual(titlecase("duck à l'orange"), "Duck à L'Orange") def test_inline_period(self): self.assertEqual(titlecase('example.com'), 'example.com') def test_small_words(self): self.assertEqual(titlecase('FOR WHOM THE BELL TOLLS'), 'For Whom the Bell Tolls') def test_mac_mc(self): self.assertEqual(titlecase('MACDONALD'), 'MacDonald') def test_slash(self): self.assertEqual(titlecase('foo/bar/baz'), 'Foo/Bar/Baz')
29
89
0.696041
106
783
4.990566
0.45283
0.079395
0.215501
0.31758
0.071834
0
0
0
0
0
0
0.001546
0.173691
783
26
90
30.115385
0.816074
0.016603
0
0
0
0
0.208333
0
0
0
0
0
0.375
1
0.375
false
0
0.1875
0
0.625
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
329c21839027d705c850e0bf8df70524cc1456e4
35
py
Python
workflows/arlexecute/simulation/__init__.py
ska-telescope/algorithm-reference-library
1b2c8d6079249202864abf8c60cdea40f0f123cb
[ "Apache-2.0" ]
22
2016-12-14T11:20:07.000Z
2021-08-13T15:23:41.000Z
workflows/arlexecute/simulation/__init__.py
ska-telescope/algorithm-reference-library
1b2c8d6079249202864abf8c60cdea40f0f123cb
[ "Apache-2.0" ]
30
2017-06-27T09:15:38.000Z
2020-09-11T18:16:37.000Z
workflows/arlexecute/simulation/__init__.py
SKA-ScienceDataProcessor/algorithm-reference-library
1b2c8d6079249202864abf8c60cdea40f0f123cb
[ "Apache-2.0" ]
20
2017-07-02T03:45:49.000Z
2019-12-11T17:19:01.000Z
__all__ = ['simulation_arlexecute']
35
35
0.8
3
35
7.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.057143
35
1
35
35
0.69697
0
0
0
0
0
0.583333
0.583333
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
32a2a86b28fae6eb32687687a9cbda5afcbb4e80
34
py
Python
homeassistant/components/hikvisioncam/__init__.py
domwillcode/home-assistant
f170c80bea70c939c098b5c88320a1c789858958
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/hikvisioncam/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
homeassistant/components/hikvisioncam/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""The hikvisioncam component."""
17
33
0.705882
3
34
8
1
0
0
0
0
0
0
0
0
0
0
0
0.088235
34
1
34
34
0.774194
0.794118
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
32c33de1ece537d30014c207b4d3008f0d237ab2
305
py
Python
cwProject/cwApp/views.py
cs-fullstack-2019-spring/django-bootstrap-grid-cw-autumn-ragland
d85d0ae326f55f9adab4c963241e54d516fc36b2
[ "Apache-2.0" ]
null
null
null
cwProject/cwApp/views.py
cs-fullstack-2019-spring/django-bootstrap-grid-cw-autumn-ragland
d85d0ae326f55f9adab4c963241e54d516fc36b2
[ "Apache-2.0" ]
null
null
null
cwProject/cwApp/views.py
cs-fullstack-2019-spring/django-bootstrap-grid-cw-autumn-ragland
d85d0ae326f55f9adab4c963241e54d516fc36b2
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render # render page 1 def index(request): return render(request, 'cwApp/index.html') # render page 2 def page_two(request): return render(request, 'cwApp/pageTwo.html') # render page 3 def page_three(request): return render(request, 'cwApp/pageThree.html')
17.941176
50
0.727869
43
305
5.116279
0.44186
0.136364
0.259091
0.354545
0.422727
0
0
0
0
0
0
0.011719
0.160656
305
16
51
19.0625
0.847656
0.134426
0
0
0
0
0.207692
0
0
0
0
0
0
1
0.428571
false
0
0.142857
0.428571
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
08713962a8cff65770d654593ba556dc5f9b3472
137
py
Python
reddit2telegram/channels/~inactive/cahaf_avir/app.py
mainyordle/reddit2telegram
1163e15aed3b6ff0fba65b222d3d9798f644c386
[ "MIT" ]
187
2016-09-20T09:15:54.000Z
2022-03-29T12:22:33.000Z
reddit2telegram/channels/~inactive/cahaf_avir/app.py
mainyordle/reddit2telegram
1163e15aed3b6ff0fba65b222d3d9798f644c386
[ "MIT" ]
84
2016-09-22T14:25:07.000Z
2022-03-19T01:26:17.000Z
reddit2telegram/channels/~inactive/cahaf_avir/app.py
mainyordle/reddit2telegram
1163e15aed3b6ff0fba65b222d3d9798f644c386
[ "MIT" ]
172
2016-09-21T15:39:39.000Z
2022-03-16T15:15:58.000Z
#encoding:utf-8 subreddit = 'ani_bm' t_channel = '@cahaf_avir' def send_post(submission, r2t): return r2t.send_simple(submission)
15.222222
38
0.737226
20
137
4.8
0.85
0
0
0
0
0
0
0
0
0
0
0.025424
0.138686
137
8
39
17.125
0.788136
0.10219
0
0
0
0
0.139344
0
0
0
0
0
0
1
0.25
false
0
0
0.25
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
087ea32939f31a431d80f8969192e6beb737d794
72
py
Python
planning_poker_jira/__init__.py
rheinwerk-verlag/planning-poker-jira
23a4f4d5cc33a148d648c341416723216f338397
[ "BSD-3-Clause" ]
1
2021-08-24T08:12:17.000Z
2021-08-24T08:12:17.000Z
planning_poker_jira/__init__.py
rheinwerk-verlag/planning-poker-jira
23a4f4d5cc33a148d648c341416723216f338397
[ "BSD-3-Clause" ]
1
2021-09-13T07:18:46.000Z
2021-09-13T07:18:46.000Z
planning_poker_jira/__init__.py
rheinwerk-verlag/planning-poker-jira
23a4f4d5cc33a148d648c341416723216f338397
[ "BSD-3-Clause" ]
null
null
null
default_app_config = 'planning_poker_jira.apps.PlanningPokerJiraConfig'
36
71
0.888889
8
72
7.5
1
0
0
0
0
0
0
0
0
0
0
0
0.041667
72
1
72
72
0.869565
0
0
0
0
0
0.666667
0.666667
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0882bf0632cfccae643916359cf99a3312615318
1,673
py
Python
EasyDeep/base/base_experiment.py
strawsyz/straw
db313c78c2e3c0355cd10c70ac25a15bb5632d41
[ "MIT" ]
2
2020-04-06T09:09:19.000Z
2020-07-24T03:59:55.000Z
EasyDeep/base/base_experiment.py
strawsyz/straw
db313c78c2e3c0355cd10c70ac25a15bb5632d41
[ "MIT" ]
null
null
null
EasyDeep/base/base_experiment.py
strawsyz/straw
db313c78c2e3c0355cd10c70ac25a15bb5632d41
[ "MIT" ]
null
null
null
from configs.experiment_config import BaseExperimentConfig class BaseExperiment(BaseExperimentConfig): def __init__(self): super(BaseExperiment, self).__init__() # self.config_instance = config_instance # self.load_config() # def load_config(self): # if self.config_instance is not None: # copy_attr(self.config_instance, self) # else: # self.logger.error("need a eperiment config file!") # raise NotImplementedError # def show_config(self): # """list all configure in a experiment""" # # if self.config_instance is None: # # self.logger.warning("please set a configure file for the experiment") # # raise RuntimeError("please set a configure file for the experiment") # # else: # config_instance_str = str(self.config_instance) # self.logger.info(config_instance_str) # return config_instance_str def prepare_net(self): raise NotImplementedError def prepare_dataset(self): raise NotImplementedError def train(self): raise NotImplementedError def before_test(self): raise NotImplementedError def estimate(self): raise NotImplementedError def save(self): raise NotImplementedError def load(self): raise NotImplementedError def estimate_history(self): raise NotImplementedError def check(self): # used to check config file and something else raise NotImplementedError if __name__ == '__main__': expriment = BaseExperiment() expriment.train() expriment.estimate_history()
27.42623
85
0.656904
174
1,673
6.097701
0.344828
0.226202
0.229029
0.233742
0.188501
0.073516
0.073516
0.073516
0
0
0
0
0.266587
1,673
60
86
27.883333
0.864711
0.426778
0
0.346154
0
0
0.008547
0
0
0
0
0
0
1
0.384615
false
0
0.038462
0
0.461538
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
0884adcc5c0d637f5645e931117cb42cd5f102fd
324
py
Python
Dataset/Leetcode/valid/3/602.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
Dataset/Leetcode/valid/3/602.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
Dataset/Leetcode/valid/3/602.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
class Solution: def XXX(self, s: str) -> int: max_sub_s = '' len_sub_s = 0 for i_s in s: if i_s in max_sub_s: max_sub_s = max_sub_s[max_sub_s.index(i_s)+1:] max_sub_s += i_s len_sub_s = max(len_sub_s, len(max_sub_s)) return len_sub_s
27
62
0.524691
60
324
2.4
0.316667
0.305556
0.340278
0.208333
0.194444
0.194444
0.194444
0.194444
0.194444
0
0
0.01
0.382716
324
11
63
29.454545
0.71
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
089907f3d9f8bcc2abd0bcd73b6358fc421e90ed
105
py
Python
niftivis/__init__.py
jstutters/niftivis
65b440d6c82e6ed87b824208f0b8d1c4dac083fa
[ "MIT" ]
null
null
null
niftivis/__init__.py
jstutters/niftivis
65b440d6c82e6ed87b824208f0b8d1c4dac083fa
[ "MIT" ]
null
null
null
niftivis/__init__.py
jstutters/niftivis
65b440d6c82e6ed87b824208f0b8d1c4dac083fa
[ "MIT" ]
null
null
null
from niftivis.niftivis import make_thumbnails __version__ = "2021.04.13" __all__ = ["make_thumbnails"]
17.5
45
0.780952
13
105
5.538462
0.769231
0.388889
0
0
0
0
0
0
0
0
0
0.086022
0.114286
105
5
46
21
0.688172
0
0
0
0
0
0.238095
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
08afabb347256359a4c52576b478792751976be2
332
py
Python
photo_location_plotter/file_structure_helper.py
Wesley-Fisher/photo-location-plotter
169b68a0bd44fd128c4fb364b5ea0a5c8bc99386
[ "MIT" ]
null
null
null
photo_location_plotter/file_structure_helper.py
Wesley-Fisher/photo-location-plotter
169b68a0bd44fd128c4fb364b5ea0a5c8bc99386
[ "MIT" ]
null
null
null
photo_location_plotter/file_structure_helper.py
Wesley-Fisher/photo-location-plotter
169b68a0bd44fd128c4fb364b5ea0a5c8bc99386
[ "MIT" ]
null
null
null
class FileStructureHelper: def __init__(self, run_settings): self.run_settings = run_settings def get_project_directory(self): return self.run_settings.app_directory + '/projects/' + self.run_settings.project def get_config_file_path(self): return self.get_project_directory() + "/config.yaml"
33.2
89
0.725904
41
332
5.463415
0.414634
0.245536
0.267857
0
0
0
0
0
0
0
0
0
0.180723
332
10
90
33.2
0.823529
0
0
0
0
0
0.066265
0
0
0
0
0
0
1
0.428571
false
0
0
0.285714
0.857143
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
08b2bb78c4d351b874f83d9446eed8f2e0e0d6bc
94
py
Python
subjects/apps.py
encrypted-fox/students_performance_monitoring
1a6c80ff70f3738496809586ae3fc204a156ca3b
[ "MIT" ]
null
null
null
subjects/apps.py
encrypted-fox/students_performance_monitoring
1a6c80ff70f3738496809586ae3fc204a156ca3b
[ "MIT" ]
22
2020-01-23T17:41:36.000Z
2021-07-02T14:00:00.000Z
subjects/apps.py
encrypted-fox/students_performance_monitoring
1a6c80ff70f3738496809586ae3fc204a156ca3b
[ "MIT" ]
null
null
null
from django.apps import AppConfig class DepartmentsConfig(AppConfig): name = 'subjects'
15.666667
35
0.765957
10
94
7.2
0.9
0
0
0
0
0
0
0
0
0
0
0
0.159574
94
5
36
18.8
0.911392
0
0
0
0
0
0.085106
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
08beafa1af35ca9dbbe9eae07311e3e6d2c8601e
120
py
Python
decred/decred/__init__.py
JoeGruffins/tinydecred
9c62378d04139446391f675f0e6646c5c882deb1
[ "ISC" ]
null
null
null
decred/decred/__init__.py
JoeGruffins/tinydecred
9c62378d04139446391f675f0e6646c5c882deb1
[ "ISC" ]
2
2021-06-02T03:28:57.000Z
2021-06-02T03:36:44.000Z
decred/decred/__init__.py
JoeGruffins/tinydecred
9c62378d04139446391f675f0e6646c5c882deb1
[ "ISC" ]
null
null
null
""" Copyright (c) 2019-2020, the Decred developers See LICENSE for details """ class DecredError(Exception): pass
13.333333
46
0.716667
15
120
5.733333
1
0
0
0
0
0
0
0
0
0
0
0.080808
0.175
120
8
47
15
0.787879
0.583333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
08dbad8ef499caea590cfebbce6ecfc3d9c666a3
332
py
Python
10. Functions Advanced - Exercise/05_function_executor.py
elenaborisova/Python-Advanced
4c266d81f294372c3599741e8ba53f59fdc834c5
[ "MIT" ]
2
2021-04-04T06:26:13.000Z
2022-02-18T22:21:49.000Z
10. Functions Advanced - Exercise/05_function_executor.py
elenaborisova/Python-Advanced
4c266d81f294372c3599741e8ba53f59fdc834c5
[ "MIT" ]
null
null
null
10. Functions Advanced - Exercise/05_function_executor.py
elenaborisova/Python-Advanced
4c266d81f294372c3599741e8ba53f59fdc834c5
[ "MIT" ]
3
2021-02-01T12:32:03.000Z
2021-04-12T13:45:20.000Z
def sum_numbers(num1, num2): return num1 + num2 def multiply_numbers(num1, num2): return num1 * num2 def func_executor(*args): results = [] for arg in args: func, nums = arg results.append(func(*nums)) return results print(func_executor((sum_numbers, (1, 2)), (multiply_numbers, (2, 4))))
17.473684
71
0.63253
45
332
4.533333
0.444444
0.156863
0.147059
0.205882
0.313725
0.313725
0.313725
0
0
0
0
0.047431
0.237952
332
19
71
17.473684
0.758893
0
0
0
0
0
0
0
0
0
0
0
0
1
0.272727
false
0
0
0.181818
0.545455
0.090909
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
08e875dfd1b08ef238c3ff6f6ffaf0995cebec61
96
py
Python
tests/__init__.py
hsolbrig/definednamespace
f1178ba9c36a94bbd422844f4ddc71de67521d7b
[ "CC0-1.0" ]
null
null
null
tests/__init__.py
hsolbrig/definednamespace
f1178ba9c36a94bbd422844f4ddc71de67521d7b
[ "CC0-1.0" ]
null
null
null
tests/__init__.py
hsolbrig/definednamespace
f1178ba9c36a94bbd422844f4ddc71de67521d7b
[ "CC0-1.0" ]
1
2021-09-02T09:03:07.000Z
2021-09-02T09:03:07.000Z
import os cwd = os.path.abspath(os.path.dirname(__file__)) test_dir = os.path.join(cwd, 'data')
24
48
0.729167
17
96
3.823529
0.647059
0.276923
0
0
0
0
0
0
0
0
0
0
0.09375
96
4
49
24
0.747126
0
0
0
0
0
0.041237
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
08f1f581807b6c318f30c2f8071ca32b679f49f3
132
py
Python
examples/print_for_function.py
dominicj-nylas/traceback_with_variables
899565d52f89587a29f14745ef5820b05bda8187
[ "MIT" ]
null
null
null
examples/print_for_function.py
dominicj-nylas/traceback_with_variables
899565d52f89587a29f14745ef5820b05bda8187
[ "MIT" ]
null
null
null
examples/print_for_function.py
dominicj-nylas/traceback_with_variables
899565d52f89587a29f14745ef5820b05bda8187
[ "MIT" ]
null
null
null
from traceback_with_variables import prints_tb @prints_tb def f(n): print(1 / n) def main(): f(0) main()
9.428571
47
0.583333
20
132
3.65
0.7
0.219178
0
0
0
0
0
0
0
0
0
0.021978
0.310606
132
13
48
10.153846
0.78022
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0
0.428571
0.428571
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
1
0
4
08f215a3bf979fde2e638a9ab0de680f225db804
428
py
Python
numpy/random_fn.py
dnootana/Python
2881bafe8bc378fa3cae50a747fcea1a55630c63
[ "MIT" ]
1
2021-02-19T11:00:11.000Z
2021-02-19T11:00:11.000Z
numpy/random_fn.py
dnootana/Python
2881bafe8bc378fa3cae50a747fcea1a55630c63
[ "MIT" ]
null
null
null
numpy/random_fn.py
dnootana/Python
2881bafe8bc378fa3cae50a747fcea1a55630c63
[ "MIT" ]
null
null
null
#!/usr/bin/env python3.8 from numpy import random a = random.randint(100) print(a) a = random.rand() print(a) a = random.randint(100, size=(5)) print(a) a = random.randint(100, size=(3, 5)) print(a) a = random.rand(5) print(a) a = random.rand(5,3) print(a) a = random.choice([1,2,3,4,5]) print(a) a = random.choice(["dfasdf", "dsfas", "sdfsdf", "asdfasd"]) print(a) a = random.choice([1,2,3,4,5], size=(2,2)) print(a)
13.806452
59
0.628505
82
428
3.280488
0.304878
0.234201
0.208178
0.386617
0.654275
0.516729
0.516729
0.178439
0.178439
0.178439
0
0.078591
0.13785
428
31
60
13.806452
0.650407
0.053738
0
0.473684
0
0
0.059259
0
0
0
0
0
0
1
0
false
0
0.052632
0
0.052632
0.473684
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
3ea5683a9b712d868cdacf5d12db38e5e5993d95
90
py
Python
tests/__init__.py
Mischback/django-calingen
3354c751e29d301609ec44e64d69a8729ec36de4
[ "MIT" ]
null
null
null
tests/__init__.py
Mischback/django-calingen
3354c751e29d301609ec44e64d69a8729ec36de4
[ "MIT" ]
51
2021-11-15T20:44:19.000Z
2022-02-10T08:33:08.000Z
tests/__init__.py
Mischback/django-calingen
3354c751e29d301609ec44e64d69a8729ec36de4
[ "MIT" ]
null
null
null
# SPDX-License-Identifier: MIT """Contains the app's tests and utility configuration."""
22.5
57
0.744444
12
90
5.583333
1
0
0
0
0
0
0
0
0
0
0
0
0.122222
90
3
58
30
0.848101
0.9
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
3ecead7ac0c6586a36a616dc6c2d38dabe00e091
77
py
Python
B/apps.py
subhrangshu/django-backend-demo
43ac5dbe1566e95eaf0fa5f5562b88bfb3036820
[ "Apache-2.0" ]
null
null
null
B/apps.py
subhrangshu/django-backend-demo
43ac5dbe1566e95eaf0fa5f5562b88bfb3036820
[ "Apache-2.0" ]
null
null
null
B/apps.py
subhrangshu/django-backend-demo
43ac5dbe1566e95eaf0fa5f5562b88bfb3036820
[ "Apache-2.0" ]
1
2020-11-24T08:47:37.000Z
2020-11-24T08:47:37.000Z
from django.apps import AppConfig class BConfig(AppConfig): name = 'B'
12.833333
33
0.714286
10
77
5.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0.194805
77
5
34
15.4
0.887097
0
0
0
0
0
0.012987
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
3ecf44dba3ad409b29b4bc1ec8dde98a3f75d30c
48
py
Python
Language Skills/Python/Unit 01 Python Syntax/01 Python Syntax/Whitespace and Statements/whiteSpace.py
rhyep/Python_tutorials
f5c8a64b91802b005dfe7dd9035f8d8daae8c3e3
[ "MIT" ]
346
2016-02-22T20:21:10.000Z
2022-01-27T20:55:53.000Z
Language Skills/Python/Unit 1/1-Python Syntax/Whitespace and Statements/whiteSpace.py
vpstudios/Codecademy-Exercise-Answers
ebd0ee8197a8001465636f52c69592ea6745aa0c
[ "MIT" ]
55
2016-04-07T13:58:44.000Z
2020-06-25T12:20:24.000Z
Language Skills/Python/Unit 1/1-Python Syntax/Whitespace and Statements/whiteSpace.py
vpstudios/Codecademy-Exercise-Answers
ebd0ee8197a8001465636f52c69592ea6745aa0c
[ "MIT" ]
477
2016-02-21T06:17:02.000Z
2021-12-22T10:08:01.000Z
def spam(): eggs = 12 return eggs print spam()
8
12
0.666667
8
48
4
0.75
0
0
0
0
0
0
0
0
0
0
0.052632
0.208333
48
5
13
9.6
0.789474
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.25
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
3eda5769b22c30c7a2068c471089851cd3055efb
107
py
Python
web/transiq/restapi/search.py
manibhushan05/transiq
763fafb271ce07d13ac8ce575f2fee653cf39343
[ "Apache-2.0" ]
5
2019-01-31T10:41:24.000Z
2019-09-22T12:38:53.000Z
web/transiq/restapi/search.py
manibhushan05/transiq
763fafb271ce07d13ac8ce575f2fee653cf39343
[ "Apache-2.0" ]
14
2020-06-05T23:06:45.000Z
2022-03-12T00:00:18.000Z
web/transiq/restapi/search.py
manibhushan05/transiq
763fafb271ce07d13ac8ce575f2fee653cf39343
[ "Apache-2.0" ]
1
2019-09-14T11:39:49.000Z
2019-09-14T11:39:49.000Z
from rest_framework import filters class CustomSearch(filters.SearchFilter): search_param = "search"
17.833333
41
0.794393
12
107
6.916667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.140187
107
5
42
21.4
0.902174
0
0
0
0
0
0.056075
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
3ef85def06a38ce108c741808e7814259543f838
251
py
Python
examples/materials/testing_materials.py
JorgeDeLosSantos/nusa
05623a72b892330e4b0e059a03ac4614da934ce9
[ "MIT" ]
92
2016-11-14T01:39:55.000Z
2022-03-27T17:23:41.000Z
examples/materials/testing_materials.py
JorgeDeLosSantos/nusa
05623a72b892330e4b0e059a03ac4614da934ce9
[ "MIT" ]
1
2017-11-30T05:04:02.000Z
2018-08-29T04:31:39.000Z
examples/materials/testing_materials.py
JorgeDeLosSantos/nusa
05623a72b892330e4b0e059a03ac4614da934ce9
[ "MIT" ]
31
2017-05-17T18:50:18.000Z
2022-03-12T03:08:00.000Z
# -*- coding: utf-8 -*- # *********************************** # Author: Pedro Jorge De Los Santos # E-mail: delossantosmfq@gmail.com # License: MIT License # *********************************** from nusa.lib import * print(dir(STEEL_1018))
22.818182
41
0.462151
25
251
4.6
0.96
0
0
0
0
0
0
0
0
0
0
0.023585
0.155378
251
10
42
25.1
0.518868
0.756972
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
4
4108c571fa013aef935edc19bd403ecff79c30ad
250
py
Python
config.py
LemmyMwaura/Personal-log
cec2186497a7c4829ba07fb5522ac06a9192b5b3
[ "MIT" ]
null
null
null
config.py
LemmyMwaura/Personal-log
cec2186497a7c4829ba07fb5522ac06a9192b5b3
[ "MIT" ]
null
null
null
config.py
LemmyMwaura/Personal-log
cec2186497a7c4829ba07fb5522ac06a9192b5b3
[ "MIT" ]
1
2022-03-15T07:50:08.000Z
2022-03-15T07:50:08.000Z
# class Config(): # app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY') # app.config['SQLALCHEMY_DATABASE_URI'] = f'postgresql+psycopg2://{DB_USER}:{DB_PASS}@localhost/pitchesapp' # app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
62.5
111
0.72
31
250
5.548387
0.709677
0.156977
0.22093
0
0
0
0
0
0
0
0
0.004464
0.104
250
4
112
62.5
0.763393
0.968
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
412baa593f442058aa6c5b3927fbc08e74fa47a0
95
py
Python
enif_app/apps.py
aejsi5/Enif
60c82cfe5272ee748d2c10dbd21b6c52cd674d5b
[ "MIT" ]
null
null
null
enif_app/apps.py
aejsi5/Enif
60c82cfe5272ee748d2c10dbd21b6c52cd674d5b
[ "MIT" ]
null
null
null
enif_app/apps.py
aejsi5/Enif
60c82cfe5272ee748d2c10dbd21b6c52cd674d5b
[ "MIT" ]
null
null
null
from django.apps import AppConfig class EnifAppConfig(AppConfig): name = 'enif_app'
15.833333
34
0.715789
11
95
6.090909
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.210526
95
5
35
19
0.893333
0
0
0
0
0
0.088889
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
f5cded9f45f4dc3a14ba4c47145b431ea8a2a5fa
66
py
Python
src/main.py
strah19/Contacts
9eda372c77ce965208549c2a6dc07484311149aa
[ "MIT" ]
null
null
null
src/main.py
strah19/Contacts
9eda372c77ce965208549c2a6dc07484311149aa
[ "MIT" ]
null
null
null
src/main.py
strah19/Contacts
9eda372c77ce965208549c2a6dc07484311149aa
[ "MIT" ]
null
null
null
import app application = app.Application() application.update()
11
31
0.772727
7
66
7.285714
0.571429
0.54902
0
0
0
0
0
0
0
0
0
0
0.121212
66
5
32
13.2
0.87931
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
eb2a8cba6f899bbd6964f98a78ab026eb5d68a94
88
py
Python
ex6.py
arunkumarang/python
1960e285dfe2ef54d2e3ab37584bfef8b24ecca9
[ "Apache-2.0" ]
null
null
null
ex6.py
arunkumarang/python
1960e285dfe2ef54d2e3ab37584bfef8b24ecca9
[ "Apache-2.0" ]
null
null
null
ex6.py
arunkumarang/python
1960e285dfe2ef54d2e3ab37584bfef8b24ecca9
[ "Apache-2.0" ]
null
null
null
month_year = input("Please enter the current month and the year: ") print(month_year)
29.333333
68
0.75
14
88
4.571429
0.642857
0.28125
0
0
0
0
0
0
0
0
0
0
0.159091
88
3
69
29.333333
0.864865
0
0
0
0
0
0.523256
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
de1794969b6610640fca85505c9f95f733361c13
182
py
Python
geral/curso_devmedia_api_django_rest/api_vagas/api/serializer.py
flaviogf/Cursos
2b120dbcd24a907121f58482fdcdfa01b164872c
[ "MIT" ]
2
2021-02-20T23:50:07.000Z
2021-08-15T03:04:35.000Z
geral/curso_devmedia_api_django_rest/api_vagas/api/serializer.py
flaviogf/Cursos
2b120dbcd24a907121f58482fdcdfa01b164872c
[ "MIT" ]
18
2019-08-07T02:33:00.000Z
2021-03-18T22:52:38.000Z
geral/curso_devmedia_api_django_rest/api_vagas/api/serializer.py
flaviogf/Cursos
2b120dbcd24a907121f58482fdcdfa01b164872c
[ "MIT" ]
2
2020-09-28T13:00:09.000Z
2021-12-30T12:21:08.000Z
from rest_framework import serializers from .models import Vaga class VagaSerializer(serializers.ModelSerializer): class Meta: model = Vaga fields = '__all__'
18.2
50
0.71978
19
182
6.631579
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.225275
182
9
51
20.222222
0.893617
0
0
0
0
0
0.038462
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
de599521042a18c885a7bfc5328650f37e7281eb
4,113
py
Python
plots/2020-plots/prepare-workers-vs-data-data.py
etesami/MOE-FL
b2bc45334d4df2f47959fba1f7771486793c8010
[ "Apache-2.0" ]
null
null
null
plots/2020-plots/prepare-workers-vs-data-data.py
etesami/MOE-FL
b2bc45334d4df2f47959fba1f7771486793c8010
[ "Apache-2.0" ]
null
null
null
plots/2020-plots/prepare-workers-vs-data-data.py
etesami/MOE-FL
b2bc45334d4df2f47959fba1f7771486793c8010
[ "Apache-2.0" ]
1
2021-06-08T22:20:46.000Z
2021-06-08T22:20:46.000Z
#!/usr/bin/python DIR = "data_tmp/" FILES = {} FILES['20_avg'] = [] FILES['40_avg'] = [] FILES['50_avg'] = [] FILES['60_avg'] = [] FILES['80_avg'] = [] FILES['20_opt'] = [] FILES['40_opt'] = [] FILES['50_opt'] = [] FILES['60_opt'] = [] FILES['80_opt'] = [] FILES['20_avg'].append("04_attk2_avg20_20_test") FILES['20_avg'].append("04_attk2_avg20_40_test") FILES['20_avg'].append("04_attk2_avg20_60_test") FILES['20_avg'].append("04_attk2_avg20_80_test") FILES['40_avg'].append("04_attk2_avg40_20_test") FILES['40_avg'].append("04_attk2_avg40_40_test") FILES['40_avg'].append("04_attk2_avg40_60_test") FILES['40_avg'].append("04_attk2_avg40_80_test") FILES['50_avg'].append("04_attk2_avg50_20_test") FILES['50_avg'].append("04_attk2_avg50_40_test") FILES['50_avg'].append("04_attk2_avg50_60_test") FILES['50_avg'].append("04_attk2_avg50_80_test") FILES['60_avg'].append("04_attk2_avg60_20_test") FILES['60_avg'].append("04_attk2_avg60_40_test") FILES['60_avg'].append("04_attk2_avg60_60_test") FILES['60_avg'].append("04_attk2_avg60_80_test") FILES['80_avg'].append("04_attk2_avg80_20_test") FILES['80_avg'].append("04_attk2_avg80_40_test") FILES['80_avg'].append("04_attk2_avg80_60_test") FILES['80_avg'].append("04_attk2_avg80_80_test") FILES['20_opt'].append("05_attk2_opt20_20_test") FILES['20_opt'].append("05_attk2_opt20_40_test") FILES['20_opt'].append("05_attk2_opt20_60_test") FILES['20_opt'].append("05_attk2_opt20_80_test") FILES['40_opt'].append("05_attk2_opt40_20_test") FILES['40_opt'].append("05_attk2_opt40_40_test") FILES['40_opt'].append("05_attk2_opt40_60_test") FILES['40_opt'].append("05_attk2_opt40_80_test") FILES['50_opt'].append("05_attk2_opt50_20_test") FILES['50_opt'].append("05_attk2_opt50_40_test") FILES['50_opt'].append("05_attk2_opt50_60_test") FILES['50_opt'].append("05_attk2_opt50_80_test") FILES['60_opt'].append("05_attk2_opt60_20_test") FILES['60_opt'].append("05_attk2_opt60_40_test") FILES['60_opt'].append("05_attk2_opt60_60_test") 
FILES['60_opt'].append("05_attk2_opt60_80_test") FILES['80_opt'].append("05_attk2_opt80_20_test") FILES['80_opt'].append("05_attk2_opt80_40_test") FILES['80_opt'].append("05_attk2_opt80_60_test") FILES['80_opt'].append("05_attk2_opt80_80_test") # AVG nums = [] for id in ['20_avg', '40_avg', '50_avg', '60_avg', '80_avg']: for ff in FILES[id]: with open(DIR + ff, 'r') as f: print("Working on " + ff) lines = f.readlines() nums.append(lines[4].split()[2]) f.close() with open(DIR + "09-workers-data-avg.txt", 'w') as f: f.write('- - "20% Data Alteration" - "40% Data Alteration" - "50% Data Alteration" - "60% Data Alteration" - "80% Data Alteration "\n') f.write("20 20% " + nums[0] + " " + nums[1] + " " + nums[2] + " " + nums[3] + "\n") f.write("40 40% " + nums[4] + " " + nums[5] + " " + nums[6] + " " + nums[7] + "\n") f.write("50 50% " + nums[8] + " " + nums[9] + " " + nums[10] + " " + nums[11] + "\n") f.write("60 60% " + nums[12] + " " + nums[13] + " " + nums[14] + " " + nums[15] + "\n") f.write("80 80% " + nums[16] + " " + nums[17] + " " + nums[17] + " " + nums[19] + "\n") f.close() # OPT nums = [] for id in ['20_opt', '40_opt', '50_opt', '60_opt', '80_opt']: for ff in FILES[id]: with open(DIR + ff, 'r') as f: print("Working on " + ff) lines = f.readlines() nums.append(lines[4].split()[2]) f.close() with open(DIR + "09-workers-data-opt.txt", 'w') as f: f.write('- - "20% Data Alteration" - "40% Data Alteration" - "50% Data Alteration" - "60% Data Alteration" - "80% Data Alteration "\n') f.write("20 20% " + nums[0] + " " + nums[1] + " " + nums[2] + " " + nums[3] + "\n") f.write("40 40% " + nums[4] + " " + nums[5] + " " + nums[6] + " " + nums[7] + "\n") f.write("50 50% " + nums[8] + " " + nums[9] + " " + nums[10] + " " + nums[11] + "\n") f.write("60 60% " + nums[12] + " " + nums[13] + " " + nums[14] + " " + nums[15] + "\n") f.write("80 80% " + nums[16] + " " + nums[17] + " " + nums[17] + " " + nums[19] + "\n") f.close()
38.801887
139
0.619985
673
4,113
3.460624
0.106984
0.150708
0.094461
0.137398
0.8845
0.873336
0.873336
0.861314
0.325462
0.325462
0
0.151116
0.150498
4,113
106
140
38.801887
0.515455
0.005835
0
0.333333
0
0.02381
0.413262
0.226572
0
0
0
0
0
1
0
false
0
0
0
0
0.02381
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
de66f3469e1aaa32c62e49257ca4cc56b01c0357
68
py
Python
recognizer/__main__.py
janaSunrise/OpenCV-Image-Color-Recognizer
51d94f080a4893ee2ff2e04e6ddbc22e201eb47f
[ "MIT" ]
3
2021-05-08T05:48:49.000Z
2021-05-08T11:24:04.000Z
recognizer/__main__.py
janaSunrise/OpenCV-Image-Color-Recognizer
51d94f080a4893ee2ff2e04e6ddbc22e201eb47f
[ "MIT" ]
null
null
null
recognizer/__main__.py
janaSunrise/OpenCV-Image-Color-Recognizer
51d94f080a4893ee2ff2e04e6ddbc22e201eb47f
[ "MIT" ]
null
null
null
from . import recognize if __name__ == "__main__": recognize()
13.6
26
0.676471
7
68
5.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.205882
68
4
27
17
0.703704
0
0
0
0
0
0.117647
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
de6991aea501859c14a432e679accc5a1653c1eb
186
py
Python
src/dataprocess/transform/__init__.py
jiangtaoo2333/StaticGestureRecognition
9d554b137f217f3bcb046b2c6978b9487685de2a
[ "MIT" ]
null
null
null
src/dataprocess/transform/__init__.py
jiangtaoo2333/StaticGestureRecognition
9d554b137f217f3bcb046b2c6978b9487685de2a
[ "MIT" ]
null
null
null
src/dataprocess/transform/__init__.py
jiangtaoo2333/StaticGestureRecognition
9d554b137f217f3bcb046b2c6978b9487685de2a
[ "MIT" ]
null
null
null
# # Lightnet data transforms # Copyright EAVISE # from .dataAug_box import * from .dataAug_pts import * from .mixup import mixup_data,mixup_criterion from .cutmix import cutmix_data
20.666667
45
0.784946
25
186
5.64
0.52
0.156028
0
0
0
0
0
0
0
0
0
0
0.155914
186
9
46
20.666667
0.898089
0.231183
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
de6e03420bdc590b582884cd97eff82b497ee4df
130
py
Python
netspot/apps.py
MaxIV-KitsControls/netspot
42f505d004bcadcfb32b6ca0511572d38641c23a
[ "MIT" ]
null
null
null
netspot/apps.py
MaxIV-KitsControls/netspot
42f505d004bcadcfb32b6ca0511572d38641c23a
[ "MIT" ]
null
null
null
netspot/apps.py
MaxIV-KitsControls/netspot
42f505d004bcadcfb32b6ca0511572d38641c23a
[ "MIT" ]
null
null
null
from __future__ import unicode_literals from django.apps import AppConfig class NetspotConfig(AppConfig): name = 'netspot'
16.25
39
0.792308
15
130
6.533333
0.8
0
0
0
0
0
0
0
0
0
0
0
0.153846
130
7
40
18.571429
0.890909
0
0
0
0
0
0.053846
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
debc6848ff46f86e7849ab616b9dfafde5e1ce35
848
py
Python
blog/views.py
OleMissSquad/MyBlog
72902a00d134e662834ab76f75f240a0616265cf
[ "MIT" ]
null
null
null
blog/views.py
OleMissSquad/MyBlog
72902a00d134e662834ab76f75f240a0616265cf
[ "MIT" ]
11
2017-09-17T16:53:23.000Z
2017-10-06T13:47:14.000Z
blog/views.py
khoa102/MyBlog
72902a00d134e662834ab76f75f240a0616265cf
[ "MIT" ]
null
null
null
from django.shortcuts import render, get_object_or_404 from django.http import HttpResponse from .models import Post # Create your views here. def index(request): latest_post_list = Post.objects.order_by("-date_published")[:5] context = {'latest_post_list': latest_post_list} return render(request, 'blog/index.html', context) def post(request): return None def detail(request, post_id): post = get_object_or_404(Post, pk=post_id) context = {'post': post} return render(request, 'blog/detail.html', context) def archive(request): return HttpResponse("This is the archive page") def dashboard(request): return HttpResponse("This is the dashboard page") def signup(request): return HttpResponse("This is the singup page") def signin(request): return HttpResponse("This is the signin page")
21.74359
67
0.728774
118
848
5.101695
0.381356
0.107973
0.166113
0.192691
0.225914
0.225914
0
0
0
0
0
0.009915
0.167453
848
38
68
22.315789
0.842776
0.027123
0
0
0
0
0.19732
0
0
0
0
0
0
1
0.333333
false
0
0.142857
0.238095
0.809524
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
def08788d9bb9f800fdb0ca4974608734125b149
4,218
py
Python
tests/test_utils.py
certego/django-group-role
264b8e80578bed53b119cc1d141a1fcb8f21efcc
[ "Apache-2.0" ]
3
2021-11-29T08:23:03.000Z
2021-12-01T20:29:20.000Z
tests/test_utils.py
certego/django-group-role
264b8e80578bed53b119cc1d141a1fcb8f21efcc
[ "Apache-2.0" ]
null
null
null
tests/test_utils.py
certego/django-group-role
264b8e80578bed53b119cc1d141a1fcb8f21efcc
[ "Apache-2.0" ]
null
null
null
from django.test import SimpleTestCase, override_settings from django_group_role.utils import map_permissions class UtilsSimpleTestCase(SimpleTestCase): def test_map_permissions_list_wrong_code(self): with self.assertRaisesMessage( ValueError, "Permissions, should be defined in the format: 'app_label.codename' (is view_user)", ): map_permissions(["view_user"]) def test_map_permissions_list(self): perm_map = map_permissions( ["auth.view_user", "auth.view_group", "myapp.view_mymodel"] ) self.assertEqual( perm_map, { "auth": {"_codenames": {"view_user", "view_group"}}, "myapp": {"_codenames": {"view_mymodel"}}, }, ) def test_map_permissions_dict(self): perm_map = map_permissions( { "auth.user": ["view_user", "change_user"], "auth": {"group": ["view_group"]}, "myapp.mymodel": ["view_mymodel", "change_mymodel"], } ) self.assertEqual( perm_map, { "auth": {"user": {"view_user", "change_user"}, "group": {"view_group"}}, "myapp": { "mymodel": {"view_mymodel", "change_mymodel"}, }, }, ) def test_map_permissions_dict_plus_list(self): perm_map = map_permissions( { "auth.user": ["view_user", "change_user"], "auth": {"group": ["view_group"]}, "myapp.mymodel": ["view_mymodel", "change_mymodel"], }, ["auth.view_user", "myapp.delete_mymodel", "otherapp.view_element"], ) self.assertEqual( perm_map, { "auth": { "_codenames": {"view_user"}, "user": {"view_user", "change_user"}, "group": {"view_group"}, }, "myapp": { "_codenames": {"delete_mymodel"}, "mymodel": {"view_mymodel", "change_mymodel"}, }, "otherapp": { "_codenames": {"view_element"}, }, }, ) def test_map_permissions_list_plus_dict(self): perm_map = map_permissions( ["auth.view_user", "myapp.delete_mymodel", "otherapp.view_element"], { "auth.user": ["view_user", "change_user"], "auth": {"group": ["view_group"]}, "myapp.mymodel": ["view_mymodel", "change_mymodel"], }, ) self.assertEqual( perm_map, { "auth": { "_codenames": {"view_user"}, "user": {"view_user", "change_user"}, "group": {"view_group"}, }, "myapp": { "_codenames": 
{"delete_mymodel"}, "mymodel": {"view_mymodel", "change_mymodel"}, }, "otherapp": { "_codenames": {"view_element"}, }, }, ) def test_map_permissions_dict_plus_dict(self): perm_map = map_permissions( { "auth": {"user": ["view_user"]}, "myapp": {"mymodel": ["delete_mymodel"]}, "otherapp.element": ["view_element"], }, { "auth.user": ["view_user", "change_user"], "auth": {"group": ["view_group"]}, "myapp.mymodel": ["view_mymodel", "change_mymodel"], }, ) self.assertEqual( perm_map, { "auth": { "user": {"view_user", "change_user"}, "group": {"view_group"}, }, "myapp": { "mymodel": {"view_mymodel", "change_mymodel", "delete_mymodel"}, }, "otherapp": { "element": {"view_element"}, }, }, )
33.744
96
0.439545
322
4,218
5.385093
0.142857
0.078431
0.080738
0.083045
0.814879
0.786044
0.750865
0.703576
0.6609
0.581892
0
0
0.416785
4,218
124
97
34.016129
0.704878
0
0
0.461538
0
0
0.293267
0.009957
0
0
0
0
0.051282
1
0.051282
false
0
0.017094
0
0.076923
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
defd2af25ef58312169d6b60464e4d8fe2a4140c
97
py
Python
list/migrations/__init__.py
juli212/kitchenin
aefd0e493442c36b2044753fb19e19ae789eae67
[ "MIT" ]
null
null
null
list/migrations/__init__.py
juli212/kitchenin
aefd0e493442c36b2044753fb19e19ae789eae67
[ "MIT" ]
null
null
null
list/migrations/__init__.py
juli212/kitchenin
aefd0e493442c36b2044753fb19e19ae789eae67
[ "MIT" ]
null
null
null
from django.contrib.auth.models import User, models User._meta.get_field('email')._unique = True
32.333333
51
0.793814
15
97
4.933333
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.082474
97
3
52
32.333333
0.831461
0
0
0
0
0
0.05102
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
a0cb2903adb55c3f8734eb3936f02862dc8e6457
101
py
Python
snipsmanager/utils/object_from_dict.py
beevesuw/snipsmanager
9eb45c076db4ed90e1a5a7cdeadfda253affaadb
[ "MIT" ]
1
2019-02-14T08:13:04.000Z
2019-02-14T08:13:04.000Z
snipsmanager/utils/object_from_dict.py
beevesuw/snipsmanager
9eb45c076db4ed90e1a5a7cdeadfda253affaadb
[ "MIT" ]
null
null
null
snipsmanager/utils/object_from_dict.py
beevesuw/snipsmanager
9eb45c076db4ed90e1a5a7cdeadfda253affaadb
[ "MIT" ]
1
2019-02-14T08:13:18.000Z
2019-02-14T08:13:18.000Z
class ObjectFromDict(object): def __init__(self, dictionary): self.__dict__ = dictionary
25.25
35
0.712871
10
101
6.4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.19802
101
3
36
33.666667
0.790123
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
a0e3b8e3fbf4c0ebeeb0ac9f242b2185ba872eae
839
py
Python
notebook/numpy_broadcasting_error.py
vhn0912/python-snippets
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
[ "MIT" ]
174
2018-05-30T21:14:50.000Z
2022-03-25T07:59:37.000Z
notebook/numpy_broadcasting_error.py
vhn0912/python-snippets
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
[ "MIT" ]
5
2019-08-10T03:22:02.000Z
2021-07-12T20:31:17.000Z
notebook/numpy_broadcasting_error.py
vhn0912/python-snippets
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
[ "MIT" ]
53
2018-04-27T05:26:35.000Z
2022-03-25T07:59:37.000Z
import numpy as np a = np.zeros((4, 3), dtype=np.int) print(a) # [[0 0 0] # [0 0 0] # [0 0 0] # [0 0 0]] print(a.shape) # (4, 3) b = np.arange(6).reshape(2, 3) print(b) # [[0 1 2] # [3 4 5]] print(b.shape) # (2, 3) # print(a + b) # ValueError: operands could not be broadcast together with shapes (4,3) (2,3) a = np.zeros((2, 3, 4), dtype=np.int) print(a) # [[[0 0 0 0] # [0 0 0 0] # [0 0 0 0]] # # [[0 0 0 0] # [0 0 0 0] # [0 0 0 0]]] print(a.shape) # (2, 3, 4) b = np.arange(3) print(b) # [0 1 2] print(b.shape) # (3,) # print(a + b) # ValueError: operands could not be broadcast together with shapes (2,3,4) (3,) b_3_1 = b.reshape(3, 1) print(b_3_1) # [[0] # [1] # [2]] print(b_3_1.shape) # (3, 1) print(a + b_3_1) # [[[0 0 0 0] # [1 1 1 1] # [2 2 2 2]] # # [[0 0 0 0] # [1 1 1 1] # [2 2 2 2]]]
13.109375
80
0.498212
187
839
2.192513
0.15508
0.195122
0.263415
0.312195
0.670732
0.634146
0.585366
0.585366
0.585366
0.585366
0
0.176948
0.265793
839
63
81
13.31746
0.488636
0.536353
0
0.470588
0
0
0
0
0
0
0
0
0
1
0
false
0
0.058824
0
0.058824
0.647059
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
9d2b134528efd1519790f90bdca15b1f067664aa
46
py
Python
beginner/chapter_1/exam_1_2.py
Bokji24Dev/CodeStudy
4c0fc852e6f472d082e9836c59ad22d229f74d87
[ "MIT" ]
null
null
null
beginner/chapter_1/exam_1_2.py
Bokji24Dev/CodeStudy
4c0fc852e6f472d082e9836c59ad22d229f74d87
[ "MIT" ]
null
null
null
beginner/chapter_1/exam_1_2.py
Bokji24Dev/CodeStudy
4c0fc852e6f472d082e9836c59ad22d229f74d87
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*- Print("Hello World!")
11.5
22
0.543478
6
46
4.166667
1
0
0
0
0
0
0
0
0
0
0
0.025641
0.152174
46
3
23
15.333333
0.615385
0.434783
0
0
0
0
0.5
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
9d2bb262434b62b3e8edffc6f00e37354f3d80c0
114
py
Python
app/articles/__init__.py
AlexRAV/flask-blog
df8036e01794914ca0e88856ed93f8a91cc1d47a
[ "BSD-3-Clause" ]
null
null
null
app/articles/__init__.py
AlexRAV/flask-blog
df8036e01794914ca0e88856ed93f8a91cc1d47a
[ "BSD-3-Clause" ]
null
null
null
app/articles/__init__.py
AlexRAV/flask-blog
df8036e01794914ca0e88856ed93f8a91cc1d47a
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """The articles module, including operations with articles.""" from . import views # noqa
38
62
0.675439
14
114
5.5
0.928571
0
0
0
0
0
0
0
0
0
0
0.010417
0.157895
114
3
63
38
0.791667
0.736842
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
9d364cddb663a76cf46c67bd36066ef710af4952
777
py
Python
fdk_client/platform/models/EventSubscription.py
kavish-d/fdk-client-python
a1023eb530473322cb52e095fc4ceb226c1e6037
[ "MIT" ]
null
null
null
fdk_client/platform/models/EventSubscription.py
kavish-d/fdk-client-python
a1023eb530473322cb52e095fc4ceb226c1e6037
[ "MIT" ]
null
null
null
fdk_client/platform/models/EventSubscription.py
kavish-d/fdk-client-python
a1023eb530473322cb52e095fc4ceb226c1e6037
[ "MIT" ]
null
null
null
"""Platform Models.""" from marshmallow import fields, Schema from marshmallow.validate import OneOf from ..enums import * from ..models.BaseSchema import BaseSchema from .EventSubscriptionTemplate import EventSubscriptionTemplate class EventSubscription(BaseSchema): # Communication swagger.json template = fields.Nested(EventSubscriptionTemplate, required=False) is_default = fields.Boolean(required=False) _id = fields.Str(required=False) application = fields.Str(required=False) event = fields.Str(required=False) slug = fields.Str(required=False) created_at = fields.Str(required=False) updated_at = fields.Str(required=False) __v = fields.Int(required=False)
15.54
71
0.697555
79
777
6.78481
0.43038
0.218284
0.190299
0.246269
0.089552
0
0
0
0
0
0
0
0.216216
777
49
72
15.857143
0.880131
0.056628
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
c236a82d9fbe06bc0d88fbb431bd687b87e99c5b
50
py
Python
run.py
thautwarm/voicecontrol
dc5565d114fe80f0f06a0e7c541ee447fb7712f3
[ "MIT" ]
2
2021-06-05T08:27:44.000Z
2021-06-05T13:46:27.000Z
run.py
thautwarm/voicecontrol
dc5565d114fe80f0f06a0e7c541ee447fb7712f3
[ "MIT" ]
null
null
null
run.py
thautwarm/voicecontrol
dc5565d114fe80f0f06a0e7c541ee447fb7712f3
[ "MIT" ]
null
null
null
from voicecontrol.pinyin_typing import main main()
25
43
0.86
7
50
6
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.08
50
2
44
25
0.913043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
dfa5b5298217bec2ae65c111e2e3c14f4fc907c4
91
py
Python
python/learn/base/module/l1/pack/p1.py
qrsforever/workspace
53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f
[ "MIT" ]
2
2017-06-07T03:20:42.000Z
2020-01-07T09:14:26.000Z
python/learn/base/module/l1/pack/p1.py
qrsforever/workspace
53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f
[ "MIT" ]
null
null
null
python/learn/base/module/l1/pack/p1.py
qrsforever/workspace
53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f
[ "MIT" ]
null
null
null
#!/usr/bin/python2.7 print "run here pack/p1" def p1_fun(): print "function pack/p1_fun"
15.166667
42
0.703297
17
91
3.647059
0.705882
0.193548
0
0
0
0
0
0
0
0
0
0.063291
0.131868
91
5
43
18.2
0.721519
0.208791
0
0
0
0
0.507042
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
dfa74e9db5d8c1d6b3ac0016f3c11dfb850e59bd
11,936
py
Python
scripts/venv/lib/python2.7/site-packages/cogent/app/gctmpca.py
sauloal/cnidaria
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
[ "MIT" ]
3
2015-11-20T08:44:42.000Z
2016-12-14T01:40:03.000Z
scripts/venv/lib/python2.7/site-packages/cogent/app/gctmpca.py
sauloal/cnidaria
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
[ "MIT" ]
1
2017-09-04T14:04:32.000Z
2020-05-26T19:04:00.000Z
scripts/venv/lib/python2.7/site-packages/cogent/app/gctmpca.py
sauloal/cnidaria
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
[ "MIT" ]
null
null
null
#!/usr/bin/env python # Author: Greg Caporaso (gregcaporaso@gmail.com) # gctmpca.py """Application controller for the Generalized Continuous-Time Markov Process Coevolutionary Algorithm (GCTMPCA). GCTMPCA is presented in: Detecting coevolution in and among protein domains. Yeang CH, Haussler D., PLoS Comput Biol. 2007 Nov;3(11):e211. Detecting the coevolution of biosequences--an example of RNA interaction prediction. Yeang CH, Darot JF, Noller HF, Haussler D. Mol Biol Evol. 2007 Sep;24(9):2119-31. This code requires the GCTMPCA package to be installed. As of Nov. 2008, that software is available at: http://www.sns.ias.edu/~chyeang/coevolution_download.zip Note that the authors did not name their algorithm or software when they published it. GCTMPCA was suggested as a name by the first author via e-mail. """ from __future__ import division from cogent.app.util import CommandLineApplication, ResultPath,\ ApplicationError from cogent.app.parameters import FilePath from cogent.evolve.models import DSO78_freqs, DSO78_matrix __author__ = "Greg Caporaso" __copyright__ = "Copyright 2007-2012, The Cogent Project" __credits__ = ["Greg Caporaso"] __license__ = "GPL" __version__ = "1.5.3" __maintainer__ = "Greg Caporaso" __email__ = "gregcaporaso@gmail.com" __status__ = "Beta" # Are these values in PyCogent somewhere? 
gctmpca_base_order = 'ACGU' default_gctmpca_rna_priors = {'A':0.2528,'C':0.2372,'G':0.3099,'U':0.2001} default_gctmpca_rna_sub_matrix = """-1.4150\t0.2372\t0.9777\t0.2001 0.2528\t-1.1940\t0.3099\t0.6313 0.7976\t0.2372\t-1.2349\t0.2001 0.2528\t0.7484\t0.3099\t-1.3111""" gctmpca_aa_order = 'ARNDCQEGHILKMFPSTWYV' # By default, the Gctmpca method used the Dayhoff 78 frequencies and rate matrix default_gctmpca_aa_priors = DSO78_freqs default_gctmpca_aa_sub_matrix = """-133.941451\t1.104408\t3.962336\t5.624640\t1.205064\t3.404695\t9.806940\t21.266880\t0.773214\t2.397590\t3.499637\t2.092532\t1.062216\t0.715896\t12.670000\t28.456993\t21.719082\t0.000000\t0.717984\t13.461344 2.352429\t-86.970372\t1.293824\t0.000000\t0.769902\t9.410730\t0.049530\t0.797508\t8.068320\t2.360704\t1.280355\t37.343648\t1.327770\t0.556808\t5.220040\t10.714858\t1.522092\t2.109294\t0.239328\t1.553232 8.538446\t1.308928\t-179.776579\t42.419160\t0.000000\t3.940265\t7.330440\t12.317068\t17.985630\t2.840222\t2.902138\t25.593276\t0.014753\t0.556808\t2.128560\t34.440615\t13.406118\t0.241362\t2.842020\t0.970770 10.455240\t0.000000\t36.590960\t-142.144945\t0.000000\t5.126170\t57.108090\t11.076500\t2.891148\t0.885264\t0.000000\t5.714222\t0.000000\t0.000000\t0.658840\t6.609815\t3.863772\t0.000000\t0.000000\t1.164924 3.136572\t0.940792\t0.000000\t0.000000\t-26.760991\t0.000000\t0.000000\t0.974732\t0.941304\t1.622984\t0.000000\t0.000000\t0.000000\t0.000000\t0.962920\t11.201897\t0.936672\t0.000000\t2.871936\t3.171182 7.754303\t10.062384\t4.164496\t6.280848\t0.000000\t-124.487960\t35.463480\t2.481136\t20.372508\t0.663948\t6.231061\t12.313746\t1.681842\t0.000000\t7.754040\t3.896312\t3.102726\t0.000000\t0.000000\t2.265130 17.251146\t0.040904\t5.983936\t54.043416\t0.000000\t27.390580\t-136.769106\t7.177572\t1.445574\t2.250046\t0.938927\t6.680006\t0.442590\t0.000000\t2.584680\t5.496583\t1.990428\t0.000000\t0.658152\t2.394566 
20.910480\t0.368136\t5.620048\t5.859000\t0.368214\t1.071140\t4.011930\t-65.418192\t0.336180\t0.000000\t0.597499\t2.173014\t0.250801\t0.596580\t1.723120\t16.281018\t1.756260\t0.000000\t0.000000\t3.494772 2.003921\t9.816960\t21.631120\t4.030992\t0.937272\t23.182530\t2.129790\t0.886120\t-88.051504\t0.258202\t3.755708\t2.092532\t0.000000\t1.909056\t4.763920\t2.435195\t1.287924\t0.283338\t3.799332\t2.847592 5.663255\t2.617856\t3.113264\t1.124928\t1.472856\t0.688590\t3.021330\t0.000000\t0.235326\t-128.487912\t21.936749\t3.702172\t4.957008\t7.795312\t0.608160\t1.669848\t11.240064\t0.000000\t1.106892\t57.534302 3.572207\t0.613560\t1.374688\t0.000000\t0.000000\t2.792615\t0.544830\t0.620284\t1.479192\t9.479702\t-53.327266\t1.448676\t7.774831\t6.244204\t1.621760\t1.182809\t1.931886\t0.482724\t0.837648\t11.325650 2.265302\t18.979456\t12.857376\t3.327912\t0.000000\t5.853015\t4.110990\t2.392524\t0.874068\t1.696756\t1.536426\t-74.828436\t3.584979\t0.000000\t1.672440\t6.679392\t7.961712\t0.000000\t0.388908\t0.647180 6.273144\t3.681360\t0.040432\t0.000000\t0.000000\t4.361070\t1.485900\t1.506404\t0.000000\t12.393696\t44.983139\t19.557126\t-125.902241\t3.659024\t0.861560\t4.313774\t6.088368\t0.000000\t0.000000\t16.697244 1.568286\t0.572656\t0.566048\t0.000000\t0.000000\t0.000000\t0.000000\t1.329180\t1.613664\t7.229656\t13.401049\t0.000000\t1.357276\t-54.612411\t0.557480\t3.200542\t0.761046\t0.797544\t20.881368\t0.776616 21.781750\t4.213112\t1.698144\t0.609336\t0.636006\t5.853015\t2.526030\t3.012808\t3.160092\t0.442632\t2.731424\t2.655906\t0.250801\t0.437492\t-74.727653\t17.046365\t4.566276\t0.000000\t0.000000\t3.106464 35.634943\t6.299216\t20.013840\t4.452840\t5.389314\t2.142280\t3.912870\t20.735208\t1.176630\t0.885264\t1.451069\t7.726272\t0.914686\t1.829512\t12.416600\t-160.924378\t32.198100\t0.787050\t1.017144\t1.941540 
32.324117\t1.063504\t9.258928\t3.093552\t0.535584\t2.027515\t1.684020\t2.658360\t0.739596\t7.082112\t2.816781\t10.945552\t1.534312\t0.517036\t3.953040\t38.267350\t-129.918557\t0.000000\t1.256472\t10.160726 0.000000\t8.221704\t0.929936\t0.000000\t0.000000\t0.000000\t0.000000\t0.000000\t0.907686\t0.000000\t3.926422\t0.000000\t0.000000\t3.022672\t0.000000\t5.218275\t0.000000\t-24.051571\t1.824876\t0.000000 2.091048\t0.327232\t3.841040\t0.000000\t3.213504\t0.000000\t1.089660\t0.000000\t4.269486\t1.364782\t2.389996\t1.046266\t0.000000\t27.760856\t0.000000\t2.365618\t2.458764\t0.640134\t-54.670490\t1.812104 18.122416\t0.981696\t0.606480\t0.843696\t1.640226\t1.338925\t1.832610\t4.785048\t1.479192\t32.791654\t14.937475\t0.804820\t3.806274\t0.477264\t2.432640\t2.087310\t9.191094\t0.000000\t0.837648\t-98.996468""" class Gctmpca(CommandLineApplication): """ App controller for the GCTMPCA algorithm for detecting sequence coevolution The Generalized Continuous-Time Markov Process Coevolutionary Algorithm (GCTMPCA) is presented in: Detecting coevolution in and among protein domains. Yeang CH, Haussler D., PLoS Comput Biol. 2007 Nov;3(11):e211. Detecting the coevolution of biosequences--an example of RNA interaction prediction. Yeang CH, Darot JF, Noller HF, Haussler D. Mol Biol Evol. 2007 Sep;24(9):2119-31. This code requires the GCTMPCA package to be installed. 
As of 11/08, that software is available at: http://www.sns.ias.edu/~chyeang/coevolution_download.zip """ _command = 'calculate_likelihood' _input_handler = '_gctmpca_cl_input' _data = {'mol_type':None,'comparison_type':0,'seqs1':None,\ 'seqs2':'-','tree1':None,'tree2':'-',\ 'seq_names':None,'species_tree':None,\ 'seq_to_species1':None,'seq_to_species2':'-',\ 'char_priors':None,'sub_matrix':None,'epsilon':0.7,\ 'max_gap_threshold':1.0,'max_seq_distance':1.0,\ 'covariation_threshold':0.0,'likelihood_threshold':0.0,\ 'output_path':None,'single_pair_only':0,'family_reps':'-',\ 'pos1':'','pos2':''} _parameter_order = ['mol_type','comparison_type','seqs1','seqs2',\ 'tree1','tree2','seq_names','species_tree',\ 'seq_to_species1','seq_to_species2','char_priors',\ 'sub_matrix','epsilon','max_gap_threshold','max_seq_distance',\ 'covariation_threshold','likelihood_threshold','output_path',\ 'single_pair_only','family_reps','pos1','pos2'] _potential_paths = ['seqs1','tree1','seq_names',\ 'species_tree','seq_to_species1'] _mol_type_lookup = {'rna':0,'0':0,'protein':1,'1':1} _default_priors = {0:default_gctmpca_rna_priors, 1:default_gctmpca_aa_priors} _default_sub_matrix = {0:default_gctmpca_rna_sub_matrix, 1:default_gctmpca_aa_sub_matrix} _char_order = {0:gctmpca_base_order,1:gctmpca_aa_order} _required_parameters = {}.fromkeys(['mol_type','seqs1','tree1',\ 'seq_names','species_tree','seq_to_species1']) def _set_command_line_parameters(self,data): """ Get the right setting for each command line parameter """ # This function could be cleaned up. # for each command line parameter, set it to the value passed in or # the default value. for p in self._parameter_order: if p not in data: if p in self._required_parameters: raise ApplicationError,\ "Required parameter %s missing." % p else: data[p] = self._data[p] # Write necessary files to disk -- need to modify this so paths # to existing files can be passed in. 
if p in self._potential_paths: try: data[p] = self._input_as_lines(data[p]) except TypeError: pass if data['single_pair_only'] == 1 and \ not (data['pos1'] and data['pos2']): raise ApplicationError,\ "Must specify pos1 and pos2 if single_pair_only == 1." # Make sure the MolType is in the correct format (i.e., 1 or 0) data['mol_type'] = mol_type = \ self._mol_type_lookup[str(data['mol_type']).lower()] char_order = self._char_order[mol_type] # If we didn't get several values as parameters, set the defaults. # These are done outside of the above loop b/c they require special # handling. if not data['char_priors']: data['char_priors'] = self._default_priors[mol_type] data['char_priors'] = \ self._input_as_lines(\ self._input_as_gctmpca_char_priors(\ data['char_priors'],char_order)) if not data['sub_matrix']: data['sub_matrix'] = \ self._input_as_multiline_string(\ self._default_sub_matrix[mol_type]) else: data['sub_matrix'] = \ self._input_as_lines(\ self._input_as_gctmpca_rate_matrix(\ data['sub_matrix'],char_order)) if not data['output_path']: data['output_path'] = \ self._input_as_path(self.getTmpFilename()) return data def _gctmpca_cl_input(self,data): """ Write the list of 22 command line parameters to a string """ # Get the right setting for each parameter data = self._set_command_line_parameters(data) # Explicitly disallow intermolecular experiments (I do this here to # make sure I'm looking at the final version of data) if data['comparison_type'] == 1: raise NotImplementedError,\ "Intermolecular experiments currently supported only via coevolve_alignments." 
# Create the command line parameter string and return it return ' '.join([str(data[p]) for p in self._parameter_order]).strip() def _input_as_gctmpca_char_priors(self,priors,char_order): """convert dict of priors to string and write it to tmp file """ # priors t be followed by a newline return ['\t'.join([str(priors[c]) for c in char_order]),''] def _input_as_gctmpca_rate_matrix(self,matrix,char_order): """convert 2D dict rate matrix to string and write it to tmp file """ matrix_rows = [] for c in char_order: matrix_rows.append('\t'.join([str(matrix[c][col_c]) \ for col_c in char_order])) return matrix_rows def _get_result_paths(self,data): """A single file is written, w/ name specified in command line input """ return {'output':ResultPath(Path=data['output_path'],IsWritten=True)}
59.089109
241
0.70258
1,864
11,936
4.362124
0.373927
0.065921
0.039356
0.041323
0.227155
0.170459
0.147214
0.143279
0.124339
0.088058
0
0.31042
0.159015
11,936
201
242
59.383085
0.499602
0.072805
0
0.049587
0
0.165289
0.580881
0.463593
0
0
0
0
0
0
null
null
0.008264
0.033058
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
1
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
dfc85cb397c45ef64a1d11124832fefedde08680
12,235
py
Python
rivendell/splunk/defaultNav.py
ezaspy/elrond
3e358f20112be403b895d873a7e3892ce4181d8b
[ "MIT" ]
1
2021-03-29T08:05:31.000Z
2021-03-29T08:05:31.000Z
rivendell/splunk/defaultNav.py
ezaspy/elrond
3e358f20112be403b895d873a7e3892ce4181d8b
[ "MIT" ]
17
2020-11-24T11:00:38.000Z
2021-05-18T18:20:21.000Z
rivendell/splunk/defaultNav.py
ezaspy/elrond
3e358f20112be403b895d873a7e3892ce4181d8b
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 -tt def doNav(defaultxml): defaultxml.write("<collection label=\"MITRE\">\n ") defaultxml.write("<view name=\"mitre\" default=\"true\" />\n ") defaultxml.write("<a href=\"http://localhost/attack-navigator/index.html\" target=\"_blank\">ATT&amp;CK® Navigator Mapping</a>\n ") defaultxml.write("<view name=\"info\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"ATT&amp;CK® Techniques\">\n ") defaultxml.write("<collection label=\"Initial Access\">\n ") defaultxml.write("<view name=\"t1189\" />\n ") defaultxml.write("<view name=\"t1190\" />\n ") defaultxml.write("<view name=\"t1133\" />\n ") defaultxml.write("<view name=\"t1200\" />\n ") defaultxml.write("<view name=\"t1566\" />\n ") defaultxml.write("<view name=\"t1091\" />\n ") defaultxml.write("<view name=\"t1195\" />\n ") defaultxml.write("<view name=\"t1199\" />\n ") defaultxml.write("<view name=\"t1078\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Execution\">\n ") defaultxml.write("<view name=\"t1059\" />\n ") defaultxml.write("<view name=\"t1609\" />\n ") defaultxml.write("<view name=\"t1610\" />\n ") defaultxml.write("<view name=\"t1203\" />\n ") defaultxml.write("<view name=\"t1559\" />\n ") defaultxml.write("<view name=\"t1106\" />\n ") defaultxml.write("<view name=\"t1053\" />\n ") defaultxml.write("<view name=\"t1129\" />\n ") defaultxml.write("<view name=\"t1072\" />\n ") defaultxml.write("<view name=\"t1569\" />\n ") defaultxml.write("<view name=\"t1204\" />\n ") defaultxml.write("<view name=\"t1047\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Persistence\">\n ") defaultxml.write("<view name=\"t1098\" />\n ") defaultxml.write("<view name=\"t1197\" />\n ") defaultxml.write("<view name=\"t1547\" />\n ") defaultxml.write("<view name=\"t1037\" />\n ") defaultxml.write("<view name=\"t1176\" />\n ") defaultxml.write("<view name=\"t1554\" />\n ") defaultxml.write("<view 
name=\"t1136\" />\n ") defaultxml.write("<view name=\"t1543\" />\n ") defaultxml.write("<view name=\"t1546\" />\n ") defaultxml.write("<view name=\"t1133\" />\n ") defaultxml.write("<view name=\"t1574\" />\n ") defaultxml.write("<view name=\"t1525\" />\n ") defaultxml.write("<view name=\"t1556\" />\n ") defaultxml.write("<view name=\"t1137\" />\n ") defaultxml.write("<view name=\"t1542\" />\n ") defaultxml.write("<view name=\"t1053\" />\n ") defaultxml.write("<view name=\"t1505\" />\n ") defaultxml.write("<view name=\"t1205\" />\n ") defaultxml.write("<view name=\"t1078\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Privilege Escalation\">\n ") defaultxml.write("<view name=\"t1548\" />\n ") defaultxml.write("<view name=\"t1134\" />\n ") defaultxml.write("<view name=\"t1547\" />\n ") defaultxml.write("<view name=\"t1037\" />\n ") defaultxml.write("<view name=\"t1543\" />\n ") defaultxml.write("<view name=\"t1484\" />\n ") defaultxml.write("<view name=\"t1611\" />\n ") defaultxml.write("<view name=\"t1546\" />\n ") defaultxml.write("<view name=\"t1068\" />\n ") defaultxml.write("<view name=\"t1574\" />\n ") defaultxml.write("<view name=\"t1055\" />\n ") defaultxml.write("<view name=\"t1053\" />\n ") defaultxml.write("<view name=\"t1078\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Defense Evasion\">\n ") defaultxml.write("<view name=\"t1548\" />\n ") defaultxml.write("<view name=\"t1134\" />\n ") defaultxml.write("<view name=\"t1197\" />\n ") defaultxml.write("<view name=\"t1612\" />\n ") defaultxml.write("<view name=\"t1140\" />\n ") defaultxml.write("<view name=\"t1610\" />\n ") defaultxml.write("<view name=\"t1006\" />\n ") defaultxml.write("<view name=\"t1480\" />\n ") defaultxml.write("<view name=\"t1211\" />\n ") defaultxml.write("<view name=\"t1222\" />\n ") defaultxml.write("<view name=\"t1484\" />\n ") defaultxml.write("<view name=\"t1564\" />\n ") defaultxml.write("<view 
name=\"t1574\" />\n ") defaultxml.write("<view name=\"t1562\" />\n ") defaultxml.write("<view name=\"t1070\" />\n ") defaultxml.write("<view name=\"t1202\" />\n ") defaultxml.write("<view name=\"t1036\" />\n ") defaultxml.write("<view name=\"t1556\" />\n ") defaultxml.write("<view name=\"t1578\" />\n ") defaultxml.write("<view name=\"t1112\" />\n ") defaultxml.write("<view name=\"t1601\" />\n ") defaultxml.write("<view name=\"t1599\" />\n ") defaultxml.write("<view name=\"t1027\" />\n ") defaultxml.write("<view name=\"t1542\" />\n ") defaultxml.write("<view name=\"t1055\" />\n ") defaultxml.write("<view name=\"t1207\" />\n ") defaultxml.write("<view name=\"t1014\" />\n ") defaultxml.write("<view name=\"t1218\" />\n ") defaultxml.write("<view name=\"t1216\" />\n ") defaultxml.write("<view name=\"t1553\" />\n ") defaultxml.write("<view name=\"t1221\" />\n ") defaultxml.write("<view name=\"t1205\" />\n ") defaultxml.write("<view name=\"t1127\" />\n ") defaultxml.write("<view name=\"t1535\" />\n ") defaultxml.write("<view name=\"t1550\" />\n ") defaultxml.write("<view name=\"t1078\" />\n ") defaultxml.write("<view name=\"t1497\" />\n ") defaultxml.write("<view name=\"t1600\" />\n ") defaultxml.write("<view name=\"t1220\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Credential Access\">\n ") defaultxml.write("<view name=\"t1110\" />\n ") defaultxml.write("<view name=\"t1555\" />\n ") defaultxml.write("<view name=\"t1212\" />\n ") defaultxml.write("<view name=\"t1187\" />\n ") defaultxml.write("<view name=\"t1606\" />\n ") defaultxml.write("<view name=\"t1056\" />\n ") defaultxml.write("<view name=\"t1557\" />\n ") defaultxml.write("<view name=\"t1556\" />\n ") defaultxml.write("<view name=\"t1040\" />\n ") defaultxml.write("<view name=\"t1003\" />\n ") defaultxml.write("<view name=\"t1528\" />\n ") defaultxml.write("<view name=\"t1539\" />\n ") defaultxml.write("<view name=\"t1111\" />\n ") defaultxml.write("<view name=\"t1552\" 
/>\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Discovery\">\n ") defaultxml.write("<view name=\"t1087\" />\n ") defaultxml.write("<view name=\"t1010\" />\n ") defaultxml.write("<view name=\"t1217\" />\n ") defaultxml.write("<view name=\"t1580\" />\n ") defaultxml.write("<view name=\"t1538\" />\n ") defaultxml.write("<view name=\"t1526\" />\n ") defaultxml.write("<view name=\"t1613\" />\n ") defaultxml.write("<view name=\"t1482\" />\n ") defaultxml.write("<view name=\"t1083\" />\n ") defaultxml.write("<view name=\"t1046\" />\n ") defaultxml.write("<view name=\"t1135\" />\n ") defaultxml.write("<view name=\"t1040\" />\n ") defaultxml.write("<view name=\"t1201\" />\n ") defaultxml.write("<view name=\"t1120\" />\n ") defaultxml.write("<view name=\"t1069\" />\n ") defaultxml.write("<view name=\"t1057\" />\n ") defaultxml.write("<view name=\"t1012\" />\n ") defaultxml.write("<view name=\"t1018\" />\n ") defaultxml.write("<view name=\"t1518\" />\n ") defaultxml.write("<view name=\"t1082\" />\n ") defaultxml.write("<view name=\"t1614\" />\n ") defaultxml.write("<view name=\"t1016\" />\n ") defaultxml.write("<view name=\"t1049\" />\n ") defaultxml.write("<view name=\"t1033\" />\n ") defaultxml.write("<view name=\"t1007\" />\n ") defaultxml.write("<view name=\"t1124\" />\n ") defaultxml.write("<view name=\"t1497\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Lateral Movement\">\n ") defaultxml.write("<view name=\"t1210\" />\n ") defaultxml.write("<view name=\"t1534\" />\n ") defaultxml.write("<view name=\"t1570\" />\n ") defaultxml.write("<view name=\"t1563\" />\n ") defaultxml.write("<view name=\"t1021\" />\n ") defaultxml.write("<view name=\"t1091\" />\n ") defaultxml.write("<view name=\"t1072\" />\n ") defaultxml.write("<view name=\"t1080\" />\n ") defaultxml.write("<view name=\"t1550\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Collection\">\n ") 
defaultxml.write("<view name=\"t1560\" />\n ") defaultxml.write("<view name=\"t1123\" />\n ") defaultxml.write("<view name=\"t1119\" />\n ") defaultxml.write("<view name=\"t1115\" />\n ") defaultxml.write("<view name=\"t1530\" />\n ") defaultxml.write("<view name=\"t1602\" />\n ") defaultxml.write("<view name=\"t1213\" />\n ") defaultxml.write("<view name=\"t1005\" />\n ") defaultxml.write("<view name=\"t1039\" />\n ") defaultxml.write("<view name=\"t1025\" />\n ") defaultxml.write("<view name=\"t1074\" />\n ") defaultxml.write("<view name=\"t1114\" />\n ") defaultxml.write("<view name=\"t1056\" />\n ") defaultxml.write("<view name=\"t1185\" />\n ") defaultxml.write("<view name=\"t1557\" />\n ") defaultxml.write("<view name=\"t1113\" />\n ") defaultxml.write("<view name=\"t1125\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Command &amp; Control\">\n ") defaultxml.write("<view name=\"t1071\" />\n ") defaultxml.write("<view name=\"t1092\" />\n ") defaultxml.write("<view name=\"t1132\" />\n ") defaultxml.write("<view name=\"t1001\" />\n ") defaultxml.write("<view name=\"t1568\" />\n ") defaultxml.write("<view name=\"t1573\" />\n ") defaultxml.write("<view name=\"t1008\" />\n ") defaultxml.write("<view name=\"t1105\" />\n ") defaultxml.write("<view name=\"t1104\" />\n ") defaultxml.write("<view name=\"t1095\" />\n ") defaultxml.write("<view name=\"t1571\" />\n ") defaultxml.write("<view name=\"t1572\" />\n ") defaultxml.write("<view name=\"t1090\" />\n ") defaultxml.write("<view name=\"t1219\" />\n ") defaultxml.write("<view name=\"t1205\" />\n ") defaultxml.write("<view name=\"t1102\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Exfiltration\">\n ") defaultxml.write("<view name=\"t1020\" />\n ") defaultxml.write("<view name=\"t1030\" />\n ") defaultxml.write("<view name=\"t1048\" />\n ") defaultxml.write("<view name=\"t1041\" />\n ") defaultxml.write("<view name=\"t1011\" />\n ") 
defaultxml.write("<view name=\"t1052\" />\n ") defaultxml.write("<view name=\"t1567\" />\n ") defaultxml.write("<view name=\"t1029\" />\n ") defaultxml.write("<view name=\"t1537\" />\n ") defaultxml.write("</collection>\n ") defaultxml.write("<collection label=\"Impact\">\n ") defaultxml.write("<view name=\"t1531\" />\n ") defaultxml.write("<view name=\"t1485\" />\n ") defaultxml.write("<view name=\"t1486\" />\n ") defaultxml.write("<view name=\"t1565\" />\n ") defaultxml.write("<view name=\"t1491\" />\n ") defaultxml.write("<view name=\"t1561\" />\n ") defaultxml.write("<view name=\"t1499\" />\n ") defaultxml.write("<view name=\"t1495\" />\n ") defaultxml.write("<view name=\"t1490\" />\n ") defaultxml.write("<view name=\"t1498\" />\n ") defaultxml.write("<view name=\"t1496\" />\n ") defaultxml.write("<view name=\"t1489\" />\n ") defaultxml.write("<view name=\"t1529\" />\n ") defaultxml.write("</collection>\n </collection>\n ")
53.195652
136
0.561422
1,373
12,235
5.003642
0.158048
0.495633
0.526346
0.57933
0.84425
0.422416
0.413683
0.413683
0.413683
0.315575
0
0.078837
0.182019
12,235
229
137
53.427948
0.607414
0.002043
0
0.289474
0
0
0.378983
0
0
0
0
0
0
1
0.004386
false
0
0
0
0.004386
0
0
0
0
null
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
dff2396e618936b8330f4bd520daa9f14998d954
233
py
Python
06-programing-languages/01-pythons/01-lang-features/01-introspection-codes/01_id.py
jameszhan/notes-ml
c633d04e5443eab71bc3b27fff89d57b89d1786c
[ "Apache-2.0" ]
null
null
null
06-programing-languages/01-pythons/01-lang-features/01-introspection-codes/01_id.py
jameszhan/notes-ml
c633d04e5443eab71bc3b27fff89d57b89d1786c
[ "Apache-2.0" ]
null
null
null
06-programing-languages/01-pythons/01-lang-features/01-introspection-codes/01_id.py
jameszhan/notes-ml
c633d04e5443eab71bc3b27fff89d57b89d1786c
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- a = [] b = a c = [] print("id(a) = ", id(a)) print("id(b) = ", id(b)) print("id(c) = ", id(c)) a.append(1) b.append(2) c.append(3) print("id(a) = ", id(a)) print("id(b) = ", id(b)) print("id(c) = ", id(c))
13.705882
24
0.446352
46
233
2.26087
0.26087
0.403846
0.153846
0.192308
0.634615
0.634615
0.634615
0.634615
0.634615
0.634615
0
0.021164
0.188841
233
16
25
14.5625
0.529101
0.090129
0
0.5
0
0
0.228571
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
5f0fdc875fbc92072f12a7e82fddf978d28d52f5
14,524
py
Python
assets/View/keyboards.py
HoolyPanda/HorNet
b9d811ac74fdd3f9b01a4a886c566c90090abab3
[ "MIT" ]
null
null
null
assets/View/keyboards.py
HoolyPanda/HorNet
b9d811ac74fdd3f9b01a4a886c566c90090abab3
[ "MIT" ]
null
null
null
assets/View/keyboards.py
HoolyPanda/HorNet
b9d811ac74fdd3f9b01a4a886c566c90090abab3
[ "MIT" ]
null
null
null
import json mKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"mainMenu\":\"wallet\"}", "label": "Украсть деньги со счета" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"mainMenu\":\"profile\"}", "label": "Получить доступ к профилю" }, "color": "secondary" } ] ] } hCKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"button\":\"name\"}", "label": "Имя" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"work\"}", "label": "Работа" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"color\"}", "label": "Любимый Цвет" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"button\":\"eyes\"}", "label": "Цвет Глаз" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"hair\"}", "label": "Цвет волос" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"music\"}", "label": "Любимая музыка" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"button\":\"end\"}", "label": "Завершить" }, "color": "negative" }, { "action": { "type":"text", "payload": "{\"button\":\"confirm\"}", "label": "Подтвердить" }, "color": "positive" } ] ] } dKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"district\":\"Фавеллы\"}", "label": "Фавеллы" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"district\":\"Ист-Енд\"}", "label": "Ист-Енд" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"district\":\"Коулун\"}", "label": "Коулун" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"district\":\"Тобэй\"}", "label": "Тобэй" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"district\":\"Доминго\"}", "label": "Доминго" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"district\":\"Дурбан\"}", "label": "Дурбан" }, 
"color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"district\":\"Кангване\"}", "label": "Кангване" }, "color": "secondary" } ] ] } heKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"height\":\"Низкий\"}", "label": "Низкий" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"height\":\"Средний\"}", "label": "Средний" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"height\":\"Высокий\"}", "label": "Высокий" }, "color": "secondary" } ] ] } nKB = { 'one_time': True, 'buttons':[] } wKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"work\":\"shogun\"}", "label": "ShoGun" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"work\":\"sintech\"}", "label": "SinTech" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"work\":\"cybersteel\"}", "label": "CyberSteel" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"work\":\"c-corp\"}", "label": "C-Corp" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"work\":\"dell\"}", "label": "Dell" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"work\":\"obs news\"}", "label": "OBS News" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"work\":\"безработный\"}", "label": "Безработный" }, "color": "secondary" } ] ] } eKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"eyeColor\":\"зеленые\"}", "label": "Зеленый" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"eyeColor\":\"Синие\"}", "label": "Синий" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"eyeColor\":\"коричневые\"}", "label": "Карий" }, "color": "secondary" } ] ] } hairKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"hairColor\":\"Русые\"}", "label": "Русые" }, "color": "secondary" }, { "action": { 
"type":"text", "payload": "{\"hairColor\":\"Шатен\"}", "label": "Шатен" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"hairColor\":\"Рыжие\"}", "label": "Рыжие" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"hairColor\":\"Брюнет\"}", "label": "Брюнет" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"hairColor\":\"Цветные\"}", "label": "Цветные" }, "color": "secondary" } ] ] } hCKB = { 'one_time': True, 'buttons': [ [ { "action": { "type":"text", "payload": "{\"button\":\"name\"}", "label": "Имя" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"work\"}", "label": "Работа" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"district\"}", "label": "Район" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"button\":\"eyes\"}", "label": "Цвет Глаз" }, "color": "secondary" }, { "action": { "type":"text", "payload": "{\"button\":\"hair\"}", "label": "Цвет волос" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"button\":\"height\"}", "label": "Рост" }, "color": "secondary" } ], [ { "action": { "type":"text", "payload": "{\"button\":\"end\"}", "label": "Завершить" }, "color": "negative" }, { "action": { "type":"text", "payload": "{\"button\":\"confirm\"}", "label": "Подтвердить" }, "color": "positive" } ] ] } districtKB = json.dumps(dKB) nullKB = json.dumps(nKB) heightKB = json.dumps(heKB) worksKB = json.dumps(wKB) hairKB = json.dumps(hairKB) humanCreatorKB = json.dumps(hCKB) eyeColorKB = json.dumps(eKB) humanCreatorKB = json.dumps(hCKB) # eyeColorKB = json.dumps(eKB) mainKB = json.dumps(mKB)
28.534381
69
0.207312
535
14,524
5.611215
0.181308
0.143238
0.200533
0.300799
0.726849
0.70986
0.698201
0.41972
0.309127
0.309127
0
0
0.655605
14,524
509
70
28.534381
0.60016
0.001928
0
0.380368
0
0
0.166126
0
0
0
0
0
0
1
0
false
0
0.002045
0
0.002045
0
0
0
0
null
0
1
1
0
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
5f1c35bc86c1194855e8c04264be264563566b39
271
py
Python
iex/market/__init__.py
udrea/iex
5db5e84575a999b41f7ddf74aa356018941c9ee6
[ "Apache-2.0" ]
2
2018-04-14T17:52:34.000Z
2021-03-12T09:16:49.000Z
iex/market/__init__.py
udrea/iex
5db5e84575a999b41f7ddf74aa356018941c9ee6
[ "Apache-2.0" ]
null
null
null
iex/market/__init__.py
udrea/iex
5db5e84575a999b41f7ddf74aa356018941c9ee6
[ "Apache-2.0" ]
null
null
null
# Filename: market/__init__.py """ Data provided for free by IEX (https://iextrading.com/developer/). See https://iextrading.com/api-exhibit-a/ for more information. """ from iex.market.tops import TOPS from iex.market.hist import HIST from iex.market.deep import DEEP
24.636364
66
0.760148
42
271
4.809524
0.595238
0.10396
0.193069
0
0
0
0
0
0
0
0
0
0.114391
271
10
67
27.1
0.841667
0.590406
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
a029982ab95e58adcfde2bbea943451b433de0ed
321
py
Python
backend/drugs/admin.py
hippocampus13/IndianMedicineDB
f8d96c0a96c622937d12d3eaf7257f68a2e488c5
[ "MIT" ]
null
null
null
backend/drugs/admin.py
hippocampus13/IndianMedicineDB
f8d96c0a96c622937d12d3eaf7257f68a2e488c5
[ "MIT" ]
null
null
null
backend/drugs/admin.py
hippocampus13/IndianMedicineDB
f8d96c0a96c622937d12d3eaf7257f68a2e488c5
[ "MIT" ]
1
2022-03-28T08:27:57.000Z
2022-03-28T08:27:57.000Z
from django.contrib import admin from .models import DrugType, Manufacturer, DrugComposition, Drug, DataSource, PackSizeLabel admin.site.register(Manufacturer) admin.site.register(PackSizeLabel) admin.site.register(DrugComposition) admin.site.register(DataSource) admin.site.register(Drug) admin.site.register(DrugType)
32.1
92
0.841121
38
321
7.105263
0.368421
0.2
0.377778
0.222222
0
0
0
0
0
0
0
0
0.062305
321
9
93
35.666667
0.89701
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
a03c6c227b161fd955d9d9b97c7e2848c5e30bab
122
py
Python
ptpop/__main__.py
andrewburnheimer/ptpop
4801ae38169f1fac738969f9d05b0811fff5e47e
[ "Apache-2.0" ]
2
2018-04-18T05:29:13.000Z
2020-11-24T01:36:05.000Z
ptpop/__main__.py
andrewburnheimer/ptpop
4801ae38169f1fac738969f9d05b0811fff5e47e
[ "Apache-2.0" ]
3
2016-10-23T18:12:36.000Z
2016-11-01T21:39:39.000Z
ptpop/__main__.py
andrewburnheimer/ptpop
4801ae38169f1fac738969f9d05b0811fff5e47e
[ "Apache-2.0" ]
null
null
null
# __main__.py is executed when the package is instantiated import Console if __name__ == '__main__': Console.main()
17.428571
58
0.737705
16
122
4.875
0.75
0
0
0
0
0
0
0
0
0
0
0
0.180328
122
6
59
20.333333
0.78
0.459016
0
0
0
0
0.125
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
a04cb1e91ecba363731bfd3edce8f91ab9a3c574
52
py
Python
venv/lib/python2.7/site-packages/way2sms.py
theabstractguy/Fall-detection-using-piezo-electrice-sensors
f26d7c0979f0a519cc2c416862af83acea633bf7
[ "Apache-2.0" ]
null
null
null
venv/lib/python2.7/site-packages/way2sms.py
theabstractguy/Fall-detection-using-piezo-electrice-sensors
f26d7c0979f0a519cc2c416862af83acea633bf7
[ "Apache-2.0" ]
null
null
null
venv/lib/python2.7/site-packages/way2sms.py
theabstractguy/Fall-detection-using-piezo-electrice-sensors
f26d7c0979f0a519cc2c416862af83acea633bf7
[ "Apache-2.0" ]
null
null
null
from lib import gui start = gui.Gui() start.main()
17.333333
20
0.692308
9
52
4
0.666667
0.444444
0
0
0
0
0
0
0
0
0
0
0.173077
52
3
21
17.333333
0.837209
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
a05ba8a02019d6dd0886a7ccee15fe961bc34d9e
43
py
Python
pysnc/__version__.py
ServiceNow/PySNC
4be8fa19e1a15fa2d0cc1b1e5d4c96ed4857b735
[ "MIT" ]
22
2020-10-22T23:44:50.000Z
2022-03-26T11:21:39.000Z
pysnc/__version__.py
ServiceNow/PySNC
4be8fa19e1a15fa2d0cc1b1e5d4c96ed4857b735
[ "MIT" ]
15
2020-11-09T23:27:05.000Z
2021-05-20T02:47:55.000Z
pysnc/__version__.py
ServiceNow/PySNC
4be8fa19e1a15fa2d0cc1b1e5d4c96ed4857b735
[ "MIT" ]
9
2020-10-23T01:58:13.000Z
2022-03-22T22:32:03.000Z
__title__ = 'pysnc' __version__ = '1.0.4'
10.75
21
0.651163
6
43
3.333333
1
0
0
0
0
0
0
0
0
0
0
0.083333
0.162791
43
3
22
14.333333
0.472222
0
0
0
0
0
0.238095
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
a07988d8b615fd5a1eba1800190f50437708c41e
421
py
Python
online_pharmacy/items/models.py
geekyJock8/online_pharmacy
892852857786ec17259b71f2a178896cd6d12e60
[ "Apache-2.0" ]
5
2020-09-09T13:59:17.000Z
2021-09-30T07:20:55.000Z
online_pharmacy/items/models.py
geekyJock8/online_pharmacy
892852857786ec17259b71f2a178896cd6d12e60
[ "Apache-2.0" ]
10
2017-09-03T06:13:31.000Z
2017-10-10T15:22:30.000Z
online_pharmacy/items/models.py
geekyJock8/Online-Pharmacy
892852857786ec17259b71f2a178896cd6d12e60
[ "Apache-2.0" ]
9
2017-09-03T04:59:18.000Z
2019-10-17T11:33:18.000Z
from django.db import models class item(models.Model): item_id = models.CharField(max_length=20,primary_key = True) item_name = models.CharField(max_length=50) image = models.ImageField() otc_or_not = models.BooleanField() brand_name = models.CharField(max_length=50) salts = models.TextField() specifications = models.CharField(max_length=100) category = models.CharField(max_length=30)
35.083333
64
0.741093
56
421
5.375
0.553571
0.249169
0.299003
0.398671
0.199336
0.199336
0
0
0
0
0
0.030899
0.154394
421
11
65
38.272727
0.814607
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
a08d7944fdb739dda28ac833c848f3aed8cac63b
508
py
Python
awe/tasks/views.py
Awesomebug95/aweum
618fe27e1792722e6a622c0801c97529195f76a6
[ "BSD-3-Clause" ]
null
null
null
awe/tasks/views.py
Awesomebug95/aweum
618fe27e1792722e6a622c0801c97529195f76a6
[ "BSD-3-Clause" ]
null
null
null
awe/tasks/views.py
Awesomebug95/aweum
618fe27e1792722e6a622c0801c97529195f76a6
[ "BSD-3-Clause" ]
null
null
null
from django.shortcuts import render from tasks.forms import TaskForm def index(request): return render(request, 'tasks/index.html') def profile(request): return render(request, 'tasks/profile.html') def create_task(request): form = TaskForm(request.POST or None) if not form.is_valid(): return render(request, 'tasks/create_task.html', {'form': form}) form.instance.author = request.user form.save() return render(request, 'tasks/create_task.html', {'form': form})
24.190476
72
0.702756
68
508
5.191176
0.411765
0.135977
0.215297
0.271955
0.436261
0.260623
0.260623
0.260623
0.260623
0
0
0
0.167323
508
20
73
25.4
0.834515
0
0
0.153846
0
0
0.169291
0.086614
0
0
0
0
0
1
0.230769
false
0
0.153846
0.153846
0.692308
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
a0946cf0268b1dae5b2e48f45e274f9d3252cf5e
881
py
Python
core/embeds.py
Pug234/BytesBump
d5ff3130bffae92e1c5c671db4ed8904c403e9dc
[ "MIT" ]
11
2020-11-14T17:28:50.000Z
2021-05-19T18:21:07.000Z
core/embeds.py
AnimeDyno/BytesBump
a0cf0bfc4c13592c7b10ad46faa46a2a98dc1443
[ "MIT" ]
3
2021-01-22T15:48:41.000Z
2021-06-22T17:16:50.000Z
core/embeds.py
zImPinguin/Bump-Bot
3f449a4e5581a35a5cff998e94a13ae33dbe2b04
[ "MIT" ]
13
2020-11-18T05:20:31.000Z
2021-06-19T16:31:30.000Z
import random from discord import Embed, Color class Embeds: def __init__(self, message): self.message = message def success(self, **kwargs): embed = Embed( description=self.message, color=Color.green() ) for i in kwargs: embed.add_field(name=i.replace("_", " "), value=kwargs[i]) return embed def error(self, **kwargs): embed = Embed( description=self.message, color=Color.red() ) for i in kwargs: embed.add_field(name=i.replace("_", " "), value=kwargs[i]) return embed def warn(self, **kwargs): embed = Embed( description=self.message, color=Color.orange() ) for i in kwargs: embed.add_field(name=i.replace("_", " "), value=kwargs[i]) return embed
26.69697
70
0.53462
97
881
4.752577
0.298969
0.143167
0.097614
0.130152
0.741866
0.741866
0.741866
0.741866
0.741866
0.403471
0
0
0.343927
881
33
71
26.69697
0.797578
0
0
0.517241
0
0
0.006803
0
0
0
0
0
0
1
0.137931
false
0
0.068966
0
0.344828
0
0
0
0
null
0
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
a0b624137782bd6b6d6896194a83419c30323ebb
225
py
Python
src/pyscoresaber/models/player_info.py
Kiyomi-Parents/PyScoreSaber
5dbfbfad04bf53ac3fc2fc803acf0374db5c2552
[ "MIT" ]
null
null
null
src/pyscoresaber/models/player_info.py
Kiyomi-Parents/PyScoreSaber
5dbfbfad04bf53ac3fc2fc803acf0374db5c2552
[ "MIT" ]
6
2022-02-10T08:59:01.000Z
2022-03-01T08:06:57.000Z
src/pyscoresaber/models/player_info.py
Kiyomi-Parents/PyScoreSaber
5dbfbfad04bf53ac3fc2fc803acf0374db5c2552
[ "MIT" ]
null
null
null
from dataclasses import dataclass from dataclasses_json import dataclass_json from .fields import default from .player import Player @dataclass_json @dataclass class PlayerInfo: player: Player = default("playerInfo")
17.307692
43
0.808889
27
225
6.62963
0.37037
0.167598
0
0
0
0
0
0
0
0
0
0
0.142222
225
12
44
18.75
0.927461
0
0
0
0
0
0.044444
0
0
0
0
0
0
1
0
true
0
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
267c20816c379981340c001e13271911022efec6
213
py
Python
my/stackexchange.py
jhermann/HPI
a60c30868b285f233caf65a59d3496082fb9d5c2
[ "MIT" ]
null
null
null
my/stackexchange.py
jhermann/HPI
a60c30868b285f233caf65a59d3496082fb9d5c2
[ "MIT" ]
null
null
null
my/stackexchange.py
jhermann/HPI
a60c30868b285f233caf65a59d3496082fb9d5c2
[ "MIT" ]
null
null
null
import mycfg.repos.stexport.model as stexport from mycfg import paths def get_data(): sources = [max(paths.stexport.export_dir.glob('*.json'))] return stexport.Model(sources).site_model('stackoverflow')
26.625
62
0.751174
29
213
5.413793
0.689655
0.165605
0
0
0
0
0
0
0
0
0
0
0.117371
213
7
63
30.428571
0.835106
0
0
0
0
0
0.089202
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
13fc27e9793e76ffc420b061ce7bf793662d8fb0
122
py
Python
config.py
mesmesgit/SCNN_Pytorch
6b4ec6af124bac66b58624dc03943a510a397007
[ "MIT" ]
199
2019-03-23T07:28:25.000Z
2022-03-09T05:32:25.000Z
config.py
mesmesgit/SCNN_Pytorch
6b4ec6af124bac66b58624dc03943a510a397007
[ "MIT" ]
65
2019-04-05T02:10:23.000Z
2022-02-17T08:49:31.000Z
config.py
mesmesgit/SCNN_Pytorch
6b4ec6af124bac66b58624dc03943a510a397007
[ "MIT" ]
69
2019-03-24T03:12:45.000Z
2022-03-28T04:14:59.000Z
Dataset_Path = dict( CULane = "/home/lion/Dataset/CULane/data/CULane", Tusimple = "/home/lion/Dataset/tusimple" )
24.4
53
0.688525
15
122
5.533333
0.533333
0.192771
0.361446
0
0
0
0
0
0
0
0
0
0.147541
122
4
54
30.5
0.798077
0
0
0
0
0
0.52459
0.52459
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
cd825a774635cbe7d44ed5f08f03161b16113c71
321
py
Python
welut/settings.py
agusmakmun/welut
ee176b51d4637691a616a17eb0491c933211799a
[ "MIT" ]
6
2017-11-17T10:34:27.000Z
2022-01-06T14:17:23.000Z
welut/settings.py
agusmakmun/welut
ee176b51d4637691a616a17eb0491c933211799a
[ "MIT" ]
2
2017-12-06T14:40:31.000Z
2020-08-15T10:41:11.000Z
welut/settings.py
agusmakmun/welut
ee176b51d4637691a616a17eb0491c933211799a
[ "MIT" ]
7
2017-11-13T20:47:05.000Z
2020-06-04T03:20:13.000Z
# -*- coding: utf-8 -*- from django.conf import settings WELUT_EXTENSIONS = getattr(settings, 'WELUT_EXTENSIONS', ['.pdf', '.epub', '.mobi']) WELUT_REMOVED_EXTENSIONS = getattr(settings, 'WELUT_REMOVED_EXTENSIONS', ['.pdf', '.epub', '.mobi']) WELUT_IMAGES_EXTENSION = getattr(settings, 'WELUT_IMAGE_EXTENSION', '.png')
40.125
100
0.719626
37
321
5.972973
0.513514
0.235294
0.271493
0.271493
0.235294
0
0
0
0
0
0
0.003436
0.093458
321
7
101
45.857143
0.756014
0.065421
0
0
0
0
0.312081
0.151007
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
cd9214ae5c09797aea4b3c581123b349f3199538
238
py
Python
models/faster_rcnn.py
wk910930/mask_rcnn_pytorch
21dc137f4dd75384b39a384437b5fbb18f111d9e
[ "MIT" ]
5
2017-08-17T02:53:02.000Z
2021-10-19T01:44:45.000Z
models/faster_rcnn.py
wk910930/mask_rcnn_pytorch
21dc137f4dd75384b39a384437b5fbb18f111d9e
[ "MIT" ]
null
null
null
models/faster_rcnn.py
wk910930/mask_rcnn_pytorch
21dc137f4dd75384b39a384437b5fbb18f111d9e
[ "MIT" ]
4
2017-08-22T14:19:58.000Z
2021-03-09T02:04:23.000Z
from .modules.mask_rcnn import FasterRCNN def create_model(data, config_of_data, num_classes=80, backbone='resnet-50-c4', share_features=True, **kwargs): return FasterRCNN(backbone=backbone, num_classes=num_classes)
39.666667
79
0.747899
32
238
5.3125
0.75
0.176471
0
0
0
0
0
0
0
0
0
0.024752
0.151261
238
5
80
47.6
0.816832
0
0
0
0
0
0.05042
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
cd9a22f99f90d02aa1d56d319d23ea25c9dc05cc
141
py
Python
other_rank_computers.py
touqir14/LUP-rank-computer
e790f0a2954ec57190d4314a9cec48c309e7ff8b
[ "MIT" ]
5
2020-07-18T10:59:03.000Z
2021-12-06T13:30:56.000Z
other_rank_computers.py
touqir14/LUP-rank-computer
e790f0a2954ec57190d4314a9cec48c309e7ff8b
[ "MIT" ]
null
null
null
other_rank_computers.py
touqir14/LUP-rank-computer
e790f0a2954ec57190d4314a9cec48c309e7ff8b
[ "MIT" ]
null
null
null
import torch def rank_torch(A, args=None): A_tensor = torch.from_numpy(A).cuda() return torch.matrix_rank(A_tensor).cpu().item()
28.2
51
0.695035
23
141
4.043478
0.652174
0.150538
0
0
0
0
0
0
0
0
0
0
0.156028
141
5
51
28.2
0.781513
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
269976e74bd9467621016a4a3883427d7f697ec0
284
py
Python
topCoder/srms/500s/srm524/div2/shipping_cubes.py
ferhatelmas/algo
a7149c7a605708bc01a5cd30bf5455644cefd04d
[ "WTFPL" ]
25
2015-01-21T16:39:18.000Z
2021-05-24T07:01:24.000Z
topCoder/srms/500s/srm524/div2/shipping_cubes.py
ferhatelmas/algo
a7149c7a605708bc01a5cd30bf5455644cefd04d
[ "WTFPL" ]
2
2020-09-30T19:39:36.000Z
2020-10-01T17:15:16.000Z
topCoder/srms/500s/srm524/div2/shipping_cubes.py
ferhatelmas/algo
a7149c7a605708bc01a5cd30bf5455644cefd04d
[ "WTFPL" ]
15
2015-01-21T16:39:27.000Z
2020-10-01T17:00:22.000Z
class ShippingCubes: def minimalCost(self, N): m = 600 for i in xrange(1, 200): for j in xrange(1, i + 1): for k in xrange(1, j + 1): if i * j * k == N: m = min(m, i + j + k) return m
28.4
45
0.380282
41
284
2.634146
0.463415
0.222222
0.25
0
0
0
0
0
0
0
0
0.079137
0.510563
284
9
46
31.555556
0.697842
0
0
0
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.333333
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
26b0ea5b5385591b1c3491de693ff6f69b050aee
147
py
Python
toggl/__main__.py
Bass-03/toggl-cli
ba1bb0409bdd85dab5cf10fba9fc37b6b533eb38
[ "MIT" ]
178
2018-12-03T08:45:43.000Z
2022-03-24T21:44:49.000Z
toggl/__main__.py
Bass-03/toggl-cli
ba1bb0409bdd85dab5cf10fba9fc37b6b533eb38
[ "MIT" ]
123
2018-02-04T10:03:49.000Z
2022-03-30T18:30:31.000Z
toggl/__main__.py
beauraines/toggl-cli
d79af4f48518725a80db1fddf3e5c180aecfdf20
[ "MIT" ]
44
2015-02-12T20:30:39.000Z
2018-10-29T22:53:12.000Z
"""toggl.__main__: executed when bootstrap directory is called as script.""" from toggl.toggl import main if __name__ == '__main__': main()
18.375
76
0.714286
19
147
4.894737
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.170068
147
7
77
21
0.762295
0.47619
0
0
0
0
0.114286
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
26c538ca6f64f44af0f642f4fc6774dcf32916eb
42
py
Python
vic/drivers/python/vic/__init__.py
lingyunan0510/VIC
dbc00a813b5df5a88027d1dc57a7805e9a464436
[ "MIT" ]
1
2022-01-18T01:23:47.000Z
2022-01-18T01:23:47.000Z
vic/drivers/python/vic/__init__.py
yusheng-wang/VIC
8f6cc0661bdc67c4f6caabdd4dcd0b8782517435
[ "MIT" ]
null
null
null
vic/drivers/python/vic/__init__.py
yusheng-wang/VIC
8f6cc0661bdc67c4f6caabdd4dcd0b8782517435
[ "MIT" ]
null
null
null
from .vic import * VIC_DRIVER = b'Python'
14
22
0.714286
7
42
4.142857
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.166667
42
2
23
21
0.828571
0
0
0
0
0
0.142857
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
26c9d013d21a7b87454a5f0891c90f03e5e150f2
151
py
Python
slackd/common/__init__.py
dmwesterhoff/slackd
ec87abc693d65fcedb2233b97f84b604c37b5930
[ "MIT" ]
1
2016-03-18T21:35:54.000Z
2016-03-18T21:35:54.000Z
slackd/common/__init__.py
dmwesterhoff/slackd
ec87abc693d65fcedb2233b97f84b604c37b5930
[ "MIT" ]
null
null
null
slackd/common/__init__.py
dmwesterhoff/slackd
ec87abc693d65fcedb2233b97f84b604c37b5930
[ "MIT" ]
null
null
null
""" slackd.common ~~~~~~~~~~~~~ Provides application level utility functions and classes :copyright: (c) 2016 Pinn :license: All rights reserved """
15.1
56
0.688742
17
151
6.117647
1
0
0
0
0
0
0
0
0
0
0
0.030769
0.139073
151
9
57
16.777778
0.769231
0.933775
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
f809d9c0f524641a254177bb85f519109563bcac
184
py
Python
challenge_1/python/wobboz/src/reverse.py
rchicoli/2017-challenges
44f0b672e5dea34de1dde131b6df837d462f8e29
[ "Apache-2.0" ]
271
2017-01-01T22:58:36.000Z
2021-11-28T23:05:29.000Z
challenge_1/python/wobboz/src/reverse.py
AakashOfficial/2017Challenges
a8f556f1d5b43c099a0394384c8bc2d826f9d287
[ "Apache-2.0" ]
283
2017-01-01T23:26:05.000Z
2018-03-23T00:48:55.000Z
challenge_1/python/wobboz/src/reverse.py
AakashOfficial/2017Challenges
a8f556f1d5b43c099a0394384c8bc2d826f9d287
[ "Apache-2.0" ]
311
2017-01-01T22:59:23.000Z
2021-09-23T00:29:12.000Z
from sys import argv if len(argv) == 1: print("[usage] reverse needs one argument") elif len(argv) == 2: print(argv[1][::-1]) else: print("[usage] reverse only takes one argument")
23
49
0.673913
30
184
4.133333
0.6
0.112903
0.274194
0
0
0
0
0
0
0
0
0.025641
0.152174
184
8
49
23
0.769231
0
0
0
0
0
0.394595
0
0
0
0
0
0
1
0
true
0
0.142857
0
0.142857
0.428571
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
f80d7fcc447d776157c66367c1a886befd5836b9
1,320
py
Python
setup.py
jayclassless/coverage_python_version
a5fc967455beb666d0cc2be23f59df230bc82f4c
[ "MIT" ]
null
null
null
setup.py
jayclassless/coverage_python_version
a5fc967455beb666d0cc2be23f59df230bc82f4c
[ "MIT" ]
2
2020-11-25T10:27:49.000Z
2022-01-25T07:18:41.000Z
setup.py
jayclassless/coverage_python_version
a5fc967455beb666d0cc2be23f59df230bc82f4c
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages setup( name='coverage_python_version', version='0.2.0', description='A coverage.py plugin to facilitate exclusions based on' ' Python version', long_description=open('README.rst', 'r').read(), keywords='coverage plugin version exclude', author='Jason Simeone', author_email='jay@classless.net', license='MIT', classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development', 'Topic :: Software Development :: Testing', ], url='https://github.com/jayclassless/coverage_python_version', package_dir={'': 'src'}, packages=find_packages('src'), zip_safe=True, include_package_data=True, install_requires=[ 'coverage>=4.5,<6', ], )
33.846154
72
0.624242
136
1,320
5.963235
0.544118
0.210851
0.277435
0.160296
0.066584
0
0
0
0
0
0
0.017734
0.231061
1,320
38
73
34.736842
0.781281
0
0
0.057143
0
0
0.571645
0.017437
0
0
0
0
0
1
0
true
0
0.028571
0
0.028571
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
f872a017476740479ee2c7f7ad5a39b95832f940
122
py
Python
apps/reversedns/forms.py
jawr/kontrolvm
74bfd8af3f2da173ddf2c8f77e79ff8d6b83e032
[ "MIT" ]
2
2016-09-24T17:38:29.000Z
2016-12-31T13:35:31.000Z
apps/reversedns/forms.py
jawr/kontrolvm
74bfd8af3f2da173ddf2c8f77e79ff8d6b83e032
[ "MIT" ]
2
2020-04-10T02:09:18.000Z
2020-04-10T02:09:24.000Z
apps/reversedns/forms.py
jawr/kontrolvm
74bfd8af3f2da173ddf2c8f77e79ff8d6b83e032
[ "MIT" ]
null
null
null
from django import forms class ReverseDNSRequestForm(forms.Form): rdns = forms.CharField(max_length=255, label="rDNS")
24.4
54
0.786885
16
122
5.9375
0.8125
0
0
0
0
0
0
0
0
0
0
0.027523
0.106557
122
4
55
30.5
0.844037
0
0
0
0
0
0.032787
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
f8a266cbbb0bf8252c9bd86761132de780c2bc65
541
py
Python
meeting_scheduler/scheduler_api/models.py
HulewiczKamil/kpz-2021-meeting-scheduler
f17227ff8f3b8450cbbb6a8b285972054f577b94
[ "MIT" ]
3
2021-03-15T16:14:12.000Z
2021-03-15T16:15:48.000Z
meeting_scheduler/scheduler_api/models.py
HulewiczKamil/kpz-2021-meeting-scheduler
f17227ff8f3b8450cbbb6a8b285972054f577b94
[ "MIT" ]
8
2021-03-24T23:51:23.000Z
2021-04-15T18:22:41.000Z
meeting_scheduler/scheduler_api/models.py
HulewiczKamil/kpz-2021-meeting-scheduler
f17227ff8f3b8450cbbb6a8b285972054f577b94
[ "MIT" ]
1
2021-09-07T17:59:48.000Z
2021-09-07T17:59:48.000Z
import json from django.db import models # Create your models here. class Calendar(models.Model): id = models.CharField(max_length=100, primary_key=True) etag = models.CharField(max_length=100) summary = models.CharField(max_length=1000) accessRole = models.CharField(max_length=100) timeZone = models.CharField(max_length=100) description = models.CharField(max_length=1000) def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
28.473684
61
0.702403
71
541
5.183099
0.549296
0.244565
0.293478
0.391304
0.445652
0
0
0
0
0
0
0.048499
0.19963
541
18
62
30.055556
0.801386
0.044362
0
0
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0.166667
0.083333
0.916667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
f8b12f3bfb7ce05402fea34272e50bdcc1a63a12
151
py
Python
moto/timestreamwrite/__init__.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
5,460
2015-01-01T01:11:17.000Z
2022-03-31T23:45:38.000Z
moto/timestreamwrite/__init__.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
4,475
2015-01-05T19:37:30.000Z
2022-03-31T13:55:12.000Z
moto/timestreamwrite/__init__.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
1,831
2015-01-14T00:00:44.000Z
2022-03-31T20:30:04.000Z
from .models import timestreamwrite_backends from ..core.models import base_decorator mock_timestreamwrite = base_decorator(timestreamwrite_backends)
30.2
63
0.874172
17
151
7.470588
0.529412
0.188976
0
0
0
0
0
0
0
0
0
0
0.07947
151
4
64
37.75
0.913669
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
3e2585b5a4a0add556169780e41819053890efc5
180
py
Python
Recognition/transformer/test.py
MengLcool/Ac-OCR
370152cc33995f41ee79374b3f5d62e94fea09d3
[ "MIT" ]
1
2021-07-11T10:24:58.000Z
2021-07-11T10:24:58.000Z
Recognition/transformer/test.py
MengLcool/Oc-OCR
370152cc33995f41ee79374b3f5d62e94fea09d3
[ "MIT" ]
null
null
null
Recognition/transformer/test.py
MengLcool/Oc-OCR
370152cc33995f41ee79374b3f5d62e94fea09d3
[ "MIT" ]
null
null
null
import sys import os name = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) print(name) sys.path.append(name) import HyperParameters as hp print(hp.EPOCH)
16.363636
67
0.727778
28
180
4.535714
0.5
0.141732
0.204724
0.23622
0.251969
0
0
0
0
0
0
0
0.15
180
10
68
18
0.830065
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0.285714
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
3e3546e7a8813ffa85fe71914c97ab13d0e321c0
53
py
Python
mnmt/encoder/__init__.py
Lawhy/Multi-task-NMT
d8e6a957f3d6e870172f6aa92e9871769d863244
[ "MIT" ]
5
2020-12-05T14:53:33.000Z
2022-01-12T02:04:10.000Z
mnmt/encoder/__init__.py
Lawhy/Multi-task-NMT
d8e6a957f3d6e870172f6aa92e9871769d863244
[ "MIT" ]
null
null
null
mnmt/encoder/__init__.py
Lawhy/Multi-task-NMT
d8e6a957f3d6e870172f6aa92e9871769d863244
[ "MIT" ]
2
2021-01-15T02:37:55.000Z
2022-01-12T02:04:14.000Z
from mnmt.encoder.basic_encoder import BasicEncoder
26.5
52
0.867925
7
53
6.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.09434
53
1
53
53
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
3e456eacec4796619077c19329eac993a396ba6d
154
py
Python
aws_lambda_powertools/utilities/batch/exceptions.py
JRetza/aws-lambda-powertools-python
f0ae5a1dce0b54793da5a61c45e9ad2d1394bfe3
[ "MIT-0" ]
1
2021-07-11T07:14:25.000Z
2021-07-11T07:14:25.000Z
aws_lambda_powertools/utilities/batch/exceptions.py
JRetza/aws-lambda-powertools-python
f0ae5a1dce0b54793da5a61c45e9ad2d1394bfe3
[ "MIT-0" ]
null
null
null
aws_lambda_powertools/utilities/batch/exceptions.py
JRetza/aws-lambda-powertools-python
f0ae5a1dce0b54793da5a61c45e9ad2d1394bfe3
[ "MIT-0" ]
null
null
null
""" Batch processing exceptions """ class SQSBatchProcessingError(Exception): """When at least one message within a batch could not be processed"""
19.25
73
0.74026
18
154
6.333333
0.944444
0
0
0
0
0
0
0
0
0
0
0
0.162338
154
7
74
22
0.883721
0.590909
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
3e472db0c3da293b4f63e31ba9a50096eb3ec3c7
837
py
Python
lib/database/database.py
LogicJake/Proxy_IP
11adc2a98a5e6e65b6e3a24abc5377040af41c04
[ "MIT" ]
3
2018-03-23T09:26:59.000Z
2019-01-10T04:13:03.000Z
lib/database/database.py
LogicJake/proxy_pool
11adc2a98a5e6e65b6e3a24abc5377040af41c04
[ "MIT" ]
null
null
null
lib/database/database.py
LogicJake/proxy_pool
11adc2a98a5e6e65b6e3a24abc5377040af41c04
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # @Author: LogicJake # @Date: 2019-01-16 13:21:21 # @Last Modified time: 2019-01-22 19:55:42 from abc import ABCMeta, abstractmethod class DataBase(metaclass=ABCMeta): @abstractmethod def connect(self): pass @abstractmethod def close(self): pass @abstractmethod def update(self, table_name, key_values, where): pass @abstractmethod def insert(self, table_name, key_values): pass @abstractmethod def is_table_exist(self, table_name): pass @abstractmethod def select(self, table_name, column_name, where=None, limit=None, order_by=None, order='desc'): pass @abstractmethod def delete(self, table_name, where=None): pass @abstractmethod def create_required_tables(self): pass
20.414634
99
0.647551
102
837
5.186275
0.5
0.257089
0.277883
0.094518
0.083176
0
0
0
0
0
0
0.046326
0.252091
837
40
100
20.925
0.798722
0.131422
0
0.615385
0
0
0.00554
0
0
0
0
0
0
1
0.307692
false
0.307692
0.038462
0
0.384615
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
3e69e3a22f84436bfb90e8f8d4c349b29e56379e
343
py
Python
disk/exceptions.py
idin/revelio
3c67a05cb71c27af36469205e88d73ba72097da9
[ "MIT" ]
3
2019-05-04T07:34:24.000Z
2020-01-02T06:13:38.000Z
disk/exceptions.py
idin/revelio
3c67a05cb71c27af36469205e88d73ba72097da9
[ "MIT" ]
1
2020-03-02T21:27:22.000Z
2020-03-02T21:27:22.000Z
disk/exceptions.py
idin/revelio
3c67a05cb71c27af36469205e88d73ba72097da9
[ "MIT" ]
2
2019-11-01T03:23:10.000Z
2020-08-20T05:06:47.000Z
class DiskError(RuntimeError): pass class SaveError(DiskError): pass class LoadError(DiskError): pass class RenameError(DiskError): pass class PathDoesNotExistError(DiskError): pass class PathExistsError(DiskError): pass class NotAFileError(FileNotFoundError): pass class DirectoryNotFoundError(NotADirectoryError): pass
11.064516
49
0.795918
32
343
8.53125
0.40625
0.230769
0.32967
0
0
0
0
0
0
0
0
0
0.134111
343
30
50
11.433333
0.919192
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
3e73c05f4fe4ab98d6c104b536cc9fd9e7dfb098
802
py
Python
apps/auth/models/trackuser.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
349
2020-08-04T10:21:01.000Z
2022-03-23T08:31:29.000Z
apps/auth/models/trackuser.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
2
2021-01-07T06:17:05.000Z
2021-04-01T06:01:30.000Z
apps/auth/models/trackuser.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
70
2020-08-24T06:46:14.000Z
2022-03-25T13:23:27.000Z
from library.api.db import db, EntityModel class TrackUser(EntityModel): ACTIVE = 0 DISABLE = 1 nickname = db.Column(db.String(100)) wx_userid = db.Column(db.String(200)) status = db.Column(db.Integer, default=ACTIVE) email = db.Column(db.String(100)) telephone = db.Column(db.String(30)) weight = db.Column(db.Integer, default=1) track_token = db.Column(db.Text()) name = db.Column(db.String(100)) user_id = db.Column(db.Integer) class TrackUpload(EntityModel): ACTIVE = 0 DISABLE = 1 project_id = db.Column(db.Integer) user_id = db.Column(db.Integer) device_type = db.Column(db.Integer) device_typename = db.Column(db.String(200)) device_number = db.Column(db.String(500)) status = db.Column(db.Integer, default=ACTIVE)
29.703704
50
0.677057
117
802
4.57265
0.333333
0.224299
0.280374
0.209346
0.629907
0.220561
0.134579
0
0
0
0
0.038285
0.185786
802
26
51
30.846154
0.781011
0
0
0.363636
0
0
0
0
0
0
0
0
0
1
0
false
0
0.045455
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
e42cfa73397bd683e0f52120bccd8b51c69d56b6
38
py
Python
ctrl-hyper/ctrl-v.py
MTfirst/cmd-ctrl_onLinux
38a6db67796bdc8d438ca63171d9fea03e84f5f7
[ "MIT" ]
1
2020-05-02T03:46:10.000Z
2020-05-02T03:46:10.000Z
ctrl-hyper/ctrl-v.py
MTfirst/cmd-ctrl_onLinux
38a6db67796bdc8d438ca63171d9fea03e84f5f7
[ "MIT" ]
null
null
null
ctrl-hyper/ctrl-v.py
MTfirst/cmd-ctrl_onLinux
38a6db67796bdc8d438ca63171d9fea03e84f5f7
[ "MIT" ]
null
null
null
keyboard.send_keys("<ctrl>+<shift>+v")
38
38
0.710526
6
38
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0
38
1
38
38
0.684211
0
0
0
0
0
0.410256
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
e43ddcafbcfe5e580e5ca9df83cae47cdab797f5
117
py
Python
Chapter__7/unittest/cap.py
nil1729/python__noob
d82d951dc511eafa9f4315e1fdfdc749f484abf1
[ "MIT" ]
null
null
null
Chapter__7/unittest/cap.py
nil1729/python__noob
d82d951dc511eafa9f4315e1fdfdc749f484abf1
[ "MIT" ]
null
null
null
Chapter__7/unittest/cap.py
nil1729/python__noob
d82d951dc511eafa9f4315e1fdfdc749f484abf1
[ "MIT" ]
null
null
null
def cap_text(text): """ Input a String Output the capitalized String """ return text.title()
19.5
34
0.581197
14
117
4.785714
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.316239
117
6
35
19.5
0.8375
0.376068
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
e448bd123598af9e0ecb5fcd1dbe365539555c32
280
py
Python
recommender/server/views.py
abhishekpathak/recommendation-system
b91961f2baa2ab70626aaadad2f90f609c92a449
[ "MIT" ]
null
null
null
recommender/server/views.py
abhishekpathak/recommendation-system
b91961f2baa2ab70626aaadad2f90f609c92a449
[ "MIT" ]
null
null
null
recommender/server/views.py
abhishekpathak/recommendation-system
b91961f2baa2ab70626aaadad2f90f609c92a449
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from server import api from server.resources import EngineResource, EnginesResource, TaskResource api.add_resource(EngineResource, '/engines/<engine_id>') api.add_resource(EnginesResource, '/engines/') api.add_resource(TaskResource, '/tasks/<task_id>')
28
74
0.771429
33
280
6.393939
0.545455
0.085308
0.199052
0
0
0
0
0
0
0
0
0.003906
0.085714
280
9
75
31.111111
0.820313
0.075
0
0
0
0
0.175097
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
e47b3f30006b1125fe3fb166d95cf04ee299ccdb
38,969
py
Python
resource.py
amandashack/QDmapping
ee93dc693ebc8e6cfd378d5b69367c5293d232be
[ "MIT" ]
null
null
null
resource.py
amandashack/QDmapping
ee93dc693ebc8e6cfd378d5b69367c5293d232be
[ "MIT" ]
null
null
null
resource.py
amandashack/QDmapping
ee93dc693ebc8e6cfd378d5b69367c5293d232be
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.12.1) # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x1a\xfb\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\ \x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\ \x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x1a\x9d\x49\x44\x41\x54\x78\x9c\xed\ \x9d\x79\x60\x55\xc5\xbd\xc7\xbf\xbf\x39\x77\xcb\xbe\x2f\x20\x09\ \x4b\x80\x90\x8d\x1d\x64\xc9\x22\xa8\x54\x41\x13\xa4\x60\x5d\x4a\ \xeb\xd2\xfa\x5a\xab\xed\xb3\x8b\x55\xeb\x53\xdc\xa8\xa8\xad\xb6\ \xf6\xf5\x3d\x6b\xdd\x9e\x5a\x17\x28\x10\x5c\xaa\x42\xc5\x24\x80\ \x80\x06\x05\x92\x4b\x02\x61\x49\xc2\x96\x84\x84\xec\x77\x9f\x79\ \x7f\x84\x24\xf7\xdc\x2d\x37\xb9\x4b\xc2\xe5\x7e\xfe\xca\x9c\x7b\ \x66\x39\x99\xdf\x99\xf3\x9b\xdf\xfc\xe6\x37\x40\x90\x20\x41\x82\ \x04\x09\x12\x24\x48\x90\x20\x41\x82\x04\x09\x12\x24\x48\x90\x20\ \x41\x02\x1f\x1a\xee\x06\x8c\x14\x26\x17\x5c\x17\xaf\x80\x62\x8a\ \x04\x1a\x25\x84\x88\x00\x00\x22\xea\xb0\x40\x9c\xe1\x0a\x1c\xaa\ \xfa\xf7\xe6\xe6\xe1\x6e\xa3\x2f\xb8\x64\x05\x60\xd6\xac\xbb\x94\ \xba\xb0\xb3\xd7\x42\x50\x11\x23\xba\x0a\x40\xaa\xcb\x0c\x82\xd7\ \x82\xd8\x36\xc1\xb1\x39\x5e\x3a\xff\x49\x49\x49\x89\xd9\x3f\x2d\ \xf5\x2d\x97\x9c\x00\xe4\xe4\x2e\x8b\x11\x4c\xf9\x73\x0e\x7e\x37\ \x03\x4b\x1c\x4a\x19\x9c\xa3\x81\x08\x7f\xd1\xa8\x95\x2f\x96\x6f\ \x5b\xdf\xe6\xed\x36\xfa\x93\x4b\x46\x00\x0a\x0a\x0a\x14\xcd\x22\ \xf6\x6e\x40\x3c\x01\x20\xd2\x1b\x65\x72\x8e\x56\x62\xe2\x77\x99\ \x49\xaa\x97\xd6\xaf\x5f\x6f\xf1\x46\x99\xfe\xe6\x92\x10\x80\xa9\ \x79\xd7\x8f\xe7\x44\xef\x02\x6c\xae\xb3\x7b\x22\x42\x08\xe3\x93\ \x25\x24\xc7\x4a\x08\x55\x33\x00\x40\xb7\x81\xe3\x6c\x8b\x05\xc7\ \xcf\x5a\xd0\xa1\x13\x4e\xcb\xe7\x9c\x7f\x29\x94\x8a\x9b\x0f\x6d\ 
\xdf\x58\xeb\xfd\xd6\xfb\x96\x80\x17\x80\xac\xbc\xe5\x4b\x84\xe0\ \xef\x33\xc6\xa2\x6c\x7f\x9b\x3c\x46\x81\x65\x97\x6b\x90\x9f\xad\ \xc2\x84\x64\x05\x18\x73\x5c\x06\xe7\xc0\xb1\xb3\x66\x94\x1e\x34\ \xe0\x83\xdd\x06\xd4\x9c\xb6\xff\xfc\x73\xe0\x3c\x04\xad\xd4\x96\ \x6d\xfa\xdc\xeb\x0f\xe1\x43\x02\x5a\x00\xb2\xf2\x8a\xbe\x27\x48\ \xbc\xc9\xc0\x94\xd6\xd7\x67\x4c\x54\xe2\x67\xd7\x87\x63\x6e\xba\ \x12\x34\xc8\xff\x80\x10\xc0\x9e\x6a\x23\x5e\xdc\xdc\x89\x03\xc7\ \x6d\x05\x81\x1b\x85\xa0\x5b\x2a\xcb\x8a\xff\xe9\x61\xd3\xfd\x46\ \xc0\x0a\x40\x66\x7e\x61\x21\x38\x36\x32\xc6\xa4\xde\x6b\xa1\x6a\ \xc2\x83\x37\x85\xa3\x68\x7e\xc8\xa0\x3b\xde\x16\xce\x81\x8d\xbb\ \xf4\x58\xf7\x5e\x07\xf4\x46\x61\x7d\xdd\x2c\x31\x14\x1e\x2c\xdd\ \xfc\x2f\xcf\x6a\xf0\x0f\x01\x29\x00\x39\x79\x37\xcc\x14\x82\xef\ \x04\x23\x4d\xef\xb5\x94\x04\x09\x7f\xbd\x27\x0a\xe3\x92\x15\x2e\ \xf3\x6e\xdd\x67\xc0\xe3\x6f\x75\x80\x08\x58\xb3\x3a\x02\x8b\xa7\ \xab\x5d\xde\x7f\xf4\x8c\x19\x77\xbf\xd8\x86\xd3\xcd\xfd\x3a\x20\ \x07\xba\x19\xb7\xcc\xaf\xd8\xf1\xc1\x01\x0f\x1f\xc5\xe7\x04\x9c\ \x00\xcc\xba\x6a\x55\x94\xce\x68\xd8\xc7\xc0\x26\xf4\x5e\x1b\x9f\ \x2c\xe1\xd5\x5f\xc5\x20\x3e\xd2\xc9\x47\xde\x8a\xbc\x5f\x9e\x43\ \x6b\x17\x07\x00\xc4\x44\x10\x4a\x9f\x4b\x18\x30\x4f\x63\x2b\xc7\ \x1d\x7f\x38\x8f\xda\xc6\x7e\x21\x10\xe0\x87\xcd\x66\xcc\xae\xde\ \xb5\xa5\x63\x28\xcf\xe1\x2f\xa4\x81\x6f\xb9\xb8\x88\x19\x33\xe9\ \x05\x06\x76\x75\x6f\x3a\x3e\x92\xe1\xf5\x5f\xc7\x20\x29\xda\xf5\ \xa3\xea\x8c\x02\x5f\x6a\x4d\xd8\xb8\x53\xd7\x77\x4d\x6f\xec\xf9\ \x6c\x74\x1b\x04\xe2\xa3\x18\x94\x92\xe3\xf7\x25\x4c\x43\x28\x98\ \xaa\xc6\x47\x7b\x0d\x7d\x9f\x03\x02\xc5\x49\x8c\x42\x1a\x6b\xab\ \x3e\xf5\xc6\x73\xf9\x8a\x80\x1a\x01\xb2\x0b\x0a\x67\x43\xb0\xbd\ \xb8\xf0\x5c\x44\xc0\xcb\xff\x19\x8d\xcb\xa7\xa8\x9c\xe6\x69\xeb\ \x12\x78\xe9\xa3\x2e\x6c\x28\xd3\x41\x67\x74\x3e\xd5\x53\x2b\x81\ \xab\x67\x69\xf0\x93\xa5\x61\x18\x9b\xe4\x58\x98\x76\x54\x1a\xf1\ \xd3\x3f\xb7\x5a\x5d\xe1\x1c\x5c\xcc\x18\xc9\x9f\x82\x80\x1a\x01\ 
\x12\x52\x32\x5e\x26\xc2\xe4\xde\xf4\xca\x5c\x0d\x6e\xbd\x32\xd4\ \xe9\xfd\xda\x5a\x33\x6e\x7b\xee\x3c\xbe\x3c\x64\x84\x79\x00\x33\ \x8e\x85\x03\x87\x4f\x9a\xb1\xbe\x4c\x87\xd1\xf1\x12\xd2\xc7\xd8\ \xeb\x12\xa9\x89\x12\x4e\x9d\xe3\xa8\x3e\xd9\x3b\x3b\x20\x82\xa0\ \xa4\xc6\xba\xea\xf7\x87\xf4\x40\x7e\x20\x60\x46\x80\xec\xdc\xeb\ \xa7\x82\x49\xfb\x7b\xd3\x6a\x15\xe1\xb3\xa7\xe2\x10\xeb\xe4\xbb\ \x7f\xfc\xac\x05\xb7\x3c\xdd\x82\x4e\x17\x06\x1e\x67\x30\x06\xbc\ \x72\x5f\x0c\x66\x4f\x56\xda\xfd\xd6\xd4\xc6\x71\xcd\x43\xcd\x30\ \x9a\xad\xca\x25\x96\x51\x51\xb2\xb1\x6a\xd0\x15\xf9\x81\x81\xb5\ \xa2\x8b\x06\x76\x87\x75\x6a\xc5\x02\x8d\xd3\xce\x17\x02\x78\xf8\ \xf5\x76\x59\xe7\x33\x06\xe4\x65\xdb\x6b\xfc\x3f\xb8\x2a\x14\x93\ \x6d\xde\x76\xce\x81\x17\x36\x75\x3a\x2c\x3b\x21\x8a\xa1\x70\xbe\ \x46\x76\x4d\x70\x7e\x9b\x5b\x8f\x30\x0c\x04\x88\x00\x3c\xca\x38\ \x13\x37\x5b\x5f\xb9\x21\x57\xe3\xec\x66\x7c\x7d\xc4\x88\x03\xc7\ \x4d\x7d\xe9\xa8\x30\xc2\xfb\x0f\xc5\xe2\xaf\xf7\xda\x19\x0b\xf1\ \x9b\x55\xe1\xd8\xf0\x70\x2c\xd6\xdd\x19\x29\xb3\x14\x56\xd5\x9b\ \xec\xee\xed\xab\x7b\xa1\xbc\x6e\x22\xdc\x82\x11\x3a\xda\x06\x84\ \x00\x64\x2d\x3c\x90\x63\xbd\xb2\x77\x59\xbc\x84\x29\x63\xec\x87\ \xe7\x5e\x76\x69\x8d\xb2\xf4\x4d\x57\x84\x22\x3d\xc5\xb9\x7d\x80\ \x08\x58\x3a\x57\x83\xa5\x73\xfb\x3b\xd6\xfa\x6f\x5b\x72\xc6\x29\ \x91\x28\x9f\x75\xa4\xe4\x2c\x28\x9c\xec\xec\xfe\xe1\xc4\xb5\x55\ \xe4\x22\x41\x30\xe4\x5a\xbf\x5e\x73\x26\xab\x5c\x5a\xfa\xda\xba\ \xe4\xdf\xfd\x38\x37\xec\x03\x00\xf0\xe4\x0f\x23\xb0\x64\x66\xcf\ \x67\xa2\x20\xc7\xb9\x81\x88\x08\x98\x3d\x59\x89\x8f\xf7\xf6\x6b\ \x96\x16\x25\xcb\x07\x50\xed\x56\x45\x7e\x24\x20\x46\x00\x46\x3c\ \xc3\x3a\x3d\xc5\xc5\xdb\x0c\x00\x63\x13\xe5\x93\x9f\x77\xb6\x77\ \x3b\x5c\xe0\xb1\x45\x62\x84\x45\xd3\xd4\x58\x34\x4d\xed\x74\xe1\ \xa8\xaf\x0d\x36\x7a\x03\x13\xc8\x70\x72\xeb\xb0\x12\x10\x02\x20\ \x84\x98\x60\x9d\x4e\x49\x70\x3d\xbb\x5d\x3a\x57\x03\x8d\xaa\x7f\ \x88\x38\x7e\xd6\x82\x1b\x1e\x6b\xc1\x75\x8f\xd8\x7b\x7d\xbd\xf6\ 
\x69\x37\x76\x56\x1a\x5d\xda\x08\x1c\x91\x6a\x6b\x2b\xe0\x48\x1b\ \x54\x01\x7e\x22\x20\x3e\x01\x44\xf2\xa5\xde\xc8\x50\xd7\xfa\x56\ \x42\x14\xc3\x63\xab\x23\xf0\xc0\xab\xed\x10\x56\xfd\x5a\xdb\x60\ \x6f\x0c\xf8\xe3\xc6\x1e\x6d\x5f\xad\x22\x2c\x9b\xa3\xc1\xdd\xd7\ \x87\x21\x29\x66\xe0\xf7\xc6\xae\x0d\xe4\x1d\x27\x14\x6f\x13\x10\ \x23\x00\x04\x64\xd6\x1e\xb5\x72\x60\x85\x7b\xe9\x5c\x0d\x5e\xbc\ \x3b\xda\xe9\x54\xd1\x16\x83\x51\x60\xe3\x4e\x1d\x96\x3f\xd6\x8c\ \x3d\x55\xce\x67\x00\x4e\xdb\x20\x44\x98\x5b\x15\xf9\x99\x80\x10\ \x00\x01\xa1\xb7\x4e\x1b\x4c\xee\x0d\xd7\x05\x53\x55\xf8\xe8\xf1\ \x58\xfc\x72\x45\x38\x26\x8e\x76\x6f\x30\xec\xd4\x09\xdc\xfb\xdf\ \xad\xa8\x6f\x72\x6d\x3a\x34\xda\xc8\x88\x00\xe9\x1d\xdf\x39\xbc\ \x04\x84\x00\x80\xd0\x60\x9d\x6c\x6a\xe3\x6e\x67\x0d\x0f\x61\xb8\ \xfd\x3b\xa1\xd8\xf4\x68\x2c\xb6\x3f\x1b\x6f\xf7\xfb\x7d\xdf\x0d\ \xc7\xaa\xfc\x10\xc4\x44\xf4\xbf\xd1\x3a\x63\xcf\xfa\x81\x2b\x9a\ \xdb\xe5\x6d\x20\x86\x46\xb7\x1b\xe5\x47\x02\x43\x00\x04\x8e\x5a\ \x27\x8f\x9e\x19\x9a\x7f\xa6\xa3\xe5\xe2\x3b\x96\x84\xe2\x91\x5b\ \x23\xf0\xc6\xaf\x63\x65\xd7\xcb\x2a\x8d\x76\xf7\x5a\x73\xa2\xc1\ \x76\x56\x21\x8e\x0d\xa9\x51\x3e\x26\x20\x04\x80\xc0\xf6\x59\xa7\ \xcb\x0f\xbb\xee\x1c\x21\x80\xe2\x5d\x7a\x3c\xfe\x76\x07\xf6\x56\ \xbb\xbe\xb7\x17\xdb\x99\x45\x6b\xa7\xeb\x51\xa6\xe2\x84\x8d\x00\ \x70\x1a\x91\x2b\x82\x17\xd5\x2c\x20\x27\x77\x59\x0c\x67\x8a\x5c\ \x21\x28\xd6\x68\xd0\x6f\xaa\xd9\xfb\x49\x3b\x00\x08\xf0\x52\xb2\ \xb2\xb4\x96\xd7\x18\xd1\xa1\x13\x88\x08\x71\xac\x0c\x6e\xd9\xad\ \xc7\xc3\x6f\xb4\x03\x00\xfe\xb9\x43\x87\x77\x1f\x8a\x41\x46\x8a\ \x73\xcb\x21\x00\xec\xa9\x92\x0b\x4a\x72\x8c\xf3\xa9\xa6\xc1\x04\ \x3b\xc1\x92\x24\xb1\xc3\x65\x05\xc3\xc4\x88\x1f\x01\x72\x72\x57\ \x4c\xc8\xce\x5b\xfe\x60\x76\xfe\xf2\xdd\x82\x29\x9b\x09\xb4\x85\ \x11\x5e\x57\x69\x34\x65\x05\x05\x05\x0a\x00\x28\x5b\x36\xbf\x5d\ \xc5\x70\xa4\x37\x8f\xc9\x0c\x7c\xb8\xc7\xb9\xce\x65\xbd\x0e\xc0\ \x39\x70\xd7\xf3\x6d\x78\xbf\x44\x87\x6e\x83\xbd\xf2\x28\x04\x50\ 
\x7a\xd0\x88\x07\x5f\x93\xef\xff\xb8\x6a\x86\x73\x4b\x60\xc9\x01\ \xbd\xbc\x2c\xce\x0f\xed\x2f\x29\x3e\x31\xf0\xd3\xfa\x1f\x9f\x2d\ \x50\x4c\xbd\xfa\xea\x30\xa1\x0f\x4f\xb7\x08\xcb\x04\xc6\x58\xaa\ \x10\x48\x24\x42\x9c\x80\x08\x21\x90\x5a\x08\x21\x08\xc2\xcc\x89\ \xda\x98\x40\x33\x40\x27\x05\x70\x8c\x31\xd2\xaa\x3a\x13\x1a\x74\ \xe1\x8d\x2b\x19\xe7\x77\x83\x58\xae\xb3\x3a\x5e\xce\xcf\xde\x97\ \x1e\x1b\x95\xc2\x18\x12\xde\xac\x3e\x89\x97\x2a\xfb\xdd\xf2\x53\ \x12\x24\x6c\x79\x2c\x0e\x0a\x07\x2f\xea\x9e\x2a\x13\x7e\xf4\xfc\ \x79\xbb\xeb\x2a\x05\xc9\x97\x71\xd1\x63\x33\xb0\x55\x2a\xa3\xc3\ \x18\x36\xad\x89\x75\xea\x62\xf6\xc3\x67\xcf\x63\x5f\x8d\x6c\x1a\ \xb0\xa6\xa2\x74\xf3\x63\xce\x9e\x63\x38\xf1\xda\x27\x60\x6a\xde\ \xf5\xe3\xcd\x4c\xba\x8a\x71\xe4\x71\xc2\x5c\x6e\xc0\x64\x90\x20\ \x46\x3d\xff\xa4\x5e\xdb\x7c\xef\x50\x4d\x44\x00\xa8\x67\x08\xea\ \xfb\x0d\x10\x42\xc0\x10\xd6\x08\x26\x00\x90\xf3\x01\x2a\x4e\xa3\ \x44\x5a\x4c\xc4\xcc\x5e\x93\xec\xd2\xb1\x49\x78\xe5\x50\x1d\xcc\ \xbc\xa7\x03\xeb\x9b\x2c\x28\xab\x30\x60\xd1\x34\xfb\x37\xf5\xf2\ \x29\x4a\x3c\xba\x3a\x12\x4f\xbc\xdd\x0e\x6e\xd5\xb7\xb6\x9d\x0f\ \xd8\xcf\x28\x34\x2a\xc2\xf3\x3f\x89\x72\xda\xf9\x3b\x2b\x8d\x36\ \x9d\xcf\xb9\x10\xf4\xaa\xd3\x07\x19\x66\x3c\x12\x80\xac\xbc\xa2\ \x14\x10\xfd\x80\x80\x9b\x39\x90\xd5\xd3\x69\xbe\xfd\xae\x8c\x8f\ \x0c\xc5\xcc\x84\x28\xac\x4c\x1b\x0d\x95\xd4\x5f\x53\x9c\x46\x89\ \xa5\xa9\x89\xd8\x72\xa2\x7f\x46\xc8\x5c\xac\x08\xad\xcc\xd5\x60\ \xd2\x68\x09\x8f\xbd\xd5\x81\x23\xa7\xdc\xdb\xe7\x39\x61\x94\x84\ \xa7\xef\x8c\x74\xaa\x2f\xe8\x8d\x02\x6b\xdf\x91\xfb\x80\x72\xe0\ \x1f\xda\xb2\xe2\x7a\xb7\x2a\x18\x06\x86\x24\x00\x53\x73\x97\x4f\ \xe6\xc4\xd7\x70\x21\x6e\x64\x44\x3e\x77\x2b\x9b\x1a\x17\x89\xab\ \x53\x13\x90\x97\x1c\x8b\xf8\x10\xe7\xfe\x7d\x77\x65\x8d\x43\x5d\ \x97\x0e\x87\x5a\x3a\xf0\x9d\xf4\x68\xe4\x65\xbb\x56\xec\xa6\x4d\ \x50\x62\xc3\xc3\xb1\xd8\xf6\x8d\x1e\x1b\xca\xf4\xd8\x53\x6d\x94\ \x8d\x08\xbd\x64\xa4\x2a\x70\x63\x41\x28\x96\xcf\xd7\x38\xfc\xa4\ 
\xf4\xb2\xee\xfd\x4e\xd4\x35\x59\xbb\x87\x73\x13\x49\x6c\xcd\x40\ \xcf\x37\x9c\x0c\x4a\x07\x58\xb5\x6a\x95\x74\xe8\xac\xf1\x21\x2e\ \xe8\x11\xc6\x9c\x0b\x4f\x98\x42\x42\x5a\x54\x18\xc6\x45\x84\x20\ \x39\x4c\x8d\x04\x8d\x1a\x91\x2a\x25\xc2\x95\x0c\x12\x63\x20\x00\ \x66\xce\xd1\x6d\xe6\x68\x33\x9a\x70\x4e\x67\xc2\xa9\x2e\x3d\x6a\ \x3b\xba\x50\xdd\xd6\x05\x83\x99\x43\xc9\x08\xcb\xc6\x25\xe1\xc6\ \xb4\xd1\x48\x8d\x08\x19\xd2\xc3\x85\x4e\x3b\x03\x65\xb2\x63\xcf\ \x1d\x47\x74\xe9\x05\xae\x7e\xa0\x19\x1d\xba\x1e\x29\x88\x08\x25\ \x14\xaf\x89\x43\x42\xd4\xc0\x63\xda\x3b\xdb\x75\x58\xfb\xae\x8d\ \x07\xb8\xc0\xda\x8a\xb2\xcd\xbf\x1b\x54\xa3\xfd\xcc\xa0\x46\x80\ \x43\x0d\xa6\x97\x40\x74\x27\xb3\x11\x1b\x15\x63\x98\x9b\x14\x83\ \x05\xc9\xd1\x98\x1e\x1f\x85\x94\x70\xcd\x85\x6f\xfc\xe0\xb1\x08\ \x81\x53\x9d\x7a\xc4\xa8\x95\x88\x50\x79\xa6\xa2\xe8\x2a\x92\xc0\ \x42\xcd\x90\x22\xdd\xb3\xc2\x86\x69\x08\x4f\xde\x16\x81\x35\x6f\ \xb5\x83\x88\xb0\xe6\xfb\x11\x6e\x75\xfe\x87\xbb\xf5\xf8\xfd\x7b\ \xf2\xce\x17\x9c\x57\x74\x4a\x31\x4f\x0c\xa9\xe1\x7e\xc4\xed\x5e\ \xca\xca\x2f\x5c\x44\x60\xb2\x8d\x8f\xd1\x6a\x05\x56\xa7\xa7\x60\ \x69\x6a\xa2\xc7\x9d\xe5\x2b\x48\x65\x41\xd8\xec\x53\x90\x22\x0c\ \x10\x02\xe0\x42\x80\x11\x79\xbc\x35\xac\x97\xea\x7a\x33\x6e\x5c\ \xdb\x22\xfb\x74\x84\x28\x24\xf1\xe0\xec\x49\xbf\x5b\xf1\xcc\xd3\ \xbf\xf7\x4e\x2d\xbe\xc3\xed\x5e\x23\xb0\x3c\xeb\x74\x66\x4c\x38\ \x9e\x5b\x98\x85\xc8\x11\xd4\xf1\x66\x2e\x50\x76\xa6\x05\x47\x5a\ \xbb\x70\xba\x5b\x87\xc6\x6e\x23\xce\x1b\x8c\xe8\xd8\x64\x41\x97\ \xc5\x0c\x93\x95\x96\x2f\x31\x42\x78\x08\x10\x11\xca\x90\x18\xc5\ \x90\x1c\x23\x61\xfc\x28\x05\x26\x24\x33\xe4\x8c\x57\x62\x54\xac\ \x7b\xaa\x4d\x65\x9d\x49\xd6\xf9\x12\x11\x9e\xb8\x7c\x0a\xcd\x4b\ \x8a\x5e\xdb\xfa\xc2\x33\xb3\xbf\x6c\x6c\xbd\xe7\xd7\x3b\x2a\x1f\ \x12\x82\xe6\x31\x12\xef\x56\x94\x16\xff\x11\xc0\xe0\x5d\x91\x7d\ \xc4\x20\x7a\x4f\x74\x59\x0f\x18\xd7\x8e\x4b\x1a\x51\x9d\x0f\x00\ \x4f\x96\x1f\xc6\xb6\xfa\x73\x6e\xdd\x6b\xe1\x02\x6d\x5d\x40\x5b\ 
\x97\x05\x27\x9b\x2c\x00\xe4\xcb\x77\xc9\x31\x12\x16\x64\xaa\xb0\ \x78\xba\x1a\xf3\x32\x54\x50\x3b\xd1\x27\xe7\xa5\x44\x21\x52\xdd\ \x8d\x76\x83\x05\x12\x11\xd6\xcc\x4d\xc7\xbc\xa4\xe8\x0b\xbf\xd2\ \x0a\xed\xb9\xf6\xc5\x04\x8a\xee\x19\x71\x68\x76\x76\x7e\xd1\xec\ \x0e\x8a\xbe\xbd\xb6\xe4\x8d\xc1\xac\x0e\x52\x56\xee\x8a\x05\x20\ \xcb\xd5\x00\xc5\x12\x70\x98\x9b\x14\xff\xd0\xee\xde\xd0\xe2\x2c\ \x43\x4e\xee\xf2\x59\x9c\x61\x1d\x81\x5b\x84\x45\xba\xbf\x72\xe7\ \xa6\xfd\x8e\xee\x73\x7b\x20\xcc\xc9\x5d\x3e\x4b\x30\x7c\xdd\x9b\ \x9e\x14\x15\x86\xbf\x2f\x9e\x06\xc9\x5b\x63\xa9\x87\x08\x21\x50\ \xb0\xf9\x4b\x70\xe1\xfd\x97\x2b\x3a\x8c\x61\xc5\x42\x0d\xbe\xb7\ \x28\x14\xa3\x63\x7b\x74\x02\xae\x57\xc0\x70\x34\x0e\xc6\x53\x91\ \x38\xd7\x6d\xc4\xd7\x4d\xad\x98\x12\x13\x81\x71\x36\x0a\xeb\x9f\ \xf6\x1f\xc3\xfa\xa3\x67\xe4\x6d\x05\x4a\x35\x2a\x65\xa1\x3b\xe1\ \x65\xd2\x17\x14\x8e\x56\x2a\xe8\x4d\x80\x16\xcb\x7e\xe0\x38\x27\ \x48\x14\x55\x96\x15\xef\x72\x90\x8d\x32\xf3\x0b\x6b\x7a\xf7\x47\ \x72\x2e\xaa\xb4\x3b\x8a\x1d\xba\xa4\xb9\x3d\x85\x6b\xac\xab\x3a\ \x9b\x98\x92\xbe\x14\x44\x97\x01\x40\x8b\xc1\x04\x13\x17\x98\x93\ \x18\x3d\x50\x56\xbf\x40\x44\xf8\xba\xa9\x0d\x0d\xdd\x06\xa7\xf7\ \x30\x22\x28\x19\x03\x11\xc0\x07\x21\x27\x7a\x93\xc0\x37\x47\x4d\ \x78\xaf\xa4\x1b\xad\x2d\x0a\xa4\x19\x46\x41\x54\x27\xc3\xd2\xde\ \xe3\x19\x1c\xaa\x94\x30\x31\x2a\x0c\xd1\x0e\x86\x89\xe4\x30\x0d\ \xb6\x9d\x6c\x82\xc1\xd2\xff\x9d\x20\x60\xac\xc9\xc2\xaf\x4e\x18\ \x95\xbd\xa1\xe9\xa4\x56\x67\x97\xe9\x02\xd3\x0a\x8a\xa2\x09\x6c\ \x17\x88\x66\xd9\x3f\x30\x42\x05\xd1\x8a\xe4\x71\x19\x6f\x37\xd6\ \x56\xb5\xdb\xfe\x9a\x94\x92\xb1\x0e\xd4\x33\xc2\x13\x51\x7c\xfc\ \xa8\xec\x17\x1d\xd5\x35\xa8\xd7\xd7\x91\x22\xf8\xe0\xac\x49\x58\ \x36\x76\x48\xb1\x96\xbc\x4e\x9b\xd1\x8c\xf5\x47\x4f\xc3\x64\x11\ \x48\x8d\x08\x41\x72\x88\x0a\xf1\x21\x6a\x44\xa9\x15\x08\x57\x28\ \xa0\xb0\x9a\xbe\x70\x01\xe8\x2d\x16\xb4\x1b\xcd\x68\xd2\x19\x70\ \xaa\x4b\x8f\xba\x0e\x3d\x0e\x9d\xef\x80\xf6\x7c\x27\xba\x4c\xce\ 
\x8d\x43\xb1\x6a\x15\x1e\x9a\x3d\x11\xf3\x92\x62\xdc\x6a\xd7\xa9\ \x2e\x3d\x7e\xb3\x4b\x8b\xba\x0e\xf9\xff\x5f\x40\xec\x53\x10\xae\ \xdc\x5f\x52\xdc\xea\x28\x5f\x56\x7e\xd1\x5a\x02\x3d\xe8\xba\x74\ \xf1\x6e\x45\x69\xf1\xcd\xb6\x57\x33\xf3\x97\xef\x67\xc0\xd4\xde\ \x34\x09\x9a\x75\xb0\x6c\xd3\x3e\xdb\xfb\x06\x3d\x7e\x67\xe6\x15\ \xbd\xc2\x88\xfa\x76\xe1\x30\x02\xd6\xcc\x49\xc7\xe2\x31\xf6\xce\ \x14\x17\x2b\x16\x21\x50\xd1\xdc\x8e\xd2\x33\xe7\xf1\x69\x6d\x23\ \x5a\x6d\xdd\x7b\x2e\xf0\xd3\xec\xb1\xb8\x75\xf2\x18\xb7\xca\x6c\ \x37\x9a\x71\xff\x2e\x2d\x2a\x5a\x6c\x77\x8b\x8b\x9d\xea\x2e\xcb\ \x92\xf2\xf2\x0f\xbb\x6d\xf3\x64\xe5\x17\x56\x13\x98\xd5\x7e\x02\ \xf1\x86\x10\xa8\x25\xa2\x47\xac\x0b\x90\x48\x4c\xb0\x5d\x6c\xca\ \xce\x2f\xfc\x14\x60\x4b\xfa\xaf\xd0\x92\x8a\xd2\x4d\x5b\x6d\xeb\ \x18\xbc\xd5\x96\xa9\x7e\x01\x40\xdb\x9b\xe4\x02\x58\xf3\xd5\x61\ \x6c\xad\x6f\x1a\x74\x51\x23\x15\x89\x08\xd3\xe2\xa3\x70\x6f\xce\ \x38\x6c\x5c\x3a\x1b\xbf\x9b\x35\x09\xc9\xa1\xf6\x6b\x0a\x6f\x54\ \x9d\x74\xbb\xcc\x48\x95\x02\xcf\xe7\x66\x61\x46\xbc\xed\xee\x23\ \x5a\x68\x08\x91\xde\x04\x1e\x95\xf5\xc5\xaa\x55\xab\x24\x02\x9b\ \xd4\x9b\xe6\x1c\x0d\xfa\x50\xc3\x7f\x54\x96\x15\x3f\x0a\xf0\x0f\ \xad\x0b\x30\x0b\xdc\x62\x57\x21\x67\x36\x4a\x26\x77\xa8\xc6\x0e\ \x5a\x00\xb4\x25\xeb\x3b\x39\x59\x8a\xc0\x79\x9f\x0f\x35\x17\x02\ \x8f\x7f\x7d\x04\x9b\x8f\x9d\x1d\x6c\x71\x23\x1e\x15\x63\xb8\x76\ \x6c\x22\xfe\xb1\x64\x26\xee\xcc\x4c\x95\xad\x2f\x24\x39\x10\x0a\ \x57\x84\x28\x24\x3c\xbb\x30\x03\x53\xe3\x6c\x1c\x84\x19\xad\xc8\ \xca\xff\xf6\x29\xeb\x4b\x95\x95\x90\x60\x35\x42\x33\xe0\x74\xcd\ \x27\x9f\xf4\x28\x38\x9c\x5e\xb1\xbe\x57\x40\x2c\x81\x2d\x36\x5e\ \xc8\x02\xc2\xa1\xae\x31\xa4\x75\x1b\x6d\xc9\x07\x35\x24\x49\xd7\ \x71\xa0\x6f\xd8\x12\x42\xe0\xb9\x6f\x8f\xe2\x15\x6d\x1d\x84\x0f\ \x34\xf1\xe1\x46\xc5\x18\x6e\x9f\x92\x82\x97\xae\x98\x8a\xf9\xc9\ \x31\x58\x38\x2a\x06\x8f\xcf\x4d\x1f\x74\x39\x1a\x49\xc2\x33\x0b\ \x32\x30\x21\x52\xee\x24\x4c\xa0\x07\xb2\xf2\x97\xaf\xe8\x4d\x6b\ 
\xb5\xeb\x8d\x9c\xa3\x4f\x37\xe0\x8c\x67\x4f\xcf\x5d\x99\x00\x00\ \x66\x35\x95\xc9\x4b\x65\x73\x6c\x46\x10\xe2\xc4\x33\xad\xef\x10\ \x5c\xee\x37\xd9\x97\x73\xd0\x4f\x70\x81\x83\x25\x9b\x76\x13\xf8\ \x75\x1c\x5c\x26\x59\xaf\x55\xd5\xe3\xa9\xf2\x1a\x18\x1d\xad\xaa\ \x04\x00\x19\x31\xe1\x78\x76\x41\x26\xd6\xcd\xcf\xc4\xf8\x48\xe7\ \xb1\x07\x5c\x11\xae\x54\xe0\xb9\x85\x19\x88\xd3\xc8\x47\x65\x02\ \x5e\x4b\x5f\x50\x38\xba\xff\x8a\xf8\xaa\xf7\x2f\x06\xa6\x34\x92\ \xe9\x61\x00\xa8\xfa\xf7\xe6\x66\x0e\xde\xd4\xff\x1b\x42\x73\x0a\ \x0e\xf4\xe5\xcb\xca\x2b\x9a\x2f\x8f\x82\xca\x8d\x5d\x52\x8c\x43\ \x9f\x44\x8f\x56\x6e\x2b\x4b\xb7\x6c\x27\xa2\xa5\xe0\x5c\xb6\xe2\ \xf2\x49\x5d\x23\x5e\xaa\xac\xf3\xa4\xe8\x80\x27\x31\x44\x8d\x27\ \x2f\x9f\x62\x6b\x47\x89\x54\x28\xe9\xca\xde\x04\x11\xde\xb3\xfe\ \x91\x11\xfd\x3c\x2b\xf7\x86\x9b\x2e\xa4\x64\xc6\x05\x6e\x46\x1c\ \xd0\x13\x03\x19\x82\x9e\x96\x55\xc6\xd9\x4e\x67\x86\x27\x8f\x97\ \xee\x2b\x4b\x8a\xbf\x10\x4c\x5a\x6c\x7b\x5d\x6b\xa7\xed\x06\xb1\ \x25\x27\x2e\x12\xf7\xe4\x8c\xeb\x4b\x73\x0e\x33\x04\xf5\x19\xdb\ \x3a\x59\xf4\xdb\x1c\x5c\xf6\xe6\x12\x13\xef\x64\xe5\x15\xbe\xc4\ \x80\x1c\xd9\x75\x32\x8b\xcc\xdc\xc2\x4c\x7d\x68\xe3\xc7\xc4\x20\ \x33\xdb\x43\xe2\x6f\x3a\x6b\x83\x57\x7c\x37\x04\x29\x0e\xd9\x5e\ \x5b\x98\x1c\xeb\xe8\xd6\x20\x36\xac\x4c\x1b\x85\x47\xe6\x4c\xc6\ \x82\x51\xd1\x15\x90\xb0\xa8\xb2\x74\x73\xdf\xff\xb2\xb6\xe4\x0d\ \x3d\x03\xfb\x3e\x38\x64\xd6\x2d\x22\x76\x17\x6c\xa7\xf0\x4c\xda\ \xcf\x18\xab\x24\xc2\x55\xd6\x97\x05\xf8\x61\x75\x67\xf2\x5b\xce\ \xea\xf7\x8a\x00\x90\x30\xfe\xc0\x3a\x1d\xa9\x52\xe0\xbb\x69\xc9\ \xde\x28\x3a\xe0\x21\x22\x2c\x49\x49\xc0\xd3\x97\x67\x64\x94\x15\ \xcd\xb3\x73\x54\xac\x28\xdd\xfc\x25\x18\x5d\x0f\xc0\xd6\xda\x37\ \x20\x1c\x5c\xc7\x84\x74\x73\x79\xf9\xdf\x9c\xee\x65\xf3\x92\x00\ \xd0\x8f\xad\xd3\x2b\xd2\x46\x21\xc4\x95\xeb\x4c\x10\x3b\x18\x63\ \x92\xc4\xd8\x5a\x47\xbf\x55\x94\x6e\xda\xca\x84\x65\x3a\x84\xf8\ \x64\x10\x45\xd6\x43\xb0\x2b\x1c\x59\xff\x64\xf5\x0e\xaa\x95\x0e\ 
\x98\x5a\x70\x43\x36\x08\xd3\xfb\x0a\x24\xc2\xf2\xf1\x49\x9e\x16\ \x7b\x89\x42\x85\xe7\xff\xb8\x6e\xa6\xa3\x5f\x0e\x94\x7d\x70\x5c\ \x00\x2f\xb8\xce\xcf\xb9\x10\x62\xb7\x10\xe2\xbe\x70\xa3\x22\x5d\ \x5b\xb6\x79\xef\x40\x35\x7a\xbc\x9e\x6b\xe1\x7c\xa5\xb5\xf7\xcf\ \xdc\xc4\x68\xc4\x6b\x06\x36\x90\xb4\x1b\xcd\xd8\xd3\x70\x1e\x20\ \xe0\xf2\xc4\x98\x11\xb7\xb4\x3c\x6c\x10\x7e\x03\xa0\xcf\xb6\xdf\ \xb3\x1a\xc8\xae\x81\xe0\xb7\x83\xc8\xce\x45\x9e\x03\xe5\x92\xa0\ \xbb\xb8\x42\xb4\x89\x06\x75\xbd\x56\xbb\xde\xbd\xad\x4e\x17\xf0\ \xfc\xbf\x4e\xb8\xde\x3a\x79\xa5\x1b\x6b\x02\xdb\xea\xcf\xe1\x99\ \x6f\x6a\xd0\x7d\x21\x38\x5f\x98\x52\x81\x07\x66\x4e\xc4\xa2\xcb\ \xe2\x3c\x6e\xce\xc5\x88\xc1\x22\xb0\xbe\xe6\x34\x4e\x75\xe9\x61\ \xe6\x96\x1b\xe7\x2c\xfe\x6e\x68\xb7\xd9\x12\x4d\xe0\x93\x00\x36\ \x0a\x80\x43\x17\x79\xc1\x51\x66\xe1\x7c\x99\xd6\x83\x70\xb4\x1e\ \x2d\xe6\x4f\xb9\x72\x79\x9c\xc2\x84\x3e\x0f\x0c\x22\xc2\x87\xcb\ \xe6\x22\xca\xc5\xdb\xfc\x75\x53\x2b\xee\xdb\xa1\xb5\xb3\x16\x32\ \x02\xfe\x94\x97\xed\xc0\x56\x1e\xf8\xbc\xb0\xff\x18\x36\xd8\xf8\ \x0c\x0c\x88\x10\x2f\x33\x4d\xf7\x7d\x07\xb6\x6e\x75\xbd\x4d\x79\ \x00\x3c\xd2\x01\x98\x89\x2f\xb4\x4e\x4f\x8a\x0a\x75\xd9\xf9\x00\ \xf0\xb7\x8a\x5a\x87\xa6\x62\x2e\x80\x97\x2a\x2e\xba\x03\x37\xbc\ \xc2\xa1\xf3\xee\x7b\x2e\x0b\xa0\x14\x1c\x57\x54\x94\x15\xdf\xe5\ \x69\xe7\x03\x43\xfc\x04\xcc\x9a\x75\x5d\xa8\x3e\x44\x35\x83\x38\ \x7f\xc5\x5a\x84\xec\x16\x39\x1c\x50\xd5\xea\xbc\xcd\x83\xf9\x47\ \x04\x12\x57\x8e\x89\x47\xa5\x13\xc3\x19\xe7\xe8\x22\x86\x72\x08\ \x6c\x05\xe1\x9f\xd6\x76\x02\x6f\xe0\xb6\x00\xa4\x2f\x28\x1c\xad\ \x90\xe8\x16\x82\x28\xd2\x09\x36\x8f\x31\x6e\x97\x77\x4a\x4c\xf8\ \x80\xe5\x5c\x16\xa6\x46\x7d\xa7\x63\x77\xb8\x94\x70\xe7\xb1\xf7\ \x02\x99\x1b\x27\x8e\x46\x7a\x74\x18\x6a\x3b\x74\x20\x02\x34\x0a\ \x09\x9d\x26\xcb\x5b\xcf\xef\x3b\xfc\x48\xc6\x28\x4d\x9d\x2f\x0f\ \xa4\x1a\x50\x00\xa6\x15\x14\x8d\xb3\x70\x7a\x9c\x0b\x7e\x0b\x11\ \x49\x00\xc1\x76\x5f\x40\x2f\xb6\x2b\x5c\x8e\x58\x9d\x3e\x06\x6b\ 
\xcb\x6b\x9c\xfc\x96\x32\x60\xfe\x40\x65\x5a\x7c\x14\xa6\x59\xe9\ \x3f\x9c\xf3\xf4\xdb\xff\xf4\x87\xe3\xbe\x0e\x2a\xe0\x52\x07\xc8\ \x2a\x28\x5a\x6d\x12\x54\x09\xc2\x6a\xeb\xa3\x57\x1c\x91\x1e\x13\ \x8e\x89\x51\x03\xaf\x8e\x5d\x9b\x9a\x88\xdb\xa6\xd8\x77\xf4\x1d\ \x19\xa9\x58\x92\x12\x38\x5e\x45\x5e\x60\x66\xd3\xba\x75\x11\xbe\ \xae\xc4\xe9\x08\x90\x5d\x50\x78\x3b\x04\xbd\xea\xe8\x65\x97\x88\ \x90\x16\x15\x86\xb4\xa8\x50\x8c\x09\x0f\xc1\xe8\x30\x35\xf2\x46\ \xc5\xba\xdc\x8c\xd9\x0b\x11\xe1\x47\x99\xa9\xc8\x1d\x1d\x8b\xed\ \x27\x7b\x7c\x4a\xae\x1c\x13\x8f\xc9\xd1\x23\x32\x88\xd6\xb0\xc1\ \x18\x93\x48\x85\x59\x00\xbe\xf0\x65\x3d\x0e\x7b\x6c\xda\xe2\x65\ \x97\x99\xcc\xd2\x11\x06\x26\xf3\x71\xce\x1d\x1d\x8b\x65\xa9\x89\ \x98\x9d\x18\x1d\x34\xf5\xfa\x10\x33\x17\x78\xea\xeb\x23\xd8\x76\ \xb2\x91\x73\x4e\x3b\x2d\x6a\xdc\xe0\xab\xb3\x8b\x1d\x8e\x00\x66\ \x93\xf2\x76\x46\xe8\xeb\xfc\x48\x95\x02\x4f\xcf\x77\xe0\xca\x14\ \xc4\x27\x94\x9d\x69\xc6\xd6\x93\x4d\x00\x88\x11\x43\x9e\x64\xc2\ \x2f\x00\x3c\x32\x50\xbe\xa1\xe0\x50\x07\x20\x82\xcc\xe4\xf8\xe3\ \xac\xb1\xc1\xce\xf7\x23\x2d\x06\xb9\x4b\x3a\x09\xee\x33\xe5\xc8\ \xa1\x00\x08\x0e\x99\xb3\xdb\xcc\x4b\xd0\x3a\x37\x9c\x74\xdb\xed\ \x49\x60\x3e\xf3\xae\x71\x3c\x02\x30\xde\xe7\x5f\xc6\x88\x30\xe6\ \x12\x9d\x9f\x0f\x17\x2d\x7a\xf9\x7a\x8e\x80\xf0\x99\xcf\xbd\x9d\ \x00\x64\x66\xae\x52\x01\xac\x2f\x0c\x47\xac\x5a\x39\x62\xf6\xff\ \x5d\x2a\xd8\x1a\xca\x84\x10\x27\x7c\x55\x97\x9d\x00\x44\x46\x0a\ \x99\x7a\xaf\x91\x82\xda\xbe\x3f\x11\x42\xa0\xa6\xcd\xce\x5c\xae\ \x75\x74\xaf\x37\xb0\x13\x00\xb5\xba\x49\xe6\x3e\x44\x14\x78\x3e\ \xfe\x23\x99\xb3\xdd\x06\x9c\xb3\xfa\x04\x70\x8e\xae\xac\x51\x6a\ \x9f\x9d\x34\x62\x27\x00\x25\x25\x25\x66\x6b\x5f\x7f\xbd\x25\x30\ \xfd\xfb\x47\x2a\x7b\x1a\x6c\xf7\x89\x8a\x5d\xbe\x5c\x0b\x70\xac\ \x04\x72\xd6\xb7\x8b\xa4\xdd\x68\x0a\xc8\x9d\x3e\x23\x95\xb2\x33\ \xf2\x98\x0f\x44\x64\xb7\xa1\xd3\x9b\x38\x9e\x06\x32\xde\xb7\x30\ \x6f\xb0\x08\xb4\x19\x7d\x26\x80\x41\xac\x38\x6f\x30\xe1\xab\x46\ 
\xf9\x08\x40\x8c\x7d\xe0\xcb\x3a\x1d\x0a\x00\x13\x72\xa5\xa3\xb6\ \xc3\x63\xbf\x83\x20\x6e\xf0\x69\x5d\xa3\x2c\xc2\x09\xe7\x38\xe8\ \xeb\x13\x47\x1d\x0a\x00\x67\xec\x1b\xeb\xb4\xb6\xe5\xd2\x74\xd4\ \xf0\x37\x5b\x4f\xca\xe3\x1b\x11\xc1\xe9\x8e\x1e\x6f\xe1\x78\x04\ \x00\x93\xed\x3e\x2d\x6f\x72\x18\xc0\x22\x88\x17\x69\x37\x9a\x71\ \xd8\xd6\x5b\x4a\x81\x8d\xbe\xae\xd7\xa1\x00\x54\x94\x6c\xac\x06\ \xd0\x17\xdf\xb6\xbc\xa9\x0d\xed\x46\xf7\xe2\xe9\x06\x19\x1a\xe5\ \x4d\x6d\x32\x65\x9b\x03\x07\x2a\xb7\x6f\x3e\xea\x22\x8b\x57\x70\ \xe6\x10\x22\xb8\x10\x9b\x7a\x13\x26\x2e\xf0\xaf\xba\x11\x79\xe4\ \x4d\xc0\x60\x1b\x61\x85\x01\x9f\xf9\xa3\x5e\xe7\x1e\x41\x0c\xaf\ \x59\x27\xdf\xae\x3e\x85\x2e\x73\x70\x36\xe0\x0b\x3a\x8c\x66\x7c\ \x79\xd6\x66\xfa\x27\xe8\x1d\x7f\xd4\xed\x54\x00\xb4\x25\xc5\xdf\ \x72\xf0\x92\xde\x74\x8b\xc1\x88\xe7\xbf\x3d\x16\xb4\x09\xf8\x80\ \x8f\x6a\x1b\x60\xe2\xb2\xe1\xbf\xfa\x60\xd9\xa6\x6f\x5c\x64\xf1\ \x1a\xae\xf7\x05\x10\x7b\xd8\x3a\xf9\x49\x5d\x23\xfe\x7c\xe0\xb8\ \xac\xb1\x41\x3c\x83\x0b\x61\xb7\x29\x84\x01\x7f\x83\x9f\xc2\xc9\ \xba\x5c\xe9\x69\xaa\xad\xaa\x4b\x1c\x3b\x65\x0c\x80\xbe\x0d\x8b\ \xda\xf3\x9d\xf8\xfc\x64\x13\x74\x66\x0e\x0e\x01\x05\x31\x84\x28\ \xd8\x90\xa3\x83\x5f\xea\x7c\x71\xba\x05\x1f\x58\x1d\x72\xc1\x81\ \x6e\x05\x89\xd5\x0d\xb5\xd5\x7e\x39\x68\x72\x40\xb7\x70\x93\x99\ \xff\x52\x29\xb1\xd9\xd6\x3b\x80\xeb\x3b\xf5\xb2\xf3\x79\x88\x08\ \xe1\x4a\x09\x61\x0a\x09\x4a\xc6\xa0\x60\x84\x6e\xb3\x05\x04\x60\ \x7a\x7c\x14\xee\x9f\x91\x26\x3b\xdd\x23\x48\x0f\x5c\x00\xaf\x57\ \xc9\x0f\x13\x61\x82\xbf\xb1\xbf\x74\x8b\xdf\xe6\xdd\x6e\xbd\xb6\ \x99\x05\xab\x92\x99\xc5\xf4\x31\x18\x66\x0c\xb5\xa2\xb2\x1b\x16\ \x04\x47\x09\x1b\xca\x4e\x37\xe3\xc1\xdd\xfd\x86\x3e\xce\xb9\x45\ \x41\x62\xd2\x81\xb2\x0f\x8e\xfb\xab\x0d\x6e\x2d\xf6\x37\xd5\x6a\ \x3b\x53\x12\x26\xbe\x69\x56\x30\x0d\x11\x9f\x0d\x0c\xfe\x98\x18\ \x23\xe7\x98\x9d\x10\x15\x14\x82\x0b\x70\x21\xf0\x5f\x7b\xab\xd1\ \x6a\xe8\x5f\x7d\x27\xd0\x5b\x07\xcb\xb6\xbc\xe6\x22\x9b\xd7\x71\ 
\xbb\x23\xcf\x9c\x39\x6c\x6a\xaa\xab\xfa\x2c\x31\x6d\xf2\xcb\x5c\ \xb0\xc3\x02\xbc\x4d\x80\x4c\xc4\xa1\x02\x61\xc0\x1d\x21\x07\x9b\ \x3b\xa0\xe7\x1c\x73\x82\x42\x00\x00\xf8\xa4\xae\x49\x76\xc0\x15\ \xe7\x30\x73\xc6\xbe\x77\xae\xf6\x90\xd3\x10\xf0\xbe\xc0\x2b\x3d\ \x31\x6b\xd6\x5d\x4a\x4b\x48\x4b\x34\x27\x43\x38\x67\x4a\x35\x09\ \x8b\x52\x80\x32\x00\x79\x98\x33\x00\xb8\x26\x35\x11\xf7\xcf\x4c\ \x83\x8a\x5d\xba\x3a\x81\xc1\xcc\x71\xd3\xd6\x72\x34\xe9\xac\x1c\ \x3f\x20\xfe\x57\x5b\x5a\xfc\x53\x7f\xb7\xc5\xa7\xaf\x62\x66\x41\ \xd1\x74\x08\xfa\x9c\x01\xb2\xb0\xda\xd9\xb1\x11\x78\x72\x5e\xba\ \x5b\x91\x44\x02\x91\x57\x0f\xd5\xe3\xd5\x43\xfd\x71\x14\x39\xb8\ \xce\x62\xc6\xc4\xea\x5d\x5b\x4e\xfb\xbb\x2d\x3e\x7d\x0d\xb5\x25\ \xc5\xdf\x92\x85\x16\x71\x70\x99\x1d\xb9\xa2\xa5\x03\xb7\xfd\xfb\ \x5b\xec\xb6\xf3\x7e\x09\x7c\x1a\x75\x06\xbc\x75\x58\x1e\x64\x9a\ \x04\x3d\x33\x1c\x9d\x0f\x0c\x42\x07\x18\x2a\x4d\xf5\x55\x0d\xc9\ \x29\x59\x1b\x39\x89\x6b\x08\xe8\xdb\xe0\xa0\xb7\x70\x7c\x56\xdf\ \x84\x76\xa3\x19\x33\xe2\xa3\x64\xb1\xfc\x03\x99\xa7\xf7\x1d\xc5\ \x51\xb9\xd3\xe7\x69\x49\xd3\x7d\x73\xc3\xb1\x63\x4e\x43\xb9\xf9\ \x12\xbf\x7c\x88\x0f\xee\xd8\x78\x4c\x49\x62\x9e\x10\xfc\x5f\xb6\ \xbf\x6d\x38\x7a\x06\xb7\x7d\xfe\x2d\x6a\x5c\x04\x8e\x08\x14\xf6\ \x36\xb4\x62\xfb\x29\xf9\x9a\xbf\x20\xf1\x80\x37\x22\x7d\x0c\x15\ \xbf\xf9\x7c\x37\xd4\x56\xeb\x9b\xea\x6e\x7a\x27\x21\xb5\xc1\x4c\ \xc4\xaf\x80\xd5\xc1\x6d\xed\x46\x33\xca\x9b\xda\xb0\x32\x6d\x94\ \xbf\x9a\xe3\x77\x8c\x16\x8e\xfb\x77\x1d\x42\xbb\xf5\xae\x1f\xc1\ \x77\x54\x96\x6e\xb9\x6f\xf8\x5a\xe5\xf7\xe3\xe3\x1f\xe3\x95\x65\ \x9b\x9f\x14\x5c\x91\x6f\xfb\xcb\x39\xfd\xa0\xa2\x9b\x5d\x74\xbc\ \x5e\x75\x12\x27\xbb\xfa\x03\xab\x73\xce\x2d\x10\xe2\x67\x18\xe6\ \x23\xe4\x86\x65\x2e\x56\xb9\x63\xe3\x4e\x2e\xe8\x1e\xeb\x6b\x99\ \x6e\x84\x97\xb9\x58\x39\xd6\xde\x85\xb7\x6d\x15\x3f\xb0\x17\x2a\ \x76\x7c\xe0\xeb\x00\x20\x03\x32\x6c\x93\x71\x82\x65\xaa\x75\x3a\ \x50\x77\x1f\x5b\x84\xc0\xda\xaf\x6b\x60\x91\x2f\xa3\xd7\x0b\x49\ 
\xb9\x66\x98\x9a\x24\x63\xf8\x04\x80\x98\x2c\xc4\x5c\x4e\x80\x0a\ \xc0\xbb\x47\x4e\xa1\xaa\x55\xee\x54\x2b\x20\x7e\xa6\x2d\x59\x3f\ \x22\x3c\x6d\x87\x25\x3e\x6b\xe6\xbc\x95\xb1\x80\x39\xab\x37\xcd\ \x08\xc8\x8a\xf5\xfc\x13\x20\x84\xc0\xbe\xa6\x36\x7c\x7e\xaa\x19\ \x97\x27\x45\x63\x5c\x44\x28\x62\xd4\x4a\x84\x2a\x25\x08\x01\x74\ \x9a\xcd\x68\xd6\x19\x71\xba\x4b\x8f\xda\x0e\x3d\x4e\x74\x74\xe3\ \x58\x7b\x17\x6a\x3b\x75\x08\x61\x12\x7e\x3b\x33\x0d\x79\xa3\xbd\ \x17\xad\xf4\x78\x7b\x37\xfe\xae\xb5\x39\x38\x43\xf0\x77\x2a\xcb\ \xb6\xf8\xd4\xd7\x7f\x30\x0c\x8b\x00\x30\xa5\x65\xbe\x75\x3a\x2d\ \x32\x0c\x61\x4a\xcf\x9b\xf2\x5e\xcd\x69\xfc\xe5\xe0\x09\x00\x40\ \xf1\xf1\xc1\x1d\x60\x65\x00\xc7\xba\x6f\x6a\xbc\x26\x00\x66\x2e\ \xf0\x64\xf9\x11\xb9\xf3\x0c\xc7\x39\x05\x54\xbf\xf0\x4a\x05\x5e\ \x62\x58\x3e\x01\x82\xe4\x11\x46\xa7\xc6\x79\x1e\x0c\xcb\x22\x04\ \xde\xad\xf1\xcc\x98\xd6\x6a\xf0\x9e\xe7\xf3\xff\x55\x9f\x44\xb5\ \x4d\xe0\x4b\x92\xe8\xee\x6f\x77\x6c\x18\x51\xe7\xeb\x0d\x8f\x0e\ \xc0\xe5\x51\xaf\x73\xe2\x3c\x8f\x40\xb2\xb7\xa1\x15\xe7\x74\x9e\ \x4d\x25\xbd\x65\x8c\x3c\x74\xbe\xd3\xce\xd1\x03\xc0\x7b\x07\x4b\ \x36\xad\xf7\x4e\x0d\xde\xc3\xef\x02\x30\xf1\x9a\x6b\xd4\x04\x31\ \xc7\xfa\xda\xd4\x78\xcf\x47\x80\x8f\x6b\xed\xdd\xd6\x39\x70\x1c\ \x40\x07\x00\x01\x70\xce\x39\x6f\x13\xe0\x87\x85\xc0\x36\x08\xfe\ \x3f\xb6\xf7\xdf\x9b\x33\xde\xe3\x76\xe8\x2d\x16\x3c\xfe\x55\xb5\ \xfc\x10\x6b\x2e\xce\x1a\xc9\x7c\x8f\xf3\x5c\xc3\x87\xdf\x75\x00\ \x4d\xb7\x66\x26\x18\xfa\x62\xce\x24\x85\xaa\x91\x18\xe2\xd9\xaa\ \x60\xab\xd1\x84\xb2\x33\xf2\x28\x6a\x82\xb3\x5c\xed\x8e\x8d\x3b\ \x9d\xe5\x29\x28\x28\x50\x34\x8b\x98\x5b\x81\xfe\x03\x16\xbd\x11\ \xa9\xfc\xcf\x07\x4e\xd8\x45\xf8\x20\x46\x77\x1c\x2e\xf9\xd0\xbd\ \x73\xed\xfd\x8c\xdf\x05\x40\x08\xb1\xd0\xda\x21\x64\x6a\xac\xe7\ \xd3\xbf\xad\xf5\xe7\x60\x96\x29\x5b\xe2\x48\xe5\x8e\x8d\x8e\x8e\ \x55\xef\xa3\x19\x51\xd3\x61\xd5\xf9\x11\x2a\x05\x26\x44\x79\x16\ \xac\xf2\x8b\x53\xcd\xd8\x62\xa3\x7c\x5e\x58\xe7\xb7\x5b\x03\x19\ 
\x29\xf8\x5f\x07\x20\x92\xcf\xff\xbd\x31\xfc\x9f\x90\x1f\x8a\x29\ \x18\x5e\xc7\x00\x26\x56\xc1\xe9\x0a\xeb\xf4\xf4\xf8\x28\x8f\x74\ \x80\xb3\xdd\x06\x3c\xbd\x4f\x1e\x03\x59\x80\x1f\x0e\xe9\xb2\xfc\ \x6a\xe8\xa5\xfa\x1e\x7f\xaf\xc1\x52\x66\x7e\x61\x03\x03\x4b\xf0\ \x5d\x15\x9c\x13\x49\x63\x0f\x96\x6c\x72\x79\xb2\x73\x66\x7e\xd1\ \x87\x0c\xb4\xcc\x67\xad\x00\x37\x49\x42\x9a\x37\xd0\xa1\x4d\xc3\ \x8d\x5f\x47\x80\xcc\x82\xeb\xd3\x7c\xdb\xf9\x00\xc0\xb6\x0e\xd4\ \xf9\x00\xc0\x40\x79\x03\xdd\xe3\x11\x1c\xbf\x1d\xe9\x9d\x0f\xf8\ \x59\x00\x94\x60\x3e\xdf\x62\x2c\x84\x70\xcf\xab\x96\xc3\x67\xcb\ \x8f\x1c\xe2\x23\xed\x8e\x2d\x03\x9c\xf0\x35\x32\xf0\xab\x00\xec\ \x2f\x29\x3e\x01\x81\x87\xac\x4f\x1d\xf7\x16\x9c\xc3\x2c\x80\xd7\ \x33\x93\x55\x1b\xdc\xcb\x21\x7e\x6c\x7d\x00\xb3\xf7\x1a\x82\xed\ \x5c\x49\x3f\xc4\x30\x2f\xf3\x06\x09\x12\x24\x48\x90\x20\x41\x82\ \x04\x09\x12\x24\x48\x90\x20\x41\x82\x04\x09\x12\x24\x48\x90\x20\ \x41\x82\x00\xc0\xff\x03\x2c\x06\xb5\x9f\x01\x64\xf4\x5a\x00\x00\ \x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x07\x85\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\ \x00\x00\x07\x4c\x49\x44\x41\x54\x78\x5e\xbd\x97\x0d\x54\x93\xd7\ \x19\xc7\x9f\xbc\x09\x84\x0f\x39\xc2\x11\x09\x7a\xac\xa0\xd4\x75\ \xe8\xc0\x39\x57\xbf\xba\xf6\x6c\x67\x3b\x67\xeb\xba\xcd\x2f\xc4\ \x5a\xb4\x9b\x3b\xda\x59\xa4\x0e\x74\x56\x40\x4a\xb5\x6b\xab\xab\ \x5d\xe9\x59\xeb\xdc\xa9\x62\x55\x8a\x48\x12\x40\x4a\x20\x35\xc0\ \x0a\x41\x41\xc0\x80\x50\x04\x12\xc0\x10\x08\x06\x12\x82\x24\x08\ \xf9\x80\x37\x77\xcf\xcd\x79\x73\x4e\x9c\x76\x07\x5a\xd6\xff\x39\ \xbf\x73\x73\xdf\x5c\xde\xe7\xb9\xf7\xf9\xb8\x01\x08\x21\x0f\xd1\ \x76\xa7\x1d\x8c\x46\x93\x7b\xae\xac\xb9\xee\x1e\xab\xaa\x94\xee\ \xb1\xf5\xab\x36\xb8\xa6\xa8\x00\xad\x56\xeb\xd7\x78\x4b\xf5\x5c\ \x79\x45\xe5\xdb\x55\xd5\xd5\x45\xf8\x6c\x58\x51\x5e\x69\xf2\x86\ \x3e\xfb\xb2\xaa\xfa\x6a\xb5\xb2\x26\xb5\xb9\xb9\x39\x0a\x00\x78\ 
\xf0\x18\x09\x60\x1a\xe2\xf1\x18\xa8\xab\xab\xf3\xe7\x0b\x7c\x7e\ \xc7\x30\x4c\x52\x4b\x6b\xdb\x33\x2c\xcb\xf2\xf8\x7c\x3e\xe0\x1c\ \x1c\x0e\x07\x05\xd7\xb9\x6d\xb8\x9d\x15\x0a\x85\x94\x8d\xb8\x6e\ \xa3\xc9\x34\x7c\x42\x56\x26\x6f\x9e\x1f\x1a\x9a\xbc\x76\xcd\xd3\ \x35\x74\xc9\xb4\x1c\xa0\x2f\x6f\x6f\x6f\x0f\xc7\xf7\x1e\x1f\x1c\ \x32\xbe\xe2\x72\x11\xf0\x11\x08\x60\x62\xc2\x06\x23\x23\x23\x60\ \x1d\xb3\xc2\xd0\x90\xb1\x7b\x78\xd8\x54\xcf\x47\x69\xd4\xea\xeb\ \x80\x8a\x88\x8c\x8c\x15\xf8\xf8\x04\x8a\xc2\x44\x6b\x44\x22\xd1\ \xb2\xb0\xf9\xf3\x41\x20\x10\xac\xea\xef\xd7\x57\x2b\xca\x2b\x3e\ \xae\xbd\x71\xfd\xd0\xb1\x63\xc7\x9c\x5f\xeb\x00\xdd\x89\x42\xa1\ \x08\xf4\x0f\x08\x3c\xda\xd1\xd9\x95\x86\x53\x98\x9a\x9a\x02\xa3\ \xc9\x04\x03\x03\x03\xc6\xee\xee\xee\xa2\x2f\xff\x5d\x21\x37\x0f\ \x0f\x5b\x01\xc0\x8e\xd8\x10\x07\xe2\x44\xc8\xcd\xba\x1b\x95\x38\ \x0a\x91\x00\x4a\xdc\xb6\xed\x71\x31\xb1\x31\xfb\x9e\x8c\x7a\x12\ \x2c\x16\x92\xf4\xec\xb3\xcf\xf9\x03\xc0\x9f\x10\xf6\x91\x1c\x40\ \xf1\xd4\x6a\xcd\x86\xab\xc5\x25\xba\x7c\x89\x94\x48\xa4\x85\xe4\ \x6f\xa7\xfe\x4e\xfe\xb8\xe7\x95\xaa\x75\xeb\x37\xbc\x06\x00\xcf\ \x20\xcb\x91\x45\x48\x18\x12\x82\x04\x21\xfe\x88\x2f\x85\xfb\x3c\ \x87\xfb\x6e\x21\xb2\x02\xf9\xd9\x6b\x7f\x4e\x19\x3a\x7b\xfe\x3c\ \xc9\x97\x4a\x49\x55\x55\xf5\x7a\x6a\xeb\xbf\x8d\x33\xb7\x54\x4d\ \x29\x79\x57\xc4\x44\x5a\x50\x44\x3e\xfa\xf8\x9f\x24\xe9\x40\x72\ \xf7\x53\x4f\x45\xef\x04\x80\xd5\xc8\x12\x24\x94\xdb\x19\xff\xfd\ \x0f\xb2\x60\x1a\x62\xb8\xf5\x4f\x04\x07\x87\xfc\x3c\xe5\xd0\x61\ \x92\x93\x7b\x99\x60\xa2\x96\x50\x67\xbd\x8d\xf3\x4b\xcb\xe4\xa7\ \xaf\xe4\x8b\xc9\x95\x7c\x09\xc9\x3c\xf6\x16\xd9\xb4\x39\xee\x0c\ \x00\xac\x45\x22\x91\xb9\xdc\xee\x78\x69\x47\x33\x21\x35\x3d\x13\ \xa8\x03\x33\x90\x0f\xb2\x38\x2e\x7e\xbb\xf8\xc4\xc9\xf7\x48\x69\ \xd9\x17\x5d\xf4\xe4\x18\x2e\xe6\x4c\x7d\x43\xc3\x01\xab\xd5\x9a\ \x68\xb3\xd9\xe1\x66\x7d\x03\xe4\x5d\xce\xdd\x71\xb5\x48\xfa\x11\ \x00\xf4\x20\xf7\x10\xeb\x91\xb4\x0c\xe7\x5f\xdf\x39\x49\xe0\x9b\ 
\x69\x12\xb1\xac\xfe\xd1\x6a\xfe\x84\xcd\x06\xa3\xa3\xa3\xb4\x64\ \x04\x0c\x0f\x75\xf3\x66\xdd\x4f\xd4\x9a\xee\x0f\xa8\xf1\xe6\xdb\ \xb7\x41\x5e\x26\x7b\xb1\x4b\xa3\x6e\x00\x80\x3e\x64\xe4\xf0\x91\ \x74\x67\x6a\x7a\x06\x8d\x92\x27\x54\x08\x01\x96\x75\xc1\x4c\x94\ \x95\x95\x35\x2f\x2a\x6a\xc9\xe6\x29\x96\x05\x59\x89\xac\xca\x13\ \x1f\x5f\xbd\xde\xf0\x09\xfd\xd0\xd2\xda\x0a\x65\xb2\x92\x78\x75\ \x67\x47\x3d\xb7\xeb\xf1\xe4\x83\x7f\x71\xd9\xd0\xe3\xf1\xf1\x71\ \x0f\x58\x86\x13\xe0\x74\x3a\x81\x3e\x9f\x81\xf8\xb1\xb1\x2b\xf7\ \x4e\x4d\xb9\x78\x63\x16\x2b\x68\x34\x6a\x89\xbb\x6a\xca\xcb\xcb\ \xe3\x2f\xe6\xe4\x90\xcc\x37\x8f\x93\xad\x71\xf1\x17\x01\x60\x29\ \xe2\x87\xf0\x3c\xf9\x51\xdf\x78\xeb\x91\x6a\x91\xcb\xe5\x20\x91\ \x48\x61\x9a\xe2\x55\x54\x54\xae\xcd\xc9\xcd\x63\xb3\x3e\xfc\x07\ \xd9\xf7\x6a\x12\x2d\xd9\x08\x77\x08\xec\x0e\xe7\xef\x6d\x13\x76\ \xe8\xef\x1f\x80\x02\xa9\xf8\x7d\x00\x30\x22\x0e\x82\x02\x4e\x8f\ \xdb\x29\x3d\x01\xec\x72\x30\x1d\x29\x95\xca\xf0\xfb\xf7\x47\xcf\ \xbb\x58\x96\xb9\x7b\xb7\x07\x54\xaa\xc6\x7d\x00\xf0\x00\x61\x19\ \x97\xcb\xb5\x48\xaf\xd7\xc3\xb0\xc9\xa4\x00\x00\x33\x62\xf3\x36\ \x4e\x21\xf0\xcd\x85\xc6\x17\xdc\x33\x0c\x56\x62\xe2\x2d\xd7\x68\ \xba\xc0\x60\x18\xfa\xbc\xb1\xa1\x5e\x49\xc3\x8b\x10\x1a\x7a\xd1\ \xe4\xa4\x13\x56\xff\xf8\x87\xd4\xa3\x09\xb4\xcd\xc2\xec\x08\x1b\ \x5a\xe7\x7a\x9d\xae\xaf\xd5\x6e\xb7\x47\x9b\x4c\x26\x8c\x7b\xa7\ \x52\x2a\xb9\x92\x01\x00\x26\xae\x73\x02\x73\xee\x5c\xb6\x6c\xc2\ \x6e\x83\x25\x4b\xa3\x36\x9d\x3e\x7d\x3a\xb4\xe6\x46\xad\x27\x39\ \x85\xbd\xbd\xbd\x34\x17\xf8\x58\x2a\x30\x13\x89\xc5\x62\xff\x6b\ \x0a\xc5\x3b\xf8\xae\x5a\x02\x10\x8a\x4e\x40\x6d\x6d\x6d\x8d\x44\ \x9c\x9f\xec\x49\x6e\xcf\xc1\x32\xbd\xbd\xda\x82\x31\xcb\x03\x20\ \xac\x8b\x89\x5c\xb2\xf4\x0c\x9a\xfa\x97\xac\x54\x3e\x28\x96\x16\ \xda\x1b\x6e\x35\xd9\x4a\x64\xf2\x41\x00\x38\x4b\xef\x86\xe9\x74\ \xbd\xd2\xd2\xb2\x3d\x0e\xe7\xa4\x4e\xdf\x37\x90\xc6\x67\x18\x8c\ \x77\x13\x54\x56\x56\xbe\x57\x58\x20\x39\x00\x00\xfd\xc8\x28\xc2\ 
\x82\x97\x9e\xd8\xb2\x75\x9b\xea\xc4\xbb\x27\x89\xfc\x5a\x39\x29\ \x28\x2c\x22\x92\xc2\x42\x72\xe1\x62\x0e\xf9\xf4\xd3\x4b\x44\x2c\ \x2d\x20\x12\xa4\xb8\x44\xd6\x99\x9d\x9d\x1d\xe4\xa9\x82\xe2\xe2\ \x62\xdc\xa9\x04\x50\xd8\x47\xea\x97\x16\x16\x16\x9f\xca\xc9\xc9\ \x35\x7e\x72\xf6\x1c\xfe\xed\x25\x72\xfc\xad\xb7\xc9\x4b\x09\x3b\ \x7b\xb0\xfd\xbe\x00\x00\xdf\x47\x42\x1e\x77\xf9\xf1\xb8\x16\x1b\ \x75\x2e\xfb\x82\x32\x38\x78\x6e\xa0\x7e\x60\x00\xba\x30\x59\x42\ \x42\xe6\x0e\x05\x05\x05\xa9\xcd\xe6\xfb\x11\xe1\x0b\xc2\x23\xc2\ \x45\x22\x88\x8c\x8c\x38\xb5\x61\xfd\xba\xb4\x96\x96\x16\x3f\xb5\ \x5a\xfd\x02\x21\xf0\x53\xbc\x25\x9f\xb7\x8c\x8d\x45\xfa\xfa\xf8\ \x00\x26\x34\xe0\xdd\x0f\x1d\x1d\x1d\x3d\x4d\x2a\xd5\x87\xed\xed\ \x6d\xb4\x9f\x0c\x72\xc9\x6d\x47\x5c\x8f\x73\x40\x80\x84\x17\x5d\ \x2d\x2e\xeb\xed\xd5\xc5\xcc\x99\x33\x07\x1a\xea\x6b\xdd\x57\x72\ \xec\xca\x55\xe0\xef\xef\x87\x9f\xf9\xb4\x75\xc2\x8a\x15\xd1\xa0\ \x46\xe7\xe8\xd1\xfa\xa0\x41\x06\x47\x6a\xd4\x6c\x36\xbb\x7f\x45\ \x69\xb5\x77\x95\x5d\x1a\x8d\xe4\xce\x9d\xb6\x46\x00\x18\x46\x46\ \xb8\x72\x9b\x84\xaf\x91\x80\x10\x32\x85\xc6\x26\x4c\x46\x13\xaf\ \xaf\x4f\x0f\xeb\xd6\xad\x19\x0c\x9e\x1b\x1c\x32\x6a\xb1\x08\xb1\ \x23\xc2\xc2\x85\x8b\x60\x79\x74\x34\x34\xb7\xdc\x86\xf0\xb0\x30\ \xa0\x5d\xcc\x62\xb5\x02\xee\x1c\xac\x16\x4b\x57\xaf\x4e\xa7\x36\ \xe3\x0f\x92\xe6\xe6\xa6\x1b\xb4\xd7\x23\xf7\xb9\x71\x1c\x71\xfe\ \xcf\x2a\xf6\x8a\x09\x9b\x7b\xf9\xb2\x78\xd9\xb2\x65\x3f\xc0\xfe\ \x1e\x1e\x13\xbb\xd2\x60\x30\xdc\x5b\x40\x63\x1d\x26\x12\xb5\x8e\ \xdb\x6c\xb1\x63\x96\x31\xb8\x90\x73\xe9\x33\x9d\x56\xdb\xa0\x46\ \xcf\xb8\x44\x72\x70\x86\x1e\x70\xd8\xb8\xa3\x9e\x9c\x6e\xfb\xa0\ \xed\x96\x1e\x37\x2d\xb7\xc5\x2f\x25\xbc\x5c\xbb\x62\x79\xf4\xbc\ \xc5\x11\x8b\xc1\xbb\x17\x19\x0c\x83\xd0\xda\xfa\x95\x39\xf7\xb3\ \x8b\xbf\xf4\x8a\x27\x8b\x4c\x71\x38\xb8\xd1\x05\x33\xd5\xa6\xcd\ \x5b\x3d\x75\x1f\x8a\x3c\xfd\x9b\xdf\x6e\xac\xd9\xf7\xea\x7e\x72\ \x34\xe3\x0d\x92\x7e\x34\x83\xa4\x1c\x3c\x4c\x7e\xf5\xfc\xaf\x3f\ 
\xa7\xdf\x21\x61\xdc\xbd\xce\x3b\xfc\x7a\x2a\xa4\x24\x1f\x84\x6f\ \xad\x5d\x2f\xff\xc1\x0d\xf7\x62\x11\xb2\x12\xf9\xc5\xce\x5d\xbb\ \xf4\x7b\xf7\xee\x21\x09\x09\x09\xe5\x38\x5f\xe5\x31\xbe\x2d\xfe\ \x45\xc8\x78\xe3\x4d\x98\x2d\x07\x18\x2f\x47\x26\xb9\xe3\xbd\x4b\ \xf1\x13\x0a\x2d\xbe\xbe\x42\x7a\xe1\xe8\x70\x3e\x80\x98\xe9\x1a\ \x2e\x64\x6e\x66\x45\x49\x49\x49\x0f\xb1\x7f\xff\x7e\x40\xcd\xc3\ \x91\x50\xb6\x6c\xd9\x92\x8a\xf3\x40\xef\x35\x89\x89\x89\x70\x24\ \x35\x1d\x52\x52\x0e\xc1\xb7\x15\x2d\x43\xf0\x16\xde\x07\xfc\xdd\ \xbb\x77\xa7\x03\x27\x95\x4a\x75\x8d\x26\x98\xf7\x3a\xba\xfb\x31\ \xeb\x28\x08\x85\xfe\xb3\x16\x02\x6f\xfc\x02\x02\x02\x68\x70\x69\ \xad\x0f\xe9\x74\x3a\x23\x97\xe1\xde\xe2\x42\x80\xcc\xa6\x03\xb8\ \x7b\x66\xc7\x8e\x1d\xdb\x81\x13\xfe\x13\x72\x06\x07\x1b\x86\x82\ \x85\x47\x85\x0e\xda\x67\xfd\x04\x78\x79\x79\x79\x5f\xe0\x35\xfc\ \x2e\xa0\x64\x32\x59\x3e\x75\x00\xbe\x43\x31\x48\x10\xf2\x3d\x24\ \x06\x99\xef\x71\xf2\xbb\x76\x22\x80\x73\xc4\x17\xfe\xcf\xfa\x0f\ \x38\xa6\xfb\xa8\x1d\x7d\xb2\x07\x00\x00\x00\x00\x49\x45\x4e\x44\ \xae\x42\x60\x82\ " qt_resource_name = b"\ \x00\x05\ \x00\x6f\xa6\x53\ \x00\x69\ \x00\x63\x00\x6f\x00\x6e\x00\x73\ \x00\x08\ \x08\xc8\x58\x67\ \x00\x73\ \x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x21\ \x0a\xf1\xf0\xc7\ \x00\x69\ \x00\x63\x00\x6f\x00\x6e\x00\x66\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x72\x00\x5f\x00\x73\x00\x65\x00\x6c\x00\x65\x00\x63\x00\x74\ \x00\x5f\x00\x6c\x00\x61\x00\x73\x00\x73\x00\x6f\x00\x5f\x00\x36\x00\x34\x00\x36\x00\x33\x00\x30\x00\x2e\x00\x70\x00\x6e\x00\x67\ \ " qt_resource_struct_v1 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\ \x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ \x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x1a\xff\ " qt_resource_struct_v2 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\ \x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ \x00\x00\x01\x68\xf9\x1f\xa3\x3d\ \x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x1a\xff\ \x00\x00\x01\x69\x02\x23\x12\x90\ " qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()
62.752013
130
0.715492
9,211
38,969
3.022907
0.03333
0.026289
0.023919
0.01767
0.037926
0.035771
0.033364
0.033364
0.031641
0.029701
0
0.310752
0.034078
38,969
620
131
62.853226
0.428974
0.003901
0
0.029801
0
0.932119
0.000026
0
0
1
0
0
0
1
0.003311
false
0
0.001656
0
0.004967
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
4
e4a886f422924beccae48ec98f62dc22d04556a1
1,007
py
Python
jcasts/podcasts/migrations/0088_auto_20211112_2045.py
danjac/jcasts
04f5ef1f536d51962c0433d092817c0153acb6af
[ "MIT" ]
13
2021-09-17T07:41:00.000Z
2022-02-10T10:00:48.000Z
jcasts/podcasts/migrations/0088_auto_20211112_2045.py
danjac/jcasts
04f5ef1f536d51962c0433d092817c0153acb6af
[ "MIT" ]
167
2021-07-17T09:41:38.000Z
2021-08-31T06:03:34.000Z
jcasts/podcasts/migrations/0088_auto_20211112_2045.py
danjac/jcasts
04f5ef1f536d51962c0433d092817c0153acb6af
[ "MIT" ]
null
null
null
# Generated by Django 3.2.9 on 2021-11-12 20:45 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("podcasts", "0087_podcast_subscribe_ping"), ] operations = [ migrations.RemoveField( model_name="podcast", name="hub", ), migrations.RemoveField( model_name="podcast", name="hub_exception", ), migrations.RemoveField( model_name="podcast", name="subscribe_ping", ), migrations.RemoveField( model_name="podcast", name="subscribe_requested", ), migrations.RemoveField( model_name="podcast", name="subscribe_secret", ), migrations.RemoveField( model_name="podcast", name="subscribe_status", ), migrations.RemoveField( model_name="podcast", name="subscribed", ), ]
23.97619
52
0.533267
82
1,007
6.365854
0.414634
0.281609
0.348659
0.402299
0.630268
0.630268
0.551724
0
0
0
0
0.029412
0.358491
1,007
41
53
24.560976
0.778638
0.044687
0
0.6
1
0
0.182292
0.028125
0
0
0
0
0
1
0
false
0
0.028571
0
0.114286
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
e4c97e09b4c144c572e32d52775cdacde48aa96c
59
py
Python
geoindex/wsgi.py
openregister/geoindex
7acd4ad69f5ad868775d06007f6a46b8640f6a92
[ "MIT" ]
null
null
null
geoindex/wsgi.py
openregister/geoindex
7acd4ad69f5ad868775d06007f6a46b8640f6a92
[ "MIT" ]
null
null
null
geoindex/wsgi.py
openregister/geoindex
7acd4ad69f5ad868775d06007f6a46b8640f6a92
[ "MIT" ]
1
2021-04-11T08:30:56.000Z
2021-04-11T08:30:56.000Z
from geoindex.factory import create_app app = create_app()
19.666667
39
0.813559
9
59
5.111111
0.666667
0.391304
0
0
0
0
0
0
0
0
0
0
0.118644
59
2
40
29.5
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
e4e257d79edb3b1e620f09b5b01da63fd7f02ec1
96
py
Python
doctor_dash/apps.py
surajsjain/universal_medical_history
e6d38830af53133f2d3306438778128eaa2926bb
[ "MIT" ]
3
2021-07-14T15:32:41.000Z
2022-02-08T08:34:34.000Z
doctor_dash/apps.py
surajsjain/universal-medical-history
e6d38830af53133f2d3306438778128eaa2926bb
[ "MIT" ]
null
null
null
doctor_dash/apps.py
surajsjain/universal-medical-history
e6d38830af53133f2d3306438778128eaa2926bb
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class DoctorDashConfig(AppConfig):
    """Django application configuration for the doctor_dash app."""

    # Must match the app's package name as listed in INSTALLED_APPS.
    name = 'doctor_dash'
16
34
0.770833
11
96
6.636364
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.15625
96
5
35
19.2
0.901235
0
0
0
0
0
0.114583
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
e4f02ac4d597421566aec6f917c3bd4c26360eba
76,728
py
Python
src/experiments/experiments.py
Mrpatekful/Pytorch-MT
65c53245a9ab0bf4f5d933a239de1bfd7be79c3f
[ "MIT" ]
7
2018-02-15T10:54:57.000Z
2018-03-07T16:53:35.000Z
src/experiments/experiments.py
Mrpatekful/nmt-BMEVIAUAL01
65c53245a9ab0bf4f5d933a239de1bfd7be79c3f
[ "MIT" ]
null
null
null
src/experiments/experiments.py
Mrpatekful/nmt-BMEVIAUAL01
65c53245a9ab0bf4f5d933a239de1bfd7be79c3f
[ "MIT" ]
null
null
null
"""@package experiments """ __author__ = "Patrik Purgai" __copyright__ = "Copyright 2018, Patrik Purgai" __date__ = "23 Apr 2018" __version__ = "0.1" import numpy import logging import torch import torch.autograd import copy import tqdm from src.components.utils.utils import Classifier, Layer from src.models.models import Model from src.modules.modules import AutoEncoder, Translator, Discriminator, WordTranslator, NoiseModel from src.utils.analysis import DataLog, TextData, ScalarData from src.utils.reader import Language from src.utils.utils import Component, ModelWrapper, Policy, call, format_outputs, sentence_from_ids, UNMTPolicy, \ Interface class Experiment(Component): """ Abstract base class for the experiments. """ def train(self, epoch): raise NotImplementedError def validate(self): raise NotImplementedError def test(self): raise NotImplementedError def evaluate(self): raise NotImplementedError @property def state(self): raise NotImplementedError @state.setter def state(self, value): raise NotImplementedError class UnsupervisedTranslation(Experiment): """ Translation experiment, without parallel corpus. The method follows the main principles described in this article: https://arxiv.org/abs/1711.00043 The main goal of this experiment is to train a denoising auto-encoder, that learns to map sentences to sentences in two ways. The first way is to transform a noisy version of the source sentence to it's original form, and the second way is to transform a translated version of a sentence to it's original form. There is an additional factor during training, which is an adversarial reguralization, that learns to discriminate the hidden representations of the source and target languages. 
""" interface = Interface(**{ 'policy': (0, Policy), 'language_identifiers': (1, None), 'languages': (2, Language), 'model': (3, Model), 'initial_translator': (4, WordTranslator), 'reguralizer': (5, Classifier) }) abstract = False @staticmethod def clear_optimizers(optimizers: list): """ Convenience function for the execution of the clear function on the provided optimizer. Clear will reset the gradients of the optimized parameters. Args: optimizers: A list, containing the optimizer type objects. """ call('clear', optimizers) @staticmethod def step_optimizers(optimizers: list): """ Convenience function for the execution of the step function on the provided optimizer. Step will modify the values of the required parameters. Args: optimizers: A list, containing the optimizer type objects. """ call('step', optimizers) @staticmethod def freeze(task_components: list): """ Convenience function for the freezing the given components of the task. The frozen components won't receive any updates by the optimizers. Args: task_components: A list, containing Modules. """ call('freeze', task_components) @staticmethod def unfreeze(task_components: list): """ Convenience function for unfreezing the weights of the provided components. The optimizers will be able to modify the weights of these components. Args: task_components: A list, containing Modules. """ call('unfreeze', task_components) def __init__(self, model: Model, policy: UNMTPolicy, language_identifiers: list, languages: list, initial_translator: WordTranslator, reguralizer: Classifier = None): """ Initialization of an unsupervised translation task. The embedding and output layers for the model are created in this function as well. These modules have to be changeable during training, so their references are kept in a list, where each index corresponds to the index of language. :param model: A Model type instance, that will be used during the experiment. 
:param languages: :param policy: An UNMTPolicy object, that contains specific information about this particular task. The information is divided into three segments, the train, validation and test policy. The data contained in the segments are the following. tf_ratio: A float scalar, that determines the rate of teacher forcing during training phase. A value of 0 will prevent teacher forcing, so the model will use predictive decoding. A value of 1 will force the model to use the targets as previous outputs and a value of 0.5 will create a 50% chance of using either techniques. Default value is 1. noise: A boolean value signaling the presence of noise in the input data. The characteristics of the noise function is ... :param reguralizer: Reguralization, that will be used as an adversarial reguralizer during training. Default value is None, meaning there won't be any reguralization used during training. :raises ValueError: If the corpora was not created with a Monolingual type Corpora object an exception is raised. """ def initialize_embeddings() -> list: """ Initializer function for the embeddings of different languages. Each language uses a different embedding layer, which have to be switched during training and evaluation. """ nonlocal languages embeddings = [] for language in languages: embeddings.append(language.vocabulary.embedding) return embeddings def initialize_loss_functions() -> list: """ Initializer function for the loss functions of different languages. Each loss is a negative loss likelihood function. The difference is the padding value, that differs for the languages. """ nonlocal languages loss_functions = [] for language in languages: loss_functions.append(torch.nn.NLLLoss( ignore_index=language.vocabulary.tokens['<PAD>'], reduce=False)) return loss_functions def initialize_output_layers() -> list: """ Initializer function for the output layers of different languages. 
Each language uses a different output layer, which have to be switched during training and evaluation. """ nonlocal languages nonlocal self output_layers = [] for language in languages: output_layers.append(Layer( input_size=self._model.output_size, output_size=language.vocabulary.vocab_size, use_cuda=self._policy.cuda)) return output_layers def initialize_tokens() -> list: """ Initializer function for the tokens of the different languages. These tokens are the <EOS>, <SOS> and <UNK> tokens. The returned 'tokens' list contains their ID representation, that is retrieved from the vocabulary of the corresponding language. """ nonlocal languages tokens = [] for language in languages: tokens.append(language.vocabulary.tokens) return tokens def initialize_input_pipelines() -> tuple: """ Initializer function for the tokens of the different languages. These tokens are the <EOS>, <SOS> and <UNK> tokens. The returned 'tokens' list contains their ID representation, that is retrieved from the vocabulary of the corresponding language. 
""" nonlocal languages train_pipelines = [] validation_pipelines = [] test_pipelines = [] for language in languages: train_pipelines.append(language.input_pipelines['train']) validation_pipelines.append(language.input_pipelines['dev']) test_pipelines.append(language.input_pipelines['test']) assert all(list(map( lambda x: x.batch_size == train_pipelines[0].batch_size, train_pipelines))), \ 'Invalid batch size' return train_pipelines, validation_pipelines, test_pipelines self._policy = policy self._model = model self._reguralizer = reguralizer self.reguralize = False self._noise_model = NoiseModel(use_cuda=self._policy.cuda) self._language_identifiers = language_identifiers self._add_language_token = self._policy.add_language_token self._vocabularies = [l.vocabulary for l in languages] self._initial_translator = initial_translator self._initial_translator.vocabs = self._vocabularies self._initial_translator.cuda = self._policy.cuda self._initial_translator.language_tokens_required = \ self._add_language_token self._previous_translator = self._initial_translator # Initialization of the parameters, which will be different # for each language used in the experiment. self._tokens = initialize_tokens() self._embeddings = initialize_embeddings() self._loss_functions = initialize_loss_functions() self._output_layers = initialize_output_layers() self._train_input, self._dev_input, self._test_input = \ initialize_input_pipelines() self._discriminator_loss_function = torch.nn.CrossEntropyLoss( reduce=False) # Initialization of the model wrapper object, that will be used by # the modules, defined below. # The modules do not have full control over the model, so they use # this interface, to set the # correct look up tables for the given input. 
self._model_wrapper = ModelWrapper(self._model, self._tokens) self._model_wrapper.init_table({ 'encoder_inputs': self._embeddings, 'decoder_inputs': self._embeddings, 'decoder_outputs': self._output_layers }) self._num_languages = len(languages) # Initialization of the modules, which will be used during the # experiment. These objects (modules) # are at a higher abstraction level than the model, their # responsibility is to iterate the given # batch through the model, with the correct configuration of the # model look up tables. self._auto_encoder = AutoEncoder( # --OPTIONAL PARAMS-- cuda=self._policy.cuda, noise_model=self._noise_model, add_language_token=self._add_language_token, language_identifiers=self._language_identifiers, # --REQUIRED PARAMS-- model=self._model_wrapper, tokens=self._tokens, loss_functions=self._loss_functions, vocabularies=self._vocabularies ) self._translator = Translator( # --OPTIONAL PARAMS-- cuda=self._policy.cuda, add_language_token=self._add_language_token, language_identifiers=self._language_identifiers, # --REQUIRED PARAMS-- model=self._model_wrapper, tokens=self._tokens, loss_functions=self._loss_functions, vocabularies=self._vocabularies ) self._discriminator = Discriminator( # --OPTIONAL PARAMS-- cuda=self._policy.cuda, # --REQUIRED PARAMS-- model=self._reguralizer, loss_function=self._discriminator_loss_function, ) self._iteration = 0 self._batch_size = self._train_input[0].batch_size self._total_length = min(list(map(lambda x: x.total_length, self._train_input))) # Convenience attributes, that will help freezing and unfreezing # the parameters of the model # or the discriminator during specific phases of the training or # evaluation. 
self._auto_encoder_outputs = dict( zip(list(map(lambda x: f'auto_encoding_{str(x)}', self._model.output_types.keys())), self._model.output_types.values()) ) self._translator_outputs = dict( zip(list(map(lambda x: f'translation_{str(x)}', self._model.output_types.keys())), self._model.output_types.values()) ) self._model_optimizers = [ *self._model.optimizers, *[embedding.optimizer for embedding in self._embeddings], *[layer.optimizer for layer in self._output_layers] ] self._model_components = [ self._model, *self._embeddings, *self._output_layers ] def _format_auto_encoder_batch(self, batch: dict) -> dict: """ The special batch format, that is required by the task. This function is passed to the input_pipeline, and will be used to produce batches and targets, in a way, that is convenient for this particular task. :param batch: An unprocessed batch, that contains an <SOS> at the 0. index, <LNG> at 1. index and an <EOS> token at the -2. index. The element at the last index is the length of the sequence. :return Formatted batch: A dictionary, that contains different types of formatted inputs for the model. The batch is created from a monolingual corpora, so the only difference between the inputs and targets, are the shifting, and the tokens. inputs: A torch Variable, that is the input of the model. The <SOS> and <EOS> tokens are cut from the original input. targets: A torch Variable, which will be the target of the model. The <LNG> token is removed from the original batch. lengths: A NumPy Array, the lengths of the inputs provided to the encoder. These are required by the PaddedSequence PyTorch utility. 
""" formatted_batch = { 'inputs': torch.from_numpy(batch[:, 1: -2]), 'targets': torch.from_numpy(batch[:, : -1]), 'input_lengths': batch[:, -1] } if self._add_language_token: formatted_batch['input_lengths'] = \ formatted_batch['input_lengths'] - 1 else: formatted_batch['input_lengths'] = \ formatted_batch['input_lengths'] - 2 if self._policy.cuda: formatted_batch['targets'] = formatted_batch['targets'].cuda() formatted_batch['targets'] = torch.autograd.Variable( formatted_batch['targets']) return formatted_batch def train(self, epoch: int): raise NotImplementedError def validate(self): raise NotImplementedError def test(self): raise NotImplementedError def evaluate(self): raise NotImplementedError def _train_discriminator(self, batches, logs): """ :param batches: :param logs: """ discriminator_loss = 0 discriminator_inputs = self._create_discriminator_inputs(batches) for batch in discriminator_inputs: inputs = self._numpy_to_variable(batch[:, 0]) loss = self._discriminator(inputs=inputs, targets=batch[:, 1]) discriminator_loss += loss discriminator_loss.backward() discriminator_loss /= len(discriminator_inputs) logs.add(DataLog.TRAIN_DATA_ID, 'discriminator_loss', discriminator_loss.data) def _eval_discriminator(self, batches, logs, identifier): """ :param batches: :param logs: :param identifier: """ discriminator_loss = 0 discriminator_inputs = self._create_discriminator_inputs(batches) for index, batch in enumerate(discriminator_inputs): inputs = self._numpy_to_variable(batch[:, 0]) loss = self._discriminator(inputs=inputs, targets=batch[:, 1]) discriminator_loss += loss discriminator_loss /= len(discriminator_inputs) logs.add(identifier, 'discriminator_loss', discriminator_loss.data) def _train_auto_encoder(self, batches, logs, forced_targets=True): """ Implementation of a step of auto-encoding. The look up tables of the model are fitted to the provided inputs, and the <LNG> are substituted with the appropriate token. 
In this case the token is the source language token. The inputs are then transformed by a noise function, and then fed through the model. If reguralization is applied, the encoder outputs are fetched from the output of the model, which is used by the discriminator to apply an adversarial reguralization on these outputs. :param batches: A list, containing the batches from the input pipelines. :return loss: A scalar loss value, indicating the average loss of the auto encoder. :return outputs: A dictionary, that contains the outputs of the model. The types (keys) contained by this dictionary depends on the model specifications. """ auto_encoding_loss = 0 reguralization_loss = 0 for language_index, batch in enumerate(batches): loss, outputs, _ = self._auto_encoder(batch=batch, lang_index=language_index, forced_targets=forced_targets, denoising=self._policy.train_noise) auto_encoding_loss += loss logs[language_index].add(DataLog.TRAIN_DATA_ID, 'auto_encoding_loss', loss.data) if self._reguralizer is not None and self.reguralize: for _language_index in range(self._num_languages): if _language_index != language_index: reguralization_loss += self._reguralize(outputs['encoder_outputs'], _language_index) return auto_encoding_loss, reguralization_loss def _validate_auto_encoder(self, batches, logs, identifier, forced_targets=True): """ Implementation of a step of auto-encoding. The look up tables of the model are fitted to the provided inputs, and the <LNG> are substituted with the appropriate token. In this case the token is the source language token. The inputs are then transformed by a noise function, and then fed through the model. If reguralization is applied, the encoder outputs are fetched from the output of the model, which is used by the discriminator to apply an adversarial reguralization on these outputs. :param batches: A list, containing the batches from the input pipelines. :return loss: A scalar loss value, indicating the average loss of the auto encoder. 
:return outputs: A dictionary, that contains the outputs of the model. The types (keys) contained by this dictionary depends on the model specifications. """ auto_encoding_loss = 0 reguralization_loss = 0 for language_index, batch in enumerate(batches): loss, outputs, inputs = self._auto_encoder( batch=batch, lang_index=language_index, forced_targets=forced_targets, denoising=self._policy.validation_noise) vocabulary = self._vocabularies[language_index] outputs['input_text'] = sentence_from_ids(vocabulary=vocabulary, ids=inputs) outputs['output_text'] = sentence_from_ids(vocabulary=vocabulary, ids=outputs['symbols'][0]) auto_encoding_loss += loss logs[language_index].add(identifier, 'auto_encoding_loss', loss.data) logs[language_index].add(identifier, 'auto_encoding_text', { 'input_text': outputs['input_text'], 'target_text': sentence_from_ids( vocabulary=vocabulary, ids=batches[language_index]['targets'] .data.cpu().squeeze(0)[1:].numpy()), 'output_text': outputs['output_text'] }) for key in self._auto_encoder_outputs.keys(): logs[language_index].add( identifier, key, {key: outputs[key] for key in logs[language_index].get_required_keys(key)}) if self._reguralizer is not None and self.reguralize: for _language_index in range(self._num_languages): if _language_index != language_index: reguralization_loss += self._reguralize( outputs['encoder_outputs'], _language_index) return auto_encoding_loss, reguralization_loss def _reguralize(self, encoder_outputs, lang_index): """ This function implements the reguralization mechanism. The inputs are fed into the discriminator and evaluated based on the cross entropy loss, that is defined in the init function. The targets are either one-hot coded vectors, or their inverse. This depends on whether the loss is calculated for the discriminator or model loss. :param lang_index: An int value, that represents the index of the target language. This value will serve as the index of the substitution token for the input batch. 
:param encoder_outputs: PyTorch Variable, containing the outputs of the encoder. :return loss: A scalar loss value, indicating the average loss of the discriminator for either the inverse or normal target vector. """ targets = numpy.array([lang_index]*encoder_outputs.size(0)) loss = self._discriminator(inputs=encoder_outputs[:, -1, :], targets=targets) return loss def _create_discriminator_inputs(self, batches): """ """ batch_size = batches[0]['inputs'].size(0) concat_input = self._create_encoder_output(batches[0], lang_index=0) for index in range(1, self._num_languages): concat_input = [ *concat_input, *self._create_encoder_output(batches[index], lang_index=index) ] concat_input = numpy.array(concat_input) numpy.random.shuffle(concat_input) return numpy.array([ concat_input[index * batch_size:index * batch_size + batch_size] for index in range(len(batches))]) def _create_encoder_output(self, batch, lang_index): """ """ self._model_wrapper.set_lookup({'source': lang_index}) if self._language_identifiers is not None and self._add_language_token: inputs = self._add_random_language_token(batch['inputs'], lang_index) else: inputs = batch['inputs'] if self._policy.cuda: inputs = inputs.cuda() inputs = torch.autograd.Variable(inputs) outputs = self._model.encoder( inputs=inputs, lengths=batch['input_lengths'])['encoder_outputs']\ .data.cpu().numpy() return [(outputs[index, -1, :], lang_index) for index in range(len( outputs))] def _add_random_language_token(self, batch, lang_index): """ """ lang_tokens = numpy.random.uniform(0, self._num_languages, size=(batch.size(0))) tokens = torch.from_numpy(numpy.array([ self._vocabularies[lang_index](self._language_identifiers[int( lang_tokens[token])]) for token in range(len(lang_tokens)) ])).view(-1, 1) return torch.cat((tokens, batch), dim=1) def _numpy_to_variable(self, inputs): """ """ shaped_inputs = numpy.zeros((inputs.shape[0], inputs[0].shape[0])) for index in range(shaped_inputs.shape[0]): shaped_inputs[index, :] = 
inputs[index] shaped_inputs = torch.from_numpy(shaped_inputs).float() if self._policy.cuda: shaped_inputs = shaped_inputs.cuda() shaped_inputs = torch.autograd.Variable(shaped_inputs) return shaped_inputs @property def state(self): raise NotImplementedError @state.setter def state(self, value): raise NotImplementedError class DividedCurriculumTranslation(UnsupervisedTranslation): # TODO """ """ interface = UnsupervisedTranslation.interface abstract = False def __init__(self, policy: UNMTPolicy, model: Model, language_identifiers: list, languages: list, initial_translator: WordTranslator, reguralizer: Classifier = None): """ :param policy: :param model: :param language_identifiers: :param languages: :param initial_translator: :param reguralizer: """ super().__init__(model=model, policy=policy, language_identifiers=language_identifiers, languages=languages, initial_translator=initial_translator, reguralizer=reguralizer) def initialize_input_pipelines() -> tuple: """ Initializer function for the tokens of the different languages. These tokens are the <EOS>, <SOS> and <UNK> tokens. The returned 'tokens' list contains their ID representation, that is retrieved from the vocabulary of the corresponding language. """ nonlocal languages translated_train_pipelines = [] translated_dev_pipelines = [] for language in languages: translated_train_pipelines.append( language.input_pipelines['translated_train']) translated_dev_pipelines.append( language.input_pipelines['translated_dev']) return translated_train_pipelines, translated_dev_pipelines assert 'translated' in languages[0].input_pipelines, 'InputPipeline dictionary of the ' \ 'languages must contain \'translated\' key' self._translated_train_input, self._translated_dev_input = initialize_input_pipelines() def _format_translator_batch(self, batch: dict) -> dict: """ The special batch format, that is required by the task. 
This function is passed to the input_pipeline, and will be used to produce batches and targets, in a way, that is convenient for this particular task. Args: batch: An unprocessed batch, that contains an <SOS> at the 0. index, <LNG> at 1. index and an <EOS> token at the -2. index. The element at the last index is the length of the sequence. Returns: Formatted batch: A dictionary, that contains different types of formatted inputs for the model. The batch is created from a monolingual corpora, so the only difference between the inputs and targets, are the shifting, and the tokens. inputs: A torch Variable, that is the input of the model. The <SOS> and <EOS> tokens are cut from the original input. targets: A torch Variable, which will be the target of the model. The <LNG> token is removed from the original batch. lengths: A NumPy Array, the lengths of the inputs provided to the encoder. These are required by the PaddedSequence PyTorch utility. """ formatted_batch = { 'inputs': torch.from_numpy(batch[:, 1: -2]), 'targets': torch.from_numpy(batch[:, : -1]), 'input_lengths': batch[:, -1] - 1 } if self._policy.cuda: formatted_batch['targets'] = formatted_batch['targets'].cuda() formatted_batch['targets'] = torch.autograd.Variable(formatted_batch['targets']) return formatted_batch def train(self, epoch: int) -> dict: """ A single training iteration/epoch of the task. The method iterates through the training corpora once and updates the parameters of the model, based on the generated loss. The iteration has 2 main steps, the model and the discriminator training. During the model training, the inputs are propagated through an auto encoding, translation, and reguralization phase. The losses are calculated after each step, and summed with a specific weight. The weights are tuneable hyper parameters. 
The sum of the losses are minimized, where auto encoding and translation losses are calculated by a negative log likelihood loss, and the reguralization is calculated by a cross entropy loss. :raise RuntimeError: In case of an occurrence of NaN values a runtime exception is raised. :return total_iteration_loss: Loss of the model, including the auto encoding, translation and reguralization loss. The value is normalized, so this value represents the sum of average loss of a word after translation, :return tr_loss: Average loss of the translation phase of the model for an iteration. This value is a NumPy Array, with a dimension of (num_languages). A value at a given index corresponds to the average loss of a word prediction for the language of that index. :return ae_loss: Average loss of the auto encoding phase of the model. :return reg_loss: Average loss, created by the reguralization term, that contributes to the total model loss. :return dsc_loss: Average loss that is created by the discriminator, during its training phase. """ language_logs = [DataLog({ 'translation_loss': ScalarData, 'auto_encoding_loss': ScalarData, }) for _ in range(self._num_languages)] mutual_logs = DataLog({ 'total_loss': ScalarData, 'discriminator_loss': ScalarData, 'reguralization_loss': ScalarData }) self.reguralize = (epoch + 1) % 2 == 0 with tqdm.tqdm() as p_bar: p_bar.set_description('Translating corpora') for batches in zip(*list(map(lambda x: x.batch_generator(), self._train_input))): p_bar.update() with tqdm.tqdm() as p_bar: p_bar.set_description(f'Processing epoch {epoch}') for batches in zip(*list(map(lambda x: x.batch_generator(), [*self._train_input, *self._translated_train_input]))): p_bar.update() # Batches are generated from the InputPipeline object. In this experiment each language # has its own pipeline, with its vocabulary. The number of languages, however, may differ. # The generated 'batches' object contains the input, target, and length data for the model. 
auto_encoder_batches = list(map(self._format_auto_encoder_batch, batches[:len(batches) // 2])) translator_batches = list(map(self._format_translator_batch, batches[:len(batches) // 2])) iteration_loss = 0 total_reguralization_loss = 0 # Discriminator training or reguralization is not used by default, only if it has been explicitly # defined for the experiment. if self._reguralizer is not None and not self.reguralize: self.freeze(self._model_components) self.unfreeze([self._reguralizer]) self.clear_optimizers([self._reguralizer.optimizer]) self._train_discriminator(logs=mutual_logs, batches=auto_encoder_batches) self.step_optimizers([self._reguralizer.optimizer]) self.unfreeze(self._model_components) if self._reguralizer is not None: self.freeze([self._reguralizer]) self.clear_optimizers(self._model_optimizers) # Choosing the mode of decoding for the iteration. During predictive decoding (when teacher # forcing is not used), the embeddings of the model must be set to frozen state. forced_targets = numpy.random.random() < self._policy.train_tf_ratio if not forced_targets: self.freeze(self._embeddings) auto_encoding_loss, reguralization_loss = self._train_auto_encoder(logs=language_logs, batches=batches, forced_targets=forced_targets) iteration_loss += auto_encoding_loss iteration_loss += reguralization_loss if self._reguralizer is not None and self.reguralize: total_reguralization_loss += reguralization_loss.data translation_loss, reguralization_loss = self._train_translator(logs=language_logs, batches=batches, forced_targets=forced_targets) iteration_loss += translation_loss iteration_loss += reguralization_loss if self._reguralizer is not None and self.reguralize: total_reguralization_loss += reguralization_loss.data mutual_logs.add(DataLog.TRAIN_DATA_ID, 'total_loss', iteration_loss.data) mutual_logs.add(DataLog.TRAIN_DATA_ID, 'reguralization_loss', total_reguralization_loss) iteration_loss.backward() self.step_optimizers(self._model_optimizers) if not 
forced_targets: self.unfreeze(self._embeddings) return {**dict(zip(self._language_identifiers, language_logs)), DataLog.MUTUAL_TOKEN_ID: mutual_logs} def validate(self) -> dict: """ This function evaluates the model. Input data is propagated forward, and then the loss calculated based on the same loss function which was used during training. The weights however, are not modified in this function. :return logs: A list of DataLog type objects, that contain the logging data for the languages. The number of data logs equal to the number of languages, and each data log contains information about the produced output for the whole data set of a language. total_loss: The total loss of the iteration, which is the same as the model loss during training. The value contains the loss of translation, auto-encoding and reguralization loss. The individual error of the discriminator is not included. translation_loss: The error, that is produced by the model, when translating a sentence. auto_encoding_loss: The error, that is produced by the model, when restoring (auto-encoding) a sentence. reguralization_loss: The reguralization loss, that is produced by the discriminator. discriminator_loss: The error of the discriminator, which is the loss that is produced, when the discriminator identifies a given latent vector. translation_text: The textual representation of the input, target and output symbols at the translation phase. These texts are produced by the format outputs utility function. auto_encoding_text: The textual representation of the input, target and output symbols at the auto encoding phase. These texts are produced by the format outputs utility function. Additional outputs depend on the chosen model. 
""" language_logs = [DataLog({ 'translation_loss': ScalarData, 'auto_encoding_loss': ScalarData, 'translation_text': TextData, 'auto_encoding_text': TextData, **self._auto_encoder_outputs, **self._translator_outputs }) for _ in range(self._num_languages)] mutual_logs = DataLog({ 'total_loss': ScalarData, 'discriminator_loss': ScalarData, 'reguralization_loss': ScalarData, }) with tqdm.tqdm() as p_bar: p_bar.set_description('Translating corpora') for batches in zip(*list(map(lambda x: x.batch_generator(), self._train_input))): p_bar.update() with tqdm.tqdm() as p_bar: p_bar.set_description('Validating') for identifier, batches in enumerate(zip(*list( map(lambda x: x.batch_generator(), self._dev_input)))): p_bar.update() batches = list(map(self._format_auto_encoder_batch, batches)) iteration_loss = 0 full_reguralization_loss = 0 self.freeze(self._model_components) if self._reguralizer is not None: self.freeze([self._reguralizer]) self._eval_discriminator(logs=mutual_logs, batches=batches, identifier=identifier) auto_encoding_loss, reguralization_loss = self._validate_auto_encoder(logs=language_logs, batches=batches, identifier=identifier) iteration_loss += auto_encoding_loss iteration_loss += reguralization_loss if self._reguralizer is not None and self.reguralize: full_reguralization_loss += reguralization_loss.data translation_loss, reguralization_loss = self._validate_translator(logs=language_logs, batches=batches, identifier=identifier) iteration_loss += auto_encoding_loss iteration_loss += reguralization_loss mutual_logs.add(identifier, 'total_loss', iteration_loss.data) mutual_logs.add(identifier, 'reguralization_loss', full_reguralization_loss) self.unfreeze(self._model_components) if self._reguralizer is not None and self.reguralize: self.unfreeze([self._reguralizer]) return {**dict(zip(self._language_identifiers, language_logs)), DataLog.MUTUAL_TOKEN_ID: mutual_logs} def test(self): pass def evaluate(self): pass def _train_translator(self, batches, logs, 
forced_targets=True): """ :param batches: :param logs: :param forced_targets: :return total_translation_loss: :return total_reguralization_loss: """ total_translation_loss = 0 total_reguralization_loss = 0 translation_loss, reguralization_loss, outputs, _ = self._translate( batch=batches[0], logs=logs, input_lang_index=0, target_lang_index=1, identifier=DataLog.TRAIN_DATA_ID, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss translation_loss, reguralization_loss, outputs, _ = self._translate( batch=batches[1], logs=logs, input_lang_index=1, target_lang_index=0, identifier=DataLog.TRAIN_DATA_ID, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss return total_translation_loss, total_reguralization_loss def _validate_translator(self, batches, logs, identifier, forced_targets=False): """ :param batches: :param logs: :param identifier: :param forced_targets: :return translation_loss: :return reguralization_loss: """ total_translation_loss = 0 total_reguralization_loss = 0 translation_loss, reguralization_loss, outputs, translated_symbols = self._translate( batch=batches[0], logs=logs, input_lang_index=0, target_lang_index=1, identifier=identifier, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss source_vocabulary = self._vocabularies[0] target_vocabulary = self._vocabularies[1] outputs['input_text'] = sentence_from_ids(vocabulary=source_vocabulary, ids=translated_symbols) outputs['output_text'] = sentence_from_ids(vocabulary=target_vocabulary, ids=outputs['symbols'][0]) logs[0].add(identifier, 'translation_text', format_outputs( (source_vocabulary, translated_symbols), (target_vocabulary, batches[1]['inputs']), (target_vocabulary, outputs['symbols'][0]) ) ) logs[0].add(identifier, 'translation_text', { 'input_text': outputs['input_text'], 'target_text': 
sentence_from_ids(vocabulary=target_vocabulary, ids=batches[1]['inputs'] .data.cpu().squeeze(0)[1:].numpy()), 'output_text': outputs['output_text'] }) for key in self._translator_outputs.keys(): logs[0].add(identifier, key, {key: outputs[key] for key in logs[0].get_required_keys(key)}) translation_loss, reguralization_loss, outputs, translated_symbols = self._translate( batch=batches[1], logs=logs, input_lang_index=1, target_lang_index=0, identifier=identifier, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss source_vocabulary = self._vocabularies[1] target_vocabulary = self._vocabularies[0] outputs['input_text'] = sentence_from_ids(vocabulary=source_vocabulary, ids=translated_symbols) outputs['output_text'] = sentence_from_ids(vocabulary=target_vocabulary, ids=outputs['symbols'][0]) logs[1].add(identifier, 'translation_text', format_outputs( (source_vocabulary, translated_symbols), (target_vocabulary, batches[0]['inputs']), (target_vocabulary, outputs['symbols'][0]) ) ) for key in self._translator_outputs.keys(): logs[1].add(identifier, key, {key: outputs[key] for key in logs[1].get_required_keys(key)}) return translation_loss, reguralization_loss def _translate(self, batch, logs, input_lang_index, target_lang_index, identifier, forced_targets): """ :param batch: :param logs: :param input_lang_index: :param target_lang_index: :param identifier: :param forced_targets: :return translation_loss: :return reguralization_loss: :return outputs: :return translated_symbols: """ reguralization_loss = 0 # Loss will only be calculated by the translator, if targets, and targets_lengths are both provided. # During this step, the lengths of the targets are not provided, so loss will not be calculated. 
translation_loss, _, outputs, _, _, = self._translator( input_lang_index=input_lang_index, target_lang_index=target_lang_index, batch=batch, forced_targets=forced_targets) if self._reguralizer is not None and self.reguralize: for _language_index in range(self._num_languages): if _language_index != target_lang_index: reguralization_loss += self._reguralize(outputs['encoder_outputs'], _language_index) if logs is not None: logs[input_lang_index].add(identifier, 'translation_loss', translation_loss.data) return translation_loss, reguralization_loss, outputs, batch['inputs'] @property def state(self): """ Property for the state of the task. """ return { 'model': self._model.state, 'embeddings': [embedding.state for embedding in self._embeddings], 'output_layers': [layer.state for layer in self._output_layers], } # noinspection PyMethodOverriding @state.setter def state(self, state): """ Setter function for the state of the task, and the embeddings. """ self._model.state = state['model'] for index, embedding_state in enumerate(state['embeddings']): self._embeddings[index].state = embedding_state for index, layer_state in enumerate(state['output_layers']): self._output_layers[index].state = layer_state class MergedCurriculumTranslation(UnsupervisedTranslation): """ """ interface = UnsupervisedTranslation.interface abstract = False def __init__(self, model: Model, policy: UNMTPolicy, language_identifiers: list, languages: list, initial_translator: WordTranslator, reguralizer: Classifier = None): """ :param model: :param policy: :param language_identifiers: :param languages: :param initial_translator: :param reguralizer: """ super().__init__(model=model, policy=policy, language_identifiers=language_identifiers, languages=languages, initial_translator=initial_translator, reguralizer=reguralizer) self._previous_model = copy.deepcopy(self._model) self._previous_embeddings = copy.deepcopy(self._embeddings) self._previous_output_layers = copy.deepcopy(self._output_layers) 
self._previous_model_wrapper = ModelWrapper(self._previous_model, self._tokens) self._previous_model_wrapper.init_table({ 'encoder_inputs': self._previous_embeddings, 'decoder_inputs': self._previous_embeddings, 'decoder_outputs': self._previous_output_layers }) self._previous_model_components = [ self._previous_model, *self._previous_embeddings, *self._previous_output_layers ] def train(self, epoch: int) -> dict: """ A single training iteration/epoch of the task. The method iterates through the training corpora once and updates the parameters of the model, based on the generated loss. The iteration has 2 main steps, the model and the discriminator training. During the model training, the inputs are propagated through an auto encoding, translation, and reguralization phase. The losses are calculated after each step, and summed with a specific weight. The weights are tuneable hyper parameters. The sum of the losses are minimized, where auto encoding and translation losses are calculated by a negative log likelihood loss, and the reguralization is calculated by a cross entropy loss. :raises RuntimeError: In case of an occurrence of NaN values a runtime exception is raised. :return total_iteration_loss: Loss of the model, including the auto encoding, translation and reguralization loss. The value is normalized, so this value represents the sum of average loss of a word after translation, :return tr_loss: Average loss of the translation phase of the model for an iteration. This value is a NumPy Array, with a dimension of (num_languages). A value at a given index corresponds to the average loss of a word prediction for the language of that index. :return ae_loss: Average loss of the auto encoding phase of the model. :return reg_loss: Average loss, created by the reguralization term, that contributes to the total model loss. :return dsc_loss: Average loss that is created by the discriminator, during its training phase. 
""" language_logs = [DataLog({ 'translation_loss': ScalarData, 'auto_encoding_loss': ScalarData, }) for _ in range(self._num_languages)] mutual_logs = DataLog({ 'total_loss': ScalarData, 'discriminator_loss': ScalarData, 'reguralization_loss': ScalarData }) self._previous_model.eval() self.freeze(self._previous_model_components) self.reguralize = True with tqdm.tqdm(total=self._total_length) as p_bar: p_bar.set_description(f'Processing epoch {epoch}') for iteration, batches in enumerate(zip(*list(map(lambda x: x.batch_generator(), self._train_input)))): p_bar.update() if iteration*self._batch_size < self._iteration: continue else: self._iteration = iteration*self._batch_size # Batches are generated from the InputPipeline object. In this experiment each language # has its own pipeline, with its vocabulary. The number of languages, however, may differ. # The generated 'batches' object contains the input, target, and length data for the model. batches = list(map(self._format_auto_encoder_batch, batches)) iteration_loss = 0 total_reguralization_loss = 0 # Discriminator training or reguralization is not used by default, only if it has been explicitly # defined for the experiment. if self._reguralizer is not None: self._model.eval() self._reguralizer.train() self.freeze(self._model_components) self.unfreeze([self._reguralizer]) self.clear_optimizers([self._reguralizer.optimizer]) self._train_discriminator(logs=mutual_logs, batches=batches) self.step_optimizers([self._reguralizer.optimizer]) self.unfreeze(self._model_components) if self._reguralizer is not None: self.freeze([self._reguralizer]) self._reguralizer.eval() self.clear_optimizers(self._model_optimizers) # Choosing the mode of decoding for the iteration. During predictive decoding (when teacher # forcing is not used), the embeddings of the model must be set to frozen state. 
forced_targets = numpy.random.random() < self._policy.train_tf_ratio self._model.train() if not forced_targets: self.freeze(self._embeddings) auto_encoding_loss, reguralization_loss = self._train_auto_encoder(logs=language_logs, batches=batches, forced_targets=forced_targets) iteration_loss += auto_encoding_loss iteration_loss += reguralization_loss del auto_encoding_loss if self._reguralizer is not None and self.reguralize: total_reguralization_loss += reguralization_loss.data del reguralization_loss translation_loss, reguralization_loss = self._train_translator(logs=language_logs, batches=batches, forced_targets=forced_targets) iteration_loss += translation_loss iteration_loss += reguralization_loss del translation_loss if self._reguralizer is not None and self.reguralize: total_reguralization_loss += reguralization_loss.data del reguralization_loss mutual_logs.add(DataLog.TRAIN_DATA_ID, 'total_loss', iteration_loss.data) mutual_logs.add(DataLog.TRAIN_DATA_ID, 'reguralization_loss', total_reguralization_loss) iteration_loss.backward() del iteration_loss del total_reguralization_loss self.step_optimizers(self._model_optimizers) if not forced_targets: self.unfreeze(self._embeddings) self._iteration = 0 return {**dict(zip(self._language_identifiers, language_logs)), DataLog.MUTUAL_TOKEN_ID: mutual_logs} def validate(self) -> dict: """ This function evaluates the model. Input data is propagated forward, and then the loss calculated based on the same loss function which was used during training. The weights however, are not modified in this function. :return logs: A list of DataLog type objects, that contain the logging data for the languages. The number of data logs equal to the number of languages, and each data log contains information about the produced output for the whole data set of a language. total_loss: The total loss of the iteration, which is the same as the model loss during training. 
The value contains the loss of translation, auto-encoding and reguralization loss. The individual error of the discriminator is not included. translation_loss: The error, that is produced by the model, when translating a sentence. auto_encoding_loss: The error, that is produced by the model, when restoring (auto-encoding) a sentence. reguralization_loss: The reguralization loss, that is produced by the discriminator. discriminator_loss: The error of the discriminator, which is the loss that is produced, when the discriminator identifies a given latent vector. translation_text: The textual representation of the input, target and output symbols at the translation phase. These texts are produced by the format outputs utility function. auto_encoding_text: The textual representation of the input, target and output symbols at the auto encoding phase. These texts are produced by the format outputs utility function. Additional outputs depend on the chosen model. """ language_logs = [DataLog({ 'translation_loss': ScalarData, 'auto_encoding_loss': ScalarData, 'translation_text': TextData, 'auto_encoding_text': TextData, **self._auto_encoder_outputs, **self._translator_outputs }) for _ in range(self._num_languages)] mutual_logs = DataLog({ 'total_loss': ScalarData, 'discriminator_loss': ScalarData, 'reguralization_loss': ScalarData, }) self._model.eval() self._previous_model.eval() if self._reguralizer is not None: self._reguralizer.eval() self.reguralize = True with tqdm.tqdm() as p_bar: p_bar.set_description('Validating') for identifier, batches in enumerate(zip(*list(map(lambda x: x.batch_generator(), self._dev_input)))): p_bar.update() batches = list(map(self._format_auto_encoder_batch, batches)) iteration_loss = 0 full_reguralization_loss = 0 self.freeze(self._model_components) if self._reguralizer is not None: self.freeze([self._reguralizer]) self._eval_discriminator(logs=mutual_logs, batches=batches, identifier=identifier) auto_encoding_loss, reguralization_loss = 
self._validate_auto_encoder(logs=language_logs, batches=batches, identifier=identifier) iteration_loss += auto_encoding_loss iteration_loss += reguralization_loss if self._reguralizer is not None and self.reguralize: full_reguralization_loss += reguralization_loss.data translation_loss, reguralization_loss = self._validate_translator(logs=language_logs, batches=batches, identifier=identifier) iteration_loss += auto_encoding_loss iteration_loss += reguralization_loss mutual_logs.add(identifier, 'total_loss', iteration_loss.data) mutual_logs.add(identifier, 'reguralization_loss', full_reguralization_loss) self.unfreeze(self._model_components) if self._reguralizer is not None and self.reguralize: self.unfreeze([self._reguralizer]) self._previous_model.state = self._model.state for index, embedding_state in enumerate(self._embeddings): self._previous_embeddings[index].state = embedding_state.state for index, layer_state in enumerate(self._output_layers): self._previous_output_layers[index].state = layer_state.state self._previous_translator = Translator( # --OPTIONAL PARAMS-- cuda=self._policy.cuda, language_identifiers=self._language_identifiers, # --REQUIRED PARAMS-- model=self._previous_model_wrapper, tokens=self._tokens, add_language_token=self._add_language_token, loss_functions=self._loss_functions, vocabularies=self._vocabularies ) return {**dict(zip(self._language_identifiers, language_logs)), DataLog.MUTUAL_TOKEN_ID: mutual_logs} def test(self) -> dict: """ This function evaluates the model. Input data is propagated forward, and then the loss calculated based on the same loss function which was used during training. The weights however, are not modified in this function. :return logs: A list of DataLog type objects, that contain the logging data for the languages. The number of data logs equal to the number of languages, and each data log contains information about the produced output for the whole data set of a language. 
Additional outputs depend on the chosen model. """ language_logs = [DataLog({ 'translation_loss': ScalarData, 'translation_text': TextData, **self._translator_outputs }) for _ in range(self._num_languages)] mutual_logs = DataLog({ 'discriminator_loss': ScalarData, }) self._model.eval() self._previous_model.eval() self._reguralizer.eval() self.freeze(self._model_components) self.freeze(self._previous_model_components) self.reguralize = False with tqdm.tqdm() as p_bar: p_bar.set_description('Testing') for identifier, batches in enumerate(zip(*list(map(lambda x: x.batch_generator(), self._test_input)))): p_bar.update() batches = list(map(self._format_auto_encoder_batch, batches)) if self._reguralizer is not None: self.freeze([self._reguralizer]) self._eval_discriminator(logs=mutual_logs, batches=batches, identifier=identifier) self._validate_translator(logs=language_logs, batches=batches, identifier=identifier) self.reguralize = True return {**dict(zip(self._language_identifiers, language_logs)), DataLog.MUTUAL_TOKEN_ID: mutual_logs} def evaluate(self) -> dict: """ This function evaluates the model. Input data is propagated forward, and then the loss calculated based on the same loss function which was used during training. The weights however, are not modified in this function. :return logs: A list of DataLog type objects, that contain the logging data for the languages. The number of data logs equal to the number of languages, and each data log contains information about the produced output for the whole data set of a language. Additional outputs depend on the chosen model. 
""" language_logs = [DataLog({ 'translation_text': TextData, **self._translator_outputs }) for _ in range(self._num_languages)] self._model.eval() self._previous_model.eval() self.freeze(self._model_components) with tqdm.tqdm() as p_bar: p_bar.set_description('Inference') outputs = [] for identifier, batch in enumerate(self._test_input[0].batch_generator()): p_bar.update() batch = self._format_auto_encoder_batch(batch) input_text, output_text = self._eval_translator(batch=batch, input_lang_index=0, target_lang_index=1, logs=language_logs, identifier=identifier) outputs.append((input_text, output_text)) for identifier, batch in enumerate(self._test_input[1].batch_generator()): p_bar.update() batch = self._format_auto_encoder_batch(batch) input_text, output_text = self._eval_translator(batch=batch, input_lang_index=1, target_lang_index=0, logs=language_logs, identifier=identifier) outputs.append((input_text, output_text)) logging.info('\n\n'.join(list(map(lambda x: f'Input: {x[0]}\nOutput: {x[1]}', outputs)))) return dict(zip(self._language_identifiers, language_logs)) def _train_translator(self, batches, logs, forced_targets=True): """ :param batches: :param logs: :param forced_targets: :return total_translation_loss: :return total_reguralization_loss: """ total_translation_loss = 0 total_reguralization_loss = 0 translation_loss, reguralization_loss, outputs, _ = self._translate( batch=batches[0], logs=logs, input_lang_index=0, target_lang_index=1, identifier=DataLog.TRAIN_DATA_ID, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss translation_loss, reguralization_loss, outputs, _ = self._translate( batch=batches[1], logs=logs, input_lang_index=1, target_lang_index=0, identifier=DataLog.TRAIN_DATA_ID, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss return total_translation_loss, total_reguralization_loss def 
_validate_translator(self, batches, logs, identifier, forced_targets=False): """ :param batches: :param logs: :param identifier: :param forced_targets: :return translation_loss: :return reguralization_loss: """ total_translation_loss = 0 total_reguralization_loss = 0 translation_loss, reguralization_loss, outputs, translated_symbols = self._translate( batch=batches[0], logs=logs, input_lang_index=0, target_lang_index=1, identifier=identifier, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss source_vocabulary = self._vocabularies[1] target_vocabulary = self._vocabularies[0] outputs['input_text'] = sentence_from_ids(vocabulary=source_vocabulary, ids=translated_symbols) outputs['output_text'] = sentence_from_ids(vocabulary=target_vocabulary, ids=outputs['symbols'][0]) targets = batches[0]['inputs'].cpu().squeeze(0)[1:].numpy() targets = sentence_from_ids(vocabulary=target_vocabulary, ids=targets) logs[1].add(identifier, 'translation_text', { 'input_text': outputs['input_text'], 'target_text': targets, 'output_text': outputs['output_text'] }) for key in self._translator_outputs.keys(): logs[1].add(identifier, key, {key: outputs[key] for key in logs[1].get_required_keys(key)}) translation_loss, reguralization_loss, outputs, translated_symbols = self._translate( batch=batches[1], logs=logs, input_lang_index=1, target_lang_index=0, identifier=identifier, forced_targets=forced_targets) total_translation_loss += translation_loss total_reguralization_loss += reguralization_loss source_vocabulary = self._vocabularies[0] target_vocabulary = self._vocabularies[1] outputs['input_text'] = sentence_from_ids(vocabulary=source_vocabulary, ids=translated_symbols) outputs['output_text'] = sentence_from_ids(vocabulary=target_vocabulary, ids=outputs['symbols'][0]) targets = batches[1]['inputs'].cpu().squeeze(0)[1:].numpy() targets = sentence_from_ids(vocabulary=target_vocabulary, ids=targets) 
logs[0].add(identifier, 'translation_text', { 'input_text': outputs['input_text'], 'target_text': targets, 'output_text': outputs['output_text'] }) for key in self._translator_outputs.keys(): logs[0].add(identifier, key, {key: outputs[key] for key in logs[0].get_required_keys(key)}) return translation_loss, reguralization_loss def _eval_translator(self, batch, input_lang_index, target_lang_index, logs, identifier): """ :param batch: :param logs: :param identifier: :return translation_loss: :return reguralization_loss: """ _, translated_symbols, outputs, inputs, _, = self._translator( input_lang_index=input_lang_index, target_lang_index=target_lang_index, batch=batch, forced_targets=False) source_vocabulary = self._vocabularies[input_lang_index] target_vocabulary = self._vocabularies[target_lang_index] outputs['input_text'] = sentence_from_ids(vocabulary=source_vocabulary, ids=inputs.data.cpu().squeeze(0)) outputs['output_text'] = sentence_from_ids(vocabulary=target_vocabulary, ids=translated_symbols.squeeze(0)) logs[input_lang_index].add(identifier, 'translation_text', { 'input_text': outputs['input_text'], 'output_text': outputs['output_text'] }) for key in self._translator_outputs.keys(): logs[input_lang_index].add(identifier, key, {key: outputs[key] for key in logs[input_lang_index].get_required_keys(key)}) return ' '.join(outputs['input_text']), ' '.join(outputs['output_text']) def _translate(self, batch, logs, input_lang_index, target_lang_index, identifier, forced_targets): """ :param batch: :param logs: :param input_lang_index: :param target_lang_index: :param identifier: :param forced_targets: :return translation_loss: :return reguralization_loss: :return outputs: :return translated_symbols: """ reguralization_loss = 0 # Loss will only be calculated by the translator, if targets, and targets_lengths are both provided. # During this step, the lengths of the targets are not provided, so loss will not be calculated. 
_, translated_symbols, _, inputs, translated_lengths = self._previous_translator( input_lang_index=input_lang_index, target_lang_index=target_lang_index, batch=batch, forced_targets=False) # During 'back translation' loss can be calculated, because the lengths of the targets are known. translated_batch = { 'inputs': translated_symbols, 'input_lengths': translated_lengths, 'targets': batch['inputs'], 'target_lengths': batch['input_lengths'] } translation_loss, _, outputs, _, _, = self._translator( input_lang_index=target_lang_index, target_lang_index=input_lang_index, batch=translated_batch, forced_targets=forced_targets) if self._reguralizer is not None and self.reguralize: for _language_index in range(self._num_languages): if _language_index != target_lang_index: reguralization_loss += self._reguralize(outputs['encoder_outputs'], _language_index) logs[target_lang_index].add(identifier, 'translation_loss', translation_loss.data) translated_symbols = translated_symbols.squeeze(0).cpu().numpy() return translation_loss, reguralization_loss, outputs, translated_symbols @property def state(self): """ Property for the state of the task. """ return { 'model': self._model.state, 'iteration': self._iteration, 'previous_model': self._previous_model.state, 'previous_translator': type(self._previous_translator), 'embeddings': [embedding.state for embedding in self._embeddings], 'output_layers': [layer.state for layer in self._output_layers], 'previous_embeddings': [embedding.state for embedding in self._previous_embeddings], 'previous_output_layers': [layer.state for layer in self._previous_output_layers], } @state.setter def state(self, state): """ Setter function for the state of the task, and the embeddings. 
""" self._model.state = state['model'] self._previous_model.state = state['previous_model'] self._iteration = state['iteration'] for index, embedding_state in enumerate(state['embeddings']): self._embeddings[index].state = embedding_state for index, layer_state in enumerate(state['output_layers']): self._output_layers[index].state = layer_state for index, embedding_state in enumerate(state['previous_embeddings']): self._previous_embeddings[index].state = embedding_state for index, layer_state in enumerate(state['previous_output_layers']): self._previous_output_layers[index].state = layer_state if isinstance(state['previous_translator'], WordTranslator): self._previous_translator = self._initial_translator else: self._previous_translator = Translator( # --OPTIONAL PARAMS-- cuda=self._policy.cuda, language_identifiers=self._language_identifiers, # --REQUIRED PARAMS-- model=self._previous_model_wrapper, tokens=self._tokens, add_language_token=self._add_language_token, loss_functions=self._loss_functions, vocabularies=self._vocabularies )
37.355404
115
0.597774
7,970
76,728
5.533752
0.066374
0.043261
0.021948
0.008616
0.786527
0.759183
0.73202
0.716647
0.690686
0.68275
0
0.003602
0.334233
76,728
2,053
116
37.3736
0.859777
0.27707
0
0.65249
0
0
0.047735
0.00128
0
0
0
0.000487
0.002075
1
0.057054
false
0.002075
0.012448
0
0.110996
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
9033ea91788734f683a9cf767fe94cdf5b79e80e
393
py
Python
StreamApp/models.py
felixfaisal/StreamApp
9ba93f7af389eef3d4334f2a04dca5ab84aa59e8
[ "MIT" ]
null
null
null
StreamApp/models.py
felixfaisal/StreamApp
9ba93f7af389eef3d4334f2a04dca5ab84aa59e8
[ "MIT" ]
null
null
null
StreamApp/models.py
felixfaisal/StreamApp
9ba93f7af389eef3d4334f2a04dca5ab84aa59e8
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class VideoInfo(models.Model): Video_id = models.IntegerField() Video_Name = models.CharField(max_length=200) Video_Description = models.CharField(max_length=200) Video_Link = models.CharField(max_length=200) pub_date = models.DateTimeField('date published') def __str__(self): return self.Video_Name
26.2
56
0.737913
51
393
5.431373
0.568627
0.162455
0.194946
0.259928
0.32852
0.231047
0
0
0
0
0
0.027607
0.170483
393
14
57
28.071429
0.822086
0.061069
0
0
0
0
0.038147
0
0
0
0
0
0
1
0.111111
false
0
0.111111
0.111111
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
903654a995e9073029c31e7c3ba37662c5024318
76
py
Python
lathspell/__init__.py
tmoerman/lathspell
7bccc304cf30b535ce5e783831b71e603d2234ff
[ "BSD-3-Clause" ]
null
null
null
lathspell/__init__.py
tmoerman/lathspell
7bccc304cf30b535ce5e783831b71e603d2234ff
[ "BSD-3-Clause" ]
null
null
null
lathspell/__init__.py
tmoerman/lathspell
7bccc304cf30b535ce5e783831b71e603d2234ff
[ "BSD-3-Clause" ]
null
null
null
""" References: * Learning Topic Models -- Provably and Efficiently. """
19
56
0.671053
7
76
7.285714
1
0
0
0
0
0
0
0
0
0
0
0
0.184211
76
4
57
19
0.822581
0.894737
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
5f4246dd1935a2ff5aedacef8a42dd9af62aa8b7
2,392
py
Python
qiskit_dynamics/solvers/__init__.py
dekelmeirom/qiskit-dynamics
9ed616c0715d1ba6189ea2bc57330cecb21ef181
[ "Apache-2.0" ]
32
2021-06-15T17:59:35.000Z
2022-03-16T09:43:50.000Z
qiskit_dynamics/solvers/__init__.py
dekelmeirom/qiskit-dynamics
9ed616c0715d1ba6189ea2bc57330cecb21ef181
[ "Apache-2.0" ]
46
2021-07-22T10:58:49.000Z
2022-03-15T13:04:29.000Z
qiskit_dynamics/solvers/__init__.py
dekelmeirom/qiskit-dynamics
9ed616c0715d1ba6189ea2bc57330cecb21ef181
[ "Apache-2.0" ]
19
2021-06-21T12:23:28.000Z
2022-02-11T21:32:47.000Z
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. r""" ======================================== Solvers (:mod:`qiskit_dynamics.solvers`) ======================================== .. currentmodule:: qiskit_dynamics.solvers This module provides classes and functions for solving differential equations. The following table summarizes the solver interfaces exposed in this module. Broadly, the *solver functions* are low-level interfaces exposing numerical methods for solving particular classes of differential equations, while the *solver classes* provide high level interfaces for solving models of quantum systems. .. list-table:: Solver interfaces :widths: 10 50 :header-rows: 1 * - Object - Description * - :class:`~qiskit_dynamics.solvers.Solver` - High level solver class for both Hamiltonian and Lindblad dynamics. Automatically constructs the relevant model type based on system details, and the :meth:`~qiskit_dynamics.solvers.Solver.solve` method automatically handles ``qiskit.quantum_info`` input types. * - :func:`~qiskit_dynamics.solvers.solve_ode` - Low level solver function for ordinary differential equations: .. math:: \dot{y}(t) = f(t, y(t)), for :math:`y(t)` arrays of arbitrary shape. * - :func:`~qiskit_dynamics.solvers.solve_lmde` - Low level solver function for linear matrix differential equations in *standard form*: .. math:: \dot{y}(t) = G(t)y(t), where :math:`G(t)` is either a callable or a ``qiskit_dynamics`` model type, and :math:`y(t)` arrays of suitable shape for the matrix multiplication above. 
Solver classes ============== .. autosummary:: :toctree: ../stubs/ Solver Solver functions ================ .. autosummary:: :toctree: ../stubs/ solve_ode solve_lmde """ from .solver_functions import solve_ode, solve_lmde from .solver_classes import Solver
30.666667
97
0.68102
312
2,392
5.169872
0.480769
0.060756
0.078115
0.033478
0.119033
0.033478
0
0
0
0
0
0.007183
0.185201
2,392
77
98
31.064935
0.820421
0.948579
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
5fb8cef323d07dcccb6247984347768a5c2609c6
91
py
Python
bugtests/test244.py
jeff5/jython-whinchat
65d8e5268189f8197295ff2d91be3decb1ee0081
[ "CNRI-Jython" ]
577
2020-06-04T16:34:44.000Z
2022-03-31T11:46:07.000Z
bugtests/test244.py
jeff5/jython-whinchat
65d8e5268189f8197295ff2d91be3decb1ee0081
[ "CNRI-Jython" ]
174
2015-01-08T20:37:09.000Z
2020-06-03T16:48:59.000Z
bugtests/test244.py
jeff5/jython-whinchat
65d8e5268189f8197295ff2d91be3decb1ee0081
[ "CNRI-Jython" ]
162
2015-02-07T02:14:38.000Z
2020-05-30T16:42:03.000Z
import support support.compileJava("test244p/A.java") import test244p.A a=test244p.A()
10.111111
38
0.758242
13
91
5.307692
0.461538
0.391304
0
0
0
0
0
0
0
0
0
0.111111
0.10989
91
8
39
11.375
0.740741
0
0
0
0
0
0.166667
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
5fc2d19b7979e6a003cecb7a735f21ddeedaac8f
46
py
Python
seq2seq/__init__.py
mtran14/pytorch-seq2seq
738059377eee9be07863e33f21c7d255139c44d6
[ "Apache-2.0" ]
1,491
2017-06-30T16:15:40.000Z
2022-03-22T02:05:16.000Z
seq2seq/__init__.py
mtran14/pytorch-seq2seq
738059377eee9be07863e33f21c7d255139c44d6
[ "Apache-2.0" ]
128
2017-07-07T21:41:03.000Z
2021-06-30T13:18:23.000Z
seq2seq/__init__.py
mtran14/pytorch-seq2seq
738059377eee9be07863e33f21c7d255139c44d6
[ "Apache-2.0" ]
434
2017-07-08T12:35:15.000Z
2022-03-25T06:28:13.000Z
src_field_name = 'src' tgt_field_name = 'tgt'
15.333333
22
0.73913
8
46
3.75
0.5
0.6
0
0
0
0
0
0
0
0
0
0
0.130435
46
2
23
23
0.75
0
0
0
0
0
0.130435
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
395905244d3c06f86e6dd5ff30c595374ea33e6e
338
py
Python
python/BubbleSort/unitTest.py
Catfish1210/python-examples
0c2e1574478ae9e1634274eb2df5b8ff0202f9e3
[ "MIT" ]
null
null
null
python/BubbleSort/unitTest.py
Catfish1210/python-examples
0c2e1574478ae9e1634274eb2df5b8ff0202f9e3
[ "MIT" ]
null
null
null
python/BubbleSort/unitTest.py
Catfish1210/python-examples
0c2e1574478ae9e1634274eb2df5b8ff0202f9e3
[ "MIT" ]
1
2021-06-16T14:23:45.000Z
2021-06-16T14:23:45.000Z
import unittest from main import BubleSort class TestBubleSort(unittest.TestCase): def test_success(self): self.assertEqual(BubleSort([12, 3, 1, 7]), [1, 3, 7, 12]) #success def test_failed(self): self.assertEqual(BubleSort([12, 3, 1, 7]), [1, 3, 7, 13]) #failed if __name__ == '__main__': unittest.main()
24.142857
74
0.647929
47
338
4.446809
0.446809
0.066986
0.181818
0.267943
0.344498
0.344498
0.344498
0.344498
0.344498
0.344498
0
0.073801
0.198225
338
13
75
26
0.697417
0.038462
0
0
0
0
0.024768
0
0
0
0
0
0.222222
1
0.222222
false
0
0.222222
0
0.555556
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4