hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
2a7cc3a2ba45dcd79feceaaa86576ae6198b0caf
118
py
Python
Python/autori/autori.py
rvrheenen/OpenKattis
7fd59fcb54e86cdf10f56c580c218c62e584f391
[ "MIT" ]
12
2016-10-03T20:43:43.000Z
2021-06-12T17:18:42.000Z
Python/autori/autori.py
rvrheenen/OpenKattis
7fd59fcb54e86cdf10f56c580c218c62e584f391
[ "MIT" ]
null
null
null
Python/autori/autori.py
rvrheenen/OpenKattis
7fd59fcb54e86cdf10f56c580c218c62e584f391
[ "MIT" ]
10
2017-11-14T19:56:37.000Z
2021-02-02T07:39:57.000Z
line = input() abbr = line[0] for i in range(len(line)): if line[i] == "-": abbr += line[i+1] print(abbr)
16.857143
26
0.525424
20
118
3.1
0.6
0.258065
0
0
0
0
0
0
0
0
0
0.022727
0.254237
118
6
27
19.666667
0.681818
0
0
0
0
0
0.008475
0
0
0
0
0
0
1
0
false
0
0
0
0
0.166667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
2a7df1a1f3f69d0b66cc6a68a51f163e54917f1c
3,915
py
Python
covid_notebook/util/formatter.py
mtna/rds-python-examples
6cd52fe4ed1bc21acd591d575d0ea4a2dcd75d93
[ "Apache-2.0" ]
2
2020-05-15T18:11:04.000Z
2021-05-05T09:19:51.000Z
covid_notebook/util/formatter.py
mtna/rds-python-examples
6cd52fe4ed1bc21acd591d575d0ea4a2dcd75d93
[ "Apache-2.0" ]
null
null
null
covid_notebook/util/formatter.py
mtna/rds-python-examples
6cd52fe4ed1bc21acd591d575d0ea4a2dcd75d93
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed May 6 11:24:11 2020 provides formatting for COVID-19 Notebook @author: seanlucas """ def html_catalog(catalog): metadata = catalog.get_metadata() dataproduct_rows = '' for dataproduct in metadata['dataProducts']: dataproduct_rows += f''' <tr style="border: 1px solid black"> <td style="text-align:left; border: 1px solid black">{dataproduct['name']}</td> <td style="text-align:left; border: 1px solid black">{dataproduct['id']}</td> </tr> ''' return f''' <html> <body> <div> <h1>{metadata['name']} ({metadata['id']})</h1> <p>{metadata['description']}</p> <table style="border: 1px solid black"> <tr style="border: 1px solid black"> <td colspan="2" style="text-align:center"><b>Dataproducts</b></td> </tr> <tr style="border: 1px solid black"> <td style="text-align:center; border: 1px solid black"><b>Name</b></td> <td style="text-align:center; border: 1px solid black"><b>ID</b></td> </tr> {dataproduct_rows} </table> </div> </body> </html> ''' def html_variables(dataproduct): metadata = dataproduct.get_variable() var_rows = '' for variable in metadata: class_id = '' try: class_id = variable['classificationId'] except KeyError: pass var_rows += f''' <tr style="border: 1px solid black"> <td style="text-align:left; border: 1px solid black">{variable['name']}</td> <td style="text-align:left; border: 1px solid black">{variable['id']}</td> <td style="text-align:left; border: 1px solid black">{variable['label']}</td> <td style="text-align:left; border: 1px solid black">{variable['dataType']}</td> <td style="text-align:left; border: 1px solid black">{class_id}</td> </tr> ''' return f''' <html> <body> <div> <table style="border: 1px solid black"> <tr style="border: 1px solid black"> <td colspan="5" style="text-align:center"><b>Variables</b></td> </tr> <tr style="border: 1px solid black"> <td style="text-align:center; border: 1px solid black"><b>Name</b></td> <td style="text-align:center; border: 1px solid 
black"><b>ID</b></td> <td style="text-align:center; border: 1px solid black"><b>Label</b></td> <td style="text-align:center; border: 1px solid black"><b>Data Type</b></td> <td style="text-align:center; border: 1px solid black"><b>Classification</b></td> </tr> {var_rows} </table> </div> </body> </html> ''' def html_classification(dataproduct, class_id, limit=20): metadata = dataproduct.get_classification(class_id) code_count = metadata['rootCodeCount'] codes = dataproduct.get_code(class_id, limit) code_rows = '' for code in codes: code_rows += f''' <tr style="border: 1px solid black"> <td style="text-align:left; border: 1px solid black">{code['codeValue']}</td> <td style="text-align:left; border: 1px solid black">{code['name']}</td> </tr> ''' return f''' <html> <body> <div> <h1>{metadata['id']}</h1> <p>Code Count: {code_count}</p> <table style="border: 1px solid black"> <tr style="border: 1px solid black"> <td colspan="2" style="text-align:center"><b>Codes</b></td> </tr> <tr style="border: 1px solid black"> <td style="text-align:center; border: 1px solid black"><b>Value</b></td> <td style="text-align:center; border: 1px solid black"><b>Label</b></td> </tr> {code_rows} </table> </div> </body> </html> '''
33.177966
93
0.558876
496
3,915
4.362903
0.159274
0.124769
0.194085
0.263401
0.677449
0.658503
0.658503
0.633549
0.621534
0.591959
0
0.018698
0.262324
3,915
118
94
33.177966
0.730609
0.036271
0
0.564356
0
0.19802
0.797875
0.30093
0
0
0
0
0
1
0.029703
false
0.009901
0
0
0.059406
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
aa6be4e924d5f2831927b299450e4354fc451b2e
320
py
Python
krun_ext_graal_ce_hotspot.py
vext01/new_benchmarking_experiment
3c2d834b6cb555a1997bf0f0236d6947c6fa4697
[ "Apache-2.0", "MIT-0", "MIT" ]
1
2019-09-15T21:21:03.000Z
2019-09-15T21:21:03.000Z
krun_ext_graal_ce_hotspot.py
vext01/new_benchmarking_experiment
3c2d834b6cb555a1997bf0f0236d6947c6fa4697
[ "Apache-2.0", "MIT-0", "MIT" ]
9
2019-05-08T14:16:42.000Z
2019-12-09T10:56:45.000Z
krun_ext_graal_ce_hotspot.py
vext01/new_benchmarking_experiment
3c2d834b6cb555a1997bf0f0236d6947c6fa4697
[ "Apache-2.0", "MIT-0", "MIT" ]
2
2019-08-30T09:29:13.000Z
2019-11-20T20:59:09.000Z
#!/usr/bin/env python3 """ Graal CE (running the normal HotSpot compiler) script for use with Krun's ExternalSuiteVMDef. """ import sys from krun_ext_common import run, emit_process_exec_json _, benchmark, num_iters, param, instr = sys.argv emit_process_exec_json(run("graal-ce-hotspot", benchmark, int(num_iters)))
22.857143
74
0.775
49
320
4.836735
0.714286
0.059072
0.126582
0.160338
0
0
0
0
0
0
0
0.003546
0.11875
320
13
75
24.615385
0.836879
0.359375
0
0
0
0
0.081218
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
aa9d561fbe1b20f312f821da03f82c6947175010
86
py
Python
webu/auto/websocket.py
happyuc-project/webu.py
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
[ "MIT" ]
null
null
null
webu/auto/websocket.py
happyuc-project/webu.py
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
[ "MIT" ]
null
null
null
webu/auto/websocket.py
happyuc-project/webu.py
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
[ "MIT" ]
null
null
null
from webu import ( Webu, WebsocketProvider, ) w3 = Webu(WebsocketProvider())
12.285714
30
0.674419
8
86
7.25
0.625
0.724138
0
0
0
0
0
0
0
0
0
0.014925
0.22093
86
6
31
14.333333
0.850746
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
aab648585ac526ae66daa1064ab201825f0e8da7
2,448
py
Python
chasscot/homepage/models.py
Chasscot-Doun/Chasscot
da46d29e38ef65b433a248397fd3f7ed6add3860
[ "MIT" ]
1
2021-05-10T18:37:54.000Z
2021-05-10T18:37:54.000Z
chasscot/homepage/models.py
Chasscot-Doun/Chasscot
da46d29e38ef65b433a248397fd3f7ed6add3860
[ "MIT" ]
null
null
null
chasscot/homepage/models.py
Chasscot-Doun/Chasscot
da46d29e38ef65b433a248397fd3f7ed6add3860
[ "MIT" ]
null
null
null
from django.db import models import datetime # Create your models here. class Produit(models.Model): nom_bouteille = models.CharField(max_length=40, default='') description_bouteille = models.CharField(max_length=500, default='') millésime = models.IntegerField(default=datetime.date.today().year) saison = models.CharField(max_length=20, default='') quantité = models.FloatField(default=0.5) pourcentage_alcool = models.FloatField(default=16) photo = models.ImageField(upload_to='uploads/', default='') prix = models.FloatField(default=10) pourcentage_rabais = models.FloatField(default=0) prix_final = models.FloatField(default=10) class Membre(models.Model): nom = models.CharField(max_length=20) prénom = models.CharField(max_length=20) surnom = models.CharField(max_length=20) biographie = models.CharField(max_length=500) titre = models.CharField(max_length=20) photo_nain = models.ImageField(upload_to='uploads/', default='') photo_humain = models.ImageField(upload_to='uploads/', default='') class Utilisateur(models.Model): nom = models.CharField(max_length=100) prénom = models.CharField(max_length=100) nom_rue = models.CharField(max_length=250) numéro_de_rue = models.CharField(max_length=4) code_postal = models.CharField(max_length=4) #Ne pas oublier de forcer le int date_naissance = models.DateField() class Etiquette(models.Model): modele_etiquette = models.ImageField(upload_to='uploads/', default='') class Achat(models.Model): heure_achat_complet = models.DateTimeField(default=datetime.date.today()) jour_achat = models.DateTimeField(default=datetime.date.today().day) mois_achat = models.DateTimeField(default=datetime.date.today().month) année_achat = models.DateTimeField(default=datetime.date.today().year) class Client_B2B(models.Model): nom_contact = models.CharField(max_length=100) prénom_contact = models.CharField(max_length=100) nom_rue = models.CharField(max_length=250) numéro_de_rue = models.CharField(max_length=4) code_postal = models.CharField(max_length=4) # 
Ne pas oublier de forcer le int date_naissance = models.DateField() nom_établissement = models.CharField(max_length=100) logo_établissement = models.ImageField(upload_to='uploads/', default='') type_établissement = models.CharField(max_length=100) url_site = models.URLField(max_length=300)
45.333333
83
0.751634
314
2,448
5.681529
0.283439
0.105942
0.201794
0.269058
0.67657
0.54204
0.377803
0.206278
0.206278
0.206278
0
0.027713
0.13031
2,448
54
84
45.333333
0.81024
0.035539
0
0.173913
0
0
0.016964
0
0
0
0
0
0
1
0
false
0
0.043478
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
aabef2629d07d7b18486ad3418d52659d4f00f13
153
py
Python
4.py
BarisTeksin/project-euler
38a368d66fdd3bdc1d977059ba966fb7c1dcdc39
[ "MIT" ]
4
2020-04-18T21:05:13.000Z
2020-04-26T15:39:14.000Z
4.py
BarisTeksin/project-euler
38a368d66fdd3bdc1d977059ba966fb7c1dcdc39
[ "MIT" ]
null
null
null
4.py
BarisTeksin/project-euler
38a368d66fdd3bdc1d977059ba966fb7c1dcdc39
[ "MIT" ]
null
null
null
high = 0 for x in range(100,1000): for y in range(100,1000): if str(x*y) == str(x*y)[::-1] and x*y>high: high = x*y print(high)
19.125
51
0.509804
31
153
2.516129
0.451613
0.102564
0.25641
0.358974
0
0
0
0
0
0
0
0.149533
0.300654
153
7
52
21.857143
0.579439
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
aad42316705611f0159a8c195377c4a50a574a93
369
py
Python
edu54book/propellerino.py
Clonexy700/edu54book
2a83f178947ddaf72ae6f94b502dfcf390ea9fe3
[ "Unlicense" ]
1
2019-12-24T08:44:32.000Z
2019-12-24T08:44:32.000Z
edu54book/propellerino.py
Clonexy700/edu54book
2a83f178947ddaf72ae6f94b502dfcf390ea9fe3
[ "Unlicense" ]
null
null
null
edu54book/propellerino.py
Clonexy700/edu54book
2a83f178947ddaf72ae6f94b502dfcf390ea9fe3
[ "Unlicense" ]
null
null
null
from tkinter import * root = Tk() c = Canvas(root, width=500, height=500, bg='white') c.pack() c.create_oval(225, 235, 275, 285, width=2) c.create_oval(200, 210, 300, 310, width=2) c.create_oval(225, 80, 275, 210, width=2) c.create_oval(225, 310, 275, 450, width=2) c.create_oval(60, 240, 200, 285, width=2) c.create_oval(300, 240, 440, 285, width=2) root.mainloop()
26.357143
51
0.685637
72
369
3.430556
0.416667
0.17004
0.267206
0.263158
0.392713
0.323887
0
0
0
0
0
0.254658
0.127371
369
13
52
28.384615
0.512422
0
0
0
0
0
0.01355
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
2ad486eab3cc65ab2de10e2cdfe4c61e6463fcd8
1,147
py
Python
bspump/declarative/expression/value/eventexpr.py
chinese-soup/BitSwanPump
6ef71577cc1f166cff80876d28be37c791061bd2
[ "BSD-3-Clause" ]
1
2020-08-20T12:56:58.000Z
2020-08-20T12:56:58.000Z
bspump/declarative/expression/value/eventexpr.py
chinese-soup/BitSwanPump
6ef71577cc1f166cff80876d28be37c791061bd2
[ "BSD-3-Clause" ]
null
null
null
bspump/declarative/expression/value/eventexpr.py
chinese-soup/BitSwanPump
6ef71577cc1f166cff80876d28be37c791061bd2
[ "BSD-3-Clause" ]
null
null
null
from ...abc import Expression class EVENT(Expression): """ The current event. Usage: ``` !EVENT `` """ def __init__(self, app, *, value): super().__init__(app) assert(value == "") def __call__(self, context, event, *args, **kwargs): return event class KWARGS(Expression): """ The current kwargs. Usage: ``` !KWARGS `` """ def __init__(self, app, *, value): super().__init__(app) assert(value == "") def __call__(self, context, event, *args, **kwargs): return kwargs class KWARG(Expression): """ The item from a kwargs. Usage: ``` !KWARG argname `` """ def __init__(self, app, *, value): super().__init__(app) self.ArgName = value def __call__(self, context, event, *args, **kwargs): return kwargs[self.ArgName] class ARGS(Expression): def __init__(self, app, *, value): super().__init__(app) assert(value == '') def __call__(self, context, event, *args, **kwargs): return args class ARG(Expression): def __init__(self, app, *, value): super().__init__(app) assert(value == '') self.ArgNumber = 0 def __call__(self, context, event, *args, **kwargs): return args[self.ArgNumber]
15.092105
53
0.646033
140
1,147
4.864286
0.2
0.051395
0.080764
0.10279
0.64464
0.64464
0.64464
0.64464
0.599119
0.530103
0
0.001058
0.176112
1,147
75
54
15.293333
0.719577
0.135135
0
0.59375
0
0
0
0
0
0
0
0
0.125
1
0.3125
false
0
0.03125
0.15625
0.65625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
2ae7f709e893ed3d1b2889a7294f6fa489eee7e1
804
py
Python
src/ci_workflow/ci_target.py
asifsmohammed/opensearch-build
f78859000d676d35c29b15e08bbf4310c4df05b9
[ "Apache-2.0" ]
1
2022-01-29T17:48:00.000Z
2022-01-29T17:48:00.000Z
src/ci_workflow/ci_target.py
asifsmohammed/opensearch-build
f78859000d676d35c29b15e08bbf4310c4df05b9
[ "Apache-2.0" ]
1
2022-02-07T23:43:53.000Z
2022-02-10T19:56:41.000Z
src/ci_workflow/ci_target.py
tianleh/opensearch-build
460ad6b978c034d1dac672766c8dff67f51e4cd7
[ "Apache-2.0" ]
null
null
null
# SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. class CiTarget: version: str name: str snapshot: bool def __init__(self, version: str, name: str, snapshot: bool = True) -> None: self.version = version self.name = name self.snapshot = snapshot @property def opensearch_version(self) -> str: return self.version + "-SNAPSHOT" if self.snapshot else self.version @property def component_version(self) -> str: # BUG: the 4th digit is dictated by the component, it's not .0, this will break for 1.1.0.1 return self.version + ".0-SNAPSHOT" if self.snapshot else f"{self.version}.0"
30.923077
99
0.674129
113
804
4.743363
0.477876
0.123134
0.029851
0.063433
0.205224
0.108209
0
0
0
0
0
0.019417
0.231343
804
25
100
32.16
0.847896
0.337065
0
0.142857
0
0
0.068441
0
0
0
0
0
0
1
0.214286
false
0
0
0.142857
0.642857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
2d4b25a72de45e992d60306dcfdbf1aa7077cbcd
1,474
py
Python
models.py
YazdanRa/gifoolak
c383ad095a2207f29a2f11d56da9aa7be289b67c
[ "MIT" ]
null
null
null
models.py
YazdanRa/gifoolak
c383ad095a2207f29a2f11d56da9aa7be289b67c
[ "MIT" ]
null
null
null
models.py
YazdanRa/gifoolak
c383ad095a2207f29a2f11d56da9aa7be289b67c
[ "MIT" ]
null
null
null
from datetime import datetime from peewee import * from playhouse.sqlite_ext import JSONField database = SqliteDatabase(None) class BaseModel(Model): id = PrimaryKeyField() created_at = TimestampField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')]) updated_at = TimestampField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')]) class Meta: database = database class User(BaseModel): chat_id = CharField(max_length=128, unique=True) first_name = CharField(max_length=128) last_name = CharField(max_length=128, null=True) username = CharField(max_length=128, null=True) language_code = CharField(max_length=20, null=True) is_bot = BooleanField(default=False) def __str__(self): return "{} {} (@{})".format(self.first_name, self.last_name, self.username) class Message(BaseModel): chat_id = CharField(max_length=128) username = CharField(max_length=128, null=True) text = CharField(max_length=128, null=True) date = DateTimeField(default=datetime.now) details = JSONField(default=dict, null=True) class Keyword(BaseModel): text = CharField(max_length=128) class Gif(BaseModel): user = ForeignKeyField(User, backref='gif') file_id = CharField(max_length=256) file_path = CharField(max_length=256) file_size = IntegerField() is_public = BooleanField(null=True) keywords = ManyToManyField(Keyword, backref='gif') KeywordGif = Gif.keywords.get_through_model()
28.346154
83
0.726594
180
1,474
5.761111
0.405556
0.12729
0.190935
0.162006
0.403086
0.298939
0.243009
0
0
0
0
0.025911
0.162144
1,474
51
84
28.901961
0.813765
0
0
0.057143
0
0
0.045455
0
0
0
0
0
0
1
0.028571
false
0
0.085714
0.028571
0.914286
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
2d5ce664099f0e8e3a8ec1030ab3679e4ebf48aa
273
py
Python
src/python/WMCore/BossAir/Oracle/DeleteJobs.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
21
2015-11-19T16:18:45.000Z
2021-12-02T18:20:39.000Z
src/python/WMCore/BossAir/Oracle/DeleteJobs.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
5,671
2015-01-06T14:38:52.000Z
2022-03-31T22:11:14.000Z
src/python/WMCore/BossAir/Oracle/DeleteJobs.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
67
2015-01-21T15:55:38.000Z
2022-02-03T19:53:13.000Z
#!/usr/bin/env python """ _DeleteJobs_ Oracle implementation for creating a deleting a job """ from WMCore.BossAir.MySQL.DeleteJobs import DeleteJobs as MySQLDeleteJobs class DeleteJobs(MySQLDeleteJobs): """ _DeleteJobs_ Delete jobs from bl_runjob """
16.058824
73
0.736264
31
273
6.322581
0.774194
0
0
0
0
0
0
0
0
0
0
0
0.179487
273
16
74
17.0625
0.875
0.465201
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
2d8070a9b85ef85dbeb373f5548431258c77a9d3
875
py
Python
solutions/python/2018/correctLineup.py
lucifer1198/Codesignal
07d6d6457b8b3a9f1c51118b0e8e44cce66ee039
[ "MIT" ]
2
2020-12-21T22:09:26.000Z
2021-01-01T15:40:01.000Z
solutions/python/2018/correctLineup.py
nsu1210/Codesignal
07d6d6457b8b3a9f1c51118b0e8e44cce66ee039
[ "MIT" ]
null
null
null
solutions/python/2018/correctLineup.py
nsu1210/Codesignal
07d6d6457b8b3a9f1c51118b0e8e44cce66ee039
[ "MIT" ]
1
2021-01-28T18:15:02.000Z
2021-01-28T18:15:02.000Z
""" For the opening ceremony of the upcoming sports event an even number of athletes were picked. They formed a correct lineup, i.e. such a lineup in which no two boys or two girls stand together. The first person in the lineup was a girl. As a part of the performance, adjacent pairs of athletes (i.e. the first one together with the second one, the third one together with the fourth one, etc.) had to swap positions with each other. Given a list of athletes, return the list of athletes after the changes, i.e. after each adjacent pair of athletes is swapped. Example For athletes = [1, 2, 3, 4, 5, 6], the output should be correctLineup(athletes) = [2, 1, 4, 3, 6, 5]. """ def correctLineup(athletes): return [val for item in [a for a in zip([e for i, e in enumerate(athletes) if i % 2 != 0], [e for i, e in enumerate(athletes) if i % 2 == 0])] for val in item]
46.052632
163
0.72
164
875
3.841463
0.47561
0.079365
0.047619
0.057143
0.095238
0.095238
0.095238
0.095238
0.095238
0.095238
0
0.022792
0.197714
875
18
164
48.611111
0.874644
0.766857
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
2db9d906c0209bf146eef0099d07fffe6b41d599
65
py
Python
usbclassifier/__init__.py
t-yui/usbclassifier
72a0be8991c34381ea72c5f969ae9d838c9ac158
[ "MIT" ]
4
2019-09-02T14:16:52.000Z
2020-12-09T16:13:05.000Z
usbclassifier/__init__.py
t-yui/usbclassifier
72a0be8991c34381ea72c5f969ae9d838c9ac158
[ "MIT" ]
null
null
null
usbclassifier/__init__.py
t-yui/usbclassifier
72a0be8991c34381ea72c5f969ae9d838c9ac158
[ "MIT" ]
1
2020-03-21T10:26:10.000Z
2020-03-21T10:26:10.000Z
from .classifier import USBaggingClassifier __version__ = '0.1.1'
32.5
43
0.815385
8
65
6.125
0.875
0
0
0
0
0
0
0
0
0
0
0.050847
0.092308
65
2
44
32.5
0.779661
0
0
0
0
0
0.075758
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
2dbfdb14972de837af30ef8cd2a3801cb3e8ff95
753
py
Python
turbosms/routers.py
pmaigutyak/mp-turbosms
2b2089dbded95a49161e243e05a62fd0b568ab92
[ "0BSD" ]
7
2017-03-30T14:26:25.000Z
2021-09-25T14:31:43.000Z
turbosms/routers.py
pmaigutyak/mp-turbosms
2b2089dbded95a49161e243e05a62fd0b568ab92
[ "0BSD" ]
1
2019-04-20T15:38:13.000Z
2019-05-20T12:29:18.000Z
turbosms/routers.py
pmaigutyak/mp-turbosms
2b2089dbded95a49161e243e05a62fd0b568ab92
[ "0BSD" ]
null
null
null
class TurboSMSRouter(object): app_label = 'turbosms' db_name = 'turbosms' def db_for_read(self, model, **hints): if model._meta.app_label == self.app_label: return self.db_name return None def db_for_write(self, model, **hints): if model._meta.app_label == self.app_label: return self.db_name return None def allow_relation(self, obj1, obj2, **hints): if obj1._meta.app_label == self.app_label or \ obj2._meta.app_label == self.app_label: return False return None def allow_migrate(self, db, app_label, model_name=None, **hints): if app_label == self.app_label: return False return None
21.514286
69
0.600266
99
753
4.30303
0.262626
0.225352
0.140845
0.176056
0.605634
0.605634
0.549296
0.539906
0.539906
0.347418
0
0.007648
0.305445
753
34
70
22.147059
0.806883
0
0
0.5
0
0
0.021277
0
0
0
0
0
0
1
0.2
false
0
0
0
0.75
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
2dc3fe0ac26ca288fee13059aacf28df1b63b630
163
py
Python
paw/constants/tests.py
keeperaft/personalaltwebsite
b8ad2679c2809c316e8f746ffe1b302460f336be
[ "MIT" ]
null
null
null
paw/constants/tests.py
keeperaft/personalaltwebsite
b8ad2679c2809c316e8f746ffe1b302460f336be
[ "MIT" ]
2
2020-06-05T20:35:57.000Z
2021-06-10T21:24:24.000Z
paw/constants/tests.py
keeperaft/personalaltwebsite
b8ad2679c2809c316e8f746ffe1b302460f336be
[ "MIT" ]
1
2019-04-10T02:03:35.000Z
2019-04-10T02:03:35.000Z
BROWSER_IMPLICIT_WAIT_TIME = 5 MODAL_TRANSITION_WAIT_TIME = 2 BOOTSTRAP_SWITCH_TRANSITION_WAIT_TIME = 1 PAGE_LOADING_WAIT_TIME = 5 PAGE_LOADING_LONG_WAIT_TIME = 10
32.6
41
0.883436
27
163
4.703704
0.555556
0.314961
0.141732
0
0
0
0
0
0
0
0
0.040268
0.08589
163
5
42
32.6
0.812081
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
2dd1a49276fc87b7f280df5bf2a91398c277b7d1
532
py
Python
MyWork/OldFiles/Intermediate/ObjectOrientatedProgramming/ClassesObjects.py
minefarmer/100-Days-Python
b80b28d299342b490082ac301a0d8b176419f8f9
[ "Unlicense" ]
null
null
null
MyWork/OldFiles/Intermediate/ObjectOrientatedProgramming/ClassesObjects.py
minefarmer/100-Days-Python
b80b28d299342b490082ac301a0d8b176419f8f9
[ "Unlicense" ]
null
null
null
MyWork/OldFiles/Intermediate/ObjectOrientatedProgramming/ClassesObjects.py
minefarmer/100-Days-Python
b80b28d299342b490082ac301a0d8b176419f8f9
[ "Unlicense" ]
null
null
null
''' Class /is holding plate = True / attributes:/ / \ / tables_responsible = [4, 5, 6] waiter (object) \ \ / def take_order(table, order) / # takes order to chef methods:/ \ \ \def take_payment(amount): # add money to the restaurant '''
29.555556
58
0.293233
32
532
4.78125
0.84375
0.091503
0
0
0
0
0
0
0
0
0
0.015464
0.635338
532
18
59
29.555556
0.773196
0.969925
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
2de16c3fdb67e51a9ebf4990ca8d66b97638c3b8
72
py
Python
lightVeg/main.py
afiqhatta/hattaNum
b82267b048f7985b3bee0a33007118e4a1a80585
[ "MIT" ]
null
null
null
lightVeg/main.py
afiqhatta/hattaNum
b82267b048f7985b3bee0a33007118e4a1a80585
[ "MIT" ]
null
null
null
lightVeg/main.py
afiqhatta/hattaNum
b82267b048f7985b3bee0a33007118e4a1a80585
[ "MIT" ]
null
null
null
from lightVeg.datapoints.datapoint import * d = Data([1,0,0], [2,0,0])
18
43
0.666667
13
72
3.692308
0.769231
0.083333
0
0
0
0
0
0
0
0
0
0.095238
0.125
72
3
44
24
0.666667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
2de24c10268c8b843abe3e0daae0d27ad3a1001b
344
py
Python
scfmsp/controlflowanalysis/instructions/InstructionBic.py
sepidehpouyan/SCF-MSP430
1d7565bf38d9f42e775031d4ea8515ff99bef778
[ "MIT" ]
1
2020-07-03T21:26:52.000Z
2020-07-03T21:26:52.000Z
scfmsp/controlflowanalysis/instructions/InstructionBic.py
sepidehpouyan/SCF-MSP430
1d7565bf38d9f42e775031d4ea8515ff99bef778
[ "MIT" ]
null
null
null
scfmsp/controlflowanalysis/instructions/InstructionBic.py
sepidehpouyan/SCF-MSP430
1d7565bf38d9f42e775031d4ea8515ff99bef778
[ "MIT" ]
null
null
null
from scfmsp.controlflowanalysis.instructions.AbstractInstructionTwoRegisters import AbstractInstructionTwoRegisters class InstructionBic(AbstractInstructionTwoRegisters): name = 'bic' def get_execution_time(self): return self.clock def execute_judgment(self, ac): super(InstructionBic, self).execute_judgment(ac)
31.272727
115
0.790698
31
344
8.645161
0.677419
0.11194
0
0
0
0
0
0
0
0
0
0
0.142442
344
11
116
31.272727
0.908475
0
0
0
0
0
0.008696
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.857143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
2de48398e5c4f46109ddf67f0c9c617ff7732359
713
py
Python
tests/mock_bbio/__init__.py
vt-sailbot/sailbot-20
b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52
[ "MIT" ]
1
2019-09-26T12:05:57.000Z
2019-09-26T12:05:57.000Z
tests/mock_bbio/__init__.py
vt-sailbot/sailbot-20
b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52
[ "MIT" ]
5
2019-08-25T21:01:18.000Z
2020-09-04T02:56:40.000Z
tests/mock_bbio/__init__.py
vt-sailbot/sailbot-20
b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52
[ "MIT" ]
null
null
null
import sys import types module_name = 'Adafruit_BBIO' # Create a type module_name Adafruit_BBIO = types.ModuleType(module_name) Adafruit_BBIO.GPIO = types.ModuleType(module_name + '.GPIO') Adafruit_BBIO.ADC = types.ModuleType(module_name + '.ADC') Adafruit_BBIO.PWM = types.ModuleType(module_name + '.PWM') Adafruit_BBIO.UART = types.ModuleType(module_name + '.UART') # Overwrite the default system module path to point to the type we just created sys.modules[module_name] = Adafruit_BBIO sys.modules[module_name + '.GPIO'] = Adafruit_BBIO.GPIO sys.modules[module_name + '.ADC'] = Adafruit_BBIO.ADC sys.modules[module_name + '.PWM'] = Adafruit_BBIO.PWM sys.modules[module_name + '.UART'] = Adafruit_BBIO.UART
35.65
79
0.772791
103
713
5.126214
0.252427
0.227273
0.198864
0.236742
0.287879
0
0
0
0
0
0
0
0.107994
713
19
80
37.526316
0.830189
0.14446
0
0
0
0
0.080725
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.153846
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
93027f9c513a29a2ce3a6e99bcad683f2d76346f
127
py
Python
rio_stac/__init__.py
jonas-eberle/rio-stac
f46ecbaba3f9a324ab5546e1f3a5c2f922175969
[ "MIT" ]
null
null
null
rio_stac/__init__.py
jonas-eberle/rio-stac
f46ecbaba3f9a324ab5546e1f3a5c2f922175969
[ "MIT" ]
null
null
null
rio_stac/__init__.py
jonas-eberle/rio-stac
f46ecbaba3f9a324ab5546e1f3a5c2f922175969
[ "MIT" ]
null
null
null
"""rio-stac: Create STAC items from raster file.""" from rio_stac.stac import create_stac_item # noqa __version__ = "0.3.0"
21.166667
51
0.724409
21
127
4.047619
0.619048
0.164706
0
0
0
0
0
0
0
0
0
0.027778
0.149606
127
5
52
25.4
0.759259
0.401575
0
0
0
0
0.071429
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
93030f1a4d779ad15c572ebc533d877b010fec55
413
py
Python
tests/business/mock/mock_transport_stop_service.py
public-transport-quality-grades/oevgk18-generator
e467587d3d3c600a66139756e95bd84040d58c99
[ "MIT" ]
null
null
null
tests/business/mock/mock_transport_stop_service.py
public-transport-quality-grades/oevgk18-generator
e467587d3d3c600a66139756e95bd84040d58c99
[ "MIT" ]
3
2018-04-30T06:51:33.000Z
2018-05-30T18:22:58.000Z
tests/business/mock/mock_transport_stop_service.py
public-transport-quality-grades/oevgk18-generator
e467587d3d3c600a66139756e95bd84040d58c99
[ "MIT" ]
null
null
null
from typing import List from generator.business.model.transport_stop import TransportStop from . import mock_transport_stops def get_transport_stops(db) -> List[TransportStop]: return [ mock_transport_stops.stop_8503400, mock_transport_stops.stop_8503125, mock_transport_stops.stop_8591382, mock_transport_stops.stop_8593245, mock_transport_stops.stop_8504532 ]
27.533333
65
0.765133
50
413
5.92
0.42
0.331081
0.364865
0.371622
0
0
0
0
0
0
0
0.10355
0.181598
413
14
66
29.5
0.772189
0
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0.272727
0.090909
0.454545
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
934f94d34cd488990be0ab2c28fe4baafc2cbc17
90
py
Python
keyboards/inline/__init__.py
nomadroom/Media_Bot
59f70d1f1cbda254e6cc0f6a10c88dd05456f797
[ "MIT" ]
null
null
null
keyboards/inline/__init__.py
nomadroom/Media_Bot
59f70d1f1cbda254e6cc0f6a10c88dd05456f797
[ "MIT" ]
1
2021-09-12T17:38:29.000Z
2021-09-12T17:38:29.000Z
keyboards/inline/__init__.py
nomadroom/Media_Bot
59f70d1f1cbda254e6cc0f6a10c88dd05456f797
[ "MIT" ]
1
2020-12-18T08:49:41.000Z
2020-12-18T08:49:41.000Z
from aiogram.utils.callback_data import CallbackData some_callback = CallbackData("new")
22.5
52
0.833333
11
90
6.636364
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.088889
90
3
53
30
0.890244
0
0
0
0
0
0.033333
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
935128fe3e82306431c5e1b59f7086bddbf5949f
3,831
py
Python
sgpublish/exporter/base.py
vfxetc/sgpublish
f6dcdb7d727ca78bc29ce76b91f13962628bfea1
[ "BSD-3-Clause" ]
3
2018-03-19T03:58:08.000Z
2020-09-30T17:47:16.000Z
sgpublish/exporter/base.py
vfxetc/sgpublish
f6dcdb7d727ca78bc29ce76b91f13962628bfea1
[ "BSD-3-Clause" ]
null
null
null
sgpublish/exporter/base.py
vfxetc/sgpublish
f6dcdb7d727ca78bc29ce76b91f13962628bfea1
[ "BSD-3-Clause" ]
2
2017-07-04T19:29:47.000Z
2019-07-19T01:15:43.000Z
import os from sgpublish.publisher import Publisher class Exporter(object): def __init__(self, workspace=None, filename_hint=None, publish_type=None): self._workspace = workspace self._filename_hint = filename_hint self._publish_type = publish_type @property def publish_type(self): """The type of publish to create for this exporter.""" return self._publish_type @property def filename_hint(self): """A filename for extracting info from, or using as a base to construct the final path if not supplied.""" return self._filename_hint @property def workspace(self): """The working directory, usually corresponds to an SGFS tag.""" return self._workspace or os.getcwd() def get_previous_publish_ids(self): """A set of previous publish IDs that current context was involved in. These publishes are used by the GUI to determine which publish stream to automatically select. Currently only supported in the Maya classes; please extend for your applications. """ return set() def record_publish_id(self, id_): """Save the new publish ID in the current scene/script/context. These publishes will later be returned by :meth:`get_previous_publish_ids`. Currently only supported in the Maya classes; please extend for your applications. """ pass def publish(self, link=None, name=None, export_kwargs=None, **publisher_kwargs): """Trigger a publish. This method only deals with setting up the publisher, and uses :meth:`export_publish` to do the work. :param export_kwargs: Passed to :meth:`export_publish`. :returns: The publisher used. """ type_ = self.publish_type if not type_: raise ValueError('cannot publish without type') publisher_kwargs.pop('type', None) with Publisher(link=link, type=type_, name=name, **publisher_kwargs) as publisher: # Record the ID before the export so that it is included. self.record_publish_id(publisher.id) # This is a hook that everyone should allow to go up the full chain. 
self.before_export_publish(publisher, **export_kwargs) # Completely overridable by children (without calling super). self.export_publish(publisher, **export_kwargs) return publisher def before_export_publish(self, publisher, **kwargs): pass def fields_for_review_version(self, **kwargs): return {} def export_publish(self, publisher, **kwargs): """Perform an export within the context of a publish. By default this simply calls :meth:`export` with the publish directory and no path. :param kwargs: Passed to :meth:`export_publish`. """ return self.export(publisher.directory, None, **kwargs) def export(self, directory, path, **kwargs): """Do the work of exporting. Must be implemented in subclasses. :param str directory: The directory to publish in. If ``path`` is present then this may be assumed equal to ``os.path.dirname(path)``. :param path: The path to export to. Will always be ``None`` when publishing, and future use of ``None`` is reserved for complex exports, such as geocaches. :type path: str or None :param kwargs: Extra keyword arguments passed from the exporting widgets. """ raise NotImplementedError()
33.605263
90
0.617593
457
3,831
5.059081
0.354486
0.03936
0.019464
0.019031
0.143599
0.086505
0.059689
0.059689
0.059689
0.059689
0
0
0.309319
3,831
113
91
33.902655
0.873772
0.448969
0
0.131579
0
0
0.01795
0
0
0
0
0
0
1
0.289474
false
0.052632
0.052632
0.026316
0.552632
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
9359bb5791113063725606fdee1bd52df26144cf
522
py
Python
src/app.py
RodrigoSantosRodrigues/simple-api-python-flask
508c104c6a2bf8ae50a3d72cfa640d4c7df723f7
[ "MIT" ]
null
null
null
src/app.py
RodrigoSantosRodrigues/simple-api-python-flask
508c104c6a2bf8ae50a3d72cfa640d4c7df723f7
[ "MIT" ]
null
null
null
src/app.py
RodrigoSantosRodrigues/simple-api-python-flask
508c104c6a2bf8ae50a3d72cfa640d4c7df723f7
[ "MIT" ]
null
null
null
from flask import Flask from .config import app_config from .controllers.HomeController import home_api def create_app(env_name): """ param: env_name Create app """ # app initiliazation app = Flask(__name__) app.config.from_object(app_config[env_name]) app.register_blueprint(home_api, url_prefix='/v1/api/home') @app.route('/') def index(): return 'My first Server Works!' @app.route('/greet') def hello(): return 'Hello from Server' return app
19.333333
62
0.655172
68
522
4.808824
0.441176
0.082569
0.079511
0
0
0
0
0
0
0
0
0.0025
0.233716
522
26
63
20.076923
0.815
0.091954
0
0
0
0
0.134884
0
0
0
0
0
0
1
0.214286
false
0
0.214286
0.142857
0.642857
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
93633b3ca263da09d1e926963fa950ced7b3a83b
152
py
Python
p37_test.py
alpatine/project-euler-python
d731d2deebff4bfb812811921f56da7b984652c0
[ "MIT" ]
null
null
null
p37_test.py
alpatine/project-euler-python
d731d2deebff4bfb812811921f56da7b984652c0
[ "MIT" ]
null
null
null
p37_test.py
alpatine/project-euler-python
d731d2deebff4bfb812811921f56da7b984652c0
[ "MIT" ]
null
null
null
from unittest import TestCase from p37 import p37 class P37_Test(TestCase): def test_correct_answer(self): self.assertEqual(p37(), 748317)
21.714286
39
0.743421
21
152
5.238095
0.619048
0
0
0
0
0
0
0
0
0
0
0.112
0.177632
152
6
40
25.333333
0.768
0
0
0
0
0
0
0
0
0
0
0
0.2
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
937cc123645591552fca47c1bb0f57ee0a24ecef
872
py
Python
src/injecta/service/argument/DictArgument.py
DataSentics/injecta
090eeac6c76c43d40be71df678222a07b0a3c783
[ "MIT" ]
3
2021-09-27T12:55:00.000Z
2022-01-31T19:13:23.000Z
src/injecta/service/argument/DictArgument.py
DataSentics/injecta
090eeac6c76c43d40be71df678222a07b0a3c783
[ "MIT" ]
null
null
null
src/injecta/service/argument/DictArgument.py
DataSentics/injecta
090eeac6c76c43d40be71df678222a07b0a3c783
[ "MIT" ]
1
2021-03-04T09:12:05.000Z
2021-03-04T09:12:05.000Z
from injecta.service.argument.ArgumentInterface import ArgumentInterface from injecta.service.class_.InspectedArgument import InspectedArgument class DictArgument(ArgumentInterface): def __init__(self, value: dict, name: str = None): self.__value = value self.__name = name @property def name(self): return self.__name def get_string_value(self): output = [] for key, sub_argument in self.__value.items(): output.append("{} = {}".format(key, sub_argument.get_string_value())) return ", ".join(output) def check_type_matches_definition(self, inspected_argument: InspectedArgument, services2_classes: dict, aliases2_services: dict): pass def __eq__(self, other: "DictArgument"): return self.name == other.name and self.get_string_value() == other.get_string_value()
32.296296
133
0.697248
99
872
5.808081
0.434343
0.062609
0.097391
0
0
0
0
0
0
0
0
0.002869
0.200688
872
26
134
33.538462
0.822095
0
0
0
0
0
0.024083
0
0
0
0
0
0
1
0.277778
false
0.055556
0.111111
0.111111
0.611111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
0
0
0
3
faa863ae21cdc714f4877ad1b7792f9b7c902577
411
py
Python
mayan/apps/sources/storages.py
darrenflexxu/Mayan-EDMS
6707365bfacd137e625ddc1b990168012246fa07
[ "Apache-2.0" ]
null
null
null
mayan/apps/sources/storages.py
darrenflexxu/Mayan-EDMS
6707365bfacd137e625ddc1b990168012246fa07
[ "Apache-2.0" ]
5
2021-03-19T22:56:45.000Z
2022-03-12T00:08:43.000Z
mayan/apps/sources/storages.py
Sumit-Kumar-Jha/mayan
5b7ddeccf080b9e41cc1074c70e27dfe447be19f
[ "Apache-2.0" ]
1
2020-07-29T21:03:27.000Z
2020-07-29T21:03:27.000Z
from __future__ import unicode_literals from mayan.apps.storage.utils import get_storage_subclass from .settings import ( setting_staging_file_image_cache_storage, setting_staging_file_image_cache_storage_arguments, ) storage_staging_file_image_cache = get_storage_subclass( dotted_path=setting_staging_file_image_cache_storage.value )(**setting_staging_file_image_cache_storage_arguments.value)
31.615385
62
0.871046
55
411
5.854545
0.381818
0.170807
0.248447
0.326087
0.490683
0.490683
0.273292
0
0
0
0
0
0.085158
411
12
63
34.25
0.856383
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
faae335eeafa992b41a3b81b982e14bd079a747e
166
py
Python
lang/py/cookbook/v2/source/cb2_10_5_exm_1.py
ch1huizong/learning
632267634a9fd84a5f5116de09ff1e2681a6cc85
[ "MIT" ]
null
null
null
lang/py/cookbook/v2/source/cb2_10_5_exm_1.py
ch1huizong/learning
632267634a9fd84a5f5116de09ff1e2681a6cc85
[ "MIT" ]
null
null
null
lang/py/cookbook/v2/source/cb2_10_5_exm_1.py
ch1huizong/learning
632267634a9fd84a5f5116de09ff1e2681a6cc85
[ "MIT" ]
null
null
null
log_path = "/usr/local/nusphere/apache/logs/access_log" print "Percentage of requests that were client-cached: " + str( clientCachePercentage(log_path)) + '%'
41.5
63
0.728916
21
166
5.619048
0.857143
0.118644
0
0
0
0
0
0
0
0
0
0
0.138554
166
3
64
55.333333
0.825175
0
0
0
0
0
0.548193
0.253012
0
0
0
0
0
0
null
null
0
0
null
null
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
fad1682f1dae35be9241db14077bdd418aa01ac9
211
py
Python
apps/enquiries/admin.py
FancyKat/django-portfolio
f261f8d3a37e5771f9f48a74a769b6e9b479d49d
[ "MIT" ]
null
null
null
apps/enquiries/admin.py
FancyKat/django-portfolio
f261f8d3a37e5771f9f48a74a769b6e9b479d49d
[ "MIT" ]
9
2022-03-22T04:30:50.000Z
2022-03-22T04:49:13.000Z
apps/enquiries/admin.py
FancyKat/django-portfolio
f261f8d3a37e5771f9f48a74a769b6e9b479d49d
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Enquiry class EnquiryAdmin(admin.ModelAdmin): list_display = ["name", "email", "phone_number", "message"] admin.site.register(Enquiry, EnquiryAdmin)
19.181818
63
0.753555
25
211
6.28
0.76
0
0
0
0
0
0
0
0
0
0
0
0.127962
211
10
64
21.1
0.853261
0
0
0
0
0
0.132701
0
0
0
0
0
0
1
0
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
fad558c4f6575e6ff9099e80540fdc2055b56574
309
py
Python
state/db/db.py
andkononykhin/plenum
28dc1719f4b7e80d31dafbadb38cfec4da949886
[ "Apache-2.0" ]
148
2017-07-11T19:05:25.000Z
2022-03-16T21:31:20.000Z
state/db/db.py
andkononykhin/plenum
28dc1719f4b7e80d31dafbadb38cfec4da949886
[ "Apache-2.0" ]
561
2017-06-29T17:59:56.000Z
2022-03-09T15:47:14.000Z
state/db/db.py
andkononykhin/plenum
28dc1719f4b7e80d31dafbadb38cfec4da949886
[ "Apache-2.0" ]
378
2017-06-29T17:45:27.000Z
2022-03-26T07:27:59.000Z
from abc import abstractmethod class BaseDB: @abstractmethod def inc_refcount(self, key, value): raise NotImplementedError @abstractmethod def dec_refcount(self, key): raise NotImplementedError @abstractmethod def get(self, key): raise NotImplementedError
18.176471
39
0.692557
30
309
7.066667
0.533333
0.240566
0.141509
0.386792
0
0
0
0
0
0
0
0
0.252427
309
16
40
19.3125
0.917749
0
0
0.545455
0
0
0
0
0
0
0
0
0
1
0.272727
false
0
0.090909
0
0.454545
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
fadaf7e4d839c9ca608bd40d372cfaa3c2c5bf94
3,518
py
Python
mesos/cli/slave.py
mesosphere/mesos-cli
c65c8bcc3087a063d437698014e30dfd165a5257
[ "Apache-2.0" ]
77
2015-01-02T18:30:53.000Z
2019-06-11T06:10:06.000Z
mesos/cli/slave.py
mesosphere-backup/mesos-cli
c65c8bcc3087a063d437698014e30dfd165a5257
[ "Apache-2.0" ]
26
2015-01-31T23:23:34.000Z
2017-11-14T18:31:06.000Z
mesos/cli/slave.py
mesosphere-backup/mesos-cli
c65c8bcc3087a063d437698014e30dfd165a5257
[ "Apache-2.0" ]
24
2015-01-28T11:59:22.000Z
2019-06-11T01:55:29.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, print_function import urlparse import requests import requests.exceptions from . import exceptions, log, mesos_file, util from .cfg import CURRENT as CFG class MesosSlave(object): def __init__(self, items): self.__items = items def __getitem__(self, name): return self.__items[name] def __str__(self): return self.key() def key(self): return self["pid"].split('@')[-1] @property def host(self): return "{0}://{1}:{2}".format( CFG["scheme"], self["hostname"], self["pid"].split(":")[-1]) @log.duration def fetch(self, url, **kwargs): try: return requests.get(urlparse.urljoin( self.host, url), timeout=CFG["response_timeout"], **kwargs) except requests.exceptions.ConnectionError: raise exceptions.SlaveDoesNotExist( "Unable to connect to the slave at {0}".format(self.host)) @util.CachedProperty(ttl=5) def state(self): return self.fetch("/slave(1)/state.json").json() @property def frameworks(self): return util.merge(self.state, "frameworks", "completed_frameworks") def task_executor(self, task_id): for fw in self.frameworks: for exc in util.merge(fw, "executors", "completed_executors"): if task_id in list(map( lambda x: x["id"], util.merge( exc, "completed_tasks", "tasks", 
"queued_tasks"))): return exc raise exceptions.MissingExecutor("No executor has a task by that id") def file_list(self, path): # The sandbox does not exist on the slave. if path == "": return [] resp = self.fetch("/files/browse.json", params={"path": path}) if resp.status_code == 404: return [] return resp.json() def file(self, task, path): return mesos_file.File(self, task, path) @util.CachedProperty(ttl=1) def stats(self): return self.fetch("/monitor/statistics.json").json() def executor_stats(self, _id): return list(filter(lambda x: x["executor_id"])) def task_stats(self, _id): eid = self.task_executor(_id)["id"] stats = list(filter( lambda x: x["executor_id"] == eid, self.stats )) # Tasks that are not yet in a RUNNING state have no stats. if len(stats) == 0: return {} else: return stats[0]["statistics"] @property @util.memoize def log(self): return mesos_file.File(self, path="/slave/log")
31.132743
79
0.616543
444
3,518
4.788288
0.380631
0.032926
0.026341
0.015052
0.047977
0.026341
0.026341
0
0
0
0
0.007042
0.273451
3,518
112
80
31.410714
0.824726
0.241899
0
0.069444
0
0
0.121933
0.00906
0
0
0
0
0
1
0.208333
false
0
0.083333
0.138889
0.541667
0.013889
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4f0003296fc304e903108cf4d0be48a1ad43b5e7
1,286
py
Python
tests/commands/test_node.py
o3seespy/o3seespy
4fdd942370df1ac8d454e361f651405717b8584c
[ "MIT", "BSD-3-Clause" ]
16
2019-10-24T17:58:46.000Z
2022-03-01T19:48:06.000Z
tests/commands/test_node.py
o3seespy/o3seespy
4fdd942370df1ac8d454e361f651405717b8584c
[ "MIT", "BSD-3-Clause" ]
5
2020-04-17T01:39:27.000Z
2020-12-18T05:07:58.000Z
tests/commands/test_node.py
o3seespy/o3seespy
4fdd942370df1ac8d454e361f651405717b8584c
[ "MIT", "BSD-3-Clause" ]
6
2020-02-20T02:13:11.000Z
2021-11-01T19:08:41.000Z
import o3seespy as o3 import numpy as np def test_build_regular_node_mesh_2dxy(): xs = [1, 2, 3] ys = [3, 4] zs = 4.5 osi = o3.OpenSeesInstance(ndm=3, ndf=3) sn = o3.node.build_regular_node_mesh(osi, xs, ys, zs) sn = np.array(sn) print(sn.shape) assert len(sn.shape) == 2 # two axes assert sn.shape[0] == 3 # y-axis assert sn.shape[1] == 2 # x-axis assert sn[2][1].x == 3.0 assert sn[2][1].y == 4.0 assert sn[2][1].z == 4.5, sn[2][1].z # No z-axis zs = None osi = o3.OpenSeesInstance(ndm=2, ndf=3) sn = o3.node.build_regular_node_mesh(osi, xs, ys, zs) sn = np.array(sn) assert len(sn.shape) == 2 # two axes assert sn.shape[0] == 3 # x-axis assert sn.shape[1] == 2 # y-axis assert sn[2][1].x == 3.0 assert sn[2][1].y == 4.0 assert not hasattr(sn[2][1], 'z'), sn[1][2].z def test_build_regular_node_mesh_2dxz(): xs = [1, 2, 3] ys = [3] zs = [4.5, 6.0] osi = o3.OpenSeesInstance(ndm=3, ndf=3) sn = o3.node.build_regular_node_mesh(osi, xs, ys, zs) sn = np.array(sn) assert len(sn.shape) == 3 # three axes assert sn.shape[0] == 3 # y-axis assert sn.shape[1] == 1 # x-axis assert sn.shape[2] == 2 # x-axis assert sn[2][0][1].x == 3.0
26.791667
57
0.560653
245
1,286
2.865306
0.183673
0.148148
0.12963
0.14245
0.794872
0.762108
0.611111
0.611111
0.611111
0.611111
0
0.082896
0.258942
1,286
47
58
27.361702
0.653725
0.067652
0
0.552632
0
0
0.000843
0
0
0
0
0
0.447368
1
0.052632
false
0
0.052632
0
0.105263
0.026316
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
3
877d074d84b0c3f4892f9d47b0b0226e66ec59d7
418
py
Python
coralillo/tests/utils_test.py
categulario/norm
232d3e25dcce2a1f698b429ecdedf5f8ee33c340
[ "MIT" ]
1
2020-10-11T06:40:33.000Z
2020-10-11T06:40:33.000Z
coralillo/tests/utils_test.py
categulario/coralillo
232d3e25dcce2a1f698b429ecdedf5f8ee33c340
[ "MIT" ]
17
2017-08-22T16:52:03.000Z
2017-08-30T17:23:56.000Z
coralillo/tests/utils_test.py
categulario/norm
232d3e25dcce2a1f698b429ecdedf5f8ee33c340
[ "MIT" ]
4
2018-05-15T18:10:10.000Z
2020-09-01T08:58:55.000Z
from coralillo.utils import parse_embed def test_parse_embed(): array = ['object'] output = [['object', None]] assert parse_embed(array) == output array = ['object.field'] output = [['object', ['field']]] assert parse_embed(array) == output array = ['object.field', 'foo', 'object.var'] output = [['foo', None], ['object', ['field', 'var']]] assert parse_embed(array) == output
26.125
58
0.605263
48
418
5.145833
0.333333
0.202429
0.242915
0.255061
0.45749
0.348178
0.348178
0.348178
0
0
0
0
0.200957
418
15
59
27.866667
0.739521
0
0
0.272727
0
0
0.184211
0
0
0
0
0
0.272727
1
0.090909
false
0
0.090909
0
0.181818
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
8781575b880de0d2850ae80b1171e33686b73765
218
py
Python
vedastr_cstr/vedastr/models/bodies/feature_extractors/__init__.py
bsm8734/formula-image-latex-recognition
86d5070e8f907571a47967d64facaee246d92a35
[ "MIT" ]
13
2021-06-20T18:11:23.000Z
2021-12-07T18:06:42.000Z
vedastr_cstr/vedastr/models/bodies/feature_extractors/__init__.py
bsm8734/formula-image-latex-recognition
86d5070e8f907571a47967d64facaee246d92a35
[ "MIT" ]
9
2021-06-16T14:55:07.000Z
2021-06-23T14:45:36.000Z
vedastr_cstr/vedastr/models/bodies/feature_extractors/__init__.py
bsm8734/formula-image-latex-recognition
86d5070e8f907571a47967d64facaee246d92a35
[ "MIT" ]
6
2021-06-17T15:16:50.000Z
2021-07-05T20:41:26.000Z
from .builder import build_feature_extractor # noqa 401 from .decoders import build_brick, build_bricks, build_decoder # noqa 401 from .encoders import build_backbone, build_encoder, build_enhance_module # noqa 401
54.5
85
0.825688
31
218
5.516129
0.548387
0.192982
0.128655
0
0
0
0
0
0
0
0
0.047368
0.12844
218
3
86
72.666667
0.852632
0.119266
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
87868a6eb63152a36a45d79106b350f4e4a5af31
152
py
Python
exemplos/teste.py
cirino/python
6c45b5305aebeeeebb7ffef335700e41cc0b6b3b
[ "MIT" ]
1
2018-05-06T01:25:28.000Z
2018-05-06T01:25:28.000Z
exemplos/teste.py
cirino/python
6c45b5305aebeeeebb7ffef335700e41cc0b6b3b
[ "MIT" ]
1
2019-02-10T18:46:37.000Z
2019-02-12T21:17:50.000Z
exemplos/teste.py
cirino/python
6c45b5305aebeeeebb7ffef335700e41cc0b6b3b
[ "MIT" ]
null
null
null
print("Hello Word.") def nome(parameter_list): a = parameter_list.split(" ", 2) print(a) nome('dag cirino mano dev') nome('dagmar aparecido')
16.888889
36
0.664474
22
152
4.5
0.727273
0.262626
0
0
0
0
0
0
0
0
0
0.007937
0.171053
152
9
37
16.888889
0.777778
0
0
0
0
0
0.30719
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.166667
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
878a1229a4362eaf45a21edcb737d339fb83908c
224
py
Python
portfolio/blog/urls.py
selvianl/resume
4a056d57953c5d31ee1699eeacdf9f9bae85e896
[ "Apache-2.0" ]
null
null
null
portfolio/blog/urls.py
selvianl/resume
4a056d57953c5d31ee1699eeacdf9f9bae85e896
[ "Apache-2.0" ]
4
2021-03-18T21:49:30.000Z
2022-01-13T00:59:54.000Z
portfolio/blog/urls.py
selvianl/resume
4a056d57953c5d31ee1699eeacdf9f9bae85e896
[ "Apache-2.0" ]
null
null
null
from django.conf.urls import url from portfolio.blog.views import BlogFormView, BlogView urlpatterns = [ url(r'^add/$', BlogFormView.as_view(), name='blog_add'), url(r'^$', BlogView.as_view(), name='blog_index'), ]
28
60
0.700893
31
224
4.935484
0.580645
0.052288
0.130719
0.183007
0
0
0
0
0
0
0
0
0.125
224
8
61
28
0.780612
0
0
0
0
0
0.115556
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
87967b7fba18bbe3257b71e4b4b051dadf01ba7c
400
py
Python
app/models/Roles_Usuarios.py
pony012/PruebaServicioCucea
e28fc35beb8eefa3ed2de8b702f04a0f8ec8832f
[ "MIT" ]
null
null
null
app/models/Roles_Usuarios.py
pony012/PruebaServicioCucea
e28fc35beb8eefa3ed2de8b702f04a0f8ec8832f
[ "MIT" ]
null
null
null
app/models/Roles_Usuarios.py
pony012/PruebaServicioCucea
e28fc35beb8eefa3ed2de8b702f04a0f8ec8832f
[ "MIT" ]
null
null
null
from app.db import db_sql as db roles_usuarios = db.Table('roles_usuarios', db.Column('user_id', db.Integer(), db.ForeignKey('usuario.id')), db.Column('role_id', db.Integer(), db.ForeignKey('rol.id')))
36.363636
65
0.365
34
400
4.147059
0.5
0.085106
0.212766
0.184397
0.326241
0
0
0
0
0
0
0
0.5325
400
10
66
40
0.754011
0
0
0.25
0
0
0.11
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
87bd2b7225cc173c5b88a75455771b83cb9b77d2
98
py
Python
FaceAPIWebRole/upload/forms.py
qwergram/CortanaVisionCSA
33e2e0266dec7d7eb65d4d3f3d5470f8dc601f48
[ "MIT" ]
null
null
null
FaceAPIWebRole/upload/forms.py
qwergram/CortanaVisionCSA
33e2e0266dec7d7eb65d4d3f3d5470f8dc601f48
[ "MIT" ]
null
null
null
FaceAPIWebRole/upload/forms.py
qwergram/CortanaVisionCSA
33e2e0266dec7d7eb65d4d3f3d5470f8dc601f48
[ "MIT" ]
null
null
null
from django import forms class UploadImageForm(forms.Form): imageupload = forms.ImageField()
19.6
36
0.77551
11
98
6.909091
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.142857
98
4
37
24.5
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
87c6a408e82b405b862a85871751638cf79ff84e
180
py
Python
output/models/ms_data/datatypes/facets/integer/integer_total_digits002_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/ms_data/datatypes/facets/integer/integer_total_digits002_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/ms_data/datatypes/facets/integer/integer_total_digits002_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
from output.models.ms_data.datatypes.facets.integer.integer_total_digits002_xsd.integer_total_digits002 import ( FooType, Test, ) __all__ = [ "FooType", "Test", ]
18
112
0.722222
21
180
5.714286
0.714286
0.2
0.35
0
0
0
0
0
0
0
0
0.04
0.166667
180
9
113
20
0.76
0
0
0
0
0
0.061111
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
87ea7762b0a56f834137a9305f07a96cfcc0cfef
15,998
py
Python
tests/leguinncounter_tests.py
dunnesquared/sentencecow
5cd9b508043fe7b1064852ec2a6b0460944f23f7
[ "MIT" ]
1
2021-02-11T17:05:03.000Z
2021-02-11T17:05:03.000Z
tests/leguinncounter_tests.py
dunnesquared/sentencecow
5cd9b508043fe7b1064852ec2a6b0460944f23f7
[ "MIT" ]
null
null
null
tests/leguinncounter_tests.py
dunnesquared/sentencecow
5cd9b508043fe7b1064852ec2a6b0460944f23f7
[ "MIT" ]
null
null
null
from random import randint from nose.tools import * # from legsewoc import * # from legsewoc.leguincounter import LeGuinCounter # from .context import legsewoc # from legsewoc.leguincounter import LeGuinCounter from .context import leguincounter from leguincounter import LeGuinCounter def test_init(): '''Test Cases: - Returns expected object - Check contents of attribute text - Check contentes of attribute sentences - Pass invalid argument - Pass empty string "" - Pass empty string " \n\t \r\n " ''' # Returns expected object text = "Blah! Blah, blah." lg = LeGuinCounter(text) expected = True assert_equal(isinstance(lg, LeGuinCounter), expected) # Check contents of attribute text expected = "Blah! Blah, blah." assert_equal(lg.text, expected) # Check contents of attribute sentences expected = ["Blah!", " Blah, blah."] assert_equal(lg.sentences, expected) # Pass invalid Argument text = None assert_raises(TypeError, LeGuinCounter, text) # Pass empty string "" text = "" expected = ("", []) lg = LeGuinCounter(text) assert_equal((lg.text, lg.sentences), expected) # Pass empty string " \n\t \r\n " text = " \n\t \r\n " expected = expected = (" \n\t \r\n ", []) lg = LeGuinCounter(text) assert_equal((lg.text, lg.sentences), expected) def test_parse(): '''Test Cases: # Pass valid argument # Pass invalid argument ''' # Pass valid argument text = "Blah! Blah, blah." lg = LeGuinCounter(text) lg.parse(text) expected = ["Blah!", " Blah, blah."] assert_equal(lg.sentences, expected) # Pass invalid argument text = None assert_raises(TypeError, lg.parse , text) def test_count_words(): '''Test Cases: - invalid input - empty string - empty string with spaces - non-empty string = 1 word - non-empty = random number of words ''' # invalid input text = "Blah! Blah, blah." 
lg = LeGuinCounter(text) assert_raises(TypeError, lg.count_words, 7) # empty string text = "" lg.parse(text) expected = 0 assert_equal(lg.count_words(text), expected) # empty string with spaces text = ''' ''' lg.parse(text) expected = 0 assert_equal(lg.count_words(text), expected) # non-empty string = 1 word text = "Eeyore" lg.parse(text) expected = 1 assert_equal(lg.count_words(text), expected) # random number of words val = randint(2, 1000) text = "Pizza! " * val expected = val assert_equal(lg.count_words(text), expected) def test_morethan(): '''Test Cases # valid sentence, invalid max = less than 1 # empty sentece, valid max # valid input; sentence equal to max # valid input; sentence less than max # valid input; greater than max # bad input types # default max ''' # valid sentence, invalid max = less than 1 text = "My name is Alex. What's yours?" lg = LeGuinCounter(text) max = 0 assert_raises(ValueError, lg.more_than, text, max) # empty sentece, valid max text = "" max = 3 assert_equal(lg.more_than(text, max), False) # valid input; sentence equal to max text = "My name is Alex. What's yours?" max = 6 assert_equal(lg.more_than(text, max), False) # valid input; sentence less than max text = "My name is Alex. What's yours?" max = 10 assert_equal(lg.more_than(text, max), False) # valid input; sentence less than max text = "My name is Alex. What's yours?" max = 3 assert_equal(lg.more_than(text, max), True) # bad input types assert_raises(TypeError, lg.more_than, 7, "whua?") # default max (20 words) assert_equal(lg.more_than(text), False) def test_sentence_morethan(): '''Test Cases # bad max value # no sentences over max # all sentences over max # some sentences over max ''' text = ''' This is a sample text. Do you like it? I hope so. We're having so much trouble getting this program finished. Bye for now!! 
''' # bad max value lg = LeGuinCounter(text) max = 0 assert_raises(ValueError, lg.sentences_more_than, max) # no sentences over max max = 100 expected = [] assert_equal(lg.sentences_more_than(max), expected) # all sentences over max max = 1 actual = len(lg.sentences_more_than(max)) expected = 5 assert_equal(actual, expected) # some sentences over max = 3 actual = len(lg.sentences_more_than(max)) expected = 3 assert_equal(actual, expected) def test_mergenext(): '''Text Cases # Nothing to merge # Out of bounds to index: < 0 # Out of bounds to index: > len(sentences) # Out of bounds index: trying to merge the last element with something else # Merging when there is only one sentence in list # Merging when there are multiple sentences in list # Multiple merges until everything is just one sentence ''' # Nothing to merge text = "" lg = LeGuinCounter(text) before = len(lg.sentences) val = 0 assert_raises(ValueError, lg.merge_next, val) #lg.merge_next(0) #after = len(lg.sentences) #assert_equal(before, after) text = ''' This is a sample text. Do you like it? I hope so. We're having so much trouble getting this program finished. Bye for now!! ''' lg.parse(text) # Out of bounds to index: < 0 val = -100 assert_raises(IndexError, lg.merge_next, val) # Out of bounds to index: > len(sentences) val = 100 assert_raises(IndexError, lg.merge_next, val) # Out of bounds index: trying to merge the last element with something else val = len(lg.sentences) - 1 before = len(lg.sentences) lg.merge_next(val) after = len(lg.sentences) assert_equal(before, after) # Merging when there is only one sentence in list text = "This is a single sentence." lg.parse(text) before = len(lg.sentences) lg.merge_next(0) after = len(lg.sentences) assert_equal(before, after) # Merging when there are multiple sentences in list text = ''' This is a sample text. Do you like it? I hope so. We're having so much trouble getting this program finished. Bye for now!! 
''' lg.parse(text) lg.merge_next(1) expected = 4 actual = len(lg.sentences) assert_equal(actual, expected) # Multiple merges until sentences can be merged no more! for n in range(10): lg.merge_next(0) expected = 1 actual = len(lg.sentences) assert_equal(actual, expected) # def test_split_sentence(): # # 1. Normal case, minimal white spacing # # Check first sentence # # Check second sentence # # Check list size # # # Setup # text = "This is a sentence with a footnote.[1] Crazy!" # split_pos = 38 # i = 0 # lg = LeGuinCounter(text) # lg.split_sentence(i, split_pos) # # # # Check first sentence # expected = 'This is a sentence with a footnote.[1]' # result = lg.sentences[i] # assert_equal(result, expected) # # # Check second sentence # expected = ' Crazy!' # result = lg.sentences[i+1] # assert_equal(result, expected) # # # Check size of list # expected = 2 # result = len(lg.sentences) # assert_equal(result, expected) # # # 2. Normal case, complicated whitespacing # text = '''This is a sentence with a footnote.[1] Crazy! It's followed by another.[2] And another.[3] This sentence is free. # Just insane. # Here's one last sentence with a footnote.[3] # This sentence is on a separate line, but still atttached to the previous sentence.''' # # # Check size BEFORE split # lg.parse(text) # expected = 4 # result = len(lg.sentences) # assert_equal(result, expected) # # # Split sentence 0 at split_pos = 38 # split_pos = 38 # i = 0 # lg.split_sentence(i, split_pos) # # # Test whether split worked: check sentences and list size # # Check first sentence # expected = 'This is a sentence with a footnote.[1]' # result = lg.sentences[i] # assert_equal(result, expected) # # # Check second sentence # expected = ' Crazy!' 
# result = lg.sentences[i+1] # assert_equal(result, expected) # # # Check size of list # expected = 5 # result = len(lg.sentences) # assert_equal(result, expected) # # # Test splitting the last sentence (4) at split pos 45 # i = 4 # split_pos = 45 # lg.split_sentence(i, split_pos) # # # Check first sentence # expected = "\nHere's one last sentence with a footnote.[3]" # result = lg.sentences[i] # assert_equal(result, expected) # # # Check second sentence # expected = ''' # This sentence is on a separate line, but still atttached to the previous sentence.''' # result = lg.sentences[i+1] # assert_equal(result, expected) # # # Check size of list # expected = 6 # result = len(lg.sentences) # assert_equal(result, expected) # # # #Test error conditions # # # Sentence index out of bounds # i = -1 # split_pos = 5 # assert_raises(IndexError, lg.split_sentence, i, split_pos) # # i = 1000 # assert_raises(IndexError, lg.split_sentence, i, split_pos) # # # # split pos out of bounds # i = 5 # split_pos = -5 # assert_raises(IndexError, lg.split_sentence, i, split_pos) # # split_pos = 1000 # assert_raises(IndexError, lg.split_sentence, i, split_pos) # # # # No sentences # lg.sentences = [] # i = 0 # split_pos = 3 # assert_raises(ValueError, lg.split_sentence, i, split_pos) # # # # Test: multiple splits # text = "0.1.2.3.4." # lg.parse(text) # # #Check size BEFORE split # expected = 1 # result = len(lg.sentences) # assert_equal(result, expected) # # # Do multiple splits # split_pos = 2 # for i in range(0, 4): # lg.split_sentence(i, split_pos) # # expected = 5 # result = len(lg.sentences) # assert_equal(result, expected) # # expected = ["0.", "1.", "2.", "3.", "4."] # result = lg.sentences # assert_equal(result, expected) # # # Split when there's nothing to split # text = "1." 
# lg.parse(text) # i, split_pos = 0, 1 # lg.split_sentence(i, split_pos) # lg.split_sentence(i, 0) # split again # # # Check sentence list length # expected = ['1', '.'] # result = lg.sentences # assert_equal(result, expected) # # # # Split with blank characters # text = "\n\t\r\n123!\n\t\r\n" # lg.parse(text) # i = 0 # split_pos = 4 # lg.split_sentence(i, split_pos) # # # Check sentence list length # expected = 1 # result = len(lg.sentences) # assert_equal(result, expected) # # # N.B. offset in textanalysis ignores leading white spaces of first sentence # # Trailing whitespaces discarded since sentence only take to terminating # # character # expected = "123!" # result = lg.sentences[i] # assert_equal(result, expected) # # # Split at end of sentence # text = "Pizza!" # lg.parse(text) # i, split_pos = 0, 6 # lg.split_sentence(i, split_pos) # # expected = 1 # result = len(lg.sentences) # assert_equal(result, expected) # # expected = "Pizza!" # result = lg.sentences[i] # assert_equal(result, expected) def test_split_sentence(): # 1. Normal case, minimal white spacing # Check first sentence # Check second sentence # Check list size # Setup text = "This is a sentence with a footnote.[1] Crazy!" i = 0 sub = "This is a sentence with a footnote.[1]" lg = LeGuinCounter(text) lg.split_sentence(i, sub) # Check first sentence expected = 'This is a sentence with a footnote.[1]' result = lg.sentences[i] assert_equal(result, expected) # Check second sentence expected = ' Crazy!' result = lg.sentences[i+1] assert_equal(result, expected) # Check size of list expected = 2 result = len(lg.sentences) assert_equal(result, expected) # 2. Normal case, complicated whitespacing text = '''This is a sentence with a footnote.[1] Crazy! It's followed by another.[2] And another.[3] This sentence is free. Just insane. 
Here's one last sentence with a footnote.[3] This sentence is on a separate line, but still atttached to the previous sentence.''' # Check size BEFORE split lg.parse(text) expected = 4 result = len(lg.sentences) assert_equal(result, expected) # Split sentence 0 at split_pos = 38 sub = 'This is a sentence with a footnote.[1]' i = 0 lg.split_sentence(i, sub) # Test whether split worked: check sentences and list size # Check first sentence expected = 'This is a sentence with a footnote.[1]' result = lg.sentences[i] assert_equal(result, expected) # Check second sentence expected = ' Crazy!' result = lg.sentences[i+1] assert_equal(result, expected) # Check size of list expected = 5 result = len(lg.sentences) assert_equal(result, expected) # Test splitting the last sentence (4) at split pos 45 i = 4 sub = "Here's one last sentence with a footnote.[3]" lg.split_sentence(i, sub) # Check first sentence expected = "\nHere's one last sentence with a footnote.[3]" result = lg.sentences[i] assert_equal(result, expected) # Check second sentence expected = ''' This sentence is on a separate line, but still atttached to the previous sentence.''' result = lg.sentences[i+1] assert_equal(result, expected) # Check size of list expected = 6 result = len(lg.sentences) assert_equal(result, expected) #Test error conditions # Sentence index out of bounds i = -1 sub= "Hello." assert_raises(IndexError, lg.split_sentence, i, sub) i = 1000 assert_raises(IndexError, lg.split_sentence, i, sub) # No sentences lg.sentences = [] i = 0 sub = "Hello" assert_raises(ValueError, lg.split_sentence, i, sub) #Substring empty text = "This is a sentence with a footnote.[1] Crazy!" 
lg.parse("This is a sentence with a footnote.[1] Crazy!") i = 0 sub = "" expected = ["This is a sentence with a footnote.[1] Crazy!"] result = lg.sentences assert_equal(result, expected) #Substring white characters only lg.sentences = [] i = 0 sub = " \n \r\t \n" assert_raises(ValueError, lg.split_sentence, i, sub) # Test: multiple splits text = "0.1.2.3.4." lg.parse(text) #Check size BEFORE split expected = 1 result = len(lg.sentences) assert_equal(result, expected) # Do multiple splits lg.split_sentence(0, "0.") lg.split_sentence(1, "1.") lg.split_sentence(2, "2.") lg.split_sentence(3, "3.") expected = 5 result = len(lg.sentences) assert_equal(result, expected) expected = ["0.", "1.", "2.", "3.", "4."] result = lg.sentences assert_equal(result, expected) # Split when there's nothing to split lg.split_sentence(2, "2.") expected = ["0.", "1.", "2.", "3.", "4."] result = lg.sentences assert_equal(result, expected) #Split again and again lg.split_sentence(2, "2") lg.split_sentence(2, "2") expected = ["0.", "1.", "2", ".", "3.", "4."] result = lg.sentences assert_equal(result, expected) # Split with blank characters text = "You!\n\t\r\n123!\n\t\r\n" lg.parse(text) i = 1 sub = "123!" lg.split_sentence(i, sub) # Check sentence list length expected = 2 result = len(lg.sentences) assert_equal(result, expected) expected = ["You!", "\n\t\r\n123!"] result = lg.sentences assert_equal(result, expected) #
26.013008
129
0.626641
2,156
15,998
4.570965
0.097866
0.065855
0.0621
0.091324
0.81725
0.788229
0.753628
0.678539
0.610553
0.567529
0
0.018007
0.260595
15,998
614
130
26.055375
0.815115
0.464308
0
0.686695
0
0.004292
0.198418
0.002967
0
0
0
0
0.227468
1
0.030043
false
0
0.017167
0
0.04721
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
87f9e852ff052564749065adb94a15868dd21a22
88
py
Python
aadhaar/secure_qr/__init__.py
vishaltanwar96/aadhaar-py
7d3fe865ef1ab9e087699ddf83def332ab701eea
[ "MIT" ]
6
2021-05-12T13:57:46.000Z
2021-12-20T10:59:57.000Z
aadhaar/secure_qr/__init__.py
vishaltanwar96/aadhaar-py
7d3fe865ef1ab9e087699ddf83def332ab701eea
[ "MIT" ]
12
2021-11-27T09:50:34.000Z
2022-03-12T01:04:45.000Z
aadhaar/secure_qr/__init__.py
vishaltanwar96/aadhaar-py
7d3fe865ef1ab9e087699ddf83def332ab701eea
[ "MIT" ]
null
null
null
from aadhaar.secure_qr.extractor import extract_data __all__ = [ "extract_data", ]
14.666667
52
0.75
11
88
5.363636
0.818182
0.372881
0
0
0
0
0
0
0
0
0
0
0.159091
88
5
53
17.6
0.797297
0
0
0
0
0
0.136364
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
354f9d3e53c6ec25711fdb4ba643a2b11227cfcd
5,305
py
Python
vcard/settings.py
hcpthanks/vCard
cc9a301f413961c398c355426013c0cc05fbb1b7
[ "MIT" ]
null
null
null
vcard/settings.py
hcpthanks/vCard
cc9a301f413961c398c355426013c0cc05fbb1b7
[ "MIT" ]
null
null
null
vcard/settings.py
hcpthanks/vCard
cc9a301f413961c398c355426013c0cc05fbb1b7
[ "MIT" ]
null
null
null
""" Django settings for vcard project. Generated by 'django-admin startproject' using Django 2.1.2. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'oqc&3h1l*#u&fobkeyua92=-awh4wizv(cp_8srq-t)o=44r3g' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'guestbook', 'tinymce', 'taggit', 'blog', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'vcard.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates') ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'vcard.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = 
{ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'zh-hans' TIME_ZONE = 'Asia/Shanghai' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' #自定义静态资源目录 #指定使用项目根下的 static STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), ] # 自定义媒体根目录 MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') TINYMCE_DEFAULT_CONFIG = { # // General options 'mode': 'textareas', 'relative_urls': True, 'remove_script_host': False, 'urlconverter_callback': 'customURLConverter', 'theme': "advanced", 'plugins': "pagebreak,style,layer,table,save,advhr,advimage,advlink,emotions,iespell,inlinepopups,insertdatetime,preview,media,searchreplace,print,contextmenu,paste,directionality,fullscreen,noneditable,visualchars,nonbreaking,xhtmlxtras,template,wordcount,advlist,autosave", # // Theme options 'theme_advanced_buttons1': "save,newdocument,|,bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,styleselect,formatselect,fontselect,fontsizeselect,fullscreen,code", 'theme_advanced_buttons2': "cut,copy,paste,pastetext,|,search,replace,|,bullist,numlist,|,outdent,indent,blockquote,|,undo,redo,|,link,unlink,anchor,image,cleanup,|,insertdate,inserttime,preview,|,forecolor,backcolor", 'theme_advanced_buttons3': 
"tablecontrols,|,hr,removeformat,visualaid,|,sub,sup,|,charmap,emotions,iespell,media,advhr,|,print,|,ltr,rtl", 'theme_advanced_toolbar_location': "top", 'theme_advanced_toolbar_align': "left", 'theme_advanced_statusbar_location': "bottom", 'theme_advanced_resizing': 'true', # // content_css: "/css/style.css", 'template_external_list_url': "lists/template_list.js", 'external_link_list_url': "lists/link_list.js", 'external_image_list_url': "lists/image_list.js", 'media_external_list_url': "lists/media_list.js", # // Style formats 'style_formats': [ {'title': 'Bold text', 'inline': 'strong'}, {'title': 'Red text', 'inline': 'span', 'styles': {'color': '#ff0000'}}, {'title': 'Help', 'inline': 'strong', 'classes': 'help'}, {'title': 'Table styles'}, {'title': 'Table row 1', 'selector': 'tr', 'classes': 'tablerow'} ], 'width': '700', 'height': '400' }
30.314286
279
0.69689
585
5,305
6.186325
0.466667
0.053882
0.042553
0.048356
0.151423
0.128489
0.073777
0.073777
0.033158
0
0
0.010676
0.152498
5,305
174
280
30.488506
0.794262
0.208671
0
0.028846
1
0.038462
0.631478
0.497361
0
0
0
0
0
1
0
false
0.048077
0.009615
0
0.009615
0.019231
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
355d1cd2b9e3f646e7e1688cab7788f127e5575d
13,899
py
Python
src/typeclasses/migrations/0002_resave_attrs.py
reddcoin-project/ReddConnect
5c212683de6b80b81fd15ed05239c3a1b46c3afd
[ "BSD-3-Clause" ]
2
2019-02-24T00:20:47.000Z
2020-04-24T15:50:31.000Z
src/typeclasses/migrations/0002_resave_attrs.py
reddcoin-project/ReddConnect
5c212683de6b80b81fd15ed05239c3a1b46c3afd
[ "BSD-3-Clause" ]
null
null
null
src/typeclasses/migrations/0002_resave_attrs.py
reddcoin-project/ReddConnect
5c212683de6b80b81fd15ed05239c3a1b46c3afd
[ "BSD-3-Clause" ]
1
2019-01-05T15:51:37.000Z
2019-01-05T15:51:37.000Z
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models try: from django.contrib.auth import get_user_model except ImportError: # django < 1.5 from django.contrib.auth.models import User else: User = get_user_model() user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name) user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name) user_ptr_name = '%s_ptr' % User._meta.object_name.lower() class Migration(DataMigration): depends_on = (('server', '0004_store_all_attrs'), ('objects', '0021_auto__del_objattribute'), ('players', '0020_auto__del_playerattribute'), ('scripts', '0013_auto__del_scriptattribute')) no_dry_run=True def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for tmpattr in orm['server.TmpAttribute'].objects.all(): typ = tmpattr.db_obj_type dbid = tmpattr.db_obj_id if typ == 'objectdb': try: dbobj = orm['objects.ObjectDB'].objects.get(id=dbid) except: print "could not find objid %i" % dbid continue elif typ == 'playerdb': try: dbobj = orm['players.PlayerDB'].objects.get(id=dbid) except: print "could not find objid %i" % dbid continue elif typ == 'scriptdb': try: dbobj = orm['scripts.ScriptDB'].objects.get(id=dbid) except: print "could not find objid %i" % dbid continue else: print "Wrong object type to store on: %s" % typ continue dbattr = orm['typeclasses.Attribute'](db_key=tmpattr.db_key, db_value=tmpattr.db_value, db_lock_storage=tmpattr.db_lock_storage, db_date_created=tmpattr.db_date_created) dbattr.save() dbobj.db_attributes.add(dbattr) def backwards(self, orm): "Write your backwards methods here." 
raise RuntimeError("Cannot revert this migration.") models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, user_model_label: { 'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 
'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'server.serverconfig': { 'Meta': {'object_name': 'ServerConfig'}, 'db_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'db_value': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'server.tmpattribute': { 'Meta': {'object_name': 'TmpAttribute'}, 'db_date_created': ('django.db.models.fields.DateTimeField', [], {}), 'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'db_obj_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'db_obj_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'db_value': ('src.utils.picklefield.PickledObjectField', [], {'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'typeclasses.attribute': { 'Meta': {'object_name': 'Attribute'}, 'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'db_key': 
('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'db_value': ('src.utils.picklefield.PickledObjectField', [], {'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'objects.alias': { 'Meta': {'object_name': 'Alias'}, 'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['objects.ObjectDB']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'objects.objectdb': { 'Meta': {'object_name': 'ObjectDB'}, 'db_attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['typeclasses.Attribute']", 'null': 'True', 'symmetrical': 'False'}), 'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'db_destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destinations_set'", 'null': 'True', 'to': u"orm['objects.ObjectDB']"}), 'db_home': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'homes_set'", 'null': 'True', 'to': u"orm['objects.ObjectDB']"}), 'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations_set'", 'null': 'True', 'to': u"orm['objects.ObjectDB']"}), 'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'db_player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['players.PlayerDB']", 'null': 'True', 
'blank': 'True'}), 'db_sessid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'objects.objectnick': { 'Meta': {'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'ObjectNick'}, 'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['objects.ObjectDB']"}), 'db_real': ('django.db.models.fields.TextField', [], {}), 'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'players.playerdb': { 'Meta': {'object_name': 'PlayerDB'}, 'db_attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['typeclasses.Attribute']", 'null': 'True', 'symmetrical': 'False'}), 'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'db_is_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True'}) }, u'players.playernick': { 'Meta': 
{'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'PlayerNick'}, 'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['players.PlayerDB']"}), 'db_real': ('django.db.models.fields.TextField', [], {}), 'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'scripts.scriptdb': { 'Meta': {'object_name': 'ScriptDB'}, 'db_attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['typeclasses.Attribute']", 'null': 'True', 'symmetrical': 'False'}), 'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'db_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'db_interval': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'db_is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['objects.ObjectDB']", 'null': 'True', 'blank': 'True'}), 'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'db_persistent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'db_repeats': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'db_start_delay': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) } } complete_apps = 
['server', 'typeclasses', 'objects', 'scripts', 'players'] symmetrical = True
70.19697
188
0.559681
1,489
13,899
5.053056
0.13499
0.093567
0.161882
0.23126
0.717172
0.709862
0.698963
0.667464
0.619086
0.550106
0
0.00992
0.216706
13,899
197
189
70.553299
0.681179
0.00849
0
0.33871
0
0
0.529361
0.275604
0
0
0
0
0
0
null
null
0.005376
0.037634
null
null
0.021505
0
0
0
null
0
0
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
3569ad1085aab98b521635f2be1a6368983cfa79
407
py
Python
setup.py
agronick/aiogear
9e4f2d18f05d91daea9e48de18b2d38f4811589f
[ "MIT" ]
null
null
null
setup.py
agronick/aiogear
9e4f2d18f05d91daea9e48de18b2d38f4811589f
[ "MIT" ]
null
null
null
setup.py
agronick/aiogear
9e4f2d18f05d91daea9e48de18b2d38f4811589f
[ "MIT" ]
null
null
null
from setuptools import setup setup( name='aiogear', version='0.2.4rc1', author='Sinan Nalkaya', author_email='sardok@gmail.com', url='https://github.com/sardok/aiogear', description='Asynchronous gearman protocol based on asyncio', packages=['aiogear'], classifiers=[ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ] )
25.4375
65
0.638821
45
407
5.755556
0.777778
0.146718
0.19305
0.200772
0
0
0
0
0
0
0
0.024922
0.211302
407
15
66
27.133333
0.781931
0
0
0
0
0
0.501229
0
0
0
0
0
0
1
0
true
0
0.071429
0
0.071429
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
35795d0e39c4aae272a22ceeb958dd05d3b9c8c9
43,391
py
Python
Code_for_Signal_Processing/plot_corr_mx_85_concate_time_linux_v1.6.6.py
puyaraimondii/biometric-classification-of-frequency-following-responses
f5b5dca516592be451a3133acb8fa178519bc991
[ "MIT" ]
1
2021-04-20T14:47:40.000Z
2021-04-20T14:47:40.000Z
Code_for_Signal_Processing/plot_corr_mx_85_concate_time_linux_v1.6.6.py
puyaraimondii/biometric-classification-of-frequency-following-responses
f5b5dca516592be451a3133acb8fa178519bc991
[ "MIT" ]
null
null
null
Code_for_Signal_Processing/plot_corr_mx_85_concate_time_linux_v1.6.6.py
puyaraimondii/biometric-classification-of-frequency-following-responses
f5b5dca516592be451a3133acb8fa178519bc991
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Aug 9 17:02:59 2018 @author: bruce compared with version 1.6.4 the update is from correlation coefficient """ import pandas as pd import numpy as np from scipy import fftpack from scipy import signal import matplotlib.pyplot as plt from matplotlib.colors import LinearSegmentedColormap def correlation_matrix(corr_mx, cm_title): from matplotlib import pyplot as plt from matplotlib import cm as cm fig = plt.figure() ax1 = fig.add_subplot(111) #cmap = cm.get_cmap('jet', 30) cax = ax1.matshow(corr_mx, cmap='gray') #cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap) fig.colorbar(cax) ax1.grid(False) plt.title(cm_title) #plt.title('cross correlation of test and retest') ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25'] xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25'] ax1.set_xticks(np.arange(len(xlabels))) ax1.set_yticks(np.arange(len(ylabels))) ax1.set_xticklabels(xlabels,fontsize=6) ax1.set_yticklabels(ylabels,fontsize=6) # Add colorbar, make sure to specify tick locations to match desired ticklabels #fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1]) # show digit in matrix corr_mx_array = np.asarray(corr_mx) for i in range(22): for j in range(22): c = corr_mx_array[j,i] ax1.text(i, j, round(c,2), va='center', ha='center') plt.show() def correlation_matrix_01(corr_mx, cm_title): # find the maximum in each row # input corr_mx is a dataframe # need to convert it into a array first #otherwise it is not working temp = np.asarray(corr_mx) output = (temp == temp.max(axis=1)[:,None]) # along rows fig = plt.figure() ax1 = fig.add_subplot(111) #cmap = cm.get_cmap('jet', 30) cs = ax1.matshow(output, cmap='gray') #cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap) fig.colorbar(cs) ax1.grid(False) 
plt.title(cm_title) ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25'] xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25'] ax1.set_xticks(np.arange(len(xlabels))) ax1.set_yticks(np.arange(len(ylabels))) ax1.set_xticklabels(xlabels,fontsize=6) ax1.set_yticklabels(ylabels,fontsize=6) plt.show() def correlation_matrix_rank(corr_mx, cm_title): temp = corr_mx #output = (temp == temp.max(axis=1)[:,None]) # along row output = temp.rank(axis=1, ascending=False) fig, ax1 = plt.subplots() im1 = ax1.matshow(output, cmap=plt.cm.Wistia) #cs = ax1.matshow(output) fig.colorbar(im1) ax1.grid(False) ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25'] xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25'] ax1.set_xticks(np.arange(len(xlabels))) ax1.set_yticks(np.arange(len(ylabels))) ax1.set_xticklabels(xlabels,fontsize=6) ax1.set_yticklabels(ylabels,fontsize=6) plt.title(cm_title) # show digit in matrix output = np.asarray(output) for i in range(22): for j in range(22): c = output[j,i] ax1.text(i, j, int(c), va='center', ha='center') plt.show() def correlation_matrix_comb(corr_mx, cm_title): fig, (ax2, ax3) = plt.subplots(1, 2) ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25'] xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25'] ''' # graph 1 grayscale im1 = ax1.matshow(corr_mx, cmap='gray') # colorbar need numpy version 1.13.1 #fig.colorbar(im1, ax=ax1) ax1.grid(False) ax1.set_title(cm_title) 
ax1.set_xticks(np.arange(len(xlabels))) ax1.set_yticks(np.arange(len(ylabels))) ax1.set_xticklabels(xlabels,fontsize=6) ax1.set_yticklabels(ylabels,fontsize=6) # show digit in matrix corr_mx_array = np.asarray(corr_mx) for i in range(22): for j in range(22): c = corr_mx_array[j,i] ax1.text(i, j, round(c,2), va='center', ha='center') ''' # graph 2 yellowscale corr_mx_rank = corr_mx.rank(axis=1, ascending=False) cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black']) im2 = ax2.matshow(corr_mx, cmap='viridis') # colorbar need numpy version 1.13.1 fig.colorbar(im2, ax=ax2) ax2.grid(False) ax2.set_title(cm_title) ax2.set_xticks(np.arange(len(xlabels))) ax2.set_yticks(np.arange(len(ylabels))) ax2.set_xticklabels(xlabels,fontsize=6) ax2.set_yticklabels(ylabels,fontsize=6) # Add colorbar, make sure to specify tick locations to match desired ticklabels # show digit in matrix corr_mx_rank = np.asarray(corr_mx_rank) for i in range(22): for j in range(22): c = corr_mx_rank[j,i] ax2.text(i, j, int(c), va='center', ha='center') # graph 3 # find the maximum in each row # input corr_mx is a dataframe # need to convert it into a array first #otherwise it is not working temp = np.asarray(corr_mx) output = (temp == temp.max(axis=1)[:,None]) # along rows im3 = ax3.matshow(output, cmap='gray') # colorbar need numpy version 1.13.1 #fig.colorbar(im3, ax=ax3) ax3.grid(False) ax3.set_title(cm_title) ax3.set_xticks(np.arange(len(xlabels))) ax3.set_yticks(np.arange(len(ylabels))) ax3.set_xticklabels(xlabels,fontsize=6) ax3.set_yticklabels(ylabels,fontsize=6) plt.show() def correlation_matrix_tt_01(corr_mx, cm_title): # find the maximum in each row # input corr_mx is a dataframe # need to convert it into a array first #otherwise it is not working temp = np.asarray(corr_mx) output = (temp == temp.max(axis=1)[:,None]) # along rows fig = plt.figure() ax1 = fig.add_subplot(111) #cmap = cm.get_cmap('jet', 30) cax = ax1.matshow(output, cmap='gray') #cax = ax1.imshow(df.corr(), 
interpolation="nearest", cmap=cmap) fig.colorbar(cax) ax1.grid(False) plt.title(cm_title) ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25'] xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25'] ax1.set_xticks(np.arange(len(xlabels))) ax1.set_yticks(np.arange(len(ylabels))) ax1.set_xticklabels(xlabels,fontsize=6) ax1.set_yticklabels(ylabels,fontsize=6) plt.show() def correlation_matrix_rr_01(corr_mx, cm_title): # find the maximum in each row # input corr_mx is a dataframe # need to convert it into a array first #otherwise it is not working temp = np.asarray(corr_mx) output = (temp == temp.max(axis=1)[:,None]) # along rows fig = plt.figure() ax1 = fig.add_subplot(111) #cmap = cm.get_cmap('jet', 30) cax = ax1.matshow(output, cmap='gray') #cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap) fig.colorbar(cax) ax1.grid(False) plt.title(cm_title) ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25'] xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25'] ax1.set_xticks(np.arange(len(xlabels))) ax1.set_yticks(np.arange(len(ylabels))) ax1.set_xticklabels(xlabels,fontsize=6) ax1.set_yticklabels(ylabels,fontsize=6) plt.show() # shrink value for correlation matrix # in order to use colormap -> 10 scale def shrink_value_03_1(corr_in1): corr_out1 = corr_in1.copy() # here dataframe.copy() must be used, otherwise input can also be changed when changing output for i in range (22): for j in range(22): if corr_in1.iloc[i, j] < 0.3: corr_out1.iloc[i, j] = 0.3 return corr_out1 def shrink_value_05_1(corr_in2): corr_out2 = corr_in2.copy() # here dataframe.copy() must be used, otherwise input can also be 
changed when changing output for i2 in range (22): for j2 in range(22): if corr_in2.iloc[i2, j2] < 0.5: corr_out2.iloc[i2, j2] = 0.5 return corr_out2 # not used!!!!!!!!!!!! # normalize the complex signal series def normalize_complex_arr(a): a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted return a_oo/np.abs(a_oo).max() def improved_PCC(signal_in): output_corr = pd.DataFrame() for i in range(44): row_pcc_notremovemean = [] for j in range(44): sig_1 = signal_in.iloc[i, :] sig_2 = signal_in.iloc[j, :] pcc_notremovemean = np.abs(np.sum(sig_1 * sig_2) / np.sqrt(np.sum(sig_1*sig_1) * np.sum(sig_2 * sig_2))) row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean) output_corr = output_corr.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True) output_corr = output_corr.iloc[22:44, 0:22] return output_corr ############################################################################### # import the pkl file #pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl') df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl') # Mac # df_EFR=pd.read_pickle('/Users/bruce/Documents/uOttawa/Master‘s Thesis/4.Project/4.Code for Linux/df_EFR.pkl') # remove DC offset df_EFR_detrend = pd.DataFrame() for i in range(1408): # combine next two rows later df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024)) df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7)) df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True)) # set the title of columns df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) df_EFR_detrend = df_EFR_detrend.reset_index(drop=True) df_EFR = df_EFR_detrend # Define window function win_kaiser = 
signal.kaiser(1024, beta=14) win_hamming = signal.hamming(1024) # average the df_EFR df_EFR_avg = pd.DataFrame() df_EFR_avg_win = pd.DataFrame() # average test1 and test2 for i in range(704): # combine next two rows later df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows # without window function df_EFR_avg_t = pd.DataFrame(df_EFR_avg_t.iloc[0,:].values.reshape(1,1024)) # without window function # implement the window function df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024)) df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7)) df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True)) df_EFR_avg_win = df_EFR_avg_win.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True)) # set the title of columns df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"]) df_EFR_avg = df_EFR_avg.reset_index(drop=True) df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"]) df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True) # average all the subjects , test and retest and keep one sound levels # filter by 'a vowel and 85Db' df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"]) df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True) df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"]) df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True) # filter55 65 75 sound levels and keep 85dB # keep vowel condition and subject df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :]) df_EFR_avg_85 = 
df_EFR_avg_85.reset_index(drop=True) df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :]) df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True) # this part was replaced by upper part based on what I need to do ''' # average all the subjects , test and retest, different sound levels # filter by 'a vowel and 85Db' df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"]) df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True) # average sound levels and # keep vowel condition and subject df_EFR_avg_vcs = pd.DataFrame() for i in range(176): # combine next two rows later df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7)) df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True) # set the title of columns df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) #df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"]) ''' ''' # filter by 'a vowel and 85Db' df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')] df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True) df_EFR_a_85_avg = pd.DataFrame() # average test1 and test2 for i in range(44): df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7)) df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True)) # set the title of columns df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) 
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"]) df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True) ''' ################################################## # Frequency Domain # parameters sampling_rate = 9606 # fs # sampling_rate = 9596.623 n = 1024 k = np.arange(n) T = n/sampling_rate # time of signal frq = k/T freq = frq[range(int(n/2))] n2 = 9606 k2 = np.arange(n2) T2 = n2/sampling_rate frq2 = k2/T2 freq2 = frq2[range(int(n2/2))] # zero padding # for df_EFR df_EFR_data = df_EFR.iloc[:, :1024] df_EFR_label = df_EFR.iloc[:, 1024:] df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036))) df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1) # rename columns df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) # for df_EFR_avg_85 df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024] df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:] df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 8582))) df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1) # rename columns df_EFR_avg_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) # df_EFR_avg_win_85 df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024] df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:] df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 8582))) df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1) df_EFR_avg_win_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"]) # concatenate AENU temp1 = pd.concat([df_EFR_avg_85.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1) temp2 = pd.concat([df_EFR_avg_85.iloc[88:132, 0:1024].reset_index(drop=True), 
df_EFR_avg_85.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1) df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True) df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1) ''' # test############## # test(detrend) temp_test = np.asarray(df_EFR_avg_85_data.iloc[0, 0:1024]) temp_test_detrend = signal.detrend(temp_test) plt.figure() plt.subplot(2, 1, 1) plt.plot(temp_test) plt.subplot(2, 1, 2) plt.plot(temp_test_detrend) plt.show() # the raw data is already DC removed # test(zero padding) temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024] temp_EFR_2= df_EFR_withzero.iloc[0, 0:9606] temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))]) temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/n2)[range(int(n2/2))]) plt.figure() plt.subplot(2, 1, 1) markers1 = [11, 21, 32, 43, 53, 64, 75] # which corresponds to 100 200....700Hz in frequency domain plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1) plt.xlim(0, 100) plt.title('without zero padding') plt.subplot(2, 1, 2) #markers2 = [100, 200, 300, 400, 500, 600, 700] markers2 = [99, 199, 299, 399, 499, 599, 599] # which corresponds to 100 200....700Hz in frequency domain plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2) plt.xlim(0, 1000) # plt.xscale('linear') plt.title('with zero padding') plt.show() # ################# ''' # Calculate the Amplitude Spectrum # create a new dataframe with zero-padding amplitude spectrum ''' # for df_EFR df_as_7= pd.DataFrame() for i in range(1408): temp_EFR = df_EFR_avg_85_withzero.iloc[i, 0:96060] temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))]) #df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0) df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \ temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True) df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels 
on it # filter by 'a vowel and 85Db' df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')] df_as_7_test1 = df_as_7_test1.reset_index(drop=True) ''' # for df_EFR_avg_vcs_withzero df_as_85_no0= pd.DataFrame() df_as_85= pd.DataFrame() df_as7_85= pd.DataFrame() df_as_win_85= pd.DataFrame() df_as7_win_85= pd.DataFrame() for i in range(176): #temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606] temp_as_no0 = np.abs((np.fft.fft(df_EFR_avg_85_data.iloc[i, :]))[range(int(n/2))]) df_as_85_no0 = df_as_85_no0.append(pd.DataFrame(temp_as_no0.reshape(1,512)), ignore_index = True) temp_as = np.abs((np.fft.fft(df_EFR_avg_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))]) df_as_85 = df_as_85.append(pd.DataFrame(temp_as.reshape(1,4803)), ignore_index = True) df_as7_85 = df_as7_85.append(pd.DataFrame(np.array([temp_as[100], temp_as[200], temp_as[300], temp_as[400], \ temp_as[500], temp_as[600], temp_as[700]]).reshape(1,7)), ignore_index = True) temp_as_win = np.abs((np.fft.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))]) df_as_win_85 = df_as_win_85.append(pd.DataFrame(temp_as_win.reshape(1,4803)), ignore_index = True) df_as7_win_85 = df_as7_win_85.append(pd.DataFrame(np.array([temp_as_win[100], temp_as_win[200], temp_as_win[300], temp_as_win[400], \ temp_as_win[500], temp_as_win[600], temp_as_win[700]]).reshape(1,7)), ignore_index = True) df_as_85_no0 = pd.concat([df_as_85_no0, df_EFR_avg_85_label], axis=1) # add labels on it df_as_85 = pd.concat([df_as_85, df_EFR_avg_85_label], axis=1) # add labels on it df_as7_85 = pd.concat([df_as7_85, df_EFR_avg_85_label], axis=1) # add labels on it df_as_win_85 = pd.concat([df_as_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it df_as7_win_85 = pd.concat([df_as7_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it # wothout zero padding df_as_85_aenu = pd.concat([df_as_85.iloc[0:44, :4803], df_as_85.iloc[44:88, :4803].reset_index(drop=True), df_as_85.iloc[88:132, 
:4803].reset_index(drop=True), df_as_85.iloc[132:176, :4803].reset_index(drop=True)], axis=1) df_as_85_1300_aenu = pd.concat([df_as_85.iloc[0:44, :1300], df_as_85.iloc[44:88, :1300].reset_index(drop=True), df_as_85.iloc[88:132, :1300].reset_index(drop=True), df_as_85.iloc[132:176, :1300].reset_index(drop=True)], axis=1) df_as_85_no0_1300 = df_as_85_no0.iloc[:, :139] df_as_85_no0_aenu = pd.concat([df_as_85_no0_1300.iloc[0:44, :], df_as_85_no0_1300.iloc[44:88, :].reset_index(drop=True), df_as_85_no0_1300.iloc[88:132, :].reset_index(drop=True), df_as_85_no0_1300.iloc[132:176, :].reset_index(drop=True)], axis=1) df_as7_85_aenu = pd.concat([df_as7_85.iloc[0:44, :7], df_as7_85.iloc[44:88, :7].reset_index(drop=True), df_as7_85.iloc[88:132, :7].reset_index(drop=True), df_as7_85.iloc[132:176, :7].reset_index(drop=True)], axis=1) # for efr_aenu df_aenu_as_85 = pd.DataFrame() df_aenu_as7_85 = pd.DataFrame() for i in range(44): #temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606] temp_as2 = np.abs((fftpack.fft(df_EFR_avg_85_aenu.iloc[i, 0:4096])/4096)[range(int(4096/2))]) df_aenu_as_85 = df_aenu_as_85.append(pd.DataFrame(temp_as2.reshape(1,2048)), ignore_index = True) df_aenu_as7_85 = df_aenu_as7_85.append(pd.DataFrame(np.array([temp_as2[43], temp_as2[85], temp_as2[128], temp_as2[170], \ temp_as2[213], temp_as2[256], temp_as2[298]]).reshape(1,7)), ignore_index = True) #df_aenu_as_85 = pd.concat([df_aenu_as_85, df_EFR_avg_85_label], axis=1) # add labels on it ''' # average test1 and test2 df_as_7_avg = pd.DataFrame() for i in range(44): df_as_7_avg1 = pd.DataFrame(df_as_7_test1.iloc[2*i: 2*i+1, 0:7].mean(axis=0).values.reshape(1,7)) df_as_7_label = pd.DataFrame(df_as_7_test1.iloc[2*i, 7:14].values.reshape(1,7)) df_as_7_avg_t = pd.concat([df_as_7_avg1, df_as_7_label], axis=1, ignore_index=True) df_as_7_avg = df_as_7_avg.append(df_as_7_avg_t) # set the title of columns df_as_7_avg.columns = np.append(np.arange(7), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", 
"Num", "EFR/FFR"]) df_as_7_avg = df_as_7_avg.sort_values(by=["Condition", "Subject"]) df_as_7_avg = df_as_7_avg.reset_index(drop=True) ''' ''' # set a normalized AS df_as_7_avg_data= pd.DataFrame(df_as_7_avg.iloc[:, 0:7].astype(float)) df_as_7_avg_sum= pd.DataFrame(df_as_7_avg.iloc[:, 0:7]).sum(axis=1) df_as_7_avg_label= pd.DataFrame(df_as_7_avg.iloc[:, 7:14]) # normalize df_as_7_avg_norm = df_as_7_avg_data.div(df_as_7_avg_sum, axis=0) # add label df_as_7_avg_norm = pd.concat([df_as_7_avg_norm, df_as_7_avg_label], axis=1, ignore_index=True) ''' # normalization df_EFR_avg_85_aenu_norm = df_EFR_avg_85_aenu.div((df_EFR_avg_85_aenu.iloc[0:4096].abs()**2).sum()) df_aenu_as_85_1300_norm = df_aenu_as_85.iloc[:, :535].div((df_aenu_as_85.iloc[:, :535].abs()**2).sum()/1300) df_as_85_1300_aenu_norm = df_as_85_1300_aenu.div((df_as_85_1300_aenu.abs()**2).sum()/1300) # Calculate correlation # EFR corr_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22] corr_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22] corr_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22] corr_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22] corr_EFR_avg_85_aenu = df_EFR_avg_85_aenu.iloc[:, 0:4096].T.corr(method='pearson').iloc[22:44, 0:22] ''' corr_EFR_avg_85_a_t = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22] corr_EFR_avg_85_e_t = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22] corr_EFR_avg_85_n_t = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22] corr_EFR_avg_85_u_t = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22] corr_EFR_avg_85_a_re = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44] corr_EFR_avg_85_e_re = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44] 
corr_EFR_avg_85_n_re = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44] corr_EFR_avg_85_u_re = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44] ''' # AS corr_as_85_a = df_as_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_85_e = df_as_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_85_n = df_as_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_85_u = df_as_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_win_85_a = df_as_win_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_win_85_e = df_as_win_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_win_85_n = df_as_win_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_win_85_u = df_as_win_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_85_aenu = df_aenu_as_85.iloc[0:44, 0:2048].T.corr(method='pearson').iloc[22:44, 0:22] # here we use df_aenu_as_85.iloc[:, 0:535] to limit freq into 0 to 1300Hz corr_as_85_aenu_1300 = df_aenu_as_85.iloc[0:44, 0:535].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_85_no0_aenu = df_as_85_no0_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22] corr_as_85_no0_aenu = df_as_85_no0_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22] corr_as7_85_aenu = df_as7_85_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22] corr_aenu_as7_85 = df_aenu_as7_85.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22] # calculate the improved PCC matrix corr_as_85_a_v2 = improved_PCC(df_as_85.iloc[0:44, 0:1300]) corr_as_85_e_v2 = improved_PCC(df_as_85.iloc[44:88, 0:1300]) corr_as_85_n_v2 = improved_PCC(df_as_85.iloc[88:132, 0:1300]) corr_as_85_u_v2 = improved_PCC(df_as_85.iloc[132:176, 0:1300]) corr_as_85_1300_aenu = improved_PCC(df_as_85_1300_aenu) # df_EFR + df_aenu_AS_1300 df_aenu_sum_85 = 
pd.concat([df_EFR_avg_85_aenu, df_aenu_as_85.iloc[:, :535]], axis=1) # df_aenu_sum_85 = pd.concat([df_EFR_avg_85_aenu_norm, df_aenu_as_85_1300_norm], axis=1) corr_sum_85_aenu = df_aenu_sum_85.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22] # df_EFR + df_aenu_no0_as df_aenu_sum_85_v2 = pd.concat([df_EFR_avg_85_aenu, df_as_85_no0_aenu], axis=1) corr_sum_85_aenu_v2 = df_aenu_sum_85_v2.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22] # concatenate df_EFR and df_as_85_1300_aenu df_aenu_sum_85_v3 = pd.concat([df_EFR_avg_85_aenu, df_as_85_1300_aenu], axis=1) # df_aenu_sum_85_v3 = pd.concat([df_EFR_avg_85_aenu_norm, df_as_85_1300_aenu_norm], axis=1) corr_sum_85_aenu_v3 = df_aenu_sum_85_v3.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22] # improved PCC (not remove mean for as) # test for do not removing the mean of PCC corr_sum_85_aenu_v4 = pd.DataFrame() signal_in = df_aenu_sum_85_v3 for i in range(44): row_pcc_notremovemean = [] row_pcc = [] for j in range(44): sig_1 = signal_in.iloc[i, :].reset_index(drop=True) sig_2 = signal_in.iloc[j, :].reset_index(drop=True) sig_1_remove_mean = (sig_1 - sig_1.mean()).reset_index(drop=True) sig_2_remove_mean = (sig_2 - sig_2.mean()).reset_index(drop=True) # here EFR remove the mean but AS not # then normalize the energy of EFR and AS sig_1_p1 = sig_1_remove_mean.iloc[0:4096].div((sig_1_remove_mean.iloc[0:4096].abs()**2).sum()) sig_1_p2 = sig_1.iloc[4096:].div((sig_1.iloc[4096:].abs()**2).sum()/1300) sig_1_new = pd.concat([sig_1_p1, sig_1_p2]) sig_2_p1 = sig_2_remove_mean.iloc[0:4096].div((sig_2_remove_mean.iloc[0:4096].abs()**2).sum()) sig_2_p2 = sig_2.iloc[4096:].div((sig_2.iloc[4096:].abs()**2).sum()/1300) sig_2_new = pd.concat([sig_2_p1, sig_2_p2]) #sig_1_new = pd.concat([sig_1_remove_mean.iloc[0:4096], sig_1.iloc[4096:]]) #sig_2_new = pd.concat([sig_2_remove_mean.iloc[0:4096], sig_2.iloc[4096:]]) ''' pcc_notremovemean = np.abs(np.sum(sig_1 * sig_2) / np.sqrt(np.sum(sig_1*sig_1) * np.sum(sig_2 * sig_2))) 
pcc = np.abs(np.sum(sig_1_remove_mean * sig_2_remove_mean) / np.sqrt(np.sum(sig_1_remove_mean*sig_1_remove_mean) * np.sum(sig_2_remove_mean * sig_2_remove_mean))) ''' pcc_notremovemean = np.abs(np.sum(sig_1_new * sig_2_new) / np.sqrt(np.sum(sig_1_new*sig_1_new) * np.sum(sig_2_new * sig_2_new))) row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean) # row_pcc = np.append(row_pcc, pcc) # example if i==4 & j==5: plt.figure(1) ax1 = plt.subplot(211) ax1.plot(sig_1) ax1.plot(sig_2) ax2 = plt.subplot(212) ax2.plot(sig_1_remove_mean) ax2.plot(sig_2_remove_mean) ax1.set_title("original signal, norm corr = %.3f" % pcc_notremovemean) ax2.set_title("signal with mean removed(PCC), norm corr = %.3f" % pcc) plt.tight_layout() ax1.grid(True) ax2.grid(True) plt.show() corr_sum_85_aenu_v4 = corr_sum_85_aenu_v4.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True) corr_sum_85_aenu_v4 = corr_sum_85_aenu_v4.iloc[22:44, 0:22] ''' corr_as_85_a_t = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22] corr_as_85_e_t = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22] corr_as_85_n_t = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22] corr_as_85_u_t = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22] corr_as_85_a_re = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44] corr_as_85_e_re = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44] corr_as_85_n_re = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44] corr_as_85_u_re = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44] ''' #AS7 corr_as7_85_a = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 0:22] corr_as7_85_e = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 0:22] corr_as7_85_n = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 0:22] corr_as7_85_u = 
df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 0:22] ''' corr_as7_85_a_t = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[0:22, 0:22] corr_as7_85_e_t = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[0:22, 0:22] corr_as7_85_n_t = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[0:22, 0:22] corr_as7_85_u_t = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[0:22, 0:22] corr_as7_85_a_re = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 22:44] corr_as7_85_e_re = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 22:44] corr_as7_85_n_re = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 22:44] corr_as7_85_u_re = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 22:44] ''' # shrink # shrink the correlation range from 0.3 to 1 # EFR ''' corr_EFR_avg_85_a_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_a) corr_EFR_avg_85_e_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_e) corr_EFR_avg_85_n_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_n) corr_EFR_avg_85_u_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_u) ''' corr_EFR_avg_85_aenu_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_aenu) # AS ''' corr_as_win_85_a_shrink_03_1 = shrink_value_03_1(corr_as_win_85_a) corr_as_win_85_e_shrink_03_1 = shrink_value_03_1(corr_as_win_85_e) corr_as_win_85_n_shrink_03_1 = shrink_value_03_1(corr_as_win_85_n) corr_as_win_85_u_shrink_03_1 = shrink_value_03_1(corr_as_win_85_u) ''' corr_as_85_aenu_shrink_03_1 = shrink_value_03_1(corr_as_85_aenu) # shrink the correlation range from 0.5 to 1 # EFR corr_EFR_avg_85_aenu_shrink_05_1 = shrink_value_05_1(corr_EFR_avg_85_aenu) # AS corr_as_85_aenu_shrink_05_1 = shrink_value_05_1(corr_as_85_aenu) # test # sum of time and frequency corelation matrix corr_sum_avg_85_aenu = (corr_EFR_avg_85_aenu + corr_as_85_aenu_1300).copy() corr_sum_avg_85_aenu_v2 = (corr_EFR_avg_85_aenu + corr_as_85_no0_aenu).copy() #corr_sum_avg_85_aenu = 
(corr_EFR_avg_85_aenu + corr_as_85_aenu).copy() # max of time and frequency corelation matrix # corr_max_avg_85_aenu = (corr_EFR_avg_85_aenu ? corr_as_85_aenu).copy() # plot the figure ''' # Correlation Matrix # EFR correlation_matrix(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain') correlation_matrix(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain') correlation_matrix(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain') correlation_matrix(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain') # AS correlation_matrix(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain') correlation_matrix(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain') correlation_matrix(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain') correlation_matrix(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain') # AS7 correlation_matrix(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7') correlation_matrix(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7') correlation_matrix(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7') correlation_matrix(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7') # Correlation Matrix witn 0 and 1 # EFR correlation_matrix_01(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain') #correlation_matrix_tt_01(corr_EFR_avg_85_a_t, 'cross correlation of 85dB a_vowel in time domain') #correlation_matrix_rr_01(corr_EFR_avg_85_a_re, 'cross correlation of 85dB a_vowel in time domain') correlation_matrix_01(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain') #correlation_matrix_tt_01(corr_EFR_avg_85_e_t, 'cross correlation of 85dB e_vowel in time domain') #correlation_matrix_rr_01(corr_EFR_avg_85_e_re, 'cross correlation of 85dB e_vowel in time domain') correlation_matrix_01(corr_EFR_avg_85_n, 'cross 
correlation of 85dB n_vowel in time domain') #correlation_matrix_tt_01(corr_EFR_avg_85_n_t, 'cross correlation of 85dB n_vowel in time domain') #correlation_matrix_rr_01(corr_EFR_avg_85_n_re, 'cross correlation of 85dB n_vowel in time domain') correlation_matrix_01(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain') #correlation_matrix_tt_01(corr_EFR_avg_85_u_t, 'cross correlation of 85dB u_vowel in time domain') #correlation_matrix_rr_01(corr_EFR_avg_85_u_re, 'cross correlation of 85dB u_vowel in time domain') # Amplitude Spectrum correlation_matrix_01(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain') #correlation_matrix_tt_01(corr_as_85_a_t, 'cross correlation of 85dB a_vowel in frequency domain') #correlation_matrix_rr_01(corr_as_85_a_re, 'cross correlation of 85dB a_vowel in frequency domain') correlation_matrix_01(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain') #correlation_matrix_tt_01(corr_as_85_e_t, 'cross correlation of 85dB e_vowel in frequency domain') #correlation_matrix_rr_01(corr_as_85_e_re, 'cross correlation of 85dB e_vowel in frequency domain') correlation_matrix_01(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain') #correlation_matrix_tt_01(corr_as_85_n_t, 'cross correlation of 85dB n_vowel in frequency domain') #correlation_matrix_rr_01(corr_as_85_n_re, 'cross correlation of 85dB n_vowel in frequency domain') correlation_matrix_01(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain') #correlation_matrix_tt_01(corr_as_85_u_t, 'cross correlation of 85dB u_vowel in frequency domain') #correlation_matrix_rr_01(corr_as_85_u_re, 'cross correlation of 85dB u_vowel in frequency domain') # Amplitude Spectrum 7 points correlation_matrix_01(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7') #correlation_matrix_tt_01(corr_as7_85_a_t, 'cross correlation of 85dB a_vowel in frequency domain 7') #correlation_matrix_rr_01(corr_as7_85_a_re, 
'cross correlation of 85dB a_vowel in frequency domain 7') correlation_matrix_01(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7') #correlation_matrix_tt_01(corr_as7_85_e_t, 'cross correlation of 85dB e_vowel in frequency domain 7') #correlation_matrix_rr_01(corr_as7_85_e_re, 'cross correlation of 85dB e_vowel in frequency domain 7') correlation_matrix_01(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7') #correlation_matrix_tt_01(corr_as7_85_n_t, 'cross correlation of 85dB n_vowel in frequency domain 7') #correlation_matrix_rr_01(corr_as7_85_n_re, 'cross correlation of 85dB n_vowel in frequency domain 7') correlation_matrix_01(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7') #correlation_matrix_tt_01(corr_as7_85_u_t, 'cross correlation of 85dB u_vowel in frequency domain 7') #correlation_matrix_rr_01(corr_as7_85_u_re, 'cross correlation of 85dB u_vowel in frequency domain 7') ''' # Correlation Matrix_both # EFR ''' correlation_matrix_comb(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain') correlation_matrix_comb(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain') correlation_matrix_comb(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain') correlation_matrix_comb(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain') ''' correlation_matrix_comb(corr_EFR_avg_85_aenu, 'cross correlation of 85dB aenu in time domain') correlation_matrix_comb(corr_EFR_avg_85_aenu_shrink_03_1, 'cross correlation of shrinked(0.3, 1) 85dB aenu in time domain') correlation_matrix_comb(corr_EFR_avg_85_aenu_shrink_05_1, 'cross correlation of shrinked(0.5, 1) 85dB aenu in time domain') # AS ''' correlation_matrix_comb(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain') correlation_matrix_comb(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain') correlation_matrix_comb(corr_as_85_n, 'cross correlation of 85dB 
n_vowel in frequency domain') correlation_matrix_comb(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain') ''' correlation_matrix_comb(corr_as_85_a_v2, 'cross correlation of 85dB a_vowel in frequency domain (improved PCC)') correlation_matrix_comb(corr_as_85_e_v2, 'cross correlation of 85dB e_vowel in frequency domain (improved PCC)') correlation_matrix_comb(corr_as_85_n_v2, 'cross correlation of 85dB n_vowel in frequency domain (improved PCC)') correlation_matrix_comb(corr_as_85_u_v2, 'cross correlation of 85dB u_vowel in frequency domain (improved PCC)') ''' correlation_matrix_comb(corr_as_win_85_a, 'cross correlation of 85dB a_vowel in frequency domain(hamming)') correlation_matrix_comb(corr_as_win_85_e, 'cross correlation of 85dB e_vowel in frequency domain(hamming)') correlation_matrix_comb(corr_as_win_85_n, 'cross correlation of 85dB n_vowel in frequency domain(hamming)') correlation_matrix_comb(corr_as_win_85_u, 'cross correlation of 85dB u_vowel in frequency domain(hamming)') ''' # no zero-padding correlation_matrix_comb(corr_as_85_no0_aenu, 'cross correlation of 85dB aenu in frequency domain(no zero padding)') # aenu -> as correlation_matrix_comb(corr_as_85_aenu, 'cross correlation of 85dB aenu in frequency domain') correlation_matrix_comb(corr_as_85_aenu_shrink_03_1, 'cross correlation of shrinked(0.3, 1) 85dB aenu in frequency domain') correlation_matrix_comb(corr_as_85_aenu_shrink_05_1, 'cross correlation of shrinked(0.5, 1) 85dB aenu in frequency domain') # zero padding -> as -> 0-1300Hz -> aenu # pcc do not remove mean correlation_matrix_comb(corr_as_85_1300_aenu, 'cross correlation of 85dB aenu in frequency domain(version2, improved PCC)') # AS7 ''' correlation_matrix_comb(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7') correlation_matrix_comb(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7') correlation_matrix_comb(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency 
domain 7') correlation_matrix_comb(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7') ''' correlation_matrix_comb(corr_as7_85_aenu, 'cross correlation of 85dB aenu in frequency domain 7(as7_aenu)') correlation_matrix_comb(corr_aenu_as7_85, 'cross correlation of 85dB aenu in frequency domain 7(aenu_as7)') # sum of EFR and AS # corr_EFR + corr_AS correlation_matrix_comb(corr_sum_avg_85_aenu, 'cross correlation of sum 85dB aenu in time and freq domain') correlation_matrix_comb(corr_sum_avg_85_aenu_v2, 'cross correlation of sum 85dB aenu in time and freq domain(version2)') # concat df_EFR + df_aenu_as 4096+535 correlation_matrix_comb(corr_sum_85_aenu, 'cross correlation of sum 85dB aenu in time and freq domain') # concat df_EFR + df_as_aenu 4096+5200 correlation_matrix_comb(corr_sum_85_aenu_v3, 'cross correlation of sum 85dB aenu in time and freq domain(version3)') # improved PCC correlation_matrix_comb(corr_sum_85_aenu_v4, 'cross correlation of sum 85dB aenu in time and freq domain (improved PCC)') # test corr_sum_85_aenu_v4.style.background_gradient(cmap='coolwarm')
45.483229
151
0.711691
7,881
43,391
3.600685
0.063444
0.032421
0.03383
0.05737
0.839729
0.781689
0.72002
0.660183
0.611093
0.56796
0
0.104134
0.141527
43,391
954
152
45.483229
0.657664
0.108963
0
0.24234
0
0
0.115383
0.001543
0
0
0
0
0
1
0.027855
false
0
0.022284
0
0.061281
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
3579a60a21ebd74c9b69a1c76b463cbf6758ff39
224
py
Python
analyticlab/measure/__init__.py
xingrongtech/analyticlab
2827591db9b31ff38299712ed6c404ff30583f6f
[ "MIT" ]
13
2018-05-11T02:45:11.000Z
2021-07-17T22:19:04.000Z
analyticlab/measure/__init__.py
xingrongtech/analyticlab
2827591db9b31ff38299712ed6c404ff30583f6f
[ "MIT" ]
null
null
null
analyticlab/measure/__init__.py
xingrongtech/analyticlab
2827591db9b31ff38299712ed6c404ff30583f6f
[ "MIT" ]
2
2019-10-17T11:43:11.000Z
2019-11-27T10:54:28.000Z
# -*- coding: utf-8 -*- """ Created on Sun Feb 18 09:25:00 2018 @author: xingrongtech """ from . import ins, std, ACategory, BCategory from .basemeasure import BaseMeasure from .measure import Measure from .ins import Ins
18.666667
44
0.71875
32
224
5.03125
0.6875
0.111801
0
0
0
0
0
0
0
0
0
0.069519
0.165179
224
11
45
20.363636
0.791444
0.361607
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
357edbf894cb150923e3c3c0472792341b9e7b4c
208
py
Python
topCoder/srms/200s/srm277/div2/sandwich_bar.py
ferhatelmas/algo
a7149c7a605708bc01a5cd30bf5455644cefd04d
[ "WTFPL" ]
25
2015-01-21T16:39:18.000Z
2021-05-24T07:01:24.000Z
topCoder/srms/200s/srm277/div2/sandwich_bar.py
gauravsingh58/algo
397859a53429e7a585e5f6964ad24146c6261326
[ "WTFPL" ]
2
2020-09-30T19:39:36.000Z
2020-10-01T17:15:16.000Z
topCoder/srms/200s/srm277/div2/sandwich_bar.py
ferhatelmas/algo
a7149c7a605708bc01a5cd30bf5455644cefd04d
[ "WTFPL" ]
15
2015-01-21T16:39:27.000Z
2020-10-01T17:00:22.000Z
class SandwichBar: def whichOrder(self, available, orders): for i, o in enumerate(orders): if all(map(lambda e: e in available, o.split())): return i return -1
29.714286
61
0.5625
27
208
4.333333
0.740741
0
0
0
0
0
0
0
0
0
0
0.007246
0.336538
208
6
62
34.666667
0.84058
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
3580c0dd3f79543bbbc30b6fb81f79c1da8e9f30
187
py
Python
socialinsecurity.py
OmeletteDuFromageDat250/Protect-Magnus-Book
503dc78f46a33d7490383c646991edbfd690942e
[ "MIT" ]
null
null
null
socialinsecurity.py
OmeletteDuFromageDat250/Protect-Magnus-Book
503dc78f46a33d7490383c646991edbfd690942e
[ "MIT" ]
null
null
null
socialinsecurity.py
OmeletteDuFromageDat250/Protect-Magnus-Book
503dc78f46a33d7490383c646991edbfd690942e
[ "MIT" ]
1
2020-02-10T22:14:38.000Z
2020-02-10T22:14:38.000Z
# configured as the entry point of the app, simply imports app to start application, just run 'flask run' to start from app import app app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
46.75
114
0.759358
32
187
4.375
0.71875
0.1
0
0
0
0
0
0
0
0
0
0.064516
0.171123
187
4
115
46.75
0.83871
0.59893
0
0
0
0
0.243243
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
358c6e2e2306e328879feed2952a8562bec7ccc4
25,391
py
Python
server.py
mitmedialab/MediaCloud-Discover
5c7dd2f2bfe2b9dbb79d52fc5500987cba63e038
[ "MIT" ]
null
null
null
server.py
mitmedialab/MediaCloud-Discover
5c7dd2f2bfe2b9dbb79d52fc5500987cba63e038
[ "MIT" ]
3
2018-04-20T18:19:15.000Z
2018-04-20T18:35:51.000Z
server.py
mitmedialab/MediaCloud-Discover
5c7dd2f2bfe2b9dbb79d52fc5500987cba63e038
[ "MIT" ]
null
null
null
from flask import render_template from flask import Flask from flask_cache import Cache from flask import jsonify from os import environ import os import logging.config import datetime import mediacloud import json import random # All country entity data data = {} mc_admin = None app = Flask(__name__) api_key = environ.get('MC_API_KEY') base_dir = os.path.dirname(os.path.abspath(__file__)) # setup logging with open(os.path.join(base_dir, 'server-logging.json'), 'r') as f: logging_config = json.load(f) logging_config['handlers']['file']['filename'] = os.path.join(base_dir, logging_config['handlers']['file']['filename']) logging.config.dictConfig(logging_config) logger = logging.getLogger(__name__) logger.info("---------------------------------------------------------------------------") # https://pythonhosted.org/Flask-Cache/ # Flask-Cache Filesystem Mode Parameters: # CACHE_DEFAULT_TIMEOUT # CACHE_DIR # CACHE_THRESHOLD # CACHE_ARGS # CACHE_OPTIONS cache = Cache(app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': './cache', 'CACHE_DEFAULT_TIMEOUT': '28800'}) # 8 hour cache COUNTRY_GEONAMES_ID_TO_APLHA3 = 
{3041565:"AND",290557:"ARE",1149361:"AFG",3576396:"ATG",3573511:"AIA",783754:"ALB",174982:"ARM",3351879:"AGO",6697173:"ATA",3865483:"ARG",5880801:"ASM",2782113:"AUT",2077456:"AUS",3577279:"ABW",661882:"ALA",587116:"AZE",3277605:"BIH",3374084:"BRB",1210997:"BGD",2802361:"BEL",2361809:"BFA",732800:"BGR",290291:"BHR",433561:"BDI",2395170:"BEN",3578476:"BLM",3573345:"BMU",1820814:"BRN",3923057:"BOL",7626844:"BES",3469034:"BRA",3572887:"BHS",1252634:"BTN",3371123:"BVT",933860:"BWA",630336:"BLR",3582678:"BLZ",6251999:"CAN",1547376:"CCK",203312:"COD",239880:"CAF",2260494:"COG",2658434:"CHE",2287781:"CIV",1899402:"COK",3895114:"CHL",2233387:"CMR",1814991:"CHN",3686110:"COL",3624060:"CRI",3562981:"CUB",3374766:"CPV",7626836:"CUW",2078138:"CXR",146669:"CYP",3077311:"CZE",2921044:"DEU",223816:"DJI",2623032:"DNK",3575830:"DMA",3508796:"DOM",2589581:"DZA",3658394:"ECU",453733:"EST",357994:"EGY",2461445:"ESH",338010:"ERI",2510769:"ESP",337996:"ETH",660013:"FIN",2205218:"FJI",3474414:"FLK",2081918:"FSM",2622320:"FRO",3017382:"FRA",2400553:"GAB",2635167:"GBR",3580239:"GRD",614540:"GEO",3381670:"GUF",3042362:"GGY",2300660:"GHA",2411586:"GIB",3425505:"GRL",2413451:"GMB",2420477:"GIN",3579143:"GLP",2309096:"GNQ",390903:"GRC",3474415:"SGS",3595528:"GTM",4043988:"GUM",2372248:"GNB",3378535:"GUY",1819730:"HKG",1547314:"HMD",3608932:"HND",3202326:"HRV",3723988:"HTI",719819:"HUN",1643084:"IDN",2963597:"IRL",294640:"ISR",3042225:"IMN",1269750:"IND",1282588:"IOT",99237:"IRQ",130758:"IRN",2629691:"ISL",3175395:"ITA",3042142:"JEY",3489940:"JAM",248816:"JOR",1861060:"JPN",192950:"KEN",1527747:"KGZ",1831722:"KHM",4030945:"KIR",921929:"COM",3575174:"KNA",1873107:"PRK",1835841:"KOR",831053:"XKX",285570:"KWT",3580718:"CYM",1522867:"KAZ",1655842:"LAO",272103:"LBN",3576468:"LCA",3042058:"LIE",1227603:"LKA",2275384:"LBR",932692:"LSO",597427:"LTU",2960313:"LUX",458258:"LVA",2215636:"LBY",2542007:"MAR",2993457:"MCO",617790:"MDA",3194884:"MNE",3578421:"MAF",1062947:"MDG",2080185:"MHL",718075:"MKD",245386
6:"MLI",1327865:"MMR",2029969:"MNG",1821275:"MAC",4041468:"MNP",3570311:"MTQ",2378080:"MRT",3578097:"MSR",2562770:"MLT",934292:"MUS",1282028:"MDV",927384:"MWI",3996063:"MEX",1733045:"MYS",1036973:"MOZ",3355338:"NAM",2139685:"NCL",2440476:"NER",2155115:"NFK",2328926:"NGA",3617476:"NIC",2750405:"NLD",3144096:"NOR",1282988:"NPL",2110425:"NRU",4036232:"NIU",2186224:"NZL",286963:"OMN",3703430:"PAN",3932488:"PER",4030656:"PYF",2088628:"PNG",1694008:"PHL",1168579:"PAK",798544:"POL",3424932:"SPM",4030699:"PCN",4566966:"PRI",6254930:"PSE",2264397:"PRT",1559582:"PLW",3437598:"PRY",289688:"QAT",935317:"REU",798549:"ROU",6290252:"SRB",2017370:"RUS",49518:"RWA",102358:"SAU",2103350:"SLB",241170:"SYC",366755:"SDN",7909807:"SSD",2661886:"SWE",1880251:"SGP",3370751:"SHN",3190538:"SVN",607072:"SJM",3057568:"SVK",2403846:"SLE",3168068:"SMR",2245662:"SEN",51537:"SOM",3382998:"SUR",2410758:"STP",3585968:"SLV",7609695:"SXM",163843:"SYR",934841:"SWZ",3576916:"TCA",2434508:"TCD",1546748:"ATF",2363686:"TGO",1605651:"THA",1220409:"TJK",4031074:"TKL",1966436:"TLS",1218197:"TKM",2464461:"TUN",4032283:"TON",298795:"TUR",3573591:"TTO",2110297:"TUV",1668284:"TWN",149590:"TZA",690791:"UKR",226074:"UGA",5854968:"UMI",6252001:"USA",3439705:"URY",1512440:"UZB",3164670:"VAT",3577815:"VCT",3625428:"VEN",3577718:"VGB",4796775:"VIR",1562822:"VNM",2134431:"VUT",4034749:"WLF",4034894:"WSM",69543:"YEM",1024031:"MYT",953987:"ZAF",895949:"ZMB",878675:"ZWE"} COUNTRY_ALPHA_TO_LAT_LONG = {'XKX': {'lat': 42.60, 'long': 20.9 }, 'SSD': {'lat': 7.265, 'long': 30.054}, 'DZA': {'lat': 28.0, 'long': 3.0}, 'AGO': {'lat': -12.5, 'long': 18.5}, 'EGY': {'lat': 27.0, 'long': 30.0}, 'BGD': {'lat': 24.0, 'long': 90.0}, 'NER': {'lat': 16.0, 'long': 8.0}, 'LIE': {'lat': 47.1667, 'long': 9.5333}, 'NAM': {'lat': -22.0, 'long': 17.0}, 'BGR': {'lat': 43.0, 'long': 25.0}, 'BOL': {'lat': -17.0, 'long': -65.0}, 'GHA': {'lat': 8.0, 'long': -2.0}, 'CCK': {'lat': -12.5, 'long': 96.8333}, 'PAK': {'lat': 30.0, 'long': 70.0}, 'CPV': 
{'lat': 16.0, 'long': -24.0}, 'JOR': {'lat': 31.0, 'long': 36.0}, 'LBR': {'lat': 6.5, 'long': -9.5}, 'LBY': {'lat': 25.0, 'long': 17.0}, 'MYS': {'lat': 2.5, 'long': 112.5}, 'DOM': {'lat': 19.0, 'long': -70.6667}, 'PRI': {'lat': 18.25, 'long': -66.5}, 'MYT': {'lat': -12.8333, 'long': 45.1667}, 'PRK': {'lat': 40.0, 'long': 127.0}, 'PSE': {'lat': 32.0, 'long': 35.25}, 'TZA': {'lat': -6.0, 'long': 35.0}, 'BWA': {'lat': -22.0, 'long': 24.0}, 'KHM': {'lat': 13.0, 'long': 105.0}, 'UMI': {'lat': 19.2833, 'long': 166.6}, 'TTO': {'lat': 11.0, 'long': -61.0}, 'PRY': {'lat': -23.0, 'long': -58.0}, 'HKG': {'lat': 22.25, 'long': 114.1667}, 'SAU': {'lat': 25.0, 'long': 45.0}, 'LBN': {'lat': 33.8333, 'long': 35.8333}, 'SVN': {'lat': 46.0, 'long': 15.0}, 'BFA': {'lat': 13.0, 'long': -2.0}, 'SVK': {'lat': 48.6667, 'long': 19.5}, 'MRT': {'lat': 20.0, 'long': -12.0}, 'HRV': {'lat': 45.1667, 'long': 15.5}, 'CHL': {'lat': -30.0, 'long': -71.0}, 'CHN': {'lat': 35.0, 'long': 105.0}, 'KNA': {'lat': 17.3333, 'long': -62.75}, 'JAM': {'lat': 18.25, 'long': -77.5}, 'SMR': {'lat': 43.7667, 'long': 12.4167}, 'GIB': {'lat': 36.1833, 'long': -5.3667}, 'DJI': {'lat': 11.5, 'long': 43.0}, 'GIN': {'lat': 11.0, 'long': -10.0}, 'FIN': {'lat': 64.0, 'long': 26.0}, 'URY': {'lat': -33.0, 'long': -56.0}, 'VAT': {'lat': 41.9, 'long': 12.45}, 'STP': {'lat': 1.0, 'long': 7.0}, 'SYC': {'lat': -4.5833, 'long': 55.6667}, 'NPL': {'lat': 28.0, 'long': 84.0}, 'CXR': {'lat': -10.5, 'long': 105.6667}, 'LAO': {'lat': 18.0, 'long': 105.0}, 'YEM': {'lat': 15.0, 'long': 48.0}, 'BVT': {'lat': -54.4333, 'long': 3.4}, 'ZAF': {'lat': -29.0, 'long': 24.0}, 'KIR': {'lat': 1.4167, 'long': 173.0}, 'PHL': {'lat': 13.0, 'long': 122.0}, 'ROU': {'lat': 46.0, 'long': 25.0}, 'VIR': {'lat': 18.3333, 'long': -64.8333}, 'SYR': {'lat': 35.0, 'long': 38.0}, 'MAC': {'lat': 22.1667, 'long': 113.55}, 'NIC': {'lat': 13.0, 'long': -85.0}, 'MLT': {'lat': 35.8333, 'long': 14.5833}, 'KAZ': {'lat': 48.0, 'long': 68.0}, 'TCA': {'lat': 21.75, 'long': 
-71.5833}, 'PYF': {'lat': -15.0, 'long': -140.0}, 'NIU': {'lat': -19.0333, 'long': -169.8667}, 'DMA': {'lat': 15.4167, 'long': -61.3333}, 'GBR': {'lat': 54.0, 'long': -2.0}, 'BEN': {'lat': 9.5, 'long': 2.25}, 'GUF': {'lat': 4.0, 'long': -53.0}, 'BEL': {'lat': 50.8333, 'long': 4.0}, 'MSR': {'lat': 16.75, 'long': -62.2}, 'TGO': {'lat': 8.0, 'long': 1.1667}, 'DEU': {'lat': 51.0, 'long': 9.0}, 'GUM': {'lat': 13.4667, 'long': 144.7833}, 'LKA': {'lat': 7.0, 'long': 81.0}, 'FLK': {'lat': -51.75, 'long': -59.0}, 'PCN': {'lat': -24.7, 'long': -127.4}, 'GUY': {'lat': 5.0, 'long': -59.0}, 'CRI': {'lat': 10.0, 'long': -84.0}, 'COK': {'lat': -21.2333, 'long': -159.7667}, 'MAR': {'lat': 32.0, 'long': -5.0}, 'MNP': {'lat': 15.2, 'long': 145.75}, 'LSO': {'lat': -29.5, 'long': 28.5}, 'HUN': {'lat': 47.0, 'long': 20.0}, 'TKM': {'lat': 40.0, 'long': 60.0}, 'SUR': {'lat': 4.0, 'long': -56.0}, 'NLD': {'lat': 52.5, 'long': 5.75}, 'BMU': {'lat': 32.3333, 'long': -64.75}, 'HMD': {'lat': -53.1, 'long': 72.5167}, 'TCD': {'lat': 15.0, 'long': 19.0}, 'GEO': {'lat': 42.0, 'long': 43.5}, 'MNE': {'lat': 42.0, 'long': 19.0}, 'MNG': {'lat': 46.0, 'long': 105.0}, 'MHL': {'lat': 9.0, 'long': 168.0}, 'MTQ': {'lat': 14.6667, 'long': -61.0}, 'BLZ': {'lat': 17.25, 'long': -88.75}, 'NFK': {'lat': -29.0333, 'long': 167.95}, 'MMR': {'lat': 22.0, 'long': 98.0}, 'AFG': {'lat': 33.0, 'long': 65.0}, 'BDI': {'lat': -3.5, 'long': 30.0}, 'VGB': {'lat': 18.5, 'long': -64.5}, 'BLR': {'lat': 53.0, 'long': 28.0}, 'GRD': {'lat': 12.1167, 'long': -61.6667}, 'TKL': {'lat': -9.0, 'long': -172.0}, 'GRC': {'lat': 39.0, 'long': 22.0}, 'GRL': {'lat': 72.0, 'long': -40.0}, 'SHN': {'lat': -15.9333, 'long': -5.7}, 'AND': {'lat': 42.5, 'long': 1.6}, 'MOZ': {'lat': -18.25, 'long': 35.0}, 'TJK': {'lat': 39.0, 'long': 71.0}, 'THA': {'lat': 15.0, 'long': 100.0}, 'HTI': {'lat': 19.0, 'long': -72.4167}, 'MEX': {'lat': 23.0, 'long': -102.0}, 'ANT': {'lat': 12.25, 'long': -68.75}, 'ZWE': {'lat': -20.0, 'long': 30.0}, 'LCA': {'lat': 
13.8833, 'long': -61.1333}, 'IND': {'lat': 20.0, 'long': 77.0}, 'LVA': {'lat': 57.0, 'long': 25.0}, 'BTN': {'lat': 27.5, 'long': 90.5}, 'VCT': {'lat': 13.25, 'long': -61.2}, 'VNM': {'lat': 16.0, 'long': 106.0}, 'NOR': {'lat': 62.0, 'long': 10.0}, 'CZE': {'lat': 49.75, 'long': 15.5}, 'ATF': {'lat': -43.0, 'long': 67.0}, 'ATG': {'lat': 17.05, 'long': -61.8}, 'FJI': {'lat': -18.0, 'long': 175.0}, 'IOT': {'lat': -6.0, 'long': 71.5}, 'HND': {'lat': 15.0, 'long': -86.5}, 'MUS': {'lat': -20.2833, 'long': 57.55}, 'ATA': {'lat': -90.0, 'long': 0.0}, 'LUX': {'lat': 49.75, 'long': 6.1667}, 'ISR': {'lat': 31.5, 'long': 34.75}, 'FSM': {'lat': 6.9167, 'long': 158.25}, 'PER': {'lat': -10.0, 'long': -76.0}, 'REU': {'lat': -21.1, 'long': 55.6}, 'IDN': {'lat': -5.0, 'long': 120.0}, 'VUT': {'lat': -16.0, 'long': 167.0}, 'MKD': {'lat': 41.8333, 'long': 22.0}, 'COD': {'lat': 0.0, 'long': 25.0}, 'COG': {'lat': -1.0, 'long': 15.0}, 'ISL': {'lat': 65.0, 'long': -18.0}, 'GLP': {'lat': 16.25, 'long': -61.5833}, 'ETH': {'lat': 8.0, 'long': 38.0}, 'COM': {'lat': -12.1667, 'long': 44.25}, 'COL': {'lat': 4.0, 'long': -72.0}, 'NGA': {'lat': 10.0, 'long': 8.0}, 'TWN': {'lat': 23.5, 'long': 121.0}, 'PRT': {'lat': 39.5, 'long': -8.0}, 'MDA': {'lat': 47.0, 'long': 29.0}, 'GGY': {'lat': 49.5, 'long': -2.56}, 'MDG': {'lat': -20.0, 'long': 47.0}, 'ECU': {'lat': -2.0, 'long': -77.5}, 'SEN': {'lat': 14.0, 'long': -14.0}, 'ESH': {'lat': 24.5, 'long': -13.0}, 'MDV': {'lat': 3.25, 'long': 73.0}, 'ASM': {'lat': -14.3333, 'long': -170.0}, 'SPM': {'lat': 46.8333, 'long': -56.3333}, 'SRB': {'lat': 44.0, 'long': 21.0}, 'FRA': {'lat': 46.0, 'long': 2.0}, 'LTU': {'lat': 56.0, 'long': 24.0}, 'RWA': {'lat': -2.0, 'long': 30.0}, 'ZMB': {'lat': -15.0, 'long': 30.0}, 'GMB': {'lat': 13.4667, 'long': -16.5667}, 'WLF': {'lat': -13.3, 'long': -176.2}, 'JEY': {'lat': 49.21, 'long': -2.13}, 'FRO': {'lat': 62.0, 'long': -7.0}, 'GTM': {'lat': 15.5, 'long': -90.25}, 'DNK': {'lat': 56.0, 'long': 10.0}, 'IMN': {'lat': 54.23, 
'long': -4.55}, 'AUS': {'lat': -27.0, 'long': 133.0}, 'AUT': {'lat': 47.3333, 'long': 13.3333}, 'SJM': {'lat': 78.0, 'long': 20.0}, 'VEN': {'lat': 8.0, 'long': -66.0}, 'PLW': {'lat': 7.5, 'long': 134.5}, 'KEN': {'lat': 1.0, 'long': 38.0}, 'TUR': {'lat': 39.0, 'long': 35.0}, 'ALB': {'lat': 41.0, 'long': 20.0}, 'OMN': {'lat': 21.0, 'long': 57.0}, 'TUV': {'lat': -8.0, 'long': 178.0}, 'ITA': {'lat': 42.8333, 'long': 12.8333}, 'BRN': {'lat': 4.5, 'long': 114.6667}, 'TUN': {'lat': 34.0, 'long': 9.0}, 'RUS': {'lat': 60.0, 'long': 100.0}, 'BRB': {'lat': 13.1667, 'long': -59.5333}, 'BRA': {'lat': -10.0, 'long': -55.0}, 'CIV': {'lat': 8.0, 'long': -5.0}, 'TLS': {'lat': -8.55, 'long': 125.5167}, 'GNQ': {'lat': 2.0, 'long': 10.0}, 'USA': {'lat': 38.0, 'long': -97.0}, 'QAT': {'lat': 25.5, 'long': 51.25}, 'WSM': {'lat': -13.5833, 'long': -172.3333}, 'AZE': {'lat': 40.5, 'long': 47.5}, 'GNB': {'lat': 12.0, 'long': -15.0}, 'SWZ': {'lat': -26.5, 'long': 31.5}, 'TON': {'lat': -20.0, 'long': -175.0}, 'CAN': {'lat': 60.0, 'long': -95.0}, 'UKR': {'lat': 49.0, 'long': 32.0}, 'KOR': {'lat': 37.0, 'long': 127.5}, 'AIA': {'lat': 18.25, 'long': -63.1667}, 'CAF': {'lat': 7.0, 'long': 21.0}, 'CHE': {'lat': 47.0, 'long': 8.0}, 'CYP': {'lat': 35.0, 'long': 33.0}, 'BIH': {'lat': 44.0, 'long': 18.0}, 'SGP': {'lat': 1.3667, 'long': 103.8}, 'SGS': {'lat': -54.5, 'long': -37.0}, 'SOM': {'lat': 10.0, 'long': 49.0}, 'UZB': {'lat': 41.0, 'long': 64.0}, 'CMR': {'lat': 6.0, 'long': 12.0}, 'POL': {'lat': 52.0, 'long': 20.0}, 'KWT': {'lat': 29.3375, 'long': 47.6581}, 'ERI': {'lat': 15.0, 'long': 39.0}, 'GAB': {'lat': -1.0, 'long': 11.75}, 'CYM': {'lat': 19.5, 'long': -80.5}, 'ARE': {'lat': 24.0, 'long': 54.0}, 'EST': {'lat': 59.0, 'long': 26.0}, 'MWI': {'lat': -13.5, 'long': 34.0}, 'ESP': {'lat': 40.0, 'long': -4.0}, 'IRQ': {'lat': 33.0, 'long': 44.0}, 'SLV': {'lat': 13.8333, 'long': -88.9167}, 'MLI': {'lat': 17.0, 'long': -4.0}, 'IRL': {'lat': 53.0, 'long': -8.0}, 'IRN': {'lat': 32.0, 'long': 53.0}, 
'ABW': {'lat': 12.5, 'long': -69.9667}, 'SLE': {'lat': 8.5, 'long': -11.5}, 'PAN': {'lat': 9.0, 'long': -80.0}, 'SDN': {'lat': 15.0, 'long': 30.0}, 'SLB': {'lat': -8.0, 'long': 159.0}, 'NZL': {'lat': -41.0, 'long': 174.0}, 'MCO': {'lat': 43.7333, 'long': 7.4}, 'JPN': {'lat': 36.0, 'long': 138.0}, 'KGZ': {'lat': 41.0, 'long': 75.0}, 'UGA': {'lat': 1.0, 'long': 32.0}, 'NCL': {'lat': -21.5, 'long': 165.5}, 'PNG': {'lat': -6.0, 'long': 147.0}, 'ARG': {'lat': -34.0, 'long': -64.0}, 'SWE': {'lat': 62.0, 'long': 15.0}, 'BHS': {'lat': 24.25, 'long': -76.0}, 'BHR': {'lat': 26.0, 'long': 50.55}, 'ARM': {'lat': 40.0, 'long': 45.0}, 'NRU': {'lat': -0.5333, 'long': 166.9167}, 'CUB': {'lat': 21.5, 'long': -80.0}} # ///////////////////////////////////////////////////////////////////////// def init(): global mc_admin global data mc_admin = mediacloud.api.AdminMediaCloud(api_key) logger.info('Media Cloud Interface created') logger.debug(api_key) logger.info('Loading entity cache...') load_country_cache() logger.info('Loading complete.') # Get list of cached countries, load them into country_cache # ///////////////////////////////////////////////////////////////////////// def load_country_cache(): with open(os.path.join(base_dir, 'whitelist.json')) as f: whitelist = json.load(f) for item in whitelist: filename = 'cache/{0}.json'.format( item['country_name'] ) try: with open(os.path.join(base_dir, filename), 'r') as country_json: logger.debug('Loading cache from {0}...'.format(filename)) country_data = json.load(country_json) data[country_data['id']] = country_data logger.debug('Cache file {0} loaded.'.format(filename)) # If JSON cannot be read, skip country except (ValueError, KeyError, IOError) as e: logger.error('Cannot read cache file {0}'.format(filename)) logger.error(e) pass # ///////////////////////////////////////////////////////////////////////// # // Application Root # // Default to United States context # 
///////////////////////////////////////////////////////////////////////// @app.route('/') def root(): return render_template('index.html', data='9139487') # ///////////////////////////////////////////////////////////////////////// @app.route('/<int:country_id>/<string:entity_type>/<entity_id>') def entity_select(country_id, entity_type, entity_id): return render_template('index.html', data={'country_id': country_id, 'entity_id': entity_id, 'entity_type': entity_type}) # ///////////////////////////////////////////////////////////////////////// @app.route('/word_over_time/<int:collection_id>/<string:type>/<entity>') def words_over_time(collection_id, type, entity): ''' Helper to fetch sentences counts over the last year for an arbitrary query ''' last_n_days = 30 start_date = datetime.date.today()-datetime.timedelta(last_n_days) end_date = datetime.date.today()-datetime.timedelta(1) # yesterday fq = mc_admin.publish_date_query(start_date, end_date) start_datetime = datetime.datetime.strftime(start_date, '%Y-%m-%d') end_datetime = datetime.datetime.strftime(end_date, '%Y-%m-%d') if entity.isdigit(): if type == 'media': # Media Type sentences_over_time = mc_admin.sentenceCount('*', [ 'tags_id_media:{0}'.format(str(collection_id)), 'media_id:{0}'.format(entity), fq ], split=True, split_start_date=start_datetime, split_end_date=end_datetime)['split'] else: # Entity Type sentences_over_time = mc_admin.sentenceCount('*', [ 'tags_id_media:{0}'.format(str(collection_id)), 'tags_id_stories:{0}'.format(entity), fq ], split=True, split_start_date=start_datetime, split_end_date=end_datetime)['split'] else: # Word Type sentences_over_time = mc_admin.sentenceCount(entity, [ 'tags_id_media:({0})'.format(str(collection_id)), fq ], split=True, split_start_date=start_datetime, split_end_date=end_datetime)['split'] return jsonify(sentences_over_time) # ///////////////////////////////////////////////////////////////////////// def add_type(entity, type): entity['type'] = type return 
entity # ///////////////////////////////////////////////////////////////////////// def build_json_response(json_data): response = app.response_class( response=json.dumps(json_data), status=200, mimetype='application/json') return response # ///////////////////////////////////////////////////////////////////////// @app.route('/cache_data') def cache_data(): # Tag sets that hold tags on stories... NYT_LABELS_TAG_SET = 1963 # one tag per theme in a story (Jasmin's transfer-learning model) GEO_TAG_SET = 1011 # one tag per country/state stories are about (disambiguated) CLIFF_ORGS_TAG_SET = 2388 # one tag for each org mentioned in stories CLIFF_PEOPLE_TAG_SET = 2389 # one tag for each perosn mentioned in stories countries = { '34412193': 'China' } for country_id, country_name in countries.items(): data[country_id] = { 'name': country_name } logger.info('Getting Media for {0}...'.format(country_name)) data[country_id]['media'] = getBiggestMedia(country_id) logger.info('Getting Words for {0}...'.format(country_name)) data[country_id]['words'] = getTopWords(country_id) logger.info('Getting NYT Labels for {0}...'.format(country_name)) data[country_id]['labels'] = getEntities(country_id, NYT_LABELS_TAG_SET) logger.info('Getting Places for {0}...'.format(country_name)) data[country_id]['places'] = getEntities(country_id, GEO_TAG_SET) logger.info('Getting Organizations for {0}...'.format(country_name)) data[country_id]['orgs'] = getEntities(country_id, CLIFF_ORGS_TAG_SET) logger.info('Getting People for {0}...'.format(country_name)) data[country_id]['people'] = getEntities(country_id, CLIFF_PEOPLE_TAG_SET) response = build_json_response(data) clear_cache() cache_data() return response # ///////////////////////////////////////////////////////////////////////// @cache.cached(timeout=28800, key_prefix='cache_data') def cache_data(): return data # ///////////////////////////////////////////////////////////////////////// @app.route('/country_entities/<country_id>') def 
country_entities(country_id): FROM_EACH_TYPE = 8 # Pick random entities random.shuffle(data[country_id]['people']) random_people = data[country_id]['people'][: FROM_EACH_TYPE] random_people = [add_type(entity, 'person') for entity in random_people] random.shuffle(data[country_id]['labels']) random_labels = data[country_id]['labels'][: FROM_EACH_TYPE] random_labels = [add_type(entity, 'label') for entity in random_labels] random.shuffle(data[country_id]['orgs']) random_orgs = data[country_id]['orgs'][: FROM_EACH_TYPE] random_orgs = [add_type(entity, 'organization') for entity in random_orgs] random.shuffle(data[country_id]['places']) random_places = data[country_id]['places'][: FROM_EACH_TYPE] random_places = [add_type(entity, 'location') for entity in random_places] random.shuffle(data[country_id]['media']) random_media = data[country_id]['media'][: FROM_EACH_TYPE] random_media = [add_type(entity, 'media') for entity in random_media] random.shuffle(data[country_id]['words']) random_words = data[country_id]['words'][: FROM_EACH_TYPE] random_words = [add_type(entity, 'word') for entity in random_words] all_entities = random_labels + random_places + random_orgs + random_people + random_media + random_words response = build_json_response(all_entities) return response # ///////////////////////////////////////////////////////////////////////// def getEntities(collection_id, tag_set): entities = mc_admin.sentenceFieldCount('*',[ 'tags_id_media:{}'.format(collection_id), 'publish_date:NOW to NOW-3MONTH' ], tag_sets_id=tag_set, sample_size=5000) return entities # ///////////////////////////////////////////////////////////////////////// @app.route('/entity/<int:entity_id>') def entity(entity_id): entity = mc_admin.tag(entity_id) return jsonify(entity) # ///////////////////////////////////////////////////////////////////////// @app.route('/getTopWords/<int:collection_id>') def getTopWords(collection_id): word = mc_admin.wordCount('*', [ 
'tags_id_media:{0}'.format(collection_id), 'publish_date:NOW to NOW-3MONTH' ], num_words=100, sample_size=5000) return word # ///////////////////////////////////////////////////////////////////////// @app.route('/media/<int:media_id>') def media(media_id): data = mc_admin.media( media_id ) return jsonify(data) # ///////////////////////////////////////////////////////////////////////// @app.route('/getBiggestMedia/<int:collection_id>') def getBiggestMedia(collection_id): media = mc_admin.mediaList(rows=10, tags_id=collection_id, sort='num_stories') return media # ///////////////////////////////////////////////////////////////////////// @app.route('/getGlobeData/<int:collection_id>') def getGlobeData(collection_id): lat_long_mag = [] geo_tags = mc_admin.sentenceFieldCount('tags_id_media:{0}'.format(collection_id), tag_sets_id=1011) country_tags = [t for t in geo_tags if int(t['tag'].split('_')[1]) in COUNTRY_GEONAMES_ID_TO_APLHA3.keys()] for t in country_tags: try: alpha3 = COUNTRY_GEONAMES_ID_TO_APLHA3[int(t['tag'].split('_')[1])] latlong = COUNTRY_ALPHA_TO_LAT_LONG[alpha3] lat_long_mag.append(latlong['lat']) lat_long_mag.append(latlong['long']) lat_long_mag.append(t['count']) logger.info(t) except Exception, e: logger.error('Failed on country lookup for {0}'.format(t)) country_tags.remove(t) data = [['seriesA', lat_long_mag]] return jsonify(data) # ///////////////////////////////////////////////////////////////////////// @app.route('/html/<path:name>') def projects(name): return render_template('/{0}'.format(name)) # ///////////////////////////////////////////////////////////////////////// @app.route('/sentences/<int:collection_id>/<string:type>/<entity>') def sentences(collection_id, type, entity): sample_size = 2000 if(entity.isdigit()): # Media Type if(type == 'media'): sentenceList = mc_admin.sentenceList('*', [ 'tags_id_media:{0}'.format(str(collection_id)), 'media_id:{0}'.format(entity), 'publish_date:NOW to NOW-3MONTH'], rows=sample_size, 
sort=mc_admin.SORT_RANDOM) # Entity Type else: sentenceList = mc_admin.sentenceList('*', [ 'tags_id_media:{0}'.format(str(collection_id)), 'tags_id_stories:{0}'.format(entity), 'publish_date:NOW to NOW-3MONTH'], rows=sample_size, sort=mc_admin.SORT_RANDOM) else: # Word Type sentenceList = mc_admin.sentenceList(entity, [ 'tags_id_media:{0}'.format(str(collection_id)), 'publish_date:NOW to NOW-3MONTH'], rows=sample_size, sort=mc_admin.SORT_RANDOM) return jsonify(sentenceList) # ///////////////////////////////////////////////////////////////////////// init() if __name__ == '__main__': app.run(debug=True, port=5000)
72.753582
9,247
0.542161
3,620
25,391
3.700552
0.247514
0.053747
0.019409
0.005972
0.165572
0.117722
0.100627
0.083756
0.068528
0.056285
0
0.16334
0.15151
25,391
348
9,248
72.962644
0.458457
0.087629
0
0.23348
0
0
0.210657
0.01876
0
0
0
0
0
0
null
null
0.004405
0.048458
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
359676a7bed26deb7a43c184082ef5a5ec9bc8ac
783
py
Python
ingestion/src/metadata/generated/schema/type/collectionDescriptor.py
juliarvalenti/OpenMetadata
ed4508ab2cbc53e16127b5d091bdef2156d3c412
[ "Apache-2.0" ]
null
null
null
ingestion/src/metadata/generated/schema/type/collectionDescriptor.py
juliarvalenti/OpenMetadata
ed4508ab2cbc53e16127b5d091bdef2156d3c412
[ "Apache-2.0" ]
null
null
null
ingestion/src/metadata/generated/schema/type/collectionDescriptor.py
juliarvalenti/OpenMetadata
ed4508ab2cbc53e16127b5d091bdef2156d3c412
[ "Apache-2.0" ]
null
null
null
# generated by datamodel-codegen: # filename: schema/type/collectionDescriptor.json # timestamp: 2021-09-27T15:46:37+00:00 from __future__ import annotations from typing import Optional from pydantic import AnyUrl, BaseModel, Field from . import profile class CollectionInfo(BaseModel): name: Optional[str] = Field( None, description='Unique name that identifies a collection.' ) documentation: Optional[str] = Field(None, description='Description of collection.') href: Optional[AnyUrl] = Field( None, description='URL of the API endpoint where given collections are available.', ) images: Optional[profile.ImageList] = None class SchemaForCollectionDescriptor(BaseModel): collection: Optional[CollectionInfo] = None
27.964286
88
0.734355
86
783
6.639535
0.616279
0.047285
0.105079
0.070053
0.108581
0
0
0
0
0
0
0.027994
0.178799
783
27
89
29
0.860031
0.154534
0
0
1
0
0.196049
0
0
0
0
0
0
1
0
true
0
0.25
0
0.6875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
3
35acd8f295f6dfd95a79f459c59be91b8a56f970
299
py
Python
multi_tenant/managers.py
arineto/django-multi-tenant
713d555831b35a487f5a91494a20ee3d955a4b63
[ "MIT" ]
7
2016-07-13T12:49:26.000Z
2018-03-22T14:29:07.000Z
multi_tenant/managers.py
arineto/django-multi-tenant
713d555831b35a487f5a91494a20ee3d955a4b63
[ "MIT" ]
null
null
null
multi_tenant/managers.py
arineto/django-multi-tenant
713d555831b35a487f5a91494a20ee3d955a4b63
[ "MIT" ]
5
2016-07-20T13:09:41.000Z
2018-05-02T02:54:13.000Z
from django.db import models class TenantModelManager(models.Manager): """ This manager makes it easy to filter by tenant """ def by_tenant(self, tenant): return self.filter(tenant=tenant) def by_tenants(self, tenants): return self.filter(tenant__in=tenants)
21.357143
50
0.685619
39
299
5.153846
0.538462
0.079602
0.109453
0.218905
0
0
0
0
0
0
0
0
0.22408
299
13
51
23
0.866379
0.153846
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.166667
0.333333
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
3
35e6f69b05fbe5ddf7f6a35922f57fd94ac2602e
232
py
Python
backend/web_app/tests/snapshots/snap_test_tk_graphql_requests.py
jsc-masshtab/vdi-server
3de49dec986ab26ffc6c073873fb9de5943809f9
[ "MIT" ]
2
2021-12-03T10:04:25.000Z
2022-01-12T06:26:39.000Z
backend/web_app/tests/snapshots/snap_test_tk_graphql_requests.py
jsc-masshtab/vdi-server
3de49dec986ab26ffc6c073873fb9de5943809f9
[ "MIT" ]
null
null
null
backend/web_app/tests/snapshots/snap_test_tk_graphql_requests.py
jsc-masshtab/vdi-server
3de49dec986ab26ffc6c073873fb9de5943809f9
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots["test_request_thin_clients 1"] = {"thin_clients": []}
21.090909
63
0.732759
28
232
5.75
0.75
0.21118
0
0
0
0
0
0
0
0
0
0.019802
0.12931
232
10
64
23.2
0.777228
0.267241
0
0
0
0
0.233533
0.149701
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
ea1432b0f85cbc82edf8fd9f37ad2dab2828e324
134
py
Python
hcec/edwards/testdata/encodepoint.py
duwu/hcd
590966016bc42f9d043c16ad8438148ca40eff89
[ "ISC" ]
131
2018-07-19T13:01:41.000Z
2021-12-26T12:27:33.000Z
hcec/edwards/testdata/encodepoint.py
duwu/hcd
590966016bc42f9d043c16ad8438148ca40eff89
[ "ISC" ]
32
2018-07-28T17:53:34.000Z
2022-01-06T05:32:46.000Z
hcec/edwards/testdata/encodepoint.py
duwu/hcd
590966016bc42f9d043c16ad8438148ca40eff89
[ "ISC" ]
101
2018-08-22T03:31:11.000Z
2022-03-17T09:01:24.000Z
import sys from ed25519 import * P = [] x = int(sys.argv[1]) P.append(x) y = int(sys.argv[2]) P.append(y) encodepointhex(P)
13.4
22
0.61194
24
134
3.416667
0.541667
0.146341
0.243902
0
0
0
0
0
0
0
0
0.066038
0.208955
134
9
23
14.888889
0.707547
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ea3af54632833f0e21da2c2709dfe27358eb4392
156
py
Python
template.py
suddi/coding-challenges
f31b53790084dce1ad0be65ec1d61bf177cddb39
[ "MIT" ]
null
null
null
template.py
suddi/coding-challenges
f31b53790084dce1ad0be65ec1d61bf177cddb39
[ "MIT" ]
11
2020-01-09T06:53:45.000Z
2022-02-11T01:34:44.000Z
template.py
suddi/coding-challenges
f31b53790084dce1ad0be65ec1d61bf177cddb39
[ "MIT" ]
1
2017-03-18T17:19:43.000Z
2017-03-18T17:19:43.000Z
# pylint: disable-msg=empty-docstring,unused-argument def solution(a): """ """ if __name__ == '__main__': import doctest doctest.testmod()
17.333333
53
0.647436
17
156
5.470588
0.941176
0
0
0
0
0
0
0
0
0
0
0
0.198718
156
8
54
19.5
0.744
0.326923
0
0
0
0
0.086957
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
ea4570a30f4803cd7a4cff196fba4f9b3401b86f
1,815
py
Python
discord_utils.py
CatTanker/cncnet-discord-bot
5c0bcbb6cda37052dfb29b6daa52770e4405f2d9
[ "MIT" ]
2
2022-01-12T20:40:37.000Z
2022-01-25T12:05:02.000Z
discord_utils.py
CatTanker/cncnet-discord-bot
5c0bcbb6cda37052dfb29b6daa52770e4405f2d9
[ "MIT" ]
11
2020-09-22T19:15:39.000Z
2020-10-12T09:16:03.000Z
discord_utils.py
CatTanker/cncnet-discord-bot
5c0bcbb6cda37052dfb29b6daa52770e4405f2d9
[ "MIT" ]
2
2021-04-12T17:15:01.000Z
2022-02-14T18:44:49.000Z
class DiscordParseException(Exception): """An exception that is thrown when parsing Discord's representation of a channel / role / user mention fails.""" def parse_discord_str(content_str: str, type_chars: str) -> int: """Parses Discord's representation of a channel / role / user mention into an ID.""" if content_str.startswith('<') and content_str.endswith('>') and content_str[1:-1].startswith(type_chars): return int(content_str[(1 + len(type_chars)):-1]) raise DiscordParseException(f"{content_str} is not a valid Discord-formatted ID representation for '{type_chars}'") def format_discord_str(discord_id: int, type_chars: str) -> str: """Formats an ID into a Discord's representation of a channel / role / user mention.""" return f"<{type_chars}{discord_id}>" # Shortcut functions def parse_channel(content_str: str) -> int: """Parses Discord's representation of a channel mention into an ID.""" return parse_discord_str(content_str, '#') def parse_role(content_str: str) -> int: """Parses Discord's representation of a role mention into an ID.""" return parse_discord_str(content_str, '@&') def parse_user(content_str: str) -> int: """Parses Discord's representation of a user mention into an ID.""" return parse_discord_str(content_str, '@!') def format_channel(discord_id: int) -> str: """Formats an ID into a Discord's representation of a channel mention.""" return format_discord_str(discord_id, '#') def format_role(discord_id: int) -> str: """Formats an ID into a Discord's representation of a role mention.""" return format_discord_str(discord_id, '@&') def format_user(discord_id: int) -> str: """Formats an ID into a Discord's representation of a user mention.""" return format_discord_str(discord_id, '@!')
45.375
119
0.716804
265
1,815
4.728302
0.173585
0.09577
0.158021
0.172386
0.684757
0.638468
0.638468
0.595371
0.595371
0.418196
0
0.002623
0.15978
1,815
39
120
46.538462
0.819016
0.371901
0
0
0
0
0.110603
0.023766
0
0
0
0
0
1
0.421053
false
0
0
0
0.894737
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
ea564dc09567e06ddb042de2088bfdc60b3e4272
68
py
Python
Codewars/8kyu/removing-elements/Python/solution1.py
RevansChen/online-judge
ad1b07fee7bd3c49418becccda904e17505f3018
[ "MIT" ]
7
2017-09-20T16:40:39.000Z
2021-08-31T18:15:08.000Z
Codewars/8kyu/removing-elements/Python/solution1.py
RevansChen/online-judge
ad1b07fee7bd3c49418becccda904e17505f3018
[ "MIT" ]
null
null
null
Codewars/8kyu/removing-elements/Python/solution1.py
RevansChen/online-judge
ad1b07fee7bd3c49418becccda904e17505f3018
[ "MIT" ]
null
null
null
# Python - 3.6.0 remove_every_other = lambda my_list: my_list[::2]
17
49
0.705882
13
68
3.384615
0.846154
0.272727
0
0
0
0
0
0
0
0
0
0.068966
0.147059
68
3
50
22.666667
0.689655
0.205882
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ea6959f73343b9206b9c4c041bb1561a393d43b9
217
py
Python
jacquard/constants.py
peteowlett/jacquard
772fd633e521501688e0933482cba45f48c23ef9
[ "MIT" ]
null
null
null
jacquard/constants.py
peteowlett/jacquard
772fd633e521501688e0933482cba45f48c23ef9
[ "MIT" ]
null
null
null
jacquard/constants.py
peteowlett/jacquard
772fd633e521501688e0933482cba45f48c23ef9
[ "MIT" ]
null
null
null
"""Some general, project-level constants of little use outside Jacquard.""" import os import pathlib DEFAULT_CONFIG_FILE_PATH = pathlib.Path(os.environ.get( 'JACQUARD_CONFIG', '/etc/jacquard/config.cfg', ))
21.7
75
0.741935
29
217
5.413793
0.724138
0.178344
0
0
0
0
0
0
0
0
0
0
0.133641
217
9
76
24.111111
0.835106
0.317972
0
0
0
0
0.274648
0.169014
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
ea6f5aa5bfecfbdff710ccbf3153d1cf7bb2a5d7
196
py
Python
app_portfolio_skills/urls.py
MichaelDoctor/Portfolio
41d9104ef6d34f8eb146230b19038b445351c713
[ "MIT" ]
null
null
null
app_portfolio_skills/urls.py
MichaelDoctor/Portfolio
41d9104ef6d34f8eb146230b19038b445351c713
[ "MIT" ]
4
2021-06-09T18:02:18.000Z
2022-01-13T03:06:24.000Z
app_portfolio_skills/urls.py
MichaelDoctor/Portfolio
41d9104ef6d34f8eb146230b19038b445351c713
[ "MIT" ]
null
null
null
from django.urls import path from .views import LanguagesView, FrameworksView urlpatterns = [ path('languages/', LanguagesView.as_view()), path('frameworks/', FrameworksView.as_view()) ]
24.5
49
0.739796
21
196
6.809524
0.619048
0.083916
0
0
0
0
0
0
0
0
0
0
0.132653
196
7
50
28
0.841176
0
0
0
0
0
0.107143
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
ea90870689cd4d924ec6998b6eb531534c4268e1
145
py
Python
fileconversions/conversions/png_to_pdf_conversion.py
wilbertom/fileconversions
c48fda9b2804524fc57d1f6963d09645825b0da6
[ "MIT" ]
null
null
null
fileconversions/conversions/png_to_pdf_conversion.py
wilbertom/fileconversions
c48fda9b2804524fc57d1f6963d09645825b0da6
[ "MIT" ]
null
null
null
fileconversions/conversions/png_to_pdf_conversion.py
wilbertom/fileconversions
c48fda9b2804524fc57d1f6963d09645825b0da6
[ "MIT" ]
null
null
null
from .command_conversion import CommandConversion class PngToPdf(CommandConversion): command_name = 'convert' output_extension = 'pdf'
20.714286
49
0.77931
14
145
7.857143
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.151724
145
6
50
24.166667
0.894309
0
0
0
0
0
0.068966
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
ea92de22f54a91a1ac1404ae55750d761929c6ed
1,372
py
Python
app/http/controllers/TestController.py
Abeautifulsnow/masonite
f0ebb5ca05f5d88f21264e1cd0934435bd0a8791
[ "MIT" ]
null
null
null
app/http/controllers/TestController.py
Abeautifulsnow/masonite
f0ebb5ca05f5d88f21264e1cd0934435bd0a8791
[ "MIT" ]
1
2020-10-26T12:33:05.000Z
2020-10-26T12:33:05.000Z
app/http/controllers/TestController.py
Abeautifulsnow/masonite
f0ebb5ca05f5d88f21264e1cd0934435bd0a8791
[ "MIT" ]
null
null
null
from app.jobs.TestJob import TestJob from src.masonite import Queue, Mail from src.masonite.request import Request from src.masonite.view import View class TestController: def __init__(self): self.test = True def show(self): return 'show' def v(self, view: View): return view.render('test') def change_header(self, request: Request): request.header('Content-Type', 'application/xml') return 'test' def change_status(self, request: Request): request.status(203) return 'test' def change_404(self, request: Request): request.status(404) return 'test' def testing(self): return 'test' def json_response(self): return {'id': 2} def post_test(self): return 'post_test' def json(self): return 'success' def bad(self): return 5 / 0 def keyerror(self): x = {'hello': 'world'} return x['test'] def session(self, request: Request): request.session.set('test', 'value') return 'session set' def queue(self, queue: Queue): # queue.driver('amqp').push(self.bad) queue.driver('amqp').push(TestJob, channel='default') return 'queued' def mail(self, mail: Mail): return mail.to('idmann509@gmail.com').template('test', {'test': 'mail'})
23.254237
80
0.603499
169
1,372
4.840237
0.349112
0.136919
0.08802
0.122249
0.075795
0
0
0
0
0
0
0.014911
0.266764
1,372
58
81
23.655172
0.798211
0.02551
0
0.097561
0
0
0.113109
0
0
0
0
0
0
1
0.365854
false
0
0.097561
0.195122
0.829268
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
575d7a08079287307546190b7fcbc56b378c6df1
7,462
py
Python
tests/test_grafana.py
bushelpowered/grafana-ldap-sync-script
1de90583ca4ab8cc828332b72f1f95756d099c1f
[ "Apache-2.0" ]
9
2020-07-17T06:36:23.000Z
2022-03-27T19:35:50.000Z
tests/test_grafana.py
bushelpowered/grafana-ldap-sync-script
1de90583ca4ab8cc828332b72f1f95756d099c1f
[ "Apache-2.0" ]
2
2021-08-19T13:25:32.000Z
2022-02-03T16:06:55.000Z
tests/test_grafana.py
bushelpowered/grafana-ldap-sync-script
1de90583ca4ab8cc828332b72f1f95756d099c1f
[ "Apache-2.0" ]
6
2021-01-05T18:46:36.000Z
2022-03-28T11:35:46.000Z
from unittest import TestCase from unittest.mock import patch, Mock from grafana_api.grafana_api import GrafanaClientError from script import grafana class delete_team_by_name(TestCase): @patch("script.grafana.grafana_api") @patch("script.grafana.configuration") def test_deletes_team(self, mock_config, mock_grafana_api): mock_config.DRY_RUN = False mock_grafana_api.teams = Mock() mock_grafana_api.teams.get_team_by_name.return_value = [{"id": "my_team_id"}] output = grafana.delete_team_by_name("my_team") self.assertEqual(output, True) self.assertEqual(mock_grafana_api.teams.delete_team.call_count, 1) mock_grafana_api.teams.delete_team.assert_called_with("my_team_id") @patch("script.grafana.grafana_api") def test_no_team_to_delete(self, mock_grafana_api): mock_grafana_api.teams = Mock() mock_grafana_api.teams.get_team_by_name.return_value = [] output = grafana.delete_team_by_name("my_team") self.assertEqual(output, False) self.assertFalse(mock_grafana_api.teams.delete_team.called) class delete_user_by_login(TestCase): @patch("script.grafana.grafana_api") @patch("script.grafana.configuration") def test_does_not_delete_admin(self, mock_config, mock_grafana_api): mock_config.DRY_RUN = False mock_grafana_api.admin = Mock() mock_grafana_api.admin.delete_user.return_value = True output = grafana.delete_user_by_login("admin") self.assertFalse(output) @patch("script.grafana.grafana_api") @patch("script.grafana.configuration") def test_deletes_user(self, mock_config, mock_grafana_api): mock_config.DRY_RUN = False mock_grafana_api.admin = Mock() mock_grafana_api.admin.delete_user.return_value = True mock_grafana_api.users.find_user = Mock() mock_grafana_api.users.find_user.return_value = {"id": "id_delete_me"} output = grafana.delete_user_by_login("delete_me") self.assertTrue(output) self.assertEqual(mock_grafana_api.admin.delete_user.call_count, 1) mock_grafana_api.admin.delete_user.called_with("id_delete_me") self.assertEquals(mock_grafana_api.users.find_user.call_count, 1) 
mock_grafana_api.users.find_user.called_with("delete_me") class create_folder(TestCase): @patch("script.grafana.grafana_api") @patch("script.grafana.configuration") def test_creates_folder(self, mock_config, mock_grafana_api): mock_config.DRY_RUN = False mock_grafana_api.folder.create_folder = Mock() mock_grafana_api.folder.create_folder.return_value = True output = grafana.create_folder("foo", "bar") self.assertTrue(output) self.assertEqual(mock_grafana_api.folder.create_folder.call_count, 1) mock_grafana_api.folder.create_folder.assert_called_with("foo", "bar") @patch("script.grafana.grafana_api") @patch("script.grafana.configuration") def test_catches_exception(self, mock_config, mock_grafana_api): mock_config.DRY_RUN = False mock_grafana_api.folder.create_folder = Mock() mock_grafana_api.folder.create_folder.side_effect = GrafanaClientError("something", "went", "wrong") output = grafana.create_folder("foo", "bar") self.assertFalse(output) self.assertEqual(mock_grafana_api.folder.create_folder.call_count, 1) mock_grafana_api.folder.create_folder.assert_called_with("foo", "bar") class get_members_of_team(TestCase): @patch("script.grafana.grafana_api") def test_returns_members_correctly(self, mock_grafana_api): mock_grafana_api.teams.get_team_members = Mock() mock_grafana_api.teams.get_team_members.return_value = [{"login": "user_login", "name": "name", "email": "mail"} ] output = grafana.get_members_of_team("my_team") self.assertEqual(output, [{"login": "user_login", "name": "name", "email": "mail"}]) class login_taken(TestCase): @patch("script.grafana.grafana_api") def test_login_is_taken(self, mock_grafana_api): mock_grafana_api.users.find_user = Mock() mock_grafana_api.users.find_user.return_value = "" output = grafana.login_taken("foo") self.assertTrue(output) @patch("script.grafana.grafana_api") def test_login_is_not_taken(self, mock_grafana_api): mock_grafana_api.users.find_user = Mock() mock_grafana_api.users.find_user.side_effect = 
GrafanaClientError("user", "not", "found") output = grafana.login_taken("foo") self.assertFalse(output) class exists_folder(TestCase): @patch("script.grafana.grafana_api") def test_login_is_taken(self, mock_grafana_api): mock_grafana_api.folder.get_folder = Mock() mock_grafana_api.folder.get_folder.return_value = "" output = grafana.exists_folder("foo") self.assertTrue(output) @patch("script.grafana.grafana_api") def test_login_is_not_taken(self, mock_grafana_api): mock_grafana_api.folder.get_folder = Mock() mock_grafana_api.folder.get_folder.side_effect = GrafanaClientError("user", "not", "found") output = grafana.exists_folder("foo") self.assertFalse(output) class get_id_of_team(TestCase): @patch("script.grafana.grafana_api") def test_team_exists(self, mock_grafana_api): mock_grafana_api.teams.get_team_by_name = Mock() mock_grafana_api.teams.get_team_by_name.return_value = [{"id": "my_team"}] output = grafana.get_id_of_team("my_team") self.assertEqual(output, "my_team") @patch("script.grafana.grafana_api") def test_team_not_existing(self, mock_grafana_api): mock_grafana_api.teams.get_team_by_name = Mock() mock_grafana_api.teams.get_team_by_name.return_value = [] output = grafana.get_id_of_team("my_team") self.assertFalse(output) class update_folder_permissions(TestCase): @patch("script.grafana.grafana_api") @patch("script.grafana.configuration") def test_update_input(self, mock_config, mock_grafana_api): mock_config.DRY_RUN = False mock_grafana_api.folder.update_folder_permissions = Mock() mock_grafana_api.folder.update_folder_permissions.return_value = [] grafana.update_folder_permissions("my_folder", [ { "id": "my_id", "permission": 1 } ]) self.assertEqual(mock_grafana_api.folder.update_folder_permissions.call_count, 1) mock_grafana_api.folder.update_folder_permissions.assert_called_with("my_folder", {"items": [ {"id": "my_id", "permission": 1 } ] })
39.068063
108
0.642455
885
7,462
5.027119
0.090395
0.164082
0.179366
0.071926
0.837267
0.812093
0.739267
0.629804
0.578107
0.565745
0
0.001445
0.258108
7,462
190
109
39.273684
0.802204
0
0
0.536232
0
0
0.114313
0.071295
0
0
0
0
0.173913
1
0.101449
false
0
0.028986
0
0.188406
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
577674d6e6e440990896d02c1f571845c1da4ef4
272
py
Python
src/sima/simo/massunit.py
SINTEF/simapy
650b8c2f15503dad98e2bfc0d0788509593822c7
[ "MIT" ]
null
null
null
src/sima/simo/massunit.py
SINTEF/simapy
650b8c2f15503dad98e2bfc0d0788509593822c7
[ "MIT" ]
null
null
null
src/sima/simo/massunit.py
SINTEF/simapy
650b8c2f15503dad98e2bfc0d0788509593822c7
[ "MIT" ]
null
null
null
# Generated with MassUnit # from enum import Enum from enum import auto class MassUnit(Enum): """""" MG = auto() KG = auto() def label(self): if self == MassUnit.MG: return "Mg" if self == MassUnit.KG: return "kg"
18.133333
31
0.536765
33
272
4.424242
0.454545
0.109589
0.191781
0
0
0
0
0
0
0
0
0
0.345588
272
15
32
18.133333
0.820225
0.084559
0
0
1
0
0.016667
0
0
0
0
0
0
1
0.1
false
0
0.2
0
0.8
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
5779baa8ebd68d27991e236edfabf315ce58575d
167
py
Python
New folder/Re7.py
piyushparastiwari/python-project
5dba0ef4e77f1d2528f510327de4224b60b1d4ba
[ "Apache-2.0" ]
null
null
null
New folder/Re7.py
piyushparastiwari/python-project
5dba0ef4e77f1d2528f510327de4224b60b1d4ba
[ "Apache-2.0" ]
null
null
null
New folder/Re7.py
piyushparastiwari/python-project
5dba0ef4e77f1d2528f510327de4224b60b1d4ba
[ "Apache-2.0" ]
null
null
null
import re st="amit and amita belongs to same family" for val in re.finditer("amit",st): print(val) lis=val.span() print(lis) print(type(lis))
18.555556
43
0.616766
27
167
3.814815
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.251497
167
8
44
20.875
0.824
0
0
0
0
0
0.257862
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.428571
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
577c83bac9d62369bb796c7a1a418dc22207d901
318
py
Python
Python/Tests/TestData/TestAdapterTestB/InheritanceDerivedTest.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/TestAdapterTestB/InheritanceDerivedTest.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/TestAdapterTestB/InheritanceDerivedTest.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
import unittest import InheritanceBaseTest class DerivedClassTests(InheritanceBaseTest.BaseClassTests): def test_derived_pass(self): pass def test_derived_fail(self): self.assertTrue(False, "Force a failure in derived class test.") if __name__ == '__main__': unittest.main()
24.461538
73
0.710692
34
318
6.294118
0.617647
0.065421
0.130841
0
0
0
0
0
0
0
0
0
0.210692
318
12
74
26.5
0.85259
0
0
0
0
0
0.150327
0
0
0
0
0
0.111111
1
0.222222
false
0.222222
0.222222
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
579297708304ce877306df0513a8afce6be83e37
12,978
py
Python
source/rttov_test/profile-datasets-py/div83/062.py
bucricket/projectMAScorrection
89489026c8e247ec7c364e537798e766331fe569
[ "BSD-3-Clause" ]
null
null
null
source/rttov_test/profile-datasets-py/div83/062.py
bucricket/projectMAScorrection
89489026c8e247ec7c364e537798e766331fe569
[ "BSD-3-Clause" ]
1
2022-03-12T12:19:59.000Z
2022-03-12T12:19:59.000Z
source/rttov_test/profile-datasets-py/div83/062.py
bucricket/projectMAScorrection
89489026c8e247ec7c364e537798e766331fe569
[ "BSD-3-Clause" ]
null
null
null
""" Profile ../profile-datasets-py/div83/062.py file automaticaly created by prof_gen.py script """ self["ID"] = "../profile-datasets-py/div83/062.py" self["Q"] = numpy.array([ 2.135165, 2.544134, 3.283269, 4.205042, 4.999855, 5.317752, 5.205453, 5.320982, 5.907775, 6.095293, 6.191692, 6.191842, 6.146692, 6.080533, 6.027844, 6.021334, 6.013854, 5.993284, 5.947545, 5.859476, 5.707367, 5.45466 , 5.149923, 4.850026, 4.589789, 4.381871, 4.244622, 4.184272, 4.137703, 4.077603, 3.999644, 3.908545, 3.828035, 3.761756, 3.711596, 3.685716, 3.684286, 3.696326, 3.707896, 3.714566, 3.734056, 3.782776, 3.824185, 3.848895, 3.879375, 3.910525, 3.978284, 4.130243, 4.382681, 4.708588, 5.154323, 5.754207, 6.446238, 7.211108, 8.180523, 9.808144, 12.19045 , 14.78778 , 16.85582 , 18.19527 , 19.49502 , 20.34879 , 21.16945 , 22.4375 , 24.37611 , 26.75698 , 30.40398 , 36.18439 , 44.42083 , 55.27844 , 69.17471 , 87.64272 , 104.2721 , 117.5582 , 133.5732 , 155.2909 , 154.941 , 154.3332 , 165.0957 , 194.4842 , 252.885 , 340.3101 , 445.1148 , 547.7248 , 640.853 , 713.9849 , 773.9685 , 829.9586 , 885.8446 , 964.1026 , 1106.105 , 1219.651 , 944.1328 , 916.7358 , 890.4903 , 865.3355 , 841.2148 , 818.0752 , 795.8661 , 774.5406 , 754.055 ]) self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02, 7.69000000e-02, 1.37000000e-01, 2.24400000e-01, 3.45400000e-01, 5.06400000e-01, 7.14000000e-01, 9.75300000e-01, 1.29720000e+00, 1.68720000e+00, 2.15260000e+00, 2.70090000e+00, 3.33980000e+00, 4.07700000e+00, 4.92040000e+00, 5.87760000e+00, 6.95670000e+00, 8.16550000e+00, 9.51190000e+00, 1.10038000e+01, 1.26492000e+01, 1.44559000e+01, 1.64318000e+01, 1.85847000e+01, 2.09224000e+01, 2.34526000e+01, 2.61829000e+01, 2.91210000e+01, 3.22744000e+01, 3.56505000e+01, 3.92566000e+01, 4.31001000e+01, 4.71882000e+01, 5.15278000e+01, 5.61260000e+01, 6.09895000e+01, 6.61253000e+01, 7.15398000e+01, 7.72396000e+01, 8.32310000e+01, 8.95204000e+01, 9.61138000e+01, 1.03017000e+02, 1.10237000e+02, 
1.17778000e+02, 1.25646000e+02, 1.33846000e+02, 1.42385000e+02, 1.51266000e+02, 1.60496000e+02, 1.70078000e+02, 1.80018000e+02, 1.90320000e+02, 2.00989000e+02, 2.12028000e+02, 2.23442000e+02, 2.35234000e+02, 2.47408000e+02, 2.59969000e+02, 2.72919000e+02, 2.86262000e+02, 3.00000000e+02, 3.14137000e+02, 3.28675000e+02, 3.43618000e+02, 3.58966000e+02, 3.74724000e+02, 3.90893000e+02, 4.07474000e+02, 4.24470000e+02, 4.41882000e+02, 4.59712000e+02, 4.77961000e+02, 4.96630000e+02, 5.15720000e+02, 5.35232000e+02, 5.55167000e+02, 5.75525000e+02, 5.96306000e+02, 6.17511000e+02, 6.39140000e+02, 6.61192000e+02, 6.83667000e+02, 7.06565000e+02, 7.29886000e+02, 7.53628000e+02, 7.77790000e+02, 8.02371000e+02, 8.27371000e+02, 8.52788000e+02, 8.78620000e+02, 9.04866000e+02, 9.31524000e+02, 9.58591000e+02, 9.86067000e+02, 1.01395000e+03, 1.04223000e+03, 1.07092000e+03, 1.10000000e+03]) self["CO2"] = numpy.array([ 369.0472, 369.0461, 369.0418, 369.0364, 369.0272, 369.014 , 368.9971, 368.974 , 368.9548, 368.9298, 368.8917, 368.8367, 368.7727, 368.7118, 368.6468, 368.5718, 368.5218, 368.5058, 368.5028, 368.5008, 368.5119, 368.542 , 368.5811, 368.6212, 368.6653, 368.7144, 368.7534, 368.7935, 368.8235, 368.8565, 368.8935, 368.9336, 368.9856, 369.0416, 369.1946, 369.3996, 369.6246, 369.8796, 370.1486, 370.3456, 370.5446, 370.7746, 371.0386, 371.3156, 371.5916, 371.8805, 372.2945, 372.8095, 373.3224, 373.7442, 374.1831, 374.2768, 374.2726, 374.2973, 374.3609, 374.4303, 374.5314, 374.6365, 374.7087, 374.7782, 374.8217, 374.8544, 374.8731, 374.8786, 374.8759, 374.861 , 374.8396, 374.8074, 374.7734, 374.7403, 374.7041, 374.6662, 374.6269, 374.584 , 374.536 , 374.4828, 374.437 , 374.3912, 374.3472, 374.2982, 374.2463, 374.1886, 374.1284, 374.077 , 374.0341, 374.0028, 373.9803, 373.9624, 373.9454, 373.9202, 373.871 , 373.8335, 373.9406, 373.9539, 373.9667, 373.9771, 373.9871, 373.9958, 374.0041, 374.0121, 374.0198]) self["CO"] = numpy.array([ 1.044168 , 1.021437 , 0.9772578 , 0.9054302 , 
0.803713 , 0.6758304 , 0.5907059 , 0.5901089 , 0.3854287 , 0.2633824 , 0.2040667 , 0.1702239 , 0.1136093 , 0.0496143 , 0.01022324, 0.00575758, 0.00531534, 0.00554938, 0.00580365, 0.00593863, 0.00606526, 0.0061711 , 0.00626554, 0.00633542, 0.00651423, 0.00678242, 0.00705851, 0.00736169, 0.00761043, 0.00788115, 0.00810314, 0.00834774, 0.00846046, 0.00857584, 0.00859602, 0.0085699 , 0.00856188, 0.00859618, 0.00863255, 0.00879777, 0.008989 , 0.00930631, 0.00978849, 0.01032786, 0.01123676, 0.01227285, 0.01416274, 0.01706003, 0.02045891, 0.02325589, 0.02657156, 0.02754204, 0.02778702, 0.02851259, 0.02991356, 0.03140779, 0.0328479 , 0.03440639, 0.0357939 , 0.03724762, 0.03847015, 0.03961269, 0.04054844, 0.04127447, 0.04184898, 0.04215827, 0.04238661, 0.04241467, 0.04242462, 0.04238266, 0.04235297, 0.04235829, 0.04237938, 0.04243321, 0.04246113, 0.04245561, 0.04239703, 0.04230487, 0.04214374, 0.04196034, 0.04173534, 0.04151467, 0.04130051, 0.04113266, 0.04099621, 0.04089188, 0.04082478, 0.0408119 , 0.04081871, 0.04083679, 0.04087654, 0.04090395, 0.04096279, 0.04099998, 0.04116401, 0.04140064, 0.04164224, 0.0418888 , 0.04214034, 0.04239694, 0.04265861]) self["T"] = numpy.array([ 205.66 , 214.138, 228.803, 244.185, 256.943, 262.499, 258.783, 249.46 , 239.723, 229.752, 223.18 , 221.108, 224.206, 226.735, 225.136, 218.183, 210.29 , 205.09 , 201.951, 200.133, 198.482, 196.528, 194.965, 194.055, 193.835, 194.059, 194.208, 194.173, 193.956, 193.718, 193.682, 193.933, 194.131, 194.705, 195.609, 196.61 , 197.675, 198.799, 199.776, 200.638, 201.537, 202.294, 202.856, 203.369, 203.973, 204.844, 205.793, 206.858, 207.907, 208.908, 209.816, 210.762, 211.53 , 211.957, 212.077, 211.765, 211.189, 210.949, 211.533, 212.365, 212.424, 212.319, 212.593, 213.367, 214.502, 215.702, 216.803, 217.925, 219.127, 220.43 , 221.803, 223.135, 224.442, 225.756, 227.146, 228.743, 230.533, 232.486, 234.56 , 236.706, 238.876, 240.984, 242.984, 244.886, 246.696, 248.41 , 249.974, 251.494, 253.024, 254.591, 
256.096, 257.127, 254.785, 254.785, 254.785, 254.785, 254.785, 254.785, 254.785, 254.785, 254.785]) self["N2O"] = numpy.array([ 0.00045 , 0.00045 , 0.00045 , 0.00045 , 0.00087 , 0.00082 , 0.00045 , 0.00042 , 0.00133999, 0.00239998, 0.00516997, 0.00859995, 0.01236992, 0.01220993, 0.01250992, 0.01407992, 0.0165599 , 0.02100987, 0.02529985, 0.02979983, 0.03407981, 0.03503981, 0.03438982, 0.03376984, 0.02924987, 0.02439989, 0.01973992, 0.01793992, 0.01710993, 0.01630993, 0.01759993, 0.02691989, 0.03594986, 0.04469983, 0.0529298 , 0.05983978, 0.06655975, 0.07307973, 0.08438969, 0.1023196 , 0.1193496 , 0.1470594 , 0.1755893 , 0.2027892 , 0.2299491 , 0.254479 , 0.2758589 , 0.2846188 , 0.2926787 , 0.2999086 , 0.3061684 , 0.3113282 , 0.315218 , 0.3176777 , 0.3185374 , 0.3185369 , 0.3185361 , 0.3185353 , 0.3185346 , 0.3185342 , 0.3185338 , 0.3185335 , 0.3185333 , 0.3185329 , 0.3185322 , 0.3185315 , 0.3185303 , 0.3185285 , 0.3185259 , 0.3185224 , 0.318518 , 0.3185121 , 0.3185068 , 0.3185026 , 0.3184975 , 0.3184905 , 0.3184906 , 0.3184908 , 0.3184874 , 0.318478 , 0.3184594 , 0.3184316 , 0.3183982 , 0.3183655 , 0.3183359 , 0.3183126 , 0.3182935 , 0.3182756 , 0.3182578 , 0.3182329 , 0.3181877 , 0.3181515 , 0.3182393 , 0.318248 , 0.3182563 , 0.3182644 , 0.318272 , 0.3182794 , 0.3182865 , 0.3182933 , 0.3182998 ]) self["O3"] = numpy.array([ 0.7587884 , 0.6506303 , 0.4920414 , 0.4354072 , 0.4927165 , 0.6960063 , 1.086994 , 1.655471 , 2.345416 , 3.015182 , 3.040381 , 3.532388 , 3.956836 , 4.296324 , 4.369834 , 3.956136 , 3.575168 , 3.463419 , 3.454539 , 3.44159 , 3.335181 , 3.402581 , 3.532302 , 3.614202 , 3.602793 , 3.518435 , 3.352456 , 3.217207 , 3.064387 , 2.883768 , 2.693159 , 2.52414 , 2.357881 , 2.325821 , 2.490021 , 2.6827 , 2.71009 , 2.557481 , 2.390191 , 2.257812 , 2.018752 , 1.729413 , 1.550014 , 1.426195 , 1.229235 , 1.030666 , 0.8172317 , 0.6441773 , 0.5255887 , 0.4441519 , 0.3733641 , 0.3122102 , 0.2650703 , 0.2375863 , 0.2258832 , 0.2185519 , 0.2087325 , 0.1960951 , 
0.1812439 , 0.164181 , 0.1450802 , 0.1258284 , 0.1086527 , 0.09469558, 0.08401265, 0.07566868, 0.06925349, 0.06385209, 0.05934416, 0.05564432, 0.05283704, 0.05088414, 0.04933126, 0.04790827, 0.04618753, 0.04385149, 0.04146417, 0.03934833, 0.03776906, 0.03677575, 0.03642339, 0.03630874, 0.03641958, 0.03672857, 0.03705864, 0.0372298 , 0.03721248, 0.03710618, 0.03694554, 0.03665513, 0.03590224, 0.03457798, 0.03937109, 0.03937217, 0.03937321, 0.0393742 , 0.03937515, 0.03937606, 0.03937694, 0.03937778, 0.03937858]) self["CH4"] = numpy.array([ 0.05500208, 0.05500206, 0.05500202, 0.09355291, 0.1077775 , 0.1368143 , 0.1482102 , 0.1591442 , 0.173895 , 0.2012118 , 0.2391385 , 0.2866422 , 0.3400379 , 0.3891816 , 0.4364604 , 0.4843851 , 0.5284008 , 0.5675666 , 0.6070874 , 0.6606711 , 0.7117169 , 0.7775898 , 0.8491726 , 0.9177535 , 1.036695 , 1.157895 , 1.274525 , 1.363954 , 1.442584 , 1.458114 , 1.474784 , 1.492634 , 1.511694 , 1.509474 , 1.507334 , 1.505314 , 1.503454 , 1.501804 , 1.506684 , 1.511824 , 1.517244 , 1.522934 , 1.528904 , 1.560684 , 1.585964 , 1.612404 , 1.640223 , 1.669413 , 1.697013 , 1.711792 , 1.727171 , 1.73038 , 1.730169 , 1.730588 , 1.731836 , 1.733073 , 1.734009 , 1.734974 , 1.735041 , 1.734998 , 1.734876 , 1.734715 , 1.734483 , 1.734181 , 1.733818 , 1.733374 , 1.732907 , 1.732417 , 1.731973 , 1.731654 , 1.73138 , 1.731228 , 1.731059 , 1.730866 , 1.730589 , 1.730201 , 1.729792 , 1.729333 , 1.728885 , 1.728424 , 1.727993 , 1.727552 , 1.727121 , 1.726794 , 1.726533 , 1.726347 , 1.726223 , 1.726126 , 1.72604 , 1.725904 , 1.725669 , 1.725533 , 1.726059 , 1.726146 , 1.726221 , 1.726285 , 1.726327 , 1.726367 , 1.726405 , 1.726442 , 1.726477 ]) self["CTP"] = 500.0 self["CFRACTION"] = 0.0 self["IDG"] = 0 self["ISH"] = 0 self["ELEVATION"] = 0.0 self["S2M"]["T"] = 254.785 self["S2M"]["Q"] = 754.054972021 self["S2M"]["O"] = 0.0393785839754 self["S2M"]["P"] = 875.53882 self["S2M"]["U"] = 0.0 self["S2M"]["V"] = 0.0 self["S2M"]["WFETC"] = 100000.0 
self["SKIN"]["SURFTYPE"] = 0 self["SKIN"]["WATERTYPE"] = 1 self["SKIN"]["T"] = 254.785 self["SKIN"]["SALINITY"] = 35.0 self["SKIN"]["FOAM_FRACTION"] = 0.0 self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3]) self["ZENANGLE"] = 0.0 self["AZANGLE"] = 0.0 self["SUNZENANGLE"] = 0.0 self["SUNAZANGLE"] = 0.0 self["LATITUDE"] = -70.044 self["GAS_UNITS"] = 2 self["BE"] = 0.0 self["COSBK"] = 0.0 self["DATE"] = numpy.array([2007, 6, 1]) self["TIME"] = numpy.array([0, 0, 0])
58.197309
92
0.548467
1,907
12,978
3.730991
0.504982
0.011947
0.009276
0.013493
0.018693
0.018693
0.011103
0.00759
0.00759
0.00759
0
0.704293
0.292803
12,978
222
93
58.459459
0.07093
0.00732
0
0
0
0
0.019352
0.00272
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
57d73df628277c009f64cbf8b7778091e214b9f2
346
py
Python
procrastinate/contrib/django/migrations/0001_baseline.py
t-eckert/procrastinate
f831565b00d67afa7a4291d734c8fe85074a360c
[ "MIT" ]
null
null
null
procrastinate/contrib/django/migrations/0001_baseline.py
t-eckert/procrastinate
f831565b00d67afa7a4291d734c8fe85074a360c
[ "MIT" ]
null
null
null
procrastinate/contrib/django/migrations/0001_baseline.py
t-eckert/procrastinate
f831565b00d67afa7a4291d734c8fe85074a360c
[ "MIT" ]
null
null
null
# Generated by Django 3.1 on 2020-08-22 16:57 from django.db import migrations import procrastinate.contrib.django class Migration(migrations.Migration): initial = True dependencies: list = [] operations = [ procrastinate.contrib.django.RunProcrastinateFile( filename="baseline-0.5.0.sql", ), ]
18.210526
58
0.66763
39
346
5.923077
0.769231
0.17316
0.225108
0
0
0
0
0
0
0
0
0.064151
0.234104
346
18
59
19.222222
0.807547
0.124277
0
0
1
0
0.059801
0
0
0
0
0
0
1
0
false
0
0.2
0
0.6
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
57e6f4680e3910d5aa9b8c572fe9a67674fa7993
155
py
Python
Exercício feitos pela primeira vez/ex025.py
Claayton/pythonExerciciosLinux
696cdb16983638418bd0d0d4fe44dc72662b9c97
[ "MIT" ]
1
2021-01-23T15:43:34.000Z
2021-01-23T15:43:34.000Z
Exercício feitos pela primeira vez/ex025.py
Claayton/pythonExerciciosLinux
696cdb16983638418bd0d0d4fe44dc72662b9c97
[ "MIT" ]
null
null
null
Exercício feitos pela primeira vez/ex025.py
Claayton/pythonExerciciosLinux
696cdb16983638418bd0d0d4fe44dc72662b9c97
[ "MIT" ]
null
null
null
#Exercício025 name = str(input('Qual seu nome completo?: ')).strip().upper() print('Seu nome tem a palavra SILVA?: {}'.format('SILVA'in name)) print('xD')
31
65
0.677419
23
155
4.565217
0.782609
0.133333
0
0
0
0
0
0
0
0
0
0.021739
0.109677
155
4
66
38.75
0.73913
0.077419
0
0
0
0
0.457746
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
57e84cf4f7c4b8be601c471d36c9ee834f3919ef
10,223
py
Python
tests/test_restapi_filtering.py
hafeezibbad/telegram-bot
0cbc35005ea5d076a8b3a243d794889532e69c4c
[ "Apache-2.0" ]
null
null
null
tests/test_restapi_filtering.py
hafeezibbad/telegram-bot
0cbc35005ea5d076a8b3a243d794889532e69c4c
[ "Apache-2.0" ]
2
2021-02-02T22:38:30.000Z
2021-06-02T01:27:14.000Z
tests/test_restapi_filtering.py
hafeezibbad/telegram-bot
0cbc35005ea5d076a8b3a243d794889532e69c4c
[ "Apache-2.0" ]
null
null
null
""" Module containing tests cases for testing Restapi calls for filtering and adding, removing dummy date. """ import json import string import random import unittest from datetime import datetime, timedelta from flask import url_for from botapp import create_app from botapp.models import MyBot, Message class ProceduresTest(unittest.TestCase): def setUp(self): self.app = create_app('testing') self.app_context = self.app.app_context() self.app_context.push() self.client = self.app.test_client() def tearDown(self): # Drop all collections MyBot.drop_collection() Message.drop_collection() self.app_context.pop() def get_api_headers(self): return { 'Accept': 'application/json', 'Content-Type': 'application/json' } def test_filter_messages_by_bot(self): for _ in range(3): Message(bot_id=1234).save() Message(bot_id=random.randint(1, 10)).save() # Get messages response = self.client.get( url_for('botapi.filter_messages_by_bot', bot_id=1234), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 3) def test_filter_messages_by_username(self): for _ in range(3): Message(sender_username='TestUser1').save() Message(sender_username='TestUser' + str(random.randint(2, 10))).save() # Get messages response = self.client.get( url_for('botapi.filter_messages_by_username', username='TestUser1'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 3) def test_filter_messages_by_chatid(self): for _ in range(3): Message(chatid=123).save() Message(chatid=random.randint(200, 300)).save() # Get messages response = self.client.get( url_for('botapi.filter_messages_by_chatid', chatid=123), headers=self.get_api_headers() ) 
self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 3) def test_filter_messages_using_botid(self): # Add some dummy messages MyBot.generate_fake(1) Message.generate_fake(5) bot = MyBot(bot_id=11111, token='dummy-token', test_bot=True).save() self.assertIsNotNone(bot) for _ in range(3): Message(bot_id=bot.bot_id).save() self.assertEqual(Message.objects.count(), 5+3) # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=bot.bot_id, time_off=0, text='#', username='#', name='#'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 3) def test_filter_messages_using_time_off(self): # Add some dummy messages Message.generate_fake(5) for _ in range(5): Message(date=datetime.now()-timedelta(minutes=20)).save() self.assertEqual(Message.objects.count(), 5+5) # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=0, time_off=40, text='#', username='#', name='#'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 5) def test_filter_messages_using_text(self): # Add some dummy messages Message.generate_fake(5) for _ in range(5): Message(text_content='message:' + random.choice(string.ascii_letters)).save() self.assertEqual(Message.objects.count(), 5+5) # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=0, time_off=0, text='message', username='#', name='#'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = 
json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 5) def test_filter_messages_using_username(self): # Add some dummy messages Message.generate_fake(5) for _ in range(5): Message(sender_username='testuser').save() self.assertEqual(Message.objects.count(), 5 + 5) # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=0, time_off=0, text='#', username='testuser', name='#'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 5) def test_filter_messages_using_user_firstname_lastname(self): # Add some dummy messages Message.generate_fake(5) Message(sender_firstname='testuser').save() Message(sender_lastname='usertest').save() Message(sender_firstname='test', sender_lastname='user').save() Message(sender_firstname='user', sender_lastname='test').save() self.assertEqual(Message.objects.count(), 5 + 4) # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=0, time_off=0, text='#', username='#', name='test'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 4) def test_filter_messages_using_no_criteria(self): # Add some dummy messages Message.generate_fake(5) Message(bot_id=1234).save() Message(date=datetime.now()-timedelta(hours=1.5)).save() Message(text_content='message1234').save() Message(sender_username='testuser').save() Message(sender_firstname='test', sender_lastname='user').save() self.assertEqual(Message.objects.count(), 5 + 5) # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=0, time_off=0, 
text='#', username='#', name='#'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 10) def test_filter_messages_using_all_criteria(self): # Add dummy messages Message.generate_fake(5) # Add partially matching messages. Message(date=datetime.now() - timedelta(minutes=30), # Un-match time. sender_username='tester1', sender_firstname='test', sender_lastname='bot', text_content='testmessage', bot_id=12345).save() Message(date=datetime.now() - timedelta(minutes=10), sender_username='tester2', # Non-matching sender-username. sender_firstname='test', sender_lastname='bot', text_content='testmessage', bot_id=12345).save() Message(date=datetime.now() - timedelta(minutes=10), sender_username='tester1', sender_firstname='abc', # Non-matching first-name, last-name sender_lastname='def', text_content='testmessage', bot_id=12345).save() Message(date=datetime.now() - timedelta(minutes=10), sender_username='tester1', sender_firstname='test', sender_lastname='bot', text_content='message', # Non-matching text content bot_id=12345).save() Message(date=datetime.now() - timedelta(minutes=10), sender_username='Tester1', sender_firstname='Test', sender_lastname='Bot', text_content='testmessage', bot_id=11111).save() # Non-matching botid # Add expected message. 
Message(date=datetime.now()-timedelta(minutes=10), sender_username='tester1', sender_firstname='test', sender_lastname='bot', text_content='testmessage', bot_id=12345).save() # Get filtered messages response = self.client.get( url_for('botapi.filter_messages', botid=12345, time_off=15, text='test', username='tester1', name='test'), headers=self.get_api_headers() ) self.assertEqual(response.status_code, 200) json_response = json.loads(response.data.decode('utf-8')) self.assertEqual(json_response['result'], 'success') self.assertEqual(len(json_response['messages']), 1)
41.056225
80
0.607845
1,137
10,223
5.272647
0.123131
0.090075
0.023853
0.031193
0.771309
0.740117
0.705254
0.68457
0.68457
0.66789
0
0.025511
0.267632
10,223
248
81
41.221774
0.77521
0.06456
0
0.569307
0
0
0.091491
0.026125
0
0
0
0
0.183168
1
0.064356
false
0
0.039604
0.00495
0.113861
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
17c3422fd5c753fc7b849ef8d958f4af1f54cc8e
91
py
Python
src/student1/apps.py
Ehsan-Home/drag_and_drop_in_tables
865079999e42a6f3dceb6814b69091580c1440b8
[ "MIT" ]
3
2020-03-08T09:14:38.000Z
2020-08-29T00:19:38.000Z
src/student1/apps.py
Ehsan-Home/drag_and_drop_in_tables
865079999e42a6f3dceb6814b69091580c1440b8
[ "MIT" ]
null
null
null
src/student1/apps.py
Ehsan-Home/drag_and_drop_in_tables
865079999e42a6f3dceb6814b69091580c1440b8
[ "MIT" ]
null
null
null
from django.apps import AppConfig class Student1Config(AppConfig): name = 'student1'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0.026316
0.164835
91
5
34
18.2
0.881579
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
17cca09d6607d4701575eebf9f9363e242db7714
618
py
Python
object_oriented_pandas/data_unit.py
AdamWclw/object_oriented_pandas
084d8cf12d71c96dc42d5cf7ac52030b1862f9c1
[ "MIT" ]
1
2021-03-12T12:32:04.000Z
2021-03-12T12:32:04.000Z
object_oriented_pandas/data_unit.py
AdamWclw/object_oriented_pandas
084d8cf12d71c96dc42d5cf7ac52030b1862f9c1
[ "MIT" ]
null
null
null
object_oriented_pandas/data_unit.py
AdamWclw/object_oriented_pandas
084d8cf12d71c96dc42d5cf7ac52030b1862f9c1
[ "MIT" ]
null
null
null
# DATA UNIT CLASS class _Unit: def __init__(self, name, con_2_si, con_unit, si_unit): self._name = name self._con_2_si = con_2_si self._con_unit = con_unit self._si_unit = si_unit def get_name(self): return self._name def get_con_2_si(self): return self._con_2_si def get_con_unit(self): return self._con_unit def get_si_unit(self): return self._si_unit # DATA UNITS ENERGY = _Unit(name='Energy', con_2_si=1000 * 3600, con_unit='kWh', si_unit='Ws') DISTANCE = _Unit(name='Distance', con_2_si=1000, con_unit='km', si_unit='m')
22.888889
81
0.650485
103
618
3.446602
0.203884
0.078873
0.11831
0.050704
0
0
0
0
0
0
0
0.040512
0.2411
618
26
82
23.769231
0.716418
0.042071
0
0
0
0
0.037479
0
0
0
0
0
0
1
0.3125
false
0
0
0.25
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
17cd8ac3d98e5aa1290e36e5bb8980fb910b5466
259
py
Python
HARMAN International Software Engineer 2019/missingNumber.py
sivolko/codeforce
4b00c4c012780036e56d2f0e79adb2f5db7559df
[ "MIT" ]
null
null
null
HARMAN International Software Engineer 2019/missingNumber.py
sivolko/codeforce
4b00c4c012780036e56d2f0e79adb2f5db7559df
[ "MIT" ]
null
null
null
HARMAN International Software Engineer 2019/missingNumber.py
sivolko/codeforce
4b00c4c012780036e56d2f0e79adb2f5db7559df
[ "MIT" ]
null
null
null
def missing_number(nums): arr = [0 for _ in range(len(nums))] for i in range(len(nums)): if nums[i] < len(nums): arr[nums[i]] = -1 for i in range(len(nums)): if arr[i] != -1: return i return len(nums)
21.583333
39
0.498069
41
259
3.097561
0.341463
0.275591
0.23622
0.330709
0.314961
0.314961
0.314961
0
0
0
0
0.017751
0.34749
259
12
40
21.583333
0.733728
0
0
0.222222
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
17ebb1b0a232779c87fd66dd4cd9585253cf8344
1,196
py
Python
setup.py
whitespy/django-simple-metatags
5a085b64a3e1e8d4d1b1bd6b9a9eabcfa275912f
[ "MIT" ]
4
2017-04-09T20:29:46.000Z
2021-05-03T23:38:17.000Z
setup.py
whitespy/django-simple-metatags
5a085b64a3e1e8d4d1b1bd6b9a9eabcfa275912f
[ "MIT" ]
6
2017-04-09T20:38:40.000Z
2022-03-03T12:28:04.000Z
setup.py
whitespy/django-simple-metatags
5a085b64a3e1e8d4d1b1bd6b9a9eabcfa275912f
[ "MIT" ]
6
2016-12-05T16:00:17.000Z
2021-01-11T11:46:39.000Z
from setuptools import setup, find_packages setup( name='django-simple-metatags', version='2.0.3', description="The django application allows to add title, keywords and " "description meta tags to site's pages.", author='Andrey Butenko', author_email='whitespysoftware@gmail.com', url='https://github.com/whitespy/django-simple-metatags', long_description=open('README.rst', encoding='utf-8').read(), packages=find_packages(), include_package_data=True, platforms='any', classifiers=[ 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Framework :: Django', 'Framework :: Django :: 2.1', 'Framework :: Django :: 2.2', 'Framework :: Django :: 3.0', 'Framework :: Django :: 3.1', 'Framework :: Django :: 3.2', ], )
35.176471
75
0.601171
127
1,196
5.614173
0.566929
0.159888
0.210379
0.182328
0
0
0
0
0
0
0
0.027809
0.248328
1,196
33
76
36.242424
0.765295
0
0
0
0
0
0.576923
0.040134
0
0
0
0
0
1
0
true
0
0.032258
0
0.032258
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
aa124a0746565781aaf226d284979259a69cd9e9
671
py
Python
application/unused/acronymos/acronymos_source.py
Vahen/VahenWebsite
19a0f0f923874d66d8229e20e0df5b7916da5595
[ "MIT" ]
null
null
null
application/unused/acronymos/acronymos_source.py
Vahen/VahenWebsite
19a0f0f923874d66d8229e20e0df5b7916da5595
[ "MIT" ]
null
null
null
application/unused/acronymos/acronymos_source.py
Vahen/VahenWebsite
19a0f0f923874d66d8229e20e0df5b7916da5595
[ "MIT" ]
null
null
null
import random # Todo -> Terminer les corrections from typing import Set, List def create_acronyme(string: str) -> str: return ''.join([x[0] for x in string.split(" ")]) def pick_random_word_by_letter(letter: str, words: Set[str]) -> str: words_starting_by = find_list_words_starting_by(letter, words) return words_starting_by[random.randint(len(words_starting_by))] def find_list_words_starting_by(letter: str, words: Set[str]) -> List[str]: return [x for x in words if x.upper().startswith(letter.upper())] def unroll_acronyme(acronyme: str, words: Set[str]) -> str: return ' '.join([pick_random_word_by_letter(x, words) for x in acronyme])
30.5
77
0.724292
105
671
4.4
0.333333
0.140693
0.162338
0.090909
0.337662
0.125541
0
0
0
0
0
0.001742
0.14456
671
21
78
31.952381
0.803136
0.04769
0
0
0
0
0.00314
0
0
0
0
0.047619
0
1
0.363636
false
0
0.181818
0.272727
0.909091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
0
0
0
3
aa457df01cd3ec06e6b098b140f070c64cd8b090
183
py
Python
v1/comments/urls.py
DucPhamTV/MaiTet
44a1465a3239808f6640592ba666d9c5449c0ef4
[ "MIT" ]
null
null
null
v1/comments/urls.py
DucPhamTV/MaiTet
44a1465a3239808f6640592ba666d9c5449c0ef4
[ "MIT" ]
15
2021-02-20T12:03:33.000Z
2021-07-26T10:15:03.000Z
v1/comments/urls.py
DucPhamTV/MaiTet
44a1465a3239808f6640592ba666d9c5449c0ef4
[ "MIT" ]
null
null
null
from rest_framework.routers import SimpleRouter from v1.comments.views import CommentViewSet router = SimpleRouter(trailing_slash=False) router.register('comments', CommentViewSet)
26.142857
47
0.846995
21
183
7.285714
0.714286
0
0
0
0
0
0
0
0
0
0
0.005952
0.081967
183
6
48
30.5
0.904762
0
0
0
0
0
0.043716
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
a4acdfd6ab541d7f88b9a599f52e9aadab5c5f4b
200
py
Python
programming-language/cases/python/examples/py.test/example1.py
wdv4758h/notes
60fa483961245ec5bb264d3f28a885fb82a1c25e
[ "Unlicense" ]
136
2015-06-15T13:26:40.000Z
2022-03-03T07:47:31.000Z
programming-language/cases/python/examples/py.test/example1.py
wdv4758h/notes
60fa483961245ec5bb264d3f28a885fb82a1c25e
[ "Unlicense" ]
82
2017-01-06T06:32:55.000Z
2020-09-03T03:34:24.000Z
programming-language/cases/python/examples/py.test/example1.py
wdv4758h/notes
60fa483961245ec5bb264d3f28a885fb82a1c25e
[ "Unlicense" ]
18
2015-12-04T04:02:44.000Z
2022-02-24T03:48:57.000Z
#!/usr/bin/env python ''' you can use this command to run specific Python code: ..code-block:: python py.test example1.py ''' def f(x): return x + 1 def test_f(): assert f(0) == 1
10.526316
53
0.6
34
200
3.5
0.705882
0
0
0
0
0
0
0
0
0
0
0.026667
0.25
200
18
54
11.111111
0.766667
0.61
0
0
0
0
0
0
0
0
0
0
0.25
1
0.5
false
0
0
0.25
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
a4b14eb6b6c945a92b4dd50e0dc46ef1f9f9807c
683
py
Python
tests/rules/test_required_if.py
mateuszz0000/Validator
80dde6dd9bcbc4e0fb5815c1415c40e7357e98bd
[ "MIT" ]
null
null
null
tests/rules/test_required_if.py
mateuszz0000/Validator
80dde6dd9bcbc4e0fb5815c1415c40e7357e98bd
[ "MIT" ]
null
null
null
tests/rules/test_required_if.py
mateuszz0000/Validator
80dde6dd9bcbc4e0fb5815c1415c40e7357e98bd
[ "MIT" ]
null
null
null
"""Tests for the RequiredIf validation rule."""
from validator.rules import RequiredIf


def test_required_if_01():
    """Non-empty strings and non-empty lists satisfy the rule."""
    for candidate in ("abc", ["a", "b", "c"]):
        assert RequiredIf("a").check(candidate)


def test_required_if_02():
    """Empty or non-matching values fail the rule."""
    for candidate in ("", [], "bcd", ["b", "c", "d"]):
        assert not RequiredIf("a").check(candidate)
22.766667
41
0.658858
99
683
4.242424
0.232323
0.2
0.342857
0.285714
0.8
0.8
0.657143
0.585714
0.585714
0.585714
0
0.007421
0.210835
683
29
42
23.551724
0.7718
0
0
0.571429
0
0
0.026354
0
0
0
0
0
0.285714
1
0.095238
false
0
0.047619
0
0.142857
0
0
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
351b7102ef7d184476ebafb377acce37ce069fd2
119
py
Python
Tuples.py
rashidulhasanhridoy/LANGUAGE-PROFICIENCY-Python-HackerRank
46beecbf3a2468d6c598fe62a3e65c5f0c1395c8
[ "Apache-2.0" ]
1
2020-07-21T18:01:52.000Z
2020-07-21T18:01:52.000Z
Tuples.py
rashidulhasanhridoy/LANGUAGE-PROFICIENCY-Python-HackerRank
46beecbf3a2468d6c598fe62a3e65c5f0c1395c8
[ "Apache-2.0" ]
null
null
null
Tuples.py
rashidulhasanhridoy/LANGUAGE-PROFICIENCY-Python-HackerRank
46beecbf3a2468d6c598fe62a3e65c5f0c1395c8
[ "Apache-2.0" ]
null
null
null
# Read an element count, then hash the tuple built from the space-separated
# integers on the following input line.
n = int(input())
integer_list = tuple(map(int, input().split()))
print(hash(integer_list))
23.8
40
0.731092
18
119
4.611111
0.555556
0.53012
0
0
0
0
0
0
0
0
0
0
0.092437
119
4
41
29.75
0.768519
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
351e4169b883332b0f845dd71443900ace5793ff
627
py
Python
lego/settings/__init__.py
andrinelo/lego
9b53c8fe538d9107b980a70e2a21fb487cc3b290
[ "MIT" ]
null
null
null
lego/settings/__init__.py
andrinelo/lego
9b53c8fe538d9107b980a70e2a21fb487cc3b290
[ "MIT" ]
null
null
null
lego/settings/__init__.py
andrinelo/lego
9b53c8fe538d9107b980a70e2a21fb487cc3b290
[ "MIT" ]
null
null
null
import os
import sys

# True when the test suite is being run ('test' appears in the first two argv slots).
TESTING = 'test' in sys.argv[:2]
# True when served through the daphne ASGI server.
DAPHNE_SERVER = 'daphne' in sys.argv

from .base import *  # noqa
from .lego import *  # noqa
from .rest_framework import *  # noqa
from .search import *  # noqa
from .logging import *  # noqa

if TESTING:
    from .test import *  # noqa
else:
    if os.environ.get('ENV_CONFIG') in ['1', 'True', 'true']:
        from .production import *  # noqa
    else:
        try:
            from .local import *  # noqa
        except ImportError as e:
            # Chain the original error explicitly so the real cause of the
            # failed local-settings import is preserved in the traceback.
            raise ImportError('Couldn\'t load local settings lego.settings.local') from e

DEFAULT_FROM_EMAIL = SERVER_EMAIL  # noqa -- SERVER_EMAIL comes from a star import above
24.115385
82
0.634769
84
627
4.666667
0.47619
0.204082
0.142857
0
0
0
0
0
0
0
0
0.004301
0.258373
627
25
83
25.08
0.83871
0.070175
0
0.1
0
0
0.062827
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
3525251f1d372db45b7ebad3ec881dff8481e8c7
444
py
Python
injectedConsole/plugin_help/__init__.py
ChenyangGao/SigilPlugin_injectedConsole
a35ee53671373db05e5ee66f0345d7ace06ddc0c
[ "BSD-3-Clause" ]
11
2021-03-04T02:19:48.000Z
2021-11-06T16:44:35.000Z
injectedConsole/plugin_help/__init__.py
fengdaokanhai/SigilPlugin_injectedConsole
a35ee53671373db05e5ee66f0345d7ace06ddc0c
[ "BSD-3-Clause" ]
1
2021-08-02T13:13:52.000Z
2021-08-02T13:13:52.000Z
injectedConsole/plugin_help/__init__.py
fengdaokanhai/SigilPlugin_injectedConsole
a35ee53671373db05e5ee66f0345d7ace06ddc0c
[ "BSD-3-Clause" ]
4
2021-03-08T07:42:17.000Z
2021-11-06T16:44:52.000Z
#!/usr/bin/env python3 # coding: utf-8 from plugin_util.console import get_current_shell, list_shells from plugin_util.run import run_file, run_path, run, load from plugin_util.usepip import execute_pip, install, uninstall, ensure_import from plugin_util.urlimport import ( install_url_meta, remove_url_meta, install_path_hook as install_url_hook, remove_path_hook as remove_url_hook ) from .editor import * from .function import *
29.6
78
0.808559
70
444
4.8
0.5
0.119048
0.166667
0
0
0
0
0
0
0
0
0.005168
0.128378
444
14
79
31.714286
0.863049
0.078829
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
353178e9150c100aa45bb72cda027d20d79ce4d2
7,985
py
Python
lintcode/257.py
jianershi/algorithm
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
[ "MIT" ]
1
2021-01-08T06:57:49.000Z
2021-01-08T06:57:49.000Z
lintcode/257.py
jianershi/algorithm
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
[ "MIT" ]
null
null
null
lintcode/257.py
jianershi/algorithm
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
[ "MIT" ]
1
2021-01-08T06:57:52.000Z
2021-01-08T06:57:52.000Z
""" 257. Longest String Chain https://www.lintcode.com/problem/longest-string-chain/description?_from=contest&&fromId=93 dp[i] longest string chian ending in i dp[i] = max(dp[i], dp[j] + 1) if distance between words[j] and word[i] is 1 for j in range [0, i) max(dp[i]) dp[i] = 1 longest increasing subsequence变形 """ class Solution: """ @param words: the list of word. @return: the length of the longest string chain. """ def longestStrChain(self, words): if not words: return 0 words = sorted(words, key=lambda x: len(x)) n = len(words) dp = [1] * n for i in range(n): for j in range(i - 1, -1 ,-1): if len(words[j]) == len(words[i]): continue if len(words[i]) - len(words[j]) > 1: break if self.is_valid(words[j], words[i]): dp[i] = max(dp[i], dp[j] + 1) return max(dp) def is_valid(self, word1, word2): i = 0 j = 0 diffed_by_one = False while i < len(word1) and j < len(word2): if word1[i] == word2[j]: i += 1 j += 1 else: if diffed_by_one: return False if j + 1 < len(word2) and word1[i] == word2[j + 1]: j += 1 else: return False diffed_by_one = True return True s = Solution() 
words=["uiykgmcc","jrgbss","mhkqodcpy","lkj","bwqktun","s","nrctyzifwytjblwy","wrp","scqlcwmxw","irqvnxdcxoejuu","gmlckvofwyifmrw","wbzbyrcppaljigvo","lk","kfeouqyyrer","efzzpvi","ubkcitcmwxk","txihn","mdwdmbtx","vuzvcoaif","jwmboqvhpqodsj","wscfvrfl","pzye","waxyoxftvrgqmkg","wwdidopozinxxn","dclpg","xjsvlxktxs","ajj","pvsdastm","tatjxhygidhn","feafycxdxagn","irqvnxxoeuu","kwjo","tztoovsyfwz","prllrw","sclmx","bbmjnwaxcwaml","gl","wiax","uzvcoaif","ztovyfwz","qxy","zuexoxyp","qxyyrl","pvsdasvtm","femafycxdxaagn","rspvccjcm","wvyiax","vst","efzi","fjmdcc","icsinrbpql","ctybiizlcr","ntyzfwytjblw","tatjxhygidhpn","e","kykizdandafusu","pnepuwcsxl","kfeuqyyrer","afplzhbqguu","hvajtj","prll","ildzdimea","zueoxp","ezi","lqr","jkaagljikwamaqvf","mlzwhkxsn","rspvccbcjjtcm","wscfvrl","m","msygukwlkrqboc","pifojogoveub","bkcmwx","jercgybhss","wrpi","aicsinkgrbpqli","aplzbuu","sclcmxw","atpepgsz","govrcuuglaer","bdxjpsvlxkytxs","uikgm","bm","wvyhiqax","znvaasgfvqi","hatpepgsz","hrzebpa","bnfz","lybtqrfzw","taxhygihn","bjnfzk","mhqp","ide","znvcaasgfvqi","ftv","afplzhbqsguuu","thn","pdbccbe","mxevopfoimgjww","fjmdrcce","rspvccjjcm","jv","motnfwohwule","xjsvlxtxs","bqeb","eug","jftavwgl","rzebpa","lybtqrfazw","zuexoxp","jercgybhsys","hajtj","bkcitcmwxk","mbpvxsdastvtm","mowlznwhkxsn","dvenn","rsacxe","tatjxhygihn","cotybiizlcr","bbmnaxaml","pkwrpsi","nqpdbccbkxens","mbpbovxsdastvtm","mj","pxpsvikwekuq","qeug","dmelddga","aicsinkgrbpxqli","bdxjpsvlxktytxs","pkrllrxw","jkgljikwmaqf","iddie","ctybiizcr","nyzfwytjblw","yvuhmiuehspi","keuqre","wzbypaigvo","sck","uzcoaf","dlpg","ubkcpitlscmwxk","molzwhkxsn","pepuwcsxl","laplm","dclpgc","mahkxqodcpy","sclcmx","hvrzebpaz","bgovrcuuglaer","clazpulmw","yvuyhmiuehspiq","wzbycpaljigvo","sceqalciwmxw","hjytflmvsgv","u","hjyvxytfflhmvsgv","jkgjikwmaqf","fefycxdxagn","ftvw","ofncgxrkqvcr","spvcjc","pvsdastvtm","kykzdandaus","wbzbycppaljigvo","haytpepgsz","jmowlznwhkxsn","aplzhbguu","zvyz","nfvqi","jfvtavwsgl","xejnllhfulns","zhhvbiqiw","jkglji
kwmaqvf","tyizc","irqvnxcxoejuu","clvazzpulmw","oncgxrqvcr","qlupvpdkhrm","mtnfwohwule","wwdidopzozinxxn","auiykgmcc","wscfvrfyl","pfksmrullrxw","jwmoqvhpqods","ftavwg","iddiea","kcmw","ykkwjwo","pe","aplzbguu","eu","bbmnaxal","ntyswtnlab","zhhhvbhbiqiw","jwmoqvpqods","kykzdndaus","bbmjnaxcwaml","zunvcaasgfvqi","icsingrbpql","sceqalciwmsxyw","yvuhmiuehsp","bxjsvlxktxs","waxoxftvrgqmkg","cogxxpaknks","scllvazzpulmw","tatjxhygeidhpn","ftvwg","tyz","nafvqi","oby","pgzpkhqog","irqvnxxoejuu","oxwpkxlakcp","bnf","oxwnpkxlakcp","bwqktu","ufybbaozoqk","ntydswtnlab","zvyfz","znaafvqi","npdbccbke","mhkqocpy","kuq","bjnfz","taxhyihn","kwrpsi","qifepmcatbdjlf","lzwhks","kfeuqre","mxevopfoimgww","spvcjcm","oncgxrkqvcr","jftavwsgl","soifcbya","jpzyeg","jwmboqvhpqods","lapulm","jrgbhss","xejfnllhfulns","zhhhvbbiqiw","km","kuqre","scxlzlvazzpulmw","ztvyfwz","wbzbycpaljigvo","rzbpa","vsastm","uybaooqk","dn","ykwjwo","ufybmvbaozoqk","nknm","mbpvsdastvtm","dpgzpxykhqog","wzbypajigvo","bnjnfzk","eollbigtftpdrd","zhbiqiw","yvuhiuehp","zhhhvbhbiqiwg","pfksrullrxw","pzyeg","aplzhbqguu","z","hvrzecbpazw","clvazpulmw","tajxhygihn","pgzpxykhqog","fefyxdxagn","wimomuvhn","lqrzw","xejnlhfulns","jhrc","xsxxs","slmx","jrgss","uikgmc","ncgqvcr","womuhn","aryouvtnmme","uzco","zhhhvbiqiw","hjytflhmvsgv","znvaasfvqi","kuqr","ojrpp","ztoovyfwz","zvz","pxpsviweuq","ufybaooqk","xy","jfvvtavwksvgl","raiachv","bmnaxl","rspvccjjtcm","pgzpxkhqog","xhbtfnqebaj","sceqalciwmsxw","jssctk","uzvcoaf","fefydxagn","jhrvc","mbj","raiahv","nrtyzifwytjblwy","mhqcp","jkgjkwmaqf","wscfvrfylhi","lqrz","ahabucermswyrvl","wxoxftvrgqmkg","ku","uyaoq","mhqocp","ykwjo","vstm","ofncgxrkqvcwr","dqvh","taxyihn","idie","bwqtu","tztoovyfwz","rspvcccjjtcm","uojrpp","wmomuhn","cotycbiizlxcr","nrtyzfwytjblw","ocbya","sceqlciwmxw","ajtj","rspvccbcjjthcm","kfeuqyyre","dmelddg","txyihn","ubkcitlscmwxk","ntyswtnla","bdxjpstvlxktytxs","odqdvbh","pxpsvikeewekuq","mdwdmbdtux","vs","bma","wzbypigvo","qxyy","vsstm","hbtnqeba","hrzebpaz","xhb
tfnjsqebbaj","ahaucermswyrv","ddmbtx","zhhbiqiw","pxpsvikewekuq","odqdvgbh","bxjpsvlxktxs","jsck","fjmdc","mdwdmbdtx","jqxyyrl","pxpsvikweuq","ctybizcr","dqvbh","lpl","lqrfzw","ufybaozoqk","znvaafvqi","yvuhmiuehp","hvrzebpazw","pfksrllrxw","alzuu","xjsvxtxs","afplzhbqguuu","icsingrbpqli","hjxytflhmvsgv","femafycxdxagn","uyaoqk","gmlckvofwyifrw","cinrbpql","jrcgbhss","oxwpkxlkcp","jkagljikwamaqvf","eollbigtftpdrdy","rspvcjcm","socbya","clapulm","qeb","kwrpi","efzpi","hbtfnqebaj","kykizdnandafusu","sclvazzpulmw","efzzpvvi","jfvvtavwsvgl","mhqocpy","v","mbpbvxsdastvtm","irqvnxouu","hvaajtj","ofnlcgxrkqvcwr","hbtqeba","hbtqeb","jwmqpds","ntrnlhujdslco","zv","npdbccbken","mhp","ddb","prllw","mddmbtx","clazpulm","cogxxpaknkse","bkitcmwxk","oxwpklkcp","tyiz","jwmqvpqods","waxyoxftvrgqmkgb","afplzhbbqsgujuu","bwtu","jercgbhss","rsacx","mahkqodcpy","cotycbiizlcr","ahabucermswyrv","lupvpkhr","dvnn","b","atpepsz","ncgxqvcr","qe","ubkcitlcmwxk","lyqrfzw","wimomuhn","bbmnaxl","motnfwohrwule","yvuyhmiuehspi","jfvvtavwsgl","rac","fefdxagn","bwqkctun","uotjrpp","ddbtx","afplzhbbqsguuu","xss","xsxs","wvyiqax","kykizdandaus","npdbccbkens","r","oxwnpkxjlakcp","tzmteoovsyfwz","kykizdnandafuspu","ahabulcermswyrvl","xjsxxs","qxyyr","ck","xhbtfnqebbaj","nqpdbccbkens","mpvsdastvtm","zuexqoxyp","gmlkvofwyifrw","kmw","txhn","kykizdandausu","molznwhkxsn","lupvpdkhr","jwmqvpds","bktcmwx","wyiax","hzvaajtj","ddbx","pifojogveub","naafvqi","motnfwjohrwule","odqvbh","aicsingrbpqli","jopzyeg","lybtqrfazrw","pijogveub","xzejfnllhfulns","scxllvazzpulmw","irqyvnxdcxfoejuu","cogxpaknks","pdkwrpsi","wzbycpajigvo","xjsxtxs","irqvnxdcxfoejuu","xhbtfnjqebbaj","uybaoqk","oncgxqvcr","aj","pepuwsxl","lytqrfzw","nkm","jrgs","pkrllrw","wscfvrfyli","bbmjnaxcaml","jftavwg","vuzvcozaif","pifjogveub","cmogxxpaknkse","cinrbql","scqlciwmxw","ztvyfz","mxyevopfoimgjpww","soicbya","lupvpdkhrm","ahaucermsyrv","ufybmvbaouzoqk","bdxjpsvlxktxs","hjxytfflhmvsgv","hjvxytfflhmvsgv","nqpdbccbzkxens","wr","kykzdndus","iddimea","
fjmdrcc","efzzpi","vsdastm","btqeb","pfkrllrxw","ocby","irqvnxxouu","ildzpdimea","lzwhkxsn","ilddimea","ufybvbaozoqk","mxyevopfoimgjww","jhr","kcmwx","dvn","uzcof","glw","hbtnqebaj","riahv","w","qeugv","kfeuqyre","ilrdzpdimea","lplm","icinrbpql","scqlcmxw","bbmjnaxaml","e","rsac","bf","jwmqvpqds","tzteoovsyfwz","rc","lzwhkxs","jkgljikwamaqvf","tybizc","aplzuu","nrtyzifwytjblw","pze","bktcmwxk","uiykgmc","jsctk","npdbccbe","tybizcr"] print(s.longestStrChain(words))
133.083333
6,436
0.689167
772
7,985
7.11658
0.806995
0.003822
0.009829
0.003822
0.006735
0.005096
0.005096
0.005096
0.005096
0
0
0.004341
0.076894
7,985
60
6,437
133.083333
0.741012
0.049092
0
0.166667
0
0
0.628073
0
0
0
0
0
0
1
0.055556
false
0
0
0
0.222222
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
35485b9479958d2d47a02d4fdf1fd0230cf334e9
284
py
Python
6 kyu/Real Password Cracker.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
6
2020-09-03T09:32:25.000Z
2020-12-07T04:10:01.000Z
6 kyu/Real Password Cracker.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
1
2021-12-13T15:30:21.000Z
2021-12-13T15:30:21.000Z
6 kyu/Real Password Cracker.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
null
null
null
from itertools import product
from hashlib import sha1


# NOTE: the parameter name 'hash' shadows the builtin but is kept for
# backward compatibility with existing callers.
def password_cracker(hash, max_length=5):
    """Brute-force a SHA-1 digest of a short lowercase-ASCII password.

    Tries every string of 'a'-'z' from length 1 up to *max_length*
    (previously hard-coded to 5) and returns the first candidate whose
    SHA-1 hex digest equals *hash*.  Returns None when nothing matches.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    for length in range(1, max_length + 1):
        for letters in product(alphabet, repeat=length):
            candidate = "".join(letters)  # join once, reuse for hash and return
            if sha1(candidate.encode('utf-8')).hexdigest() == hash:
                return candidate
    return None
31.555556
67
0.612676
37
284
4.675676
0.702703
0.057803
0
0
0
0
0
0
0
0
0
0.023585
0.253521
284
9
68
31.555556
0.792453
0
0
0
0
0
0.108772
0.091228
0
0
0
0
0
1
0.142857
false
0.142857
0.285714
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
101f3b4e80adc3534c5aa8016e5bdc4f35dfeaaa
138
py
Python
Projeto/pedebem/bar/apps.py
UFOP-CSI477/2021-02-atividades-UFOP-LucasAlmeida
07b66989c1868ef50b557c9acafcafb3f931a870
[ "MIT" ]
null
null
null
Projeto/pedebem/bar/apps.py
UFOP-CSI477/2021-02-atividades-UFOP-LucasAlmeida
07b66989c1868ef50b557c9acafcafb3f931a870
[ "MIT" ]
null
null
null
Projeto/pedebem/bar/apps.py
UFOP-CSI477/2021-02-atividades-UFOP-LucasAlmeida
07b66989c1868ef50b557c9acafcafb3f931a870
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class BarConfig(AppConfig):
    """Django application configuration for the ``bar`` app."""

    # 64-bit auto-increment primary keys by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'bar'
19.714286
56
0.746377
17
138
5.941176
0.882353
0
0
0
0
0
0
0
0
0
0
0
0.15942
138
6
57
23
0.87069
0
0
0
0
0
0.231884
0.210145
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
101feaa476e1a0515bfd0e32c44607c3249ef6f9
1,463
py
Python
splendor_sim/test/factories/test_json_validator.py
markbrockettrobson/SplendorBots
f78fab8cccf30c9cc4351308eedfca4203e55463
[ "MIT" ]
1
2019-06-19T02:00:32.000Z
2019-06-19T02:00:32.000Z
splendor_sim/test/factories/test_json_validator.py
markbrockettrobson/SplendorBots
f78fab8cccf30c9cc4351308eedfca4203e55463
[ "MIT" ]
null
null
null
splendor_sim/test/factories/test_json_validator.py
markbrockettrobson/SplendorBots
f78fab8cccf30c9cc4351308eedfca4203e55463
[ "MIT" ]
null
null
null
import unittest

import splendor_sim.src.factories.json_validator as json_validator


class TestJsonValidator(unittest.TestCase):
    """Unit tests for JsonValidator against a small two-field schema."""

    def setUp(self):
        # "age" is a required integer with a minimum; "name" is optional.
        self._schema = {
            "name": {"type": "string"},
            "age": {"type": "integer", "min": 10, "required": True},
        }
        self._json = {"name": "mark", "age": 10}

    def _make_validator(self):
        # Arrange: build a validator over the shared schema.
        return json_validator.JsonValidator(self._schema)

    def test_validate_json(self):
        # Act / Assert: the default payload is valid.
        self.assertTrue(self._make_validator().validate_json(self._json))

    def test_validate_json_true_missing_non_required_field(self):
        # Dropping the optional "name" field is still valid.
        self._json = {"age": 10}
        self.assertTrue(self._make_validator().validate_json(self._json))

    def test_validate_json_false_incorrect_type(self):
        # "age" given as a string must be rejected.
        self._json = {"name": "mark", "age": "10"}
        self.assertFalse(self._make_validator().validate_json(self._json))

    def test_validate_json_false_missing_required_field(self):
        # Omitting the required "age" field must be rejected.
        self._json = {"name": "mark"}
        self.assertFalse(self._make_validator().validate_json(self._json))
32.511111
72
0.650034
163
1,463
5.466258
0.239264
0.204265
0.152637
0.085297
0.73064
0.73064
0.59596
0.59596
0.59596
0.59596
0
0.007201
0.240602
1,463
44
73
33.25
0.794779
0.051265
0
0.333333
0
0
0.053818
0
0
0
0
0
0.166667
1
0.208333
false
0
0.083333
0
0.333333
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
102756becedccecab8caa6b163783ea5218405f9
123
py
Python
TorrentPython/TorrentPython/Defines.py
reignofmiracle/RM_Torrent
a3f34d1868c0aeadc9c2c3e301f798dfae4fb3ff
[ "MIT" ]
1
2020-01-02T02:05:36.000Z
2020-01-02T02:05:36.000Z
TorrentPython/TorrentPython/Defines.py
reignofmiracle/RM_Torrent
a3f34d1868c0aeadc9c2c3e301f798dfae4fb3ff
[ "MIT" ]
null
null
null
TorrentPython/TorrentPython/Defines.py
reignofmiracle/RM_Torrent
a3f34d1868c0aeadc9c2c3e301f798dfae4fb3ff
[ "MIT" ]
null
null
null
class Defines(object):
    """Protocol-level constants for this BitTorrent client."""

    # Handshake protocol identifier string from the BitTorrent spec.
    PROTOCOL_ID = b'BitTorrent protocol'
    # Two-letter client id and version embedded in the peer id.
    RM_CLIENT_ID = b'RM'
    RM_CLIENT_VERSION = b'0100'
17.571429
40
0.691057
18
123
4.444444
0.611111
0.075
0
0
0
0
0
0
0
0
0
0.041237
0.211382
123
6
41
20.5
0.783505
0
0
0
0
0
0.204918
0
0
0
0
0
0
1
0
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
102e541c574caf63f801d1b925046bf123411f44
61
py
Python
testes/teste_script.py
SuyKingsleigh/KitCast
12ece1fbccd252dfeb7c417bc51519d4cbe3546c
[ "MIT" ]
null
null
null
testes/teste_script.py
SuyKingsleigh/KitCast
12ece1fbccd252dfeb7c417bc51519d4cbe3546c
[ "MIT" ]
null
null
null
testes/teste_script.py
SuyKingsleigh/KitCast
12ece1fbccd252dfeb7c417bc51519d4cbe3546c
[ "MIT" ]
null
null
null
import sys

# Echo every command-line argument (argv[0] is the script name, skip it).
for value in sys.argv[1:]:
    print('arg: ' + value)
15.25
24
0.590164
11
61
3.272727
0.727273
0
0
0
0
0
0
0
0
0
0
0.021277
0.229508
61
4
25
15.25
0.744681
0
0
0
0
0
0.080645
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
10465ab3d0634ad742495f49de255abb27420c04
593
py
Python
base/c1_monitor/monitor_frame.py
wwllong/py-design-pattern
7d4f870cf1be09c7b3b4d5329df78765b86ea451
[ "Apache-2.0" ]
1
2021-05-20T07:24:55.000Z
2021-05-20T07:24:55.000Z
base/c1_monitor/monitor_frame.py
wwllong/py-design-pattern
7d4f870cf1be09c7b3b4d5329df78765b86ea451
[ "Apache-2.0" ]
null
null
null
base/c1_monitor/monitor_frame.py
wwllong/py-design-pattern
7d4f870cf1be09c7b3b4d5329df78765b86ea451
[ "Apache-2.0" ]
null
null
null
# Observer pattern - framework skeleton

# ABCMeta and abstractmethod are used to define the abstract base class/method.
from abc import ABCMeta, abstractmethod


class Observer(metaclass=ABCMeta):
    """Abstract base class for observers."""

    @abstractmethod
    def update(self, observable, object):
        """React to a notification from *observable* carrying *object*."""
        pass


class Observable:
    """Base class for subjects that observers can subscribe to."""

    def __init__(self):
        # Name-mangled list of currently subscribed observers.
        self.__observers = []

    def addObserver(self, observer):
        self.__observers.append(observer)

    def removeObserver(self, observer):
        self.__observers.remove(observer)

    def notifyObserver(self, object=0):
        # Fan the event out to every subscriber in registration order.
        for subscriber in self.__observers:
            subscriber.update(self, object)
19.766667
41
0.657673
61
593
6.196721
0.52459
0.137566
0.084656
0.132275
0
0
0
0
0
0
0
0.002208
0.236088
593
29
42
20.448276
0.83223
0.111298
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.066667
0.066667
0
0.533333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
105e3c9cfd68e3d3a94772a8e2750d96d2f760fa
93
py
Python
billsplit/apps.py
ksarthak4ever/Django-Bill_Splitting
59ee546f9e034dfd21effa30629b75e42b07da92
[ "MIT" ]
null
null
null
billsplit/apps.py
ksarthak4ever/Django-Bill_Splitting
59ee546f9e034dfd21effa30629b75e42b07da92
[ "MIT" ]
null
null
null
billsplit/apps.py
ksarthak4ever/Django-Bill_Splitting
59ee546f9e034dfd21effa30629b75e42b07da92
[ "MIT" ]
1
2021-10-07T16:17:02.000Z
2021-10-07T16:17:02.000Z
from django.apps import AppConfig


class BillsplitConfig(AppConfig):
    """Django application configuration for the ``billsplit`` app."""

    name = 'billsplit'
15.5
33
0.763441
10
93
7.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.16129
93
5
34
18.6
0.910256
0
0
0
0
0
0.096774
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
1062057a42335c1cb315386ef8ebe99938be11a0
530
py
Python
compiler/codegen/codegen.py
raccoon-lang/raccoon
2a88039f271fbb0c370d1a6b57b2d67e3e9c6c9b
[ "Apache-2.0" ]
3
2020-12-28T18:10:49.000Z
2022-01-25T20:07:07.000Z
compiler/codegen/codegen.py
Valentine-Mario/raccoon
4dda2f6e5d227b43412d20729844ba394d6386f9
[ "Apache-2.0" ]
1
2021-08-23T21:09:30.000Z
2021-08-23T21:09:30.000Z
compiler/codegen/codegen.py
Valentine-Mario/raccoon
4dda2f6e5d227b43412d20729844ba394d6386f9
[ "Apache-2.0" ]
1
2021-01-15T08:32:37.000Z
2021-01-15T08:32:37.000Z
""" """ from copy import deepcopy from platform import machine class Codegen: """ """ def __init__(self, ast, semantic_info): self.word_size = 64 if '64' in machine() else 32 self.semantic_info = semantic_info self.ast = ast def __repr__(self): fields = deepcopy(vars(self)) string = ", ".join([f"{repr(key)}: {repr(val)}" for key, val in fields.items()]) return "{" + string + "}" def generate(self): return self def dumps(self): pass
20.384615
88
0.567925
65
530
4.446154
0.538462
0.124567
0.110727
0
0
0
0
0
0
0
0
0.016
0.292453
530
25
89
21.2
0.754667
0
0
0
0
0
0.058594
0
0
0
0
0
0
1
0.266667
false
0.066667
0.133333
0.066667
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
1068edd9c2cde925ca4eea48af2003a80af99ef9
678
py
Python
ridt/tests/systemtests/test_st25.py
riskaware-ltd/ridt
c0288a2f814b2749bdf73de7157f7477ca271aff
[ "MIT" ]
null
null
null
ridt/tests/systemtests/test_st25.py
riskaware-ltd/ridt
c0288a2f814b2749bdf73de7157f7477ca271aff
[ "MIT" ]
9
2020-09-18T08:22:39.000Z
2021-07-20T09:39:59.000Z
ridt/tests/systemtests/test_st25.py
riskaware-ltd/ridt
c0288a2f814b2749bdf73de7157f7477ca271aff
[ "MIT" ]
1
2021-06-22T21:53:20.000Z
2021-06-22T21:53:20.000Z
import shutil
import unittest
from os import listdir, remove
from os.path import join

from ridt.config import ConfigFileParser
from ridt.container import Domain
from ridt.container.eddydiffusionrun import EddyDiffusionRun
from ridt.data import BatchDataStore, DataStoreReader


class ST25(unittest.TestCase):
    """System Test 25.

    Test the system is able to output the data store created during a
    batch run or single run mode to disk."""

    def setUp(self) -> None:
        pass  # no fixtures required yet

    def tearDown(self) -> None:
        pass  # nothing to clean up yet

    def test_verify(self):
        pass  # TODO: implement the actual verification


if __name__ == "__main__":
    unittest.main()
19.371429
60
0.710914
91
678
5.197802
0.538462
0.084567
0.05074
0.07611
0
0
0
0
0
0
0
0.007663
0.230089
678
34
61
19.941176
0.898467
0.175516
0
0.157895
0
0
0.015038
0
0
0
0
0
0
1
0.157895
false
0.157895
0.526316
0
0.736842
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
107bc069d3961ebf37c18165bf8960fa751bb0a6
21
py
Python
settings.py
nelldnine/symph-fb-slack
7cbac7218fc66a24e12878bd1ad9b4e1a2781ec9
[ "Apache-2.0" ]
null
null
null
settings.py
nelldnine/symph-fb-slack
7cbac7218fc66a24e12878bd1ad9b4e1a2781ec9
[ "Apache-2.0" ]
null
null
null
settings.py
nelldnine/symph-fb-slack
7cbac7218fc66a24e12878bd1ad9b4e1a2781ec9
[ "Apache-2.0" ]
null
null
null
# Placeholder Facebook API key — 'XXXX' is a dummy value; supply the real
# credential via environment/config rather than committing it to VCS.
FACEBOOK_KEY = 'XXXX'
21
21
0.761905
3
21
5
1
0
0
0
0
0
0
0
0
0
0
0
0.095238
21
1
21
21
0.789474
0
0
0
0
0
0.181818
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1083b7e694d8db2ab131f1b8c7874780b9245da3
666
py
Python
Geeks4Geeks/InterChangeQ.py
aparnamaleth/CodingPractice
937600b0dce20be023527cc036d1118312a140ea
[ "MIT" ]
null
null
null
Geeks4Geeks/InterChangeQ.py
aparnamaleth/CodingPractice
937600b0dce20be023527cc036d1118312a140ea
[ "MIT" ]
null
null
null
Geeks4Geeks/InterChangeQ.py
aparnamaleth/CodingPractice
937600b0dce20be023527cc036d1118312a140ea
[ "MIT" ]
null
null
null
class Queue:
    """FIFO queue whose pop() interchanges its first and second halves.

    ``input`` holds the live elements; ``output`` is scratch space used
    while shuffling; ``size`` tracks the element count.
    """

    def __init__(self):
        self.input = []
        self.output = []
        self.size = 0

    def push(self, data):
        """Append *data* to the back of the queue."""
        self.input.append(data)
        self.size = self.size + 1

    def pop(self):
        """Swap the first half of the queue with the second half and return
        the underlying list.  For an odd length, the middle element stays
        with the second half.

        NOTE: the original used Python-2-only ``range(x/2)`` (true division
        breaks under Python 3) and a ``print`` statement, and performed an
        extra rotation that corrupted the interchange; fixed here.
        """
        half = self.size // 2
        # Peel off the first half into scratch space...
        for _ in range(half):
            self.output.append(self.input.pop(0))
        # ...then re-append it in its original order behind the second half.
        while self.output:
            self.input.append(self.output.pop(0))
        return self.input


q = Queue()
q.push(20)
q.push(30)
q.push(40)
q.push(50)
print(q.pop())
20.8125
61
0.627628
118
666
3.508475
0.237288
0.23913
0.181159
0.10628
0.536232
0.536232
0.44686
0.379227
0.379227
0.379227
0
0.033582
0.195195
666
31
62
21.483871
0.738806
0
0
0.357143
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.035714
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
52c19cf49bc3e20ccaba6c7c3afc80ddc76a4a5a
828
py
Python
linear-regression/src/my_kappa_calculator.py
yanshengjia/nlp
43398652b2cab9b85fd042f60e6f68c7b48697bc
[ "MIT" ]
1
2018-04-12T07:48:10.000Z
2018-04-12T07:48:10.000Z
linear-regression/src/my_kappa_calculator.py
yanshengjia/nlp
43398652b2cab9b85fd042f60e6f68c7b48697bc
[ "MIT" ]
null
null
null
linear-regression/src/my_kappa_calculator.py
yanshengjia/nlp
43398652b2cab9b85fd042f60e6f68c7b48697bc
[ "MIT" ]
1
2018-05-02T06:53:29.000Z
2018-05-02T06:53:29.000Z
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Shengjia Yan
# Date: 2017-10-19
# Email: i@yanshengjia.com
"""Thin wrappers around the weighted-kappa helpers with input validation."""

import numpy as np
from quadratic_weighted_kappa import quadratic_weighted_kappa as qwk
from quadratic_weighted_kappa import linear_weighted_kappa as lwk


def assert_inputs(rater_a, rater_b):
    """Assert that both rating arrays have an integer dtype."""
    for ratings in (rater_a, rater_b):
        assert np.issubdtype(ratings.dtype, np.integer), 'Integer array expected, got ' + str(ratings.dtype)


def quadratic_weighted_kappa(rater_a, rater_b, min_rating, max_rating):
    """Quadratic weighted kappa between two integer rating arrays."""
    assert_inputs(rater_a, rater_b)
    return qwk(rater_a, rater_b, min_rating, max_rating)


def linear_weighted_kappa(rater_a, rater_b, min_rating, max_rating):
    """Linear weighted kappa between two integer rating arrays."""
    assert_inputs(rater_a, rater_b)
    return lwk(rater_a, rater_b, min_rating, max_rating)
36
101
0.78744
136
828
4.492647
0.330882
0.08838
0.126023
0.13748
0.628478
0.523732
0.484452
0.484452
0.386252
0.238953
0
0.012228
0.111111
828
22
102
37.636364
0.817935
0.123188
0
0.166667
0
0
0.077778
0
0
0
0
0
0.416667
1
0.25
false
0
0.25
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
1
0
0
3
52d54f5fa4aaa53b074f49e11c11b9b420b21684
1,070
py
Python
registration/views.py
riggedCoinflip/mydjango
9f30effc0dccd95916f59a3b65d7e02bdd2827b5
[ "MIT" ]
null
null
null
registration/views.py
riggedCoinflip/mydjango
9f30effc0dccd95916f59a3b65d7e02bdd2827b5
[ "MIT" ]
1
2021-02-26T02:13:35.000Z
2021-02-26T02:13:35.000Z
registration/views.py
riggedCoinflip/mydjango
9f30effc0dccd95916f59a3b65d7e02bdd2827b5
[ "MIT" ]
null
null
null
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views import generic

from activate.verifiy import send_verification_email
from users.forms import CustomUserCreationForm
from users.models import User


class SignupView(generic.CreateView):
    """Create a new user, e-mail a verification link, and log them in."""

    template_name = 'registration/signup.html'
    model = User
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('welcome')

    def form_valid(self, form):
        # Persist the new user first so the verification mail can reference it.
        self.object = form.save()
        user = self.object
        send_verification_email(user, self.request)

        # TODO messages.info(request, "Thanks for registering. You are now logged in.")
        user = authenticate(
            username=form.cleaned_data['username'],
            password=form.cleaned_data['password1'],
        )
        login(self.request, user)
        return HttpResponseRedirect(self.get_success_url())


class WelcomeView(generic.TemplateView):
    """Static post-signup welcome page."""

    template_name = 'registration/welcome.html'
34.516129
87
0.714953
120
1,070
6.25
0.525
0.053333
0.056
0
0
0
0
0
0
0
0
0.001178
0.206542
1,070
30
88
35.666667
0.882214
0.071963
0
0
0
0
0.073663
0.049445
0
0
0
0.033333
0
1
0.043478
false
0.043478
0.304348
0
0.695652
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
1
0
0
3
52e449c0774455cb2b78f5505b5dee811827265a
7,698
py
Python
corona_data_collector/tests/test_consistent_length.py
tkarady/avid-covider-pipelines
43944410092ebab24e821b385de3735b757f0062
[ "MIT" ]
null
null
null
corona_data_collector/tests/test_consistent_length.py
tkarady/avid-covider-pipelines
43944410092ebab24e821b385de3735b757f0062
[ "MIT" ]
20
2020-04-16T16:26:07.000Z
2020-10-08T07:52:27.000Z
corona_data_collector/tests/test_consistent_length.py
tkarady/avid-covider-pipelines
43944410092ebab24e821b385de3735b757f0062
[ "MIT" ]
5
2020-04-16T16:36:35.000Z
2020-10-03T12:48:05.000Z
from corona_data_collector.DBToFileWriter import collect_row from corona_data_collector.config import values_to_convert from corona_data_collector import load_from_db, add_gps_coordinates, export_corona_bot_answers from dataflows import Flow, load, printer from avid_covider_pipelines.utils import get_parameters_from_pipeline_spec import random import logging logging.basicConfig(level=logging.INFO) def test_exposure_status_failure(): print("test_exposure_status_failure") record = { 'age': '72', 'sex': 0, 'locale': 'he', 'street': 'יונה סאלק', 'smoking': 1, 'version': '1.0.1', 'city_town': 'אשדוד', 'temperature': '36.3', 'met_above_18': '0', 'met_under_18': '0', 'general_feeling': 0, 'numPreviousReports': 0, 'chronic_hypertension': 1, 'id': 175130, 'created': '2020-03-25T16:32:38.997021', 'insulation_status': 0 } record_to_store = collect_row(record) print(record_to_store) assert len(record_to_store) > 0, 'failed to create a record that can be stored in file' print("OK") def test_expected_contact_with_patient(): print("test_expected_contact_with_patient") back_from_abroad_db = [169603, 169632, 169813] contact_with_patient_db = [10722, 10715, 10697] Flow( load_from_db.flow({ "where": "id in (%s)" % ", ".join(map(str, back_from_abroad_db + contact_with_patient_db)) }), add_gps_coordinates.flow({ "source_fields": get_parameters_from_pipeline_spec("pipeline-spec.yaml", "corona_data_collector", "corona_data_collector.add_gps_coordinates")["source_fields"], "get-coords-callback": lambda street, city: (random.uniform(29, 34), random.uniform(34, 36), int(street != city)) }), export_corona_bot_answers.flow({ "destination_output": "data/corona_data_collector/destination_output" }), ).process() contact_with_patient_key = values_to_convert['insulation_status']['contact-with-patient'] back_from_abroad_key = values_to_convert['insulation_status']['back-from-abroad'] contact_with_patient_array = [] back_from_abroad_array = [] counts = {"contact_with_patient": 0, "back_from_abroad": 
0} def _test(row): if int(row["isolation"]) == contact_with_patient_key: counts["contact_with_patient"] += 1 contact_with_patient_array.append(int(row["id"])) if int(row["isolation"]) == back_from_abroad_key: assert int(row["id"]) in back_from_abroad_db counts["back_from_abroad"] += 1 back_from_abroad_array.append(int(row["id"])) Flow( load('data/corona_data_collector/destination_output/corona_bot_answers_25_3_2020_with_coords.csv'), load('data/corona_data_collector/destination_output/corona_bot_answers_22_3_2020_with_coords.csv'), _test, ).process() assert 3 == counts["contact_with_patient"], str(counts) assert 3 == counts["back_from_abroad"], str(counts) assert set(back_from_abroad_array) == set(back_from_abroad_db) assert set(contact_with_patient_array) == set(contact_with_patient_db) print("OK") def test_isolated_total_count(): print("test_isolated_total_count") db_isolated_id = [169603,169630,169632,169637,169690,169728,169753,169813,169829,169837,169882,169924,169930,170014,170042,170064,170067,170097,170099,170127,170184,170223,170234,170244,170263,170272,170289,170322,170326,170328,170350,170370,170390,170414,170428,170432,170436,170438,170442,170448,170453,170478,170479,170621,170629,170685,170735,170744,170777,170811,170878,170886,170903,170929,170936,170962,170970,170989,171009,171018,171078,171097,171123,171127,171132,171133,171142,171158,171162,171200,171201,171230,171256,171268,171283,171288,171290,171302,171323,171337,171342,171374,171399,171440,171472,171499,171506,171541,171571,171590,171599,171615,171686,171718,171720,171753,171823,171865,171900,171904,171907,171991,172048,172076,172153,172155,172163,172165,172218,172225,172231,172233,172236,172263,172276,172277,172316,172367,172373,172406,172419,172458,172483,172491,172492,172505,172511,172537,172542,172594,172596,172629,172637,172638,172644,172716,172727,172733,172749,172750,172789,172797,172808,172810,172894,172923,172925,172952,172956,172972,172995,173006,173077,173087,173112,173177,173178,1731
86,173199,173211,173222,173272,173275,173335,173336,173377,173436,173466,173507,173524,173579,173671,173768,173816,173965,173973,173979,173980,174018,174040,174049,174055,174063,174082,174084,174095,174099,174144,174146,174167,174202,174206,174232,174236,174239,174242,174258,174259,174263,174267,174271,174295,174313,174332,174350,174359,174369,174372,174374,174394,174405,174411,174443,174456,174470,174496,174506,174511,174541,174617,174652,174744,174768,174779,174813,174830,174840,174850,174859,174865,174890,174910,174997,175018,175025,175027,175056,175128,175154,175159,175167,175179,175235,175280,175290,175332,175339,175373,175424,175443,175455,175465,175470,175492,175503,175519,175537,175542,175628,175644,175684,175691,175730,175765,175773,175790,175831,175849,175857,175863,175880,175883,175887,175894,175908,175976,176035,176040,176046,176076,176124,176132,176198,176202,176211,176241,176288,176300,176340,176364,176386,176408,176435,176453,176466,176478,176490,176501,176534,176574,176613,176617,176674,176681,176804,176825,176827,176860,176889,176926,176930,177008,177045,177107,177113,177118,177122,177136,177207,177211,177238,177296,177363,177381,177409,177418,177426,177512,177559,177575,177608,177627,177721,177732,177780,177798,177810,177865,177870,177905,177945,177947,177953,178091,178118,178138,178186,178217,178252,178289,178304,178328,178420,178508,178511,178517,178525,178551,178603,178604,178681,178700,178713,178742,178750,178756,178781,178792,178836,178848,178867,178881,178910,178939,178955,179016,179033,179065,179066,179074,179160,179185,179212,179225,179250,179270,179281,179294,179338,179376,179418,179480,179492,179549,179594,179621,179661,179664,179669,179683,179702,179714,179758,179768,179769,179888,179982,180002,180010,180021,180027,180044,180074,180123,180125,180131,180136,180145,180169,180198,180271,180284,180383,180394,180438,180448,180478,180505,180511,180553,180575,180579,180587,180629,180725,180747,180795,180798,180840,180888,180941,180943,180944,18
0964,180991,181023,181037,181049,181120,181162,181164,181192,181218,181220,181230,181252,181304,181326,181339,181410,181445,181483,181520,181555,181562,181599,181630,181665] Flow( load_from_db.flow({ "where": "id in (%s)" % ", ".join(map(str, db_isolated_id)) }), add_gps_coordinates.flow({ "source_fields": get_parameters_from_pipeline_spec("pipeline-spec.yaml", "corona_data_collector", "corona_data_collector.add_gps_coordinates")["source_fields"], "get-coords-callback": lambda street, city: (random.uniform(29, 34), random.uniform(34, 36), int(street != city)) }), export_corona_bot_answers.flow({ "destination_output": "data/corona_data_collector/destination_output" }), ).process() counts = {"isolated": 0} def _test(row): if int(row["isolation"]) > 0: assert int(row["id"]) in db_isolated_id counts["isolated"] += 1 Flow( load('data/corona_data_collector/destination_output/corona_bot_answers_25_3_2020_with_coords.csv'), _test, ).process() assert 468 == counts["isolated"], str(counts) print("OK") if __name__ == "__main__": test_exposure_status_failure() test_expected_contact_with_patient() test_isolated_total_count() print("Great Success!")
71.943925
3,298
0.759548
1,039
7,698
5.40616
0.583253
0.029375
0.048068
0.020474
0.249065
0.209008
0.185508
0.185508
0.166281
0.166281
0
0.427014
0.106521
7,698
106
3,299
72.622642
0.389648
0
0
0.388889
0
0
0.168117
0.077563
0
0
0
0
0.088889
1
0.055556
false
0
0.077778
0
0.133333
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
5e0764b2930a571b47c8addd74be87438238db9d
3,371
py
Python
PyObjCTest/test_nsfilehandle.py
linuxfood/pyobjc-framework-Cocoa-test
3475890f165ab26a740f13d5afe4c62b4423a140
[ "MIT" ]
null
null
null
PyObjCTest/test_nsfilehandle.py
linuxfood/pyobjc-framework-Cocoa-test
3475890f165ab26a740f13d5afe4c62b4423a140
[ "MIT" ]
null
null
null
PyObjCTest/test_nsfilehandle.py
linuxfood/pyobjc-framework-Cocoa-test
3475890f165ab26a740f13d5afe4c62b4423a140
[ "MIT" ]
null
null
null
import Foundation from PyObjCTools.TestSupport import TestCase, min_os_level class TestNSFileHandle(TestCase): def testConstants(self): self.assertIsInstance(Foundation.NSFileHandleOperationException, str) self.assertIsInstance(Foundation.NSFileHandleReadCompletionNotification, str) self.assertIsInstance( Foundation.NSFileHandleReadToEndOfFileCompletionNotification, str ) self.assertIsInstance( Foundation.NSFileHandleConnectionAcceptedNotification, str ) self.assertIsInstance(Foundation.NSFileHandleDataAvailableNotification, str) self.assertIsInstance(Foundation.NSFileHandleNotificationDataItem, str) self.assertIsInstance(Foundation.NSFileHandleNotificationFileHandleItem, str) self.assertIsInstance(Foundation.NSFileHandleNotificationMonitorModes, str) def testMethods(self): f = Foundation.NSFileHandle.alloc().initWithFileDescriptor_closeOnDealloc_( 0, False ) self.assertArgIsBOOL(f.initWithFileDescriptor_closeOnDealloc_, 1) @min_os_level("10.6") def testMethods10_6(self): self.assertArgIsOut( Foundation.NSFileHandle.fileHandleForReadingFromURL_error_, 1 ) self.assertArgIsOut(Foundation.NSFileHandle.fileHandleForWritingToURL_error_, 1) self.assertArgIsOut(Foundation.NSFileHandle.fileHandleForUpdatingURL_error_, 1) @min_os_level("10.7") def testMethods10_7(self): self.assertArgIsBlock(Foundation.NSFileHandle.setReadabilityHandler_, 0, b"v@") self.assertArgIsBlock(Foundation.NSFileHandle.setWriteabilityHandler_, 0, b"v@") self.assertResultIsBlock(Foundation.NSFileHandle.readabilityHandler, b"v@") self.assertResultIsBlock(Foundation.NSFileHandle.writeabilityHandler, b"v@") @min_os_level("10.15") def testMethods10_15(self): self.assertArgIsOut( Foundation.NSFileHandle.readDataToEndOfFileAndReturnError_, 0 ) self.assertArgIsOut(Foundation.NSFileHandle.readDataUpToLength_error_, 1) self.assertResultIsBOOL(Foundation.NSFileHandle.writeData_error_) self.assertArgIsOut(Foundation.NSFileHandle.writeData_error_, 1) 
self.assertResultIsBOOL(Foundation.NSFileHandle.getOffset_error_) self.assertArgIsOut(Foundation.NSFileHandle.getOffset_error_, 0) self.assertArgIsOut(Foundation.NSFileHandle.getOffset_error_, 1) self.assertResultIsBOOL(Foundation.NSFileHandle.seekToEndReturningOffset_error_) self.assertArgIsOut(Foundation.NSFileHandle.seekToEndReturningOffset_error_, 0) self.assertArgIsOut(Foundation.NSFileHandle.seekToEndReturningOffset_error_, 1) self.assertResultIsBOOL(Foundation.NSFileHandle.seekToOffset_error_) self.assertArgIsOut(Foundation.NSFileHandle.seekToOffset_error_, 1) self.assertResultIsBOOL(Foundation.NSFileHandle.truncateAtOffset_error_) self.assertArgIsOut(Foundation.NSFileHandle.truncateAtOffset_error_, 1) self.assertResultIsBOOL(Foundation.NSFileHandle.synchronizeAndReturnError_) self.assertArgIsOut(Foundation.NSFileHandle.synchronizeAndReturnError_, 0) self.assertResultIsBOOL(Foundation.NSFileHandle.closeAndReturnError_) self.assertArgIsOut(Foundation.NSFileHandle.closeAndReturnError_, 0)
48.157143
88
0.770691
265
3,371
9.6
0.233962
0.224843
0.154088
0.220126
0.402516
0.284591
0
0
0
0
0
0.013347
0.155443
3,371
69
89
48.855072
0.880225
0
0
0.071429
0
0
0.00623
0
0
0
0
0
0.607143
1
0.089286
false
0
0.035714
0
0.142857
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
3
5e145878ae7e135adacc19d7438d7f356863d5bc
1,171
py
Python
infoblox_netmri/api/remote/models/subnet_network_explorer_summaries_section_grid_remote.py
IngmarVG-IB/infoblox-netmri
b0c725fd64aee1890d83917d911b89236207e564
[ "Apache-2.0" ]
null
null
null
infoblox_netmri/api/remote/models/subnet_network_explorer_summaries_section_grid_remote.py
IngmarVG-IB/infoblox-netmri
b0c725fd64aee1890d83917d911b89236207e564
[ "Apache-2.0" ]
null
null
null
infoblox_netmri/api/remote/models/subnet_network_explorer_summaries_section_grid_remote.py
IngmarVG-IB/infoblox-netmri
b0c725fd64aee1890d83917d911b89236207e564
[ "Apache-2.0" ]
null
null
null
from ..remote import RemoteModel from infoblox_netmri.utils.utils import check_api_availability class SubnetNetworkExplorerSummariesSectionGridRemote(RemoteModel): """ | ``SubnetID:`` none | ``attribute type:`` string | ``SubnetCIDR:`` none | ``attribute type:`` string | ``VirtualNetworkID:`` none | ``attribute type:`` string | ``Network:`` none | ``attribute type:`` string | ``VlanName:`` none | ``attribute type:`` string | ``VlanIndex:`` none | ``attribute type:`` string | ``VlanID:`` none | ``attribute type:`` string | ``RootBridgeAddress:`` none | ``attribute type:`` string | ``SubnetMemberCount:`` none | ``attribute type:`` string """ properties = ("SubnetID", "SubnetCIDR", "VirtualNetworkID", "Network", "VlanName", "VlanIndex", "VlanID", "RootBridgeAddress", "SubnetMemberCount", )
19.847458
67
0.478224
71
1,171
7.84507
0.366197
0.210054
0.274686
0.371634
0
0
0
0
0
0
0
0
0.392827
1,171
59
68
19.847458
0.783404
0.427839
0
0
0
0
0.181818
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.307692
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
5e327699c9961aa4b34d7ffaa20719c0711985bb
3,032
py
Python
simsalabim/dsp/snr.py
simklein/simsalabim
feade4cf0c95d89e9d845feda2b5c3693eceb5f0
[ "MIT" ]
null
null
null
simsalabim/dsp/snr.py
simklein/simsalabim
feade4cf0c95d89e9d845feda2b5c3693eceb5f0
[ "MIT" ]
null
null
null
simsalabim/dsp/snr.py
simklein/simsalabim
feade4cf0c95d89e9d845feda2b5c3693eceb5f0
[ "MIT" ]
null
null
null
import numpy as np def wada_snr(wav): # Direct blind estimation of the SNR of a speech signal. # # Paper on WADA SNR: # http://www.cs.cmu.edu/~robust/Papers/KimSternIS08.pdf # # This function was adapted from this matlab code: # https://labrosa.ee.columbia.edu/projects/snreval/#9 # init eps = 1e-10 # next 2 lines define a fancy curve derived from a gamma distribution -- see paper db_vals = np.arange(-20, 101) g_vals = np.array([0.40974774, 0.40986926, 0.40998566, 0.40969089, 0.40986186, 0.40999006, 0.41027138, 0.41052627, 0.41101024, 0.41143264, 0.41231718, 0.41337272, 0.41526426, 0.4178192 , 0.42077252, 0.42452799, 0.42918886, 0.43510373, 0.44234195, 0.45161485, 0.46221153, 0.47491647, 0.48883809, 0.50509236, 0.52353709, 0.54372088, 0.56532427, 0.58847532, 0.61346212, 0.63954496, 0.66750818, 0.69583724, 0.72454762, 0.75414799, 0.78323148, 0.81240985, 0.84219775, 0.87166406, 0.90030504, 0.92880418, 0.95655449, 0.9835349 , 1.01047155, 1.0362095 , 1.06136425, 1.08579312, 1.1094819 , 1.13277995, 1.15472826, 1.17627308, 1.19703503, 1.21671694, 1.23535898, 1.25364313, 1.27103891, 1.28718029, 1.30302865, 1.31839527, 1.33294817, 1.34700935, 1.3605727 , 1.37345513, 1.38577122, 1.39733504, 1.40856397, 1.41959619, 1.42983624, 1.43958467, 1.44902176, 1.45804831, 1.46669568, 1.47486938, 1.48269965, 1.49034339, 1.49748214, 1.50435106, 1.51076426, 1.51698915, 1.5229097 , 1.528578 , 1.53389835, 1.5391211 , 1.5439065 , 1.54858517, 1.55310776, 1.55744391, 1.56164927, 1.56566348, 1.56938671, 1.57307767, 1.57654764, 1.57980083, 1.58304129, 1.58602496, 1.58880681, 1.59162477, 1.5941969 , 1.59693155, 1.599446 , 1.60185011, 1.60408668, 1.60627134, 1.60826199, 1.61004547, 1.61192472, 1.61369656, 1.61534074, 1.61688905, 1.61838916, 1.61985374, 1.62135878, 1.62268119, 1.62390423, 1.62513143, 1.62632463, 1.6274027 , 1.62842767, 1.62945532, 1.6303307 , 1.63128026, 1.63204102]) # peak normalize, get magnitude, clip lower bound wav = np.array(wav) wav = wav / abs(wav).max() abs_wav = abs(wav) 
abs_wav[abs_wav < eps] = eps # calcuate statistics # E[|z|] v1 = max(eps, abs_wav.mean()) # E[log|z|] v2 = np.log(abs_wav).mean() # log(E[|z|]) - E[log(|z|)] v3 = np.log(v1) - v2 # table interpolation wav_snr_idx = None if any(g_vals < v3): wav_snr_idx = np.where(g_vals < v3)[0].max() # handle edge cases or interpolate if wav_snr_idx is None: wav_snr = db_vals[0] elif wav_snr_idx == len(db_vals) - 1: wav_snr = db_vals[-1] else: wav_snr = db_vals[wav_snr_idx] + \ (v3-g_vals[wav_snr_idx]) / (g_vals[wav_snr_idx+1] - \ g_vals[wav_snr_idx]) * (db_vals[wav_snr_idx+1] - db_vals[wav_snr_idx]) # Calculate SNR dEng = sum(wav**2) dFactor = 10**(wav_snr / 10) dNoiseEng = dEng / (1 + dFactor) # Noise energy dSigEng = dEng * dFactor / (1 + dFactor) # Signal energy snr = 10 * np.log10(dSigEng / dNoiseEng) return snr
57.207547
1,475
0.666887
491
3,032
4.03055
0.466395
0.042446
0.045478
0.039414
0.0571
0.012127
0
0
0
0
0
0.449434
0.184697
3,032
53
1,476
57.207547
0.351133
0.172164
0
0
0
0
0
0
0
0
0
0
0
1
0.034483
false
0
0.034483
0
0.103448
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
eaaa69efcceb1b8352b1342b6299549cc66b1c6a
704
py
Python
pytablewriter/writer/text/__init__.py
shawalli/pytablewriter
2e3f84cb3c5676aa67711aa3e908b6e420c934b7
[ "MIT" ]
null
null
null
pytablewriter/writer/text/__init__.py
shawalli/pytablewriter
2e3f84cb3c5676aa67711aa3e908b6e420c934b7
[ "MIT" ]
null
null
null
pytablewriter/writer/text/__init__.py
shawalli/pytablewriter
2e3f84cb3c5676aa67711aa3e908b6e420c934b7
[ "MIT" ]
null
null
null
from ._borderless import BorderlessTableWriter from ._css import CssTableWriter from ._csv import CsvTableWriter from ._html import HtmlTableWriter from ._json import JsonTableWriter from ._jsonlines import JsonLinesTableWriter from ._latex import LatexMatrixWriter, LatexTableWriter from ._ltsv import LtsvTableWriter from ._markdown import MarkdownTableWriter from ._mediawiki import MediaWikiTableWriter from ._rst import RstCsvTableWriter, RstGridTableWriter, RstSimpleTableWriter from ._spacealigned import SpaceAlignedTableWriter from ._toml import TomlTableWriter from ._tsv import TsvTableWriter from ._unicode import BoldUnicodeTableWriter, UnicodeTableWriter from ._yaml import YamlTableWriter
41.411765
77
0.875
68
704
8.823529
0.558824
0
0
0
0
0
0
0
0
0
0
0
0.096591
704
16
78
44
0.943396
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
eac5df3ff69b83b9f50665bb87424eda3f56cbef
1,246
py
Python
boilerplate-polygon-area-calculator/shape_calculator.py
pablohema/FreeCodeCamp-Scientific-Computing--PythonCertification
d7fb13bed510bc191d84c20e414e545f5eb0c4f3
[ "MIT" ]
null
null
null
boilerplate-polygon-area-calculator/shape_calculator.py
pablohema/FreeCodeCamp-Scientific-Computing--PythonCertification
d7fb13bed510bc191d84c20e414e545f5eb0c4f3
[ "MIT" ]
null
null
null
boilerplate-polygon-area-calculator/shape_calculator.py
pablohema/FreeCodeCamp-Scientific-Computing--PythonCertification
d7fb13bed510bc191d84c20e414e545f5eb0c4f3
[ "MIT" ]
null
null
null
class Rectangle: def __init__(self, width, height): self.width = width self.height = height def __str__(self): return "Rectangle(width=" + str(self.width) + \ ", height=" + str(self.height) + ")" def set_width(self, width): self.width = width def set_height(self, height): self.height = height def get_area(self): return self.width * self.height def get_perimeter(self): return 2 * (self.width + self.height) def get_diagonal(self): return (self.width ** 2 + self.height ** 2) ** .5 def get_picture(self): if self.width > 50 or self.height > 50: return "Too big for picture." rectangle = ("*" * self.width + "\n") * self.height return rectangle def get_amount_inside(self, shape): max_width = self.width // shape.width max_height = self.height // shape.height return max_width * max_height class Square(Rectangle): def __init__(self, side): self.width = side self.height = side def set_side(self, side): self.width = side self.height = side def __str__(self): return "Square(side=" + str(self.width) + ")"
25.428571
59
0.577849
154
1,246
4.487013
0.194805
0.182344
0.065123
0.057887
0.182344
0.182344
0.109986
0.109986
0.109986
0
0
0.009185
0.300963
1,246
48
60
25.958333
0.784156
0
0
0.285714
0
0
0.049759
0
0
0
0
0
0
1
0.342857
false
0
0
0.142857
0.628571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
eac82e41bcb43ffe9b9470edc7c59e27d7472a05
186
py
Python
setup.py
juliscrazy/Otto-Bot
6d5af7ddb7ffb318f0f78d3bf2cf27631a305b4f
[ "MIT" ]
null
null
null
setup.py
juliscrazy/Otto-Bot
6d5af7ddb7ffb318f0f78d3bf2cf27631a305b4f
[ "MIT" ]
null
null
null
setup.py
juliscrazy/Otto-Bot
6d5af7ddb7ffb318f0f78d3bf2cf27631a305b4f
[ "MIT" ]
null
null
null
from distutils.core import setup setup(name='Otto-Bot', version='1.0', description='Python Discord Bot', author='jul', author_email='julislazy@gmail.com', )
23.25
41
0.634409
23
186
5.086957
0.869565
0
0
0
0
0
0
0
0
0
0
0.013793
0.22043
186
8
42
23.25
0.793103
0
0
0
0
0
0.272727
0
0
0
0
0
0
1
0
true
0
0.142857
0
0.142857
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3