hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
1347c0f23b34bc7e1521e069decb006ca75dc9ea
659
py
Python
src/api_rutas/crud.py
PythonistaMX/py261
614de0c8c78f26f10d485f1f46fc2c673fc79b6f
[ "MIT" ]
null
null
null
src/api_rutas/crud.py
PythonistaMX/py261
614de0c8c78f26f10d485f1f46fc2c673fc79b6f
[ "MIT" ]
null
null
null
src/api_rutas/crud.py
PythonistaMX/py261
614de0c8c78f26f10d485f1f46fc2c673fc79b6f
[ "MIT" ]
null
null
null
from sqlalchemy.orm import Session import models import schemas def consulta_alumnos(db: Session, skip: int = 0, limit: int = 100): return db.query(models.Alumno).offset(skip).limit(limit).all() def consulta_alumno(db: Session, cuenta: int): return db.query(models.Alumno).filter(models.Alumno.cuenta == cuenta).first() def alta_alumno(db: Session, cuenta: int, candidato: schemas.SchemaAlumnoIn): alumno = models.Alumno(cuenta=cuenta, **dict(candidato)) db.add(alumno) db.commit() db.refresh(alumno) return alumno def baja_alumno(db: Session, alumno: models.Alumno): db.delete(alumno) db.commit() return True
24.407407
81
0.713202
90
659
5.177778
0.377778
0.103004
0.096567
0.081545
0.2103
0
0
0
0
0
0
0.007207
0.157815
659
26
82
25.346154
0.832432
0
0
0.117647
0
0
0
0
0
0
0
0
0
1
0.235294
false
0
0.176471
0.117647
0.647059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
13522402ec4a901fa6c69073eb3fe5fb826a55bb
4,413
py
Python
lib/desconst.py
elp2/des
7ada99924c3d02ce90431e2ef01f517f6750dddc
[ "MIT" ]
20
2015-07-20T12:24:38.000Z
2022-02-22T06:44:59.000Z
lib/desconst.py
elp2/des
7ada99924c3d02ce90431e2ef01f517f6750dddc
[ "MIT" ]
1
2019-06-30T07:45:12.000Z
2020-01-15T23:15:23.000Z
lib/desconst.py
elp2/des
7ada99924c3d02ce90431e2ef01f517f6750dddc
[ "MIT" ]
12
2016-12-06T13:31:21.000Z
2021-03-27T21:45:11.000Z
# Applied once at the beginning of the algorithm. INITIAL_PERMUTATION = [ 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7, ] # Inverse of INITIAL_PERMUTATION. Applied once at the end of the algorithm. FINAL_PERMUTATION = [ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25, ] # Applied to the half-block at the beginning of the Fiestel function. EXPANSION = [ 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1, ] # Applied at the end of the Feistel function. PERMUTATION = [ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25, ] # Converts from full 64-bit key to two key halves: left and right. Only 48 # bits from the original key are used. PERMUTED_CHOICE_1_LEFT = [ 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, ] PERMUTED_CHOICE_1_RIGHT = [ 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4, ] # Converts the shifted right and left key halves (concatenated together) into # the subkey for the round (input into Feistel function). PERMUTED_CHOICE_2 = [ 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32, ] # S-Boxes # SBOX[outer 2 bits][inner 4 bits] # Each value represents 4 bits that the 6-bit input is mapped to. 
SBOX_1 = [ [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], ] SBOX_2 = [ [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], ] SBOX_3 = [ [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], ] SBOX_4 = [ [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], ] SBOX_5 = [ [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], ] SBOX_6 = [ [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], ] SBOX_7 = [ [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], ] SBOX_8 = [ [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], ] SBOXES = [SBOX_1, SBOX_2, SBOX_3, SBOX_4, SBOX_5, SBOX_6, SBOX_7, SBOX_8] # How much the left and right 
key halves are shifted every round. KEY_SHIFT_AMOUNTS = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
34.748031
77
0.461591
1,009
4,413
1.990089
0.124876
0.00996
0.011952
0.011952
0.127988
0.096116
0.096116
0.096116
0
0
0
0.433234
0.312712
4,413
126
78
35.02381
0.228816
0.146386
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
135ffde01d0073c17127cc2060c35e5d54c9c5db
1,183
py
Python
djeym/decorators.py
genkosta/django-yandex-maps
f76f659d0e32bee9fbe95826e9b22e75c96b5d85
[ "MIT" ]
11
2019-08-29T07:38:33.000Z
2022-03-25T05:29:09.000Z
djeym/decorators.py
genkosta/django-yandex-maps
f76f659d0e32bee9fbe95826e9b22e75c96b5d85
[ "MIT" ]
24
2019-08-29T07:52:39.000Z
2022-02-27T09:46:58.000Z
djeym/decorators.py
genkosta/django-yandex-maps
f76f659d0e32bee9fbe95826e9b22e75c96b5d85
[ "MIT" ]
4
2019-08-29T12:58:34.000Z
2022-03-25T04:26:12.000Z
# -*- coding: utf-8 -*- from functools import wraps from django.http import HttpResponseForbidden, JsonResponse def ajax_login_required(view_func): """ Ajax. Class-based views - dispatch. Verify that the current user is authenticated. """ @wraps(view_func) def wrapper(self, request, *args, **kwargs): if not request.is_ajax(): return HttpResponseForbidden() if request.user.is_authenticated: return view_func(self, request, *args, **kwargs) msg = {'detail': 'HTTP 403 Forbidden'} return JsonResponse(msg, status=403) return wrapper def ajax_login_required_and_staff(view_func): """ Ajax. Class-based views - dispatch. Verify that the current user is authenticated. """ @wraps(view_func) def wrapper(self, request, *args, **kwargs): user = request.user if not request.is_ajax(): return HttpResponseForbidden() if user.is_authenticated and user.is_staff: return view_func(self, request, *args, **kwargs) msg = {'detail': 'HTTP 403 Forbidden'} return JsonResponse(msg, status=403) return wrapper
29.575
60
0.644125
137
1,183
5.437956
0.313869
0.06443
0.102013
0.112752
0.719463
0.719463
0.719463
0.719463
0.593289
0.593289
0
0.014689
0.251902
1,183
39
61
30.333333
0.827119
0.158918
0
0.695652
0
0
0.050633
0
0
0
0
0
0
1
0.173913
false
0
0.086957
0
0.608696
0
0
0
0
null
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
13732c17976f16d7e5a0dcd6636d1668599d777f
659
py
Python
djangoproject/pages/models.py
moonmirXD/E-barangay
211df250044e30bfb0e56951438f30a0b97e23a8
[ "bzip2-1.0.6" ]
null
null
null
djangoproject/pages/models.py
moonmirXD/E-barangay
211df250044e30bfb0e56951438f30a0b97e23a8
[ "bzip2-1.0.6" ]
null
null
null
djangoproject/pages/models.py
moonmirXD/E-barangay
211df250044e30bfb0e56951438f30a0b97e23a8
[ "bzip2-1.0.6" ]
null
null
null
from django.conf import settings from django.db import models from django.utils import timezone from django.contrib.auth.models import User from django.urls import reverse # models, views, url should check always!! User = settings.AUTH_USER_MODEL # <-----------------------------------------------> class Post(models.Model): title = models.CharField(max_length=100) content = models.TextField(max_length=255) date_posted = models.DateTimeField(default = timezone.now) author = models.ForeignKey(User, on_delete= models.CASCADE) def __str__(self): return self.title def get_absolute_url(self): return reverse('post-detail', kwargs={'pk':self.pk})
31.380952
60
0.722307
88
659
5.272727
0.556818
0.107759
0
0
0
0
0
0
0
0
0
0.010221
0.109256
659
20
61
32.95
0.780239
0.136571
0
0
0
0
0.022968
0
0
0
0
0
0
1
0.133333
false
0
0.333333
0.133333
0.933333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
3
139e14bd9686fe05cf4a15eb93b54ce658be42a6
196
py
Python
screws/miscellaneous/initialize_3d_list.py
mathischeap/mifem
3242e253fb01ca205a76568eaac7bbdb99e3f059
[ "MIT" ]
1
2020-10-14T12:48:35.000Z
2020-10-14T12:48:35.000Z
screws/miscellaneous/initialize_3d_list.py
mathischeap/mifem
3242e253fb01ca205a76568eaac7bbdb99e3f059
[ "MIT" ]
null
null
null
screws/miscellaneous/initialize_3d_list.py
mathischeap/mifem
3242e253fb01ca205a76568eaac7bbdb99e3f059
[ "MIT" ]
null
null
null
def initialize_3d_list(a, b, c): """ :param a: :param b: :param c: :return: """ lst = [[[None for _ in range(c)] for _ in range(b)] for _ in range(a)] return lst
16.333333
74
0.510204
30
196
3.166667
0.466667
0.157895
0.315789
0
0
0
0
0
0
0
0
0.007576
0.326531
196
12
75
16.333333
0.712121
0.193878
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
13cb939777e25880ca88bd80fb1942478962d8b5
112
py
Python
notebooks/_solutions/case2_observations32.py
jorisvandenbossche/FLAMES-python-data-wrangling
24a6dbe8637264f010c47affd3a8dcbe2b493e00
[ "BSD-3-Clause" ]
null
null
null
notebooks/_solutions/case2_observations32.py
jorisvandenbossche/FLAMES-python-data-wrangling
24a6dbe8637264f010c47affd3a8dcbe2b493e00
[ "BSD-3-Clause" ]
10
2020-11-09T09:21:01.000Z
2021-10-18T06:03:19.000Z
notebooks/_solutions/case2_observations32.py
jorisvandenbossche/FLAMES-python-data-wrangling
24a6dbe8637264f010c47affd3a8dcbe2b493e00
[ "BSD-3-Clause" ]
null
null
null
fig, ax = plt.subplots(figsize=(6, 6)) n_species_per_plot.plot(kind="barh", ax=ax) ax.set_ylabel("Plot number");
37.333333
43
0.723214
21
112
3.666667
0.714286
0.103896
0
0
0
0
0
0
0
0
0
0.019231
0.071429
112
3
44
37.333333
0.721154
0
0
0
0
0
0.132743
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
13cccb0e380062e6138405d319138959ed6e7538
433
py
Python
cortex/tests/test_utils.py
VisionandCognition/pycortex
88f477eb7e3b37123ed4e79244c18a322d5336e3
[ "BSD-2-Clause" ]
null
null
null
cortex/tests/test_utils.py
VisionandCognition/pycortex
88f477eb7e3b37123ed4e79244c18a322d5336e3
[ "BSD-2-Clause" ]
null
null
null
cortex/tests/test_utils.py
VisionandCognition/pycortex
88f477eb7e3b37123ed4e79244c18a322d5336e3
[ "BSD-2-Clause" ]
null
null
null
import cortex def test_download_subject(): # Test that newly downloaded subjects are added to the current database. # remove fsaverage from the list of available subjects if present. if "fsaverage" in cortex.db.subjects: cortex.db._subjects.pop("fsaverage") assert "fsaverage" not in cortex.db.subjects cortex.utils.download_subject(subject_id='fsaverage') assert "fsaverage" in cortex.db.subjects
33.307692
76
0.743649
58
433
5.465517
0.534483
0.100946
0.201893
0.170347
0.264984
0
0
0
0
0
0
0
0.177829
433
12
77
36.083333
0.890449
0.311778
0
0
0
0
0.152542
0
0
0
0
0
0.285714
1
0.142857
true
0
0.142857
0
0.285714
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
13d059cab1b2982959731fd6b94d8437b270a25b
105,801
py
Python
python/helpers/pydev/pydev_tests_python/test_debugger.py
Sajaki/intellij-community
6748af2c40567839d11fd652ec77ba263c074aad
[ "Apache-2.0" ]
1
2020-06-25T02:17:26.000Z
2020-06-25T02:17:26.000Z
python/helpers/pydev/pydev_tests_python/test_debugger.py
Sajaki/intellij-community
6748af2c40567839d11fd652ec77ba263c074aad
[ "Apache-2.0" ]
2
2022-02-19T09:45:05.000Z
2022-02-27T20:32:55.000Z
python/helpers/pydev/pydev_tests_python/test_debugger.py
bradleesand/intellij-community
750ff9c10333c9c1278c00dbe8d88c877b1b9749
[ "Apache-2.0" ]
1
2020-10-15T05:56:42.000Z
2020-10-15T05:56:42.000Z
# coding: utf-8 ''' The idea is that we record the commands sent to the debugger and reproduce them from this script (so, this works as the client, which spawns the debugger as a separate process and communicates to it as if it was run from the outside) Note that it's a python script but it'll spawn a process to run as jython, ironpython and as python. ''' import time import pytest from pydev_tests_python import debugger_unittest from pydev_tests_python.debugger_unittest import (CMD_SET_PROPERTY_TRACE, REASON_CAUGHT_EXCEPTION, REASON_UNCAUGHT_EXCEPTION, REASON_STOP_ON_BREAKPOINT, REASON_THREAD_SUSPEND, overrides, CMD_THREAD_CREATE, CMD_GET_THREAD_STACK, REASON_STEP_INTO_MY_CODE, CMD_GET_EXCEPTION_DETAILS, IS_IRONPYTHON, IS_JYTHON, IS_CPYTHON, IS_APPVEYOR, wait_for_condition, CMD_GET_FRAME, CMD_GET_BREAKPOINT_EXCEPTION, CMD_THREAD_SUSPEND, CMD_STEP_OVER, REASON_STEP_OVER, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, CMD_THREAD_RESUME_SINGLE_NOTIFICATION, IS_PY37_OR_GREATER, IS_PY38_OR_GREATER) from _pydevd_bundle.pydevd_constants import IS_WINDOWS try: from urllib import unquote except ImportError: from urllib.parse import unquote from pydev_tests_python.debug_constants import * pytest_plugins = [ str('pydev_tests_python.debugger_fixtures'), ] try: xrange except: xrange = range TEST_DJANGO = False TEST_FLASK = False try: import django version = [int(x) for x in django.get_version().split('.')][:2] TEST_DJANGO = version == [1, 7] or version == [2, 1] except: pass try: import flask TEST_FLASK = True except: pass if IS_PY2: builtin_qualifier = "__builtin__" else: builtin_qualifier = "builtins" @pytest.mark.skipif(IS_IRONPYTHON, reason='Test needs gc.get_referrers to really check anything.') def test_case_referrers(case_setup): with case_setup.test_file('_debugger_case1.py') as writer: writer.log.append('writing add breakpoint') writer.write_add_breakpoint(6, 'set_up') writer.log.append('making initial run') writer.write_make_initial_run() writer.log.append('waiting for 
breakpoint hit') hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.log.append('get frame') writer.write_get_frame(thread_id, frame_id) writer.log.append('step over') writer.write_step_over(thread_id) writer.log.append('get frame') writer.write_get_frame(thread_id, frame_id) writer.log.append('run thread') writer.write_run_thread(thread_id) writer.log.append('asserting') try: assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True def test_case_2(case_setup): with case_setup.test_file('_debugger_case2.py') as writer: writer.write_add_breakpoint(3, 'Call4') # seq = 3 writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) # Note: write get frame but not waiting for it to be gotten. writer.write_add_breakpoint(14, 'Call2') writer.write_run_thread(thread_id) hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) # Note: write get frame but not waiting for it to be gotten. writer.write_run_thread(thread_id) writer.log.append('Checking sequence. Found: %s' % (writer._sequence)) assert 15 == writer._sequence, 'Expected 15. 
Had: %s' % writer._sequence writer.log.append('Marking finished ok.') writer.finished_ok = True @pytest.mark.parametrize( 'skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception', ( [['NameError'], []], [['NameError'], ['NameError']], [[], []], # Empty means it'll suspend/print in any exception [[], ['NameError']], [['ValueError'], ['Exception']], [['Exception'], ['ValueError']], # ValueError will also suspend/print since we're dealing with a NameError ) ) def test_case_breakpoint_condition_exc(case_setup, skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception): msgs_in_stderr = ( 'Error while evaluating expression: i > 5', "NameError: name 'i' is not defined", 'Traceback (most recent call last):', 'File "<string>", line 1, in <module>', ) def _ignore_stderr_line(line): if original_ignore_stderr_line(line): return True for msg in msgs_in_stderr: if msg in line: return True return False def additional_output_checks(stdout, stderr): original_additional_output_checks(stdout, stderr) if skip_print_breakpoint_exception in ([], ['ValueError']): for msg in msgs_in_stderr: assert msg in stderr else: for msg in msgs_in_stderr: assert msg not in stderr with case_setup.test_file('_debugger_case_breakpoint_condition_exc.py') as writer: original_ignore_stderr_line = writer._ignore_stderr_line writer._ignore_stderr_line = _ignore_stderr_line original_additional_output_checks = writer.additional_output_checks writer.additional_output_checks = additional_output_checks writer.write_suspend_on_breakpoint_exception(skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception) breakpoint_id = writer.write_add_breakpoint( writer.get_line_index_with_content('break here'), 'Call', condition='i > 5') writer.write_make_initial_run() if skip_suspend_on_breakpoint_exception in ([], ['ValueError']): writer.wait_for_message(CMD_GET_BREAKPOINT_EXCEPTION) hit = writer.wait_for_breakpoint_hit() writer.write_run_thread(hit.thread_id) if IS_JYTHON: # Jython 
will break twice. if skip_suspend_on_breakpoint_exception in ([], ['ValueError']): writer.wait_for_message(CMD_GET_BREAKPOINT_EXCEPTION) hit = writer.wait_for_breakpoint_hit() writer.write_run_thread(hit.thread_id) hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) msg = writer.wait_for_message(CMD_GET_FRAME) name_to_value = {} for var in msg.var: name_to_value[var['name']] = var['value'] assert name_to_value == {'i': 'int: 6', 'last_i': 'int: 6'} writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_IRONPYTHON, reason='This test fails once in a while due to timing issues on IronPython, so, skipping it.') def test_case_3(case_setup): with case_setup.test_file('_debugger_case3.py') as writer: writer.write_make_initial_run() time.sleep(.5) breakpoint_id = writer.write_add_breakpoint(4, '') writer.write_add_breakpoint(5, 'FuncNotAvailable') # Check that it doesn't get hit in the global when a function is available hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) writer.write_run_thread(thread_id) hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(thread_id) assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence writer.finished_ok = True def test_case_suspend_thread(case_setup): with case_setup.test_file('_debugger_case4.py') as writer: writer.write_make_initial_run() thread_id = writer.wait_for_new_thread() writer.write_suspend_thread(thread_id) while True: hit = writer.wait_for_breakpoint_hit((REASON_THREAD_SUSPEND, REASON_STOP_ON_BREAKPOINT)) if hit.name == 'sleep': break # Ok, broke on 'sleep'. 
else: # i.e.: if it doesn't hit on 'sleep', release and pause again. writer.write_run_thread(thread_id) time.sleep(.1) writer.write_suspend_thread(thread_id) assert hit.thread_id == thread_id writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'exit_while_loop()') writer.wait_for_evaluation([ [ '<var name="exit_while_loop()" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier), '<var name="exit_while_loop()" type="str" value="str: ok"', # jython ] ]) writer.write_run_thread(thread_id) writer.finished_ok = True # Jython has a weird behavior: it seems it has fine-grained locking so that when # we're inside the tracing other threads don't run (so, we can have only one # thread paused in the debugger). @pytest.mark.skipif(IS_JYTHON, reason='Jython can only have one thread stopped at each time.') def test_case_suspend_all_thread(case_setup): with case_setup.test_file('_debugger_case_suspend_all.py') as writer: writer.write_make_initial_run() main_thread_id = writer.wait_for_new_thread() # Main thread thread_id1 = writer.wait_for_new_thread() # Thread 1 thread_id2 = writer.wait_for_new_thread() # Thread 2 # Ok, all threads created, let's wait for the main thread to get to the join. writer.wait_for_thread_join(main_thread_id) writer.write_suspend_thread('*') # Wait for 2 threads to be suspended (the main thread is already in a join, so, it can't actually # break out of it while others don't proceed). 
hit0 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND) hit1 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND) writer.write_evaluate_expression('%s\t%s\t%s' % (hit0.thread_id, hit0.frame_id, 'LOCAL'), 'exit_while_loop(1)') writer.wait_for_evaluation([ [ '<var name="exit_while_loop(1)" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier) ] ]) writer.write_evaluate_expression('%s\t%s\t%s' % (hit1.thread_id, hit1.frame_id, 'LOCAL'), 'exit_while_loop(2)') writer.wait_for_evaluation('<var name="exit_while_loop(2)" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier)) writer.write_run_thread('*') writer.finished_ok = True def test_case_5(case_setup): with case_setup.test_file('_debugger_case56.py') as writer: breakpoint_id = writer.write_add_breakpoint(2, 'Call2') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) writer.write_remove_breakpoint(breakpoint_id) writer.write_step_return(thread_id) hit = writer.wait_for_breakpoint_hit('109') thread_id = hit.thread_id frame_id = hit.frame_id line = hit.line assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line writer.write_step_in(thread_id) hit = writer.wait_for_breakpoint_hit('107') thread_id = hit.thread_id frame_id = hit.frame_id line = hit.line # goes to line 4 in jython (function declaration line) assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line writer.write_run_thread(thread_id) assert 15 == writer._sequence, 'Expected 15. 
Had: %s' % writer._sequence writer.finished_ok = True def test_case_6(case_setup): with case_setup.test_file('_debugger_case56.py') as writer: writer.write_add_breakpoint(2, 'Call2') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_get_frame(thread_id, frame_id) writer.write_step_return(thread_id) hit = writer.wait_for_breakpoint_hit('109') thread_id = hit.thread_id frame_id = hit.frame_id line = hit.line assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line writer.write_step_in(thread_id) hit = writer.wait_for_breakpoint_hit('107') thread_id = hit.thread_id frame_id = hit.frame_id line = hit.line # goes to line 4 in jython (function declaration line) assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line writer.write_run_thread(thread_id) assert 13 == writer._sequence, 'Expected 15. Had: %s' % writer._sequence writer.finished_ok = True @pytest.mark.skipif(IS_IRONPYTHON, reason='This test is flaky on Jython, so, skipping it.') def test_case_7(case_setup): # This test checks that we start without variables and at each step a new var is created, but on ironpython, # the variables exist all at once (with None values), so, we can't test it properly. 
with case_setup.test_file('_debugger_case7.py') as writer: writer.write_add_breakpoint(2, 'Call') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111') writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_vars('<xml></xml>') # no vars at this point writer.write_step_over(hit.thread_id) writer.wait_for_breakpoint_hit('108') writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_vars([ [ '<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A</xml>'.format(builtin_qualifier), '<var name="variable_for_test_1" type="int" value="int', # jython ] ]) writer.write_step_over(hit.thread_id) writer.wait_for_breakpoint_hit('108') writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_vars([ [ '<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" qualifier="{0}" value="int%253A 20" />%0A</xml>'.format(builtin_qualifier), '<var name="variable_for_test_1" type="int" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" value="int%253A 20" />%0A', # jython ] ]) writer.write_run_thread(hit.thread_id) assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence writer.finished_ok = True def test_case_8(case_setup): with case_setup.test_file('_debugger_case89.py') as writer: writer.write_add_breakpoint(10, 'Method3') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111') writer.write_step_return(hit.thread_id) hit = writer.wait_for_breakpoint_hit('109', line=15) writer.write_run_thread(hit.thread_id) assert 9 == writer._sequence, 'Expected 9. 
Had: %s' % writer._sequence writer.finished_ok = True def test_case_9(case_setup): with case_setup.test_file('_debugger_case89.py') as writer: writer.write_add_breakpoint(10, 'Method3') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111') # Note: no active exception (should not give an error and should return no # exception details as there's no exception). writer.write_get_current_exception(hit.thread_id) msg = writer.wait_for_message(CMD_GET_EXCEPTION_DETAILS) assert msg.thread['id'] == hit.thread_id assert not hasattr(msg.thread, 'frames') # No frames should be found. writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit('108', line=11) writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit('108', line=12) writer.write_run_thread(hit.thread_id) assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence writer.finished_ok = True def test_case_10(case_setup): with case_setup.test_file('_debugger_case_simple_calls.py') as writer: writer.write_add_breakpoint(2, 'None') # None or Method should make hit. writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111') writer.write_step_return(hit.thread_id) hit = writer.wait_for_breakpoint_hit('109', line=11) writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit('108', line=12) writer.write_run_thread(hit.thread_id) assert 11 == writer._sequence, 'Expected 11. 
Had: %s' % writer._sequence writer.finished_ok = True def test_case_11(case_setup): with case_setup.test_file('_debugger_case_simple_calls.py') as writer: writer.write_add_breakpoint(2, 'Method1') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111', line=2) writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit('108', line=3) writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit('108', line=11) writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit('108', line=12) writer.write_run_thread(hit.thread_id) assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence writer.finished_ok = True def test_case_12(case_setup): with case_setup.test_file('_debugger_case_simple_calls.py') as writer: writer.write_add_breakpoint(2, '') # Should not be hit: setting empty function (not None) should only hit global. writer.write_add_breakpoint(6, 'Method1a') writer.write_add_breakpoint(11, 'Method2') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111', line=11) writer.write_step_return(hit.thread_id) hit = writer.wait_for_breakpoint_hit('111', line=6) # not a return (it stopped in the other breakpoint) writer.write_run_thread(hit.thread_id) assert 13 == writer._sequence, 'Expected 13. 
Had: %s' % writer._sequence writer.finished_ok = True @pytest.mark.skipif(IS_IRONPYTHON, reason='Failing on IronPython (needs to be investigated).') def test_case_13(case_setup): with case_setup.test_file('_debugger_case13.py') as writer: def _ignore_stderr_line(line): if original_ignore_stderr_line(line): return True if IS_JYTHON: for expected in ( "RuntimeWarning: Parent module '_pydevd_bundle' not found while handling absolute import", "import __builtin__"): if expected in line: return True return False original_ignore_stderr_line = writer._ignore_stderr_line writer._ignore_stderr_line = _ignore_stderr_line writer.write_add_breakpoint(35, 'main') writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;false;false;true")) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111') writer.write_get_frame(hit.thread_id, hit.frame_id) writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107', line=25) # Should go inside setter method writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107') writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107', line=21) # Should go inside getter method writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107') # Disable property tracing writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;true;true;true")) writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107', line=39) # Should Skip step into properties setter # Enable property tracing writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;false;false;true")) writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107', line=8) # Should go inside getter method writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_14(case_setup): # Interactive Debug Console with case_setup.test_file('_debugger_case14.py') as writer: 
writer.write_add_breakpoint(22, 'main') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('111') assert hit.thread_id, '%s not valid.' % hit.thread_id assert hit.frame_id, '%s not valid.' % hit.frame_id # Access some variable writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (hit.thread_id, hit.frame_id)) writer.wait_for_var(['<more>False</more>', '%27Black%27']) assert 7 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence # Change some variable writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color='Red'" % (hit.thread_id, hit.frame_id)) writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (hit.thread_id, hit.frame_id)) writer.wait_for_var(['<more>False</more>', '%27Red%27']) assert 11 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence # Iterate some loop writer.write_debug_console_expression("%s\t%s\tEVALUATE\tfor i in range(3):" % (hit.thread_id, hit.frame_id)) writer.wait_for_var(['<xml><more>True</more></xml>']) writer.write_debug_console_expression("%s\t%s\tEVALUATE\t print(i)" % (hit.thread_id, hit.frame_id)) writer.wait_for_var(['<xml><more>True</more></xml>']) writer.write_debug_console_expression("%s\t%s\tEVALUATE\t" % (hit.thread_id, hit.frame_id)) writer.wait_for_var( [ '<xml><more>False</more><output message="0"></output><output message="1"></output><output message="2"></output></xml>' ] ) assert 17 == writer._sequence, 'Expected 19. 
Had: %s' % writer._sequence writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_15(case_setup): with case_setup.test_file('_debugger_case15.py') as writer: writer.write_add_breakpoint(22, 'main') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT) # Access some variable writer.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (hit.thread_id, hit.frame_id), "EXEC", "f=lambda x: 'val=%s' % x", "f") writer.wait_for_custom_operation('val=Black') assert 7 == writer._sequence, 'Expected 7. Had: %s' % writer._sequence writer.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (hit.thread_id, hit.frame_id), "EXECFILE", debugger_unittest._get_debugger_test_file('_debugger_case15_execfile.py'), "f") writer.wait_for_custom_operation('val=Black') assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_16(case_setup): # numpy.ndarray resolver try: import numpy except ImportError: pytest.skip('numpy not available') with case_setup.test_file('_debugger_case16.py') as writer: writer.write_add_breakpoint(9, 'main') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT) # In this test we check that the three arrays of different shapes, sizes and types # are all resolved properly as ndarrays. 
# First pass check is that we have all three expected variables defined writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_multiple_vars(( ( '<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j 8.%252B1.j%250A 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j 16.%252B1.j 17.%252B1.j%250A 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j 24.%252B1.j 25.%252B1.j 26.%252B1.j%250A 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j 32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j%250A 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j%250A 45.%252B1.j 46.%252B1.j 47.%252B1.j 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j%250A 54.%252B1.j 55.%252B1.j 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j%250A 63.%252B1.j 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j 80.%252B1.j%250A 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j 88.%252B1.j 89.%252B1.j%250A 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j 96.%252B1.j 97.%252B1.j 98.%252B1.j%250A 99.%252B1.j%255D" isContainer="True" />', '<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j%250A 8.%252B1.j 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j%250A 16.%252B1.j 17.%252B1.j 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j%250A 24.%252B1.j 25.%252B1.j 26.%252B1.j 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j%250A 
32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j%250A 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j 45.%252B1.j 46.%252B1.j 47.%252B1.j%250A 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j 54.%252B1.j 55.%252B1.j%250A 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j 63.%252B1.j%250A 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j%250A 80.%252B1.j 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j%250A 88.%252B1.j 89.%252B1.j 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j%250A 96.%252B1.j 97.%252B1.j 98.%252B1.j 99.%252B1.j%255D" isContainer="True" />' ), ( '<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ... 9997 9998 9999%255D%250A %255B10000 10001 10002 ... 19997 19998 19999%255D%250A %255B20000 20001 20002 ... 29997 29998 29999%255D%250A ...%250A %255B70000 70001 70002 ... 79997 79998 79999%255D%250A %255B80000 80001 80002 ... 89997 89998 89999%255D%250A %255B90000 90001 90002 ... 99997 99998 99999%255D%255D" isContainer="True" />', '<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ...%252C 9997 9998 9999%255D%250A %255B10000 10001 10002 ...%252C 19997 19998 19999%255D%250A %255B20000 20001 20002 ...%252C 29997 29998 29999%255D%250A ...%252C %250A %255B70000 70001 70002 ...%252C 79997 79998 79999%255D%250A %255B80000 80001 80002 ...%252C 89997 89998 89999%255D%250A %255B90000 90001 90002 ...%252C 99997 99998 99999%255D%255D" isContainer="True" />' ), # Any of the ones below will do. ( '<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ... 
9999997 9999998 9999999%255D" isContainer="True" />', '<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ...%252C 9999997 9999998 9999999%255D" isContainer="True" />' ) )) # For each variable, check each of the resolved (meta data) attributes... writer.write_get_variable(hit.thread_id, hit.frame_id, 'smallarray') writer.wait_for_multiple_vars(( '<var name="min" type="complex128"', '<var name="max" type="complex128"', '<var name="shape" type="tuple"', '<var name="dtype" type="dtype"', '<var name="size" type="int"', )) # ...and check that the internals are resolved properly writer.write_get_variable(hit.thread_id, hit.frame_id, 'smallarray\t__internals__') writer.wait_for_var('<var name="%27size%27') writer.write_get_variable(hit.thread_id, hit.frame_id, 'bigarray') # isContainer could be true on some numpy versions, so, we only check for the var begin. writer.wait_for_multiple_vars(( [ '<var name="min" type="int64" qualifier="numpy" value="int64%253A 0"', '<var name="min" type="int64" qualifier="numpy" value="int64%3A 0"', '<var name="size" type="int" qualifier="{0}" value="int%3A 100000"'.format(builtin_qualifier), ], [ '<var name="max" type="int64" qualifier="numpy" value="int64%253A 99999"', '<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"', '<var name="max" type="int64" qualifier="numpy" value="int64%3A 99999"', '<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"', ], '<var name="shape" type="tuple"', '<var name="dtype" type="dtype"', '<var name="size" type="int"' )) writer.write_get_variable(hit.thread_id, hit.frame_id, 'bigarray\t__internals__') writer.wait_for_var('<var name="%27size%27') # this one is different because it crosses the magic threshold where we don't calculate # the min/max writer.write_get_variable(hit.thread_id, hit.frame_id, 'hugearray') writer.wait_for_var(( [ '<var name="min" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating min would 
slow down debugging" />'.format(builtin_qualifier), '<var name="min" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier), '<var name="min" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier), '<var name="min" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier), ], [ '<var name="max" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier), '<var name="max" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier), '<var name="max" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier), '<var name="max" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier), ], '<var name="shape" type="tuple"', '<var name="dtype" type="dtype"', '<var name="size" type="int"', )) writer.write_get_variable(hit.thread_id, hit.frame_id, 'hugearray\t__internals__') writer.wait_for_var('<var name="%27size%27') writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_17(case_setup): # Check dont trace with case_setup.test_file('_debugger_case17.py') as writer: writer.write_enable_dont_trace(True) writer.write_add_breakpoint(27, 'main') writer.write_add_breakpoint(29, 'main') writer.write_add_breakpoint(31, 'main') writer.write_add_breakpoint(33, 'main') writer.write_make_initial_run() for _i in range(4): hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT) writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107', line=2) # Should Skip step into properties setter 
writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_17a(case_setup): # Check dont trace return with case_setup.test_file('_debugger_case17a.py') as writer: writer.write_enable_dont_trace(True) writer.write_add_breakpoint(2, 'm1') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=2) writer.write_step_in(hit.thread_id) hit = writer.wait_for_breakpoint_hit('107', line=10) # Should Skip step into properties setter assert hit.name == 'm3' writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_18(case_setup): # change local variable if IS_IRONPYTHON or IS_JYTHON: pytest.skip('Unsupported assign to local') with case_setup.test_file('_debugger_case18.py') as writer: writer.write_add_breakpoint(5, 'm2') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5) writer.write_change_variable(hit.thread_id, hit.frame_id, 'a', '40') writer.wait_for_var('<xml><var name="" type="int" qualifier="{0}" value="int%253A 40" />%0A</xml>'.format(builtin_qualifier,)) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_19(case_setup): # Check evaluate '__' attributes with case_setup.test_file('_debugger_case19.py') as writer: writer.write_add_breakpoint(8, None) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=8) writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a.__var') writer.wait_for_evaluation([ [ '<var name="a.__var" type="int" qualifier="{0}" value="int'.format(builtin_qualifier), '<var name="a.__var" type="int" value="int', # jython ] ]) writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Monkey-patching related to starting threads not done on Jython.') def test_case_20(case_setup): # Check that we were notified of threads creation before they started to run with 
case_setup.test_file('_debugger_case20.py') as writer: writer.write_make_initial_run() # We already check if it prints 'TEST SUCEEDED' by default, so, nothing # else should be needed in this test as it tests what's needed just by # running the module. writer.finished_ok = True @pytest.mark.skipif(not TEST_FLASK, reason='No flask available') def test_case_flask(case_setup_flask): with case_setup_flask.test_file(EXPECTED_RETURNCODE='any') as writer: writer.write_multi_threads_single_notification(True) writer.write_add_breakpoint_jinja2(5, None, 'hello.html') writer.write_add_breakpoint_jinja2(8, None, 'hello.html') writer.write_make_initial_run() t = writer.create_request_thread() time.sleep(2) # Give flask some time to get to startup before requesting the page t.start() hit = writer.wait_for_single_notification_as_hit(line=5) writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_vars(['<var name="content" type="str"']) writer.write_run_thread(hit.thread_id) hit = writer.wait_for_single_notification_as_hit(line=8) writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_vars(['<var name="content" type="str"']) writer.write_run_thread(hit.thread_id) for _ in xrange(10): if hasattr(t, 'contents'): break time.sleep(.3) else: raise AssertionError('Flask did not return contents properly!') assert '<title>Hello</title>' in t.contents assert 'Flask-Jinja-Test' in t.contents writer.finished_ok = True @pytest.mark.skipif(not TEST_DJANGO, reason='No django available') def test_case_django_a(case_setup_django): with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer: writer.write_add_breakpoint_django(5, None, 'index.html') writer.write_make_initial_run() t = writer.create_request_thread('my_app') time.sleep(5) # Give django some time to get to startup before requesting the page t.start() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5) writer.write_get_variable(hit.thread_id, hit.frame_id, 'entry') 
writer.wait_for_vars([ '<var name="key" type="str"', 'v1' ]) writer.write_run_thread(hit.thread_id) hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5) writer.write_get_variable(hit.thread_id, hit.frame_id, 'entry') writer.wait_for_vars([ '<var name="key" type="str"', 'v2' ]) writer.write_run_thread(hit.thread_id) for _ in xrange(10): if hasattr(t, 'contents'): break time.sleep(.3) else: raise AssertionError('Django did not return contents properly!') contents = t.contents.replace(' ', '').replace('\r', '').replace('\n', '') if contents != '<ul><li>v1:v1</li><li>v2:v2</li></ul>': raise AssertionError('%s != <ul><li>v1:v1</li><li>v2:v2</li></ul>' % (contents,)) writer.finished_ok = True @pytest.mark.skipif(not TEST_DJANGO, reason='No django available') def test_case_django_b(case_setup_django): with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer: writer.write_add_breakpoint_django(4, None, 'name.html') writer.write_make_initial_run() t = writer.create_request_thread('my_app/name') time.sleep(5) # Give django some time to get to startup before requesting the page t.start() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=4) writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_var('<var name="form" type="NameForm" qualifier="my_app.forms" value="NameForm%253A') writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(not TEST_CYTHON, reason='No cython available') def test_cython(case_setup): from _pydevd_bundle import pydevd_cython assert pydevd_cython.trace_dispatch is not None def _has_qt(): try: try: from PySide import QtCore # @UnresolvedImport return True except: from PySide2 import QtCore # @UnresolvedImport return True except: try: from PyQt4 import QtCore # @UnresolvedImport return True except: try: from PyQt5 import QtCore # @UnresolvedImport return True except: pass return False @pytest.mark.skipif(not _has_qt(), reason='No qt available') def 
test_case_qthread1(case_setup): with case_setup.test_file('_debugger_case_qthread1.py') as writer: breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'run') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(hit.thread_id) writer.log.append('Checking sequence. Found: %s' % (writer._sequence)) assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence writer.log.append('Marking finished ok.') writer.finished_ok = True @pytest.mark.skipif(not _has_qt(), reason='No qt available') def test_case_qthread2(case_setup): with case_setup.test_file('_debugger_case_qthread2.py') as writer: breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'long_running') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(thread_id) writer.log.append('Checking sequence. Found: %s' % (writer._sequence)) assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence writer.log.append('Marking finished ok.') writer.finished_ok = True @pytest.mark.skipif(not _has_qt(), reason='No qt available') def test_case_qthread3(case_setup): with case_setup.test_file('_debugger_case_qthread3.py') as writer: breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'run') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(thread_id) writer.log.append('Checking sequence. Found: %s' % (writer._sequence)) assert 9 == writer._sequence, 'Expected 9. 
Had: %s' % writer._sequence writer.log.append('Marking finished ok.') writer.finished_ok = True @pytest.mark.skipif(not _has_qt(), reason='No qt available') def test_case_qthread4(case_setup): with case_setup.test_file('_debugger_case_qthread4.py') as writer: original_additional_output_checks = writer.additional_output_checks def additional_output_checks(stdout, stderr): original_additional_output_checks(stdout, stderr) if 'On start called' not in stdout: raise AssertionError('Expected "On start called" to be in stdout:\n%s' % (stdout,)) if 'Done sleeping' not in stdout: raise AssertionError('Expected "Done sleeping" to be in stdout:\n%s' % (stdout,)) if 'native Qt signal is not callable' in stderr: raise AssertionError('Did not expect "native Qt signal is not callable" to be in stderr:\n%s' % (stderr,)) breakpoint_id = writer.write_add_breakpoint(28, 'on_start') # breakpoint on print('On start called2'). writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(hit.thread_id) writer.log.append('Checking sequence. Found: %s' % (writer._sequence)) assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence writer.log.append('Marking finished ok.') writer.finished_ok = True def test_m_switch(case_setup_m_switch): with case_setup_m_switch.test_file() as writer: writer.log.append('writing add breakpoint') breakpoint_id = writer.write_add_breakpoint(1, None) writer.log.append('making initial run') writer.write_make_initial_run() writer.log.append('waiting for breakpoint hit') hit = writer.wait_for_breakpoint_hit() writer.write_remove_breakpoint(breakpoint_id) writer.log.append('run thread') writer.write_run_thread(hit.thread_id) writer.log.append('asserting') try: assert 9 == writer._sequence, 'Expected 9. 
Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True def test_module_entry_point(case_setup_m_switch_entry_point): with case_setup_m_switch_entry_point.test_file() as writer: writer.log.append('writing add breakpoint') breakpoint_id = writer.write_add_breakpoint(1, None) writer.log.append('making initial run') writer.write_make_initial_run() writer.log.append('waiting for breakpoint hit') hit = writer.wait_for_breakpoint_hit() writer.write_remove_breakpoint(breakpoint_id) writer.log.append('run thread') writer.write_run_thread(hit.thread_id) writer.log.append('asserting') try: assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.') def test_check_tracer_with_exceptions(case_setup): def get_environ(writer): env = os.environ.copy() # This test requires regular tracing (without cython). env['PYDEVD_USE_CYTHON'] = 'NO' env['PYDEVD_USE_FRAME_EVAL'] = 'NO' return env with case_setup.test_file('_debugger_case_check_tracer.py', get_environ=get_environ) as writer: writer.write_add_exception_breakpoint_with_policy('IndexError', "1", "1", "1") writer.write_make_initial_run() writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated).') def test_unhandled_exceptions_basic(case_setup): def check_test_suceeded_msg(writer, stdout, stderr): # Don't call super (we have an unhandled exception in the stack trace). 
return 'TEST SUCEEDED' in ''.join(stdout) and 'TEST SUCEEDED' in ''.join(stderr) def additional_output_checks(writer, stdout, stderr): if 'raise Exception' not in stderr: raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % ( stdout, stderr)) with case_setup.test_file( '_debugger_case_unhandled_exceptions.py', check_test_suceeded_msg=check_test_suceeded_msg, additional_output_checks=additional_output_checks, EXPECTED_RETURNCODE=1, ) as writer: writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0") writer.write_make_initial_run() def check(hit, exc_type, exc_desc): writer.write_get_current_exception(hit.thread_id) msg = writer.wait_for_message(accept_message=lambda msg:exc_type in msg and 'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False) assert unquote(msg.thread['exc_desc']) == exc_desc assert unquote(msg.thread['exc_type']) in ( "&lt;type 'exceptions.%s'&gt;" % (exc_type,), # py2 "&lt;class '%s'&gt;" % (exc_type,) # py3 ) if len(msg.thread.frame) == 0: assert unquote(unquote(msg.thread.frame['file'])).endswith('_debugger_case_unhandled_exceptions.py') else: assert unquote(unquote(msg.thread.frame[0]['file'])).endswith('_debugger_case_unhandled_exceptions.py') writer.write_run_thread(hit.thread_id) # Will stop in 2 background threads hit0 = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) thread_id1 = hit0.thread_id hit1 = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) thread_id2 = hit1.thread_id if hit0.name == 'thread_func2': check(hit0, 'ValueError', 'in thread 2') check(hit1, 'Exception', 'in thread 1') else: check(hit0, 'Exception', 'in thread 1') check(hit1, 'ValueError', 'in thread 2') writer.write_run_thread(thread_id1) writer.write_run_thread(thread_id2) # Will stop in main thread hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) assert hit.name == '<module>' thread_id3 = hit.thread_id # Requesting the stack in an unhandled exception should 
provide the stack of the exception, # not the current location of the program. writer.write_get_thread_stack(thread_id3) msg = writer.wait_for_message(CMD_GET_THREAD_STACK) assert len(msg.thread.frame) == 0 # In main thread (must have no back frames). assert msg.thread.frame['name'] == '<module>' check(hit, 'IndexError', 'in main') writer.log.append('Marking finished ok.') writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated).') def test_unhandled_exceptions_in_top_level(case_setup_unhandled_exceptions): with case_setup_unhandled_exceptions.test_file( '_debugger_case_unhandled_exceptions_on_top_level.py', EXPECTED_RETURNCODE=1, ) as writer: writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0") writer.write_make_initial_run() # Will stop in main thread hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) writer.log.append('Marking finished ok.') writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated).') def test_unhandled_exceptions_in_top_level2(case_setup_unhandled_exceptions): # Note: expecting unhandled exception to be printed to stderr. def get_environ(writer): env = os.environ.copy() curr_pythonpath = env.get('PYTHONPATH', '') pydevd_dirname = os.path.dirname(writer.get_pydevd_file()) curr_pythonpath = pydevd_dirname + os.pathsep + curr_pythonpath env['PYTHONPATH'] = curr_pythonpath return env def update_command_line_args(writer, args): # Start pydevd with '-m' to see how it deal with being called with # runpy at the start. 
        # NOTE(review): this chunk starts mid-function — the enclosing test and the
        # start of this `update_command_line_args` helper are above this view.
        # Start pydevd with '-m' so the debugger is launched through runpy.
        assert args[0].endswith('pydevd.py')
        args = ['-m', 'pydevd'] + args[1:]
        return args

    with case_setup_unhandled_exceptions.test_file(
            '_debugger_case_unhandled_exceptions_on_top_level.py',
            get_environ=get_environ,
            update_command_line_args=update_command_line_args,
            EXPECTED_RETURNCODE=(1, 255),  # Python 2.6, Jython can give 255
        ) as writer:

        # Notify only on unhandled exceptions (handled=0, unhandled=1).
        writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
        writer.write_make_initial_run()

        # Should stop (only once) in the main thread.
        hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
        writer.write_run_thread(hit.thread_id)

        writer.log.append('Marking finished ok.')
        writer.finished_ok = True


@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated).')
def test_unhandled_exceptions_in_top_level3(case_setup_unhandled_exceptions):
    # With notification for both handled and unhandled exceptions enabled, an
    # exception raised at the top level should produce two stops: caught, then uncaught.

    with case_setup_unhandled_exceptions.test_file(
            '_debugger_case_unhandled_exceptions_on_top_level.py',
            EXPECTED_RETURNCODE=1
        ) as writer:

        # Handled and unhandled
        writer.write_add_exception_breakpoint_with_policy('Exception', "1", "1", "0")
        writer.write_make_initial_run()

        # Will stop in main thread twice: once when we find that the exception is being
        # thrown and another in postmortem mode when we discover it's uncaught.
        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
        writer.write_run_thread(hit.thread_id)

        hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
        writer.write_run_thread(hit.thread_id)

        writer.log.append('Marking finished ok.')
        writer.finished_ok = True


@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated).')
def test_unhandled_exceptions_in_top_level4(case_setup_unhandled_exceptions):
    # Note: expecting unhandled exception to be printed to stderr.

    with case_setup_unhandled_exceptions.test_file(
            '_debugger_case_unhandled_exceptions_on_top_level2.py',
            EXPECTED_RETURNCODE=1,
        ) as writer:

        # Handled and unhandled
        writer.write_add_exception_breakpoint_with_policy('Exception', "1", "1", "0")
        writer.write_make_initial_run()

        # We have an exception thrown and handled and another which is thrown and is then unhandled.
        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
        writer.write_run_thread(hit.thread_id)

        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
        writer.write_run_thread(hit.thread_id)

        hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
        writer.write_run_thread(hit.thread_id)

        writer.log.append('Marking finished ok.')
        writer.finished_ok = True


@pytest.mark.skipif(not IS_CPYTHON, reason='Only for Python.')
def test_case_set_next_statement(case_setup):
    # Verify that "set next statement" jumps back to a previous line without
    # re-executing it, and that the skipped assignment only takes effect after
    # an explicit step over.

    with case_setup.test_file('_debugger_case_set_next_statement.py') as writer:
        breakpoint_id = writer.write_add_breakpoint(6, None)
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=6)

        # Stop in line a=3 (before setting it)
        writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
        writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))
        writer.write_set_next_statement(hit.thread_id, 2, 'method')
        # '127' is the CMD id reported for a set-next-statement stop.
        hit = writer.wait_for_breakpoint_hit('127', line=2)

        # Check that it's still unchanged
        writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
        writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))

        # After a step over it should become 1 as we executed line which sets a = 1
        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit('108')

        writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
        writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 1"'.format(builtin_qualifier))

        writer.write_remove_breakpoint(breakpoint_id)
        writer.write_run_thread(hit.thread_id)

        writer.finished_ok = True


def test_unhandled_exceptions_get_stack(case_setup_unhandled_exceptions):
    # When stopped on an unhandled exception, the thread-stack request must
    # report the main module frame at the expected line.

    with case_setup_unhandled_exceptions.test_file(
            '_debugger_case_unhandled_exception_get_stack.py',
            EXPECTED_RETURNCODE=(1, 255),  # Python 2.6, Jython can give 255
        ) as writer:
        writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)

        writer.write_get_thread_stack(hit.thread_id)
        msg = writer.wait_for_get_thread_stack_message()
        files = [frame['file'] for frame in msg.thread.frame]
        assert msg.thread['id'] == hit.thread_id
        if not files[0].endswith('_debugger_case_unhandled_exception_get_stack.py'):
            raise AssertionError('Expected to find _debugger_case_unhandled_exception_get_stack.py in files[0]. Found: %s' % ('\n'.join(files),))

        # NOTE(review): len() == 0 with attribute access below implies the parsed
        # message exposes a single frame element (not a list) — verify against the
        # message-parsing helper used by the test framework.
        assert len(msg.thread.frame) == 0  # No back frames (stopped in main).
        assert msg.thread.frame['name'] == '<module>'
        assert msg.thread.frame['line'] == str(writer.get_line_index_with_content('break line on unhandled exception'))

        writer.write_run_thread(hit.thread_id)

        writer.log.append('Marking finished ok.')
        writer.finished_ok = True


@pytest.mark.skipif(not IS_CPYTHON, reason='Only for Python.')
def test_case_get_next_statement_targets(case_setup):
    # The debugger must report exactly the lines that are valid targets for
    # "set next statement" from the current frame.

    with case_setup.test_file('_debugger_case_get_next_statement_targets.py') as writer:
        breakpoint_id = writer.write_add_breakpoint(21, None)
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=21)

        writer.write_get_next_statement_targets(hit.thread_id, hit.frame_id)
        targets = writer.wait_for_get_next_statement_targets()
        expected = set((2, 3, 5, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 21))
        assert targets == expected, 'Expected targets to be %s, was: %s' % (expected, targets)

        writer.write_remove_breakpoint(breakpoint_id)
        writer.write_run_thread(hit.thread_id)

        writer.finished_ok = True


@pytest.mark.skipif(IS_IRONPYTHON or IS_JYTHON, reason='Failing on IronPython and Jython (needs to be investigated).')
def test_case_type_ext(case_setup):
    # Custom type presentation extensions

    def get_environ(self):
        # Put the 'my_extensions' test dir on PYTHONPATH so pydevd loads the
        # custom type-presentation plugin from there.
        env = os.environ.copy()

        python_path = env.get("PYTHONPATH", "")
        ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
        # Conditional expression spans the whole RHS: prepend ext_base when a
        # PYTHONPATH exists, otherwise use ext_base alone.
        env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
        return env

    with case_setup.test_file('_debugger_case_type_ext.py', get_environ=get_environ) as writer:
        writer.get_environ = get_environ
        writer.write_add_breakpoint(7, None)
        writer.write_make_initial_run()
        hit = writer.wait_for_breakpoint_hit('111')
        writer.write_get_frame(hit.thread_id, hit.frame_id)
        # Either the extension-rendered value (CPython) or the default repr (Jython).
        assert writer.wait_for_var([
            [
                r'<var name="my_rect" type="Rect" qualifier="__main__" value="Rectangle%255BLength%253A 5%252C Width%253A 10 %252C Area%253A 50%255D" isContainer="True" />',
                r'<var name="my_rect" type="Rect" value="Rect: <__main__.Rect object at',  # Jython
            ]
        ])
        writer.write_get_variable(hit.thread_id, hit.frame_id, 'my_rect')
        assert writer.wait_for_var(r'<var name="area" type="int" qualifier="{0}" value="int%253A 50" />'.format(builtin_qualifier))
        writer.write_run_thread(hit.thread_id)

        writer.finished_ok = True


@pytest.mark.skipif(IS_IRONPYTHON or IS_JYTHON, reason='Failing on IronPython and Jython (needs to be investigated).')
def test_case_event_ext(case_setup):

    def get_environ(self):
        # Same extension dir as test_case_type_ext, plus a flag the debugged
        # script checks to print the initialize-event marker.
        env = os.environ.copy()

        python_path = env.get("PYTHONPATH", "")
        ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
        env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
        env["VERIFY_EVENT_TEST"] = "1"
        return env

    # Test initialize event for extensions
    with case_setup.test_file('_debugger_case_event_ext.py', get_environ=get_environ) as writer:
        original_additional_output_checks = writer.additional_output_checks

        # Wrap the writer's output check so we additionally require the
        # extension's initialize-event marker in stdout.
        @overrides(writer.additional_output_checks)
        def additional_output_checks(stdout, stderr):
            original_additional_output_checks(stdout, stderr)
            if 'INITIALIZE EVENT RECEIVED' not in stdout:
                raise AssertionError('No initialize event received')

        writer.additional_output_checks = additional_output_checks

        writer.write_make_initial_run()
        writer.finished_ok = True


@pytest.mark.skipif(IS_JYTHON, reason='Jython does not seem to be creating thread started inside tracing (investigate).')
def test_case_writer_creation_deadlock(case_setup):
    # check case where there was a deadlock evaluating expressions
    with case_setup.test_file('_debugger_case_thread_creation_deadlock.py') as writer:
        writer.write_add_breakpoint(26, None)
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit('111')

        assert hit.line == 26, 'Expected return to be in line 26, was: %s' % (hit.line,)
        # Evaluating an expression that spawns a thread while stopped must not deadlock.
        writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'create_thread()')
        writer.wait_for_evaluation('<var name="create_thread()" type="str" qualifier="{0}" value="str: create_thread:ok'.format(builtin_qualifier))
        writer.write_run_thread(hit.thread_id)

        writer.finished_ok = True


def test_case_skip_breakpoints_in_exceptions(case_setup):
    # Case where breakpoint is skipped after an exception is raised over it
    with case_setup.test_file('_debugger_case_skip_breakpoint_in_exceptions.py') as writer:
        writer.write_add_breakpoint(5, None)
        writer.write_make_initial_run()
        # The breakpoint must still hit on subsequent passes even after an
        # exception propagated through that line.
        hit = writer.wait_for_breakpoint_hit('111', line=5)
        writer.write_run_thread(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit('111', line=5)
        writer.write_run_thread(hit.thread_id)
        writer.finished_ok = True


def test_case_handled_exceptions0(case_setup):
    # Stop only once per handled exception.
    with case_setup.test_file('_debugger_case_exceptions.py') as writer:
        writer.write_set_project_roots([os.path.dirname(writer.TEST_FILE)])
        writer.write_add_exception_breakpoint_with_policy(
            'IndexError',
            notify_on_handled_exceptions=2,  # Notify only once
            notify_on_unhandled_exceptions=0,
            ignore_libraries=1
        )
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=3)
        writer.write_run_thread(hit.thread_id)

        writer.finished_ok = True


@pytest.mark.skipif(IS_JYTHON, reason='Not working on Jython (needs to be investigated).')
def test_case_handled_exceptions1(case_setup):
    # Stop multiple times for the same handled exception.

    def get_environ(self):
        env = os.environ.copy()
        env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
        return env

    with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
        writer.write_add_exception_breakpoint_with_policy(
            'IndexError',
            notify_on_handled_exceptions=1,  # Notify multiple times
            notify_on_unhandled_exceptions=0,
            ignore_libraries=1
        )
        writer.write_make_initial_run()

        def check(hit):
            # At each stop: the frame message must carry the exception info and
            # the current-exception query must report type/description/file.
            writer.write_get_frame(hit.thread_id, hit.frame_id)
            writer.wait_for_message(accept_message=lambda msg:'__exception__' in msg and 'IndexError' in msg, unquote_msg=False)
            writer.write_get_current_exception(hit.thread_id)
            msg = writer.wait_for_message(accept_message=lambda msg:'IndexError' in msg and 'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
            assert msg.thread['exc_desc'] == 'foo'
            # exc_type is XML-escaped and its spelling differs between Python 2 and 3.
            assert unquote(msg.thread['exc_type']) in (
                "&lt;type 'exceptions.IndexError'&gt;",  # py2
                "&lt;class 'IndexError'&gt;"  # py3
            )
            # File attribute is double-quoted in transit, hence the double unquote.
            assert unquote(unquote(msg.thread.frame[0]['file'])).endswith('_debugger_case_exceptions.py')
            writer.write_run_thread(hit.thread_id)

        # Same IndexError is raised/handled at three different lines; we must stop at each.
        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=3)
        check(hit)
        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=6)
        check(hit)
        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=10)
        check(hit)

        writer.finished_ok = True


def test_case_handled_exceptions2(case_setup):
    # No stops expected: the project roots exclude the test file, so the
    # handled exception is treated as library code and ignored.

    def get_environ(self):
        env = os.environ.copy()
        # Don't stop anywhere (note: having IDE_PROJECT_ROOTS = '' will consider
        # having anything not under site-packages as being in the project).
        env["IDE_PROJECT_ROOTS"] = '["empty"]'
        return env

    with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
        writer.write_add_exception_breakpoint_with_policy(
            'IndexError',
            notify_on_handled_exceptions=1,  # Notify multiple times
            notify_on_unhandled_exceptions=0,
            ignore_libraries=1
        )
        writer.write_make_initial_run()

        writer.finished_ok = True


def test_case_handled_exceptions3(case_setup):
    # Don't stop on exception thrown in the same context (only at caller).

    def get_environ(self):
        env = os.environ.copy()
        env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
        return env

    with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
        # Note: in this mode we'll only stop once.
        writer.write_set_py_exception_globals(
            break_on_uncaught=False,
            break_on_caught=True,
            skip_on_exceptions_thrown_in_same_context=False,
            ignore_exceptions_thrown_in_lines_with_ignore_exception=True,
            ignore_libraries=True,
            exceptions=('IndexError',)
        )
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=3)
        writer.write_run_thread(hit.thread_id)

        writer.finished_ok = True


def test_case_handled_exceptions4(case_setup):
    # Don't stop on exception thrown in the same context (only at caller).
    # NOTE(review): this function continues past the end of this chunk.

    def get_environ(self):
        env = os.environ.copy()
        env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
        return env

    with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
        # Note: in this mode we'll only stop once.
writer.write_set_py_exception_globals( break_on_uncaught=False, break_on_caught=True, skip_on_exceptions_thrown_in_same_context=True, ignore_exceptions_thrown_in_lines_with_ignore_exception=True, ignore_libraries=True, exceptions=('IndexError',) ) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=6) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_settrace(case_setup): with case_setup.test_file('_debugger_case_settrace.py') as writer: writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit('108', line=12) writer.write_run_thread(hit.thread_id) hit = writer.wait_for_breakpoint_hit(line=7) writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(True or IS_PY26 or IS_JYTHON, reason='This is *very* flaky. Scapy only supports 2.7 onwards, not available for jython.') def test_case_scapy(case_setup): with case_setup.test_file('_debugger_case_scapy.py') as writer: writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True writer.reader_thread.set_messages_timeout(30) # Starting scapy may be slow (timed out with 15 seconds on appveyor). 
writer.write_add_breakpoint(2, None) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_run_thread(thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_APPVEYOR or IS_JYTHON, reason='Flaky on appveyor / Jython encoding issues (needs investigation).') def test_redirect_output(case_setup): def get_environ(writer): env = os.environ.copy() env["PYTHONIOENCODING"] = 'utf-8' return env with case_setup.test_file('_debugger_case_redirect.py', get_environ=get_environ) as writer: original_ignore_stderr_line = writer._ignore_stderr_line @overrides(writer._ignore_stderr_line) def _ignore_stderr_line(line): if original_ignore_stderr_line(line): return True return line.startswith(( 'text', 'binary', 'a' )) writer._ignore_stderr_line = _ignore_stderr_line # Note: writes to stdout and stderr are now synchronous (so, the order # must always be consistent and there's a message for each write). expected = [ 'text\n', 'binary or text\n', 'ação1\n', ] if sys.version_info[0] >= 3: expected.extend(( 'binary\n', 'ação2\n'.encode(encoding='latin1').decode('utf-8', 'replace'), 'ação3\n', )) new_expected = [(x, 'stdout') for x in expected] new_expected.extend([(x, 'stderr') for x in expected]) writer.write_start_redirect() writer.write_make_initial_run() msgs = [] ignored = [] while len(msgs) < len(new_expected): try: msg = writer.wait_for_output() except AssertionError: for msg in msgs: sys.stderr.write('Found: %s\n' % (msg,)) for msg in new_expected: sys.stderr.write('Expected: %s\n' % (msg,)) for msg in ignored: sys.stderr.write('Ignored: %s\n' % (msg,)) raise if msg not in new_expected: ignored.append(msg) continue msgs.append(msg) if msgs != new_expected: print(msgs) print(new_expected) assert msgs == new_expected writer.finished_ok = True def test_path_translation(case_setup): def get_file_in_client(writer): # Instead of using: test_python/_debugger_case_path_translation.py # we'll set the 
breakpoints at foo/_debugger_case_path_translation.py file_in_client = os.path.dirname(os.path.dirname(writer.TEST_FILE)) return os.path.join(os.path.dirname(file_in_client), 'foo', '_debugger_case_path_translation.py') def get_environ(writer): import json env = os.environ.copy() env["PYTHONIOENCODING"] = 'utf-8' assert writer.TEST_FILE.endswith('_debugger_case_path_translation.py') env["PATHS_FROM_ECLIPSE_TO_PYTHON"] = json.dumps([ ( os.path.dirname(get_file_in_client(writer)), os.path.dirname(writer.TEST_FILE) ) ]) return env with case_setup.test_file('_debugger_case_path_translation.py', get_environ=get_environ) as writer: from pydev_tests_python.debugger_unittest import CMD_LOAD_SOURCE writer.write_start_redirect() file_in_client = get_file_in_client(writer) assert 'pydev_tests_python' not in file_in_client writer.write_add_breakpoint(2, 'main', filename=file_in_client) writer.write_make_initial_run() xml = writer.wait_for_message(lambda msg:'stop_reason="111"' in msg) assert xml.thread.frame[0]['file'] == file_in_client thread_id = xml.thread['id'] # Request a file that exists files_to_match = [file_in_client] if IS_WINDOWS: files_to_match.append(file_in_client.upper()) for f in files_to_match: writer.write_load_source(f) writer.wait_for_message( lambda msg: '%s\t' % CMD_LOAD_SOURCE in msg and \ "def main():" in msg and \ "print('break here')" in msg and \ "print('TEST SUCEEDED!')" in msg , expect_xml=False) # Request a file that does not exist writer.write_load_source(file_in_client + 'not_existent.py') writer.wait_for_message( lambda msg:'901\t' in msg and ('FileNotFoundError' in msg or 'IOError' in msg), expect_xml=False) writer.write_run_thread(thread_id) writer.finished_ok = True def test_evaluate_errors(case_setup): with case_setup.test_file('_debugger_case7.py') as writer: writer.write_add_breakpoint(4, 'Call') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id 
writer.write_evaluate_expression('%s\t%s\t%s' % (thread_id, frame_id, 'LOCAL'), 'name_error') writer.wait_for_evaluation('<var name="name_error" type="NameError"') writer.write_run_thread(thread_id) writer.finished_ok = True def test_list_threads(case_setup): with case_setup.test_file('_debugger_case7.py') as writer: writer.write_add_breakpoint(4, 'Call') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id seq = writer.write_list_threads() msg = writer.wait_for_list_threads(seq) assert msg.thread['name'] == 'MainThread' assert msg.thread['id'].startswith('pid') writer.write_run_thread(thread_id) writer.finished_ok = True def test_case_print(case_setup): with case_setup.test_file('_debugger_case_print.py') as writer: writer.write_add_breakpoint(1, 'None') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() thread_id = hit.thread_id frame_id = hit.frame_id writer.write_run_thread(thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Not working on Jython (needs to be investigated).') def test_case_lamdda(case_setup): with case_setup.test_file('_debugger_case_lamda.py') as writer: writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'None') writer.write_make_initial_run() for _ in range(3): # We'll hit the same breakpoint 3 times. 
hit = writer.wait_for_breakpoint_hit() writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Not working properly on Jython (needs investigation).') def test_case_suspension_policy(case_setup): with case_setup.test_file('_debugger_case_suspend_policy.py') as writer: writer.write_add_breakpoint(25, '', filename=writer.TEST_FILE, hit_condition='', is_logpoint=False, suspend_policy='ALL') writer.write_make_initial_run() thread_ids = [] for i in range(3): writer.log.append('Waiting for thread %s of 3 to stop' % (i + 1,)) # One thread is suspended with a breakpoint hit and the other 2 as thread suspended. hit = writer.wait_for_breakpoint_hit((REASON_STOP_ON_BREAKPOINT, REASON_THREAD_SUSPEND)) thread_ids.append(hit.thread_id) for thread_id in thread_ids: writer.write_run_thread(thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Flaky on Jython (needs investigation).') def test_case_get_thread_stack(case_setup): with case_setup.test_file('_debugger_case_get_thread_stack.py') as writer: original_ignore_stderr_line = writer._ignore_stderr_line @overrides(writer._ignore_stderr_line) def _ignore_stderr_line(line): if original_ignore_stderr_line(line): return True if IS_JYTHON: for expected in ( "RuntimeWarning: Parent module '_pydev_bundle' not found while handling absolute import", "from java.lang import System"): if expected in line: return True return False writer._ignore_stderr_line = _ignore_stderr_line writer.write_add_breakpoint(18, None) writer.write_make_initial_run() thread_created_msgs = [writer.wait_for_message(CMD_THREAD_CREATE)] thread_created_msgs.append(writer.wait_for_message(CMD_THREAD_CREATE)) thread_id_to_name = {} for msg in thread_created_msgs: thread_id_to_name[msg.thread['id']] = msg.thread['name'] assert len(thread_id_to_name) == 2 hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT) assert hit.thread_id in thread_id_to_name for request_thread_id in 
thread_id_to_name: writer.write_get_thread_stack(request_thread_id) msg = writer.wait_for_get_thread_stack_message() files = [frame['file'] for frame in msg.thread.frame] assert msg.thread['id'] == request_thread_id if not files[0].endswith('_debugger_case_get_thread_stack.py'): raise AssertionError('Expected to find _debugger_case_get_thread_stack.py in files[0]. Found: %s' % ('\n'.join(files),)) if ([filename for filename in files if filename.endswith('pydevd.py')]): raise AssertionError('Did not expect to find pydevd.py. Found: %s' % ('\n'.join(files),)) if request_thread_id == hit.thread_id: assert len(msg.thread.frame) == 0 # In main thread (must have no back frames). assert msg.thread.frame['name'] == '<module>' else: assert len(msg.thread.frame) > 1 # Stopped in threading (must have back frames). assert msg.thread.frame[0]['name'] == 'method' writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_case_dump_threads_to_stderr(case_setup): from pydev_tests_python.debugger_unittest import wait_for_condition def additional_output_checks(writer, stdout, stderr): assert is_stderr_ok(stderr), make_error_msg(stderr) def make_error_msg(stderr): return 'Did not find thread dump in stderr. 
stderr:\n%s' % (stderr,) def is_stderr_ok(stderr): return 'Thread Dump' in stderr and 'Thread pydevd.CommandThread (daemon: True, pydevd thread: True)' in stderr with case_setup.test_file( '_debugger_case_get_thread_stack.py', additional_output_checks=additional_output_checks) as writer: writer.write_add_breakpoint(12, None) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT) writer.write_dump_threads() wait_for_condition( lambda: is_stderr_ok(writer.get_stderr()), lambda: make_error_msg(writer.get_stderr()) ) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_stop_on_start_regular(case_setup): with case_setup.test_file('_debugger_case_simple_calls.py') as writer: writer.write_stop_on_start() writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO_MY_CODE, file='_debugger_case_simple_calls.py', line=1) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def _get_breakpoint_cases(): if sys.version_info >= (3, 7): # Just check breakpoint() return ('_debugger_case_breakpoint.py',) else: # Check breakpoint() and sys.__breakpointhook__ replacement. return ('_debugger_case_breakpoint.py', '_debugger_case_breakpoint2.py') @pytest.mark.parametrize("filename", _get_breakpoint_cases()) @pytest.mark.skipif(not IS_PY37_OR_GREATER, reason="Supported only in Python 3.7") def test_py_37_breakpoint(case_setup, filename): with case_setup.test_file(filename) as writer: writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(file=filename, line=3) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def _get_generator_cases(): if IS_PY2: return ('_debugger_case_generator_py2.py',) else: # On py3 we should check both versions. 
return ('_debugger_case_generator_py2.py', '_debugger_case_generator_py3.py') @pytest.mark.parametrize("filename", _get_generator_cases()) def test_generator_cases(case_setup, filename): with case_setup.test_file(filename) as writer: writer.write_add_breakpoint(writer.get_line_index_with_content('break here')) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_stop_on_start_m_switch(case_setup_m_switch): with case_setup_m_switch.test_file() as writer: writer.write_stop_on_start() writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO_MY_CODE, file='_debugger_case_m_switch.py', line=1) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_stop_on_start_entry_point(case_setup_m_switch_entry_point): with case_setup_m_switch_entry_point.test_file() as writer: writer.write_stop_on_start() writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO_MY_CODE, file='_debugger_case_module_entry_point.py', line=1) writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Not working properly on Jython (needs investigation).') def test_debug_zip_files(case_setup, tmpdir): def get_environ(writer): env = os.environ.copy() curr_pythonpath = env.get('PYTHONPATH', '') curr_pythonpath = str(tmpdir.join('myzip.zip')) + os.pathsep + curr_pythonpath curr_pythonpath = str(tmpdir.join('myzip2.egg!')) + os.pathsep + curr_pythonpath env['PYTHONPATH'] = curr_pythonpath env["IDE_PROJECT_ROOTS"] = str(tmpdir.join('myzip.zip')) return env import zipfile zip_file = zipfile.ZipFile( str(tmpdir.join('myzip.zip')), 'w') zip_file.writestr('zipped/__init__.py', '') zip_file.writestr('zipped/zipped_contents.py', 'def call_in_zip():\n return 1') zip_file.close() zip_file = zipfile.ZipFile( str(tmpdir.join('myzip2.egg!')), 'w') zip_file.writestr('zipped2/__init__.py', '') 
zip_file.writestr('zipped2/zipped_contents2.py', 'def call_in_zip2():\n return 1') zip_file.close() with case_setup.test_file('_debugger_case_zip_files.py', get_environ=get_environ) as writer: writer.write_add_breakpoint( 2, 'None', filename=os.path.join(str(tmpdir.join('myzip.zip')), 'zipped', 'zipped_contents.py') ) writer.write_add_breakpoint( 2, 'None', filename=os.path.join(str(tmpdir.join('myzip2.egg!')), 'zipped2', 'zipped_contents2.py') ) writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() assert hit.name == 'call_in_zip' writer.write_run_thread(hit.thread_id) hit = writer.wait_for_breakpoint_hit() assert hit.name == 'call_in_zip2' writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.') def test_multiprocessing(case_setup_multiprocessing): import threading from pydev_tests_python.debugger_unittest import AbstractWriterThread with case_setup_multiprocessing.test_file('_debugger_case_multiprocessing.py') as writer: break1_line = writer.get_line_index_with_content('break 1 here') break2_line = writer.get_line_index_with_content('break 2 here') writer.write_add_breakpoint(break1_line) writer.write_add_breakpoint(break2_line) server_socket = writer.server_socket class SecondaryProcessWriterThread(AbstractWriterThread): TEST_FILE = writer.get_main_filename() _sequence = -1 class SecondaryProcessThreadCommunication(threading.Thread): def run(self): from pydev_tests_python.debugger_unittest import ReaderThread server_socket.listen(1) self.server_socket = server_socket new_sock, addr = server_socket.accept() reader_thread = ReaderThread(new_sock) reader_thread.start() writer2 = SecondaryProcessWriterThread() writer2.reader_thread = reader_thread writer2.sock = new_sock writer2.write_version() writer2.write_add_breakpoint(break1_line) writer2.write_add_breakpoint(break2_line) writer2.write_make_initial_run() hit = writer2.wait_for_breakpoint_hit() 
writer2.write_run_thread(hit.thread_id) secondary_process_thread_communication = SecondaryProcessThreadCommunication() secondary_process_thread_communication.start() writer.write_make_initial_run() hit2 = writer.wait_for_breakpoint_hit() secondary_process_thread_communication.join(10) if secondary_process_thread_communication.isAlive(): raise AssertionError('The SecondaryProcessThreadCommunication did not finish') writer.write_run_thread(hit2.thread_id) writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.') def test_remote_debugger_basic(case_setup_remote): with case_setup_remote.test_file('_debugger_case_remote.py') as writer: writer.log.append('making initial run') writer.write_make_initial_run() writer.log.append('waiting for breakpoint hit') hit = writer.wait_for_breakpoint_hit() writer.log.append('run thread') writer.write_run_thread(hit.thread_id) writer.log.append('asserting') try: assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON or not IS_PY37_OR_GREATER, reason='CPython only test.') def test_py_37_breakpoint_remote(case_setup_remote): with case_setup_remote.test_file('_debugger_case_breakpoint_remote.py') as writer: writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit( filename='_debugger_case_breakpoint_remote.py', line=13, ) writer.write_run_thread(hit.thread_id) try: assert 5 == writer._sequence, 'Expected 5. 
Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON or not IS_PY37_OR_GREATER, reason='CPython only test.') def test_py_37_breakpoint_remote_no_import(case_setup_remote): def get_environ(writer): env = os.environ.copy() curr_pythonpath = env.get('PYTHONPATH', '') pydevd_dirname = os.path.join( os.path.dirname(writer.get_pydevd_file()), 'pydev_sitecustomize') curr_pythonpath = pydevd_dirname + os.pathsep + curr_pythonpath env['PYTHONPATH'] = curr_pythonpath return env with case_setup_remote.test_file( '_debugger_case_breakpoint_remote_no_import.py', get_environ=get_environ) as writer: writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit( "108", filename='_debugger_case_breakpoint_remote_no_import.py', line=12, ) writer.write_run_thread(hit.thread_id) try: assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.') def test_remote_debugger_multi_proc(case_setup_remote): class _SecondaryMultiProcProcessWriterThread(debugger_unittest.AbstractWriterThread): FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True def __init__(self, server_socket): debugger_unittest.AbstractWriterThread.__init__(self) self.server_socket = server_socket def run(self): print('waiting for second process') self.sock, addr = self.server_socket.accept() print('accepted second process') from pydev_tests_python.debugger_unittest import ReaderThread self.reader_thread = ReaderThread(self.sock) self.reader_thread.start() self._sequence = -1 # initial command is always the version self.write_version() self.log.append('start_socket') self.write_make_initial_run() time.sleep(.5) self.finished_ok = True def do_kill(writer): debugger_unittest.AbstractWriterThread.do_kill(writer) if hasattr(writer, 
'secondary_multi_proc_process_writer'): writer.secondary_multi_proc_process_writer.do_kill() with case_setup_remote.test_file( '_debugger_case_remote_1.py', do_kill=do_kill, EXPECTED_RETURNCODE='any' ) as writer: # It seems sometimes it becomes flaky on the ci because the process outlives the writer thread... # As we're only interested in knowing if a second connection was received, just kill the related # process. assert hasattr(writer, 'FORCE_KILL_PROCESS_WHEN_FINISHED_OK') writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True writer.log.append('making initial run') writer.write_make_initial_run() writer.log.append('waiting for breakpoint hit') hit = writer.wait_for_breakpoint_hit() writer.secondary_multi_proc_process_writer = secondary_multi_proc_process_writer = \ _SecondaryMultiProcProcessWriterThread(writer.server_socket) secondary_multi_proc_process_writer.start() writer.log.append('run thread') writer.write_run_thread(hit.thread_id) for _i in xrange(400): if secondary_multi_proc_process_writer.finished_ok: break time.sleep(.1) else: writer.log.append('Secondary process not finished ok!') raise AssertionError('Secondary process not finished ok!') writer.log.append('Secondary process finished!') try: assert 5 == writer._sequence, 'Expected 5. 
Had: %s' % writer._sequence except: writer.log.append('assert failed!') raise writer.log.append('asserted') writer.finished_ok = True @pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.') def test_remote_unhandled_exceptions(case_setup_remote): def check_test_suceeded_msg(writer, stdout, stderr): return 'TEST SUCEEDED' in ''.join(stderr) def additional_output_checks(writer, stdout, stderr): # Don't call super as we have an expected exception assert 'ValueError: TEST SUCEEDED' in stderr with case_setup_remote.test_file( '_debugger_case_remote_unhandled_exceptions.py', additional_output_checks=additional_output_checks, check_test_suceeded_msg=check_test_suceeded_msg, EXPECTED_RETURNCODE=1) as writer: writer.log.append('making initial run') writer.write_make_initial_run() writer.log.append('waiting for breakpoint hit') hit = writer.wait_for_breakpoint_hit() writer.write_add_exception_breakpoint_with_policy('Exception', '0', '1', '0') writer.log.append('run thread') writer.write_run_thread(hit.thread_id) writer.log.append('waiting for uncaught exception') hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) writer.log.append('finished ok') writer.finished_ok = True def test_trace_dispatch_correct(case_setup): def get_environ(writer): env = os.environ.copy() env['PYDEVD_USE_FRAME_EVAL'] = 'NO' # This test checks trace dispatch (so, disable frame eval). 
return env with case_setup.test_file('_debugger_case_trace_dispatch.py', get_environ=get_environ) as writer: breakpoint_id = writer.write_add_breakpoint(5, 'method') writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() writer.write_remove_breakpoint(breakpoint_id) writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_PY26, reason='Failing on Python 2.6 on travis (needs investigation).') def test_case_single_notification_on_step(case_setup): from pydev_tests_python.debugger_unittest import REASON_STEP_INTO with case_setup.test_file('_debugger_case_import_main.py') as writer: writer.write_multi_threads_single_notification(True) writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), '') writer.write_make_initial_run() hit = writer.wait_for_single_notification_as_hit() writer.write_step_in(hit.thread_id) hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO) writer.write_step_in(hit.thread_id) hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO) writer.write_step_in(hit.thread_id) hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO) writer.write_run_thread(hit.thread_id) writer.finished_ok = True def test_return_value(case_setup): with case_setup.test_file('_debugger_case_return_value.py') as writer: writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), '') writer.write_show_return_vars() writer.write_make_initial_run() hit = writer.wait_for_breakpoint_hit() writer.write_step_over(hit.thread_id) hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER) writer.write_get_frame(hit.thread_id, hit.frame_id) writer.wait_for_vars([ [ '<var name="method1" type="int" qualifier="%s" value="int: 1" isRetVal="True"' % (builtin_qualifier,), '<var name="method1" type="int" value="int%253A 1" isRetVal="True"', ], ]) writer.write_run_thread(hit.thread_id) writer.finished_ok = True @pytest.mark.skipif(IS_JYTHON, reason='Jython can 
only have one thread stopped at each time.') @pytest.mark.parametrize('check_single_notification', [True, False]) def test_run_pause_all_threads_single_notification(case_setup, check_single_notification): from pydev_tests_python.debugger_unittest import TimeoutError with case_setup.test_file('_debugger_case_multiple_threads.py') as writer: # : :type writer: AbstractWriterThread writer.write_multi_threads_single_notification(True) writer.write_make_initial_run() main_thread_id = writer.wait_for_new_thread() thread_id1 = writer.wait_for_new_thread() thread_id2 = writer.wait_for_new_thread() # Ok, all threads created, let's wait for the main thread to get to the join. writer.wait_for_thread_join(main_thread_id) writer.write_suspend_thread('*') if check_single_notification: dct = writer.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION) assert dct['thread_id'] in (thread_id1, thread_id2) assert dct['stop_reason'] == REASON_THREAD_SUSPEND else: # We should have a single thread suspended event for both threads. hit0 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND) assert hit0.thread_id in (thread_id1, thread_id2) hit1 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND) assert hit1.thread_id in (thread_id1, thread_id2) with pytest.raises(TimeoutError): # The main thread should not receive a hit as it's effectively deadlocked until other # threads finish. writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND, timeout=1) # Doing a step in in one thread, when paused should notify on both threads. 
writer.write_step_over(thread_id1) if check_single_notification: dct = writer.wait_for_json_message(CMD_THREAD_RESUME_SINGLE_NOTIFICATION) # Note: prefer wait_for_single_notification_as_hit assert dct['thread_id'] == thread_id1 dct = writer.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION) # Note: prefer wait_for_single_notification_as_hit assert dct['thread_id'] == thread_id1 assert dct['stop_reason'] == REASON_STEP_OVER hit = writer.get_current_stack_hit(thread_id1) else: hit = writer.wait_for_breakpoint_hit(CMD_STEP_OVER) writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'stop_loop()') writer.wait_for_evaluation('<var name="stop_loop()" type="str" qualifier="{0}" value="str: stopped_loop'.format(builtin_qualifier)) writer.write_run_thread('*') writer.finished_ok = True def scenario_uncaught(writer): hit = writer.wait_for_breakpoint_hit() writer.write_add_exception_breakpoint_with_policy('ValueError', '0', '1', '0') writer.write_run_thread(hit.thread_id) hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) def scenario_caught(writer): hit = writer.wait_for_breakpoint_hit() writer.write_add_exception_breakpoint_with_policy('ValueError', '1', '0', '0') writer.write_run_thread(hit.thread_id) for _ in range(2): hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) # Note: the one in the top-level will be hit once as caught (but not another time # in postmortem mode). 
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) def scenario_caught_and_uncaught(writer): hit = writer.wait_for_breakpoint_hit() writer.write_add_exception_breakpoint_with_policy('ValueError', '1', '1', '0') writer.write_run_thread(hit.thread_id) for _ in range(2): hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) # Note: the one in the top-level will be hit once as caught and another in postmortem mode. hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION) writer.write_run_thread(hit.thread_id) @pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.') @pytest.mark.parametrize( 'check_scenario', [ scenario_uncaught, scenario_caught, scenario_caught_and_uncaught, ] ) def test_top_level_exceptions_on_attach(case_setup_remote, check_scenario): def check_test_suceeded_msg(writer, stdout, stderr): return 'TEST SUCEEDED' in ''.join(stderr) def additional_output_checks(writer, stdout, stderr): # Don't call super as we have an expected exception assert 'ValueError: TEST SUCEEDED' in stderr with case_setup_remote.test_file( '_debugger_case_remote_unhandled_exceptions2.py', additional_output_checks=additional_output_checks, check_test_suceeded_msg=check_test_suceeded_msg, EXPECTED_RETURNCODE=1) as writer: writer.log.append('making initial run') writer.write_make_initial_run() check_scenario(writer) writer.log.append('finished ok') writer.finished_ok = True _GENERATOR_FILES = [ '_debugger_case_generator3.py', ] if not IS_PY2: _GENERATOR_FILES.append('_debugger_case_generator.py') _GENERATOR_FILES.append('_debugger_case_generator2.py') @pytest.mark.parametrize('target_filename', _GENERATOR_FILES) @pytest.mark.skipif(IS_JYTHON, reason='We do not detect generator returns on Jython.') def test_generator_step_over_basic(case_setup, target_filename): with 
# NOTE(review): this chunk begins mid-definition — the enclosing `def` line and
# the `with` keyword of the statement below are outside this view.  The code is
# reproduced token-for-token; only indentation (destroyed by flattening) and
# comments were restored.  Nesting of the py3.8-only branch is reconstructed —
# TODO confirm against the original file.
case_setup.test_file(target_filename) as writer:
        line = writer.get_line_index_with_content('break here')
        writer.write_add_breakpoint(line)
        writer.write_make_initial_run()
        hit = writer.wait_for_breakpoint_hit()

        # Note: not using for so that we know which step failed in the ci if it fails.
        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('step 1')
        )

        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('step 2')
        )

        if IS_PY38_OR_GREATER and target_filename == '_debugger_case_generator2.py':
            # On py 3.8 it goes back to the return line.
            writer.write_step_over(hit.thread_id)
            hit = writer.wait_for_breakpoint_hit(
                reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('return \\')
            )

        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('generator return')
        )

        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('step 3')
        )

        writer.write_run_thread(hit.thread_id)
        writer.finished_ok = True


@pytest.mark.skipif(not IS_CPYTHON or not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_asyncio_step_over_basic(case_setup):
    """Step over inside an asyncio program and check each stop location.

    Uses the markers in ``_debugger_case_asyncio.py`` (``break main``,
    ``step main``) to assert where the debugger stops after each step-over.
    """
    target_filename = '_debugger_case_asyncio.py'
    with case_setup.test_file(target_filename) as writer:
        line = writer.get_line_index_with_content('break main')
        writer.write_add_breakpoint(line)
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit()

        writer.write_step_over(hit.thread_id)
        # After the first step-over the stop reason reported is a breakpoint
        # hit on the same 'break main' line (not a step-over stop).
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STOP_ON_BREAKPOINT, file=target_filename, line=writer.get_line_index_with_content('break main')
        )

        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('step main')
        )

        writer.write_run_thread(hit.thread_id)
        writer.finished_ok = True


@pytest.mark.skipif(not IS_CPYTHON or not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_asyncio_step_over_end_of_function(case_setup):
    """Step over at the end of an asyncio coroutine.

    Breaks at ``break count 2`` and checks that stepping over lands back in
    the caller ('break main', then 'step main') with step-over stop reasons.
    """
    target_filename = '_debugger_case_asyncio.py'
    with case_setup.test_file(target_filename) as writer:
        line = writer.get_line_index_with_content('break count 2')
        writer.write_add_breakpoint(line)
        writer.write_make_initial_run()

        hit = writer.wait_for_breakpoint_hit()

        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('break main')
        )

        writer.write_step_over(hit.thread_id)
        hit = writer.wait_for_breakpoint_hit(
            reason=REASON_STEP_OVER, file=target_filename, line=writer.get_line_index_with_content('step main')
        )

        writer.write_run_thread(hit.thread_id)
        writer.finished_ok = True


# Jython needs some vars to be set locally.
# set JAVA_HOME=c:\bin\jdk1.8.0_172
# set PATH=%PATH%;C:\bin\jython2.7.0\bin
# set PATH=%PATH%;%JAVA_HOME%\bin
# c:\bin\jython2.7.0\bin\jython.exe -m py.test tests_python


if __name__ == '__main__':
    pytest.main(['-k', 'test_unhandled_exceptions_in_top_level2'])
39.985261
1,486
0.672725
14,298
105,801
4.671842
0.063995
0.064223
0.04048
0.041019
0.781505
0.74357
0.703165
0.670649
0.634555
0.590093
0
0.034629
0.225111
105,801
2,645
1,487
40.000378
0.78014
0.070245
0
0.585581
0
0.020363
0.199401
0.039118
0
0
0
0
0.065493
1
0.068795
false
0.001651
0.019813
0.002752
0.113924
0.007155
0
0
0
null
0
0
0
0
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
13d1292f8171296bf377ca61ac6b442c37b078e2
676
py
Python
kinopoisk_unofficial/response/films/film_sequels_and_prequels_response.py
masterWeber/kinopoisk-api-unofficial-client
5c95e1ec6e43bd302399b63a1525ee7e61724155
[ "MIT" ]
2
2021-11-13T12:23:41.000Z
2021-12-24T14:09:49.000Z
kinopoisk_unofficial/response/films/film_sequels_and_prequels_response.py
masterWeber/kinopoisk-api-unofficial-client
5c95e1ec6e43bd302399b63a1525ee7e61724155
[ "MIT" ]
1
2022-03-29T19:13:24.000Z
2022-03-30T18:57:23.000Z
kinopoisk_unofficial/response/films/film_sequels_and_prequels_response.py
masterWeber/kinopoisk-api-unofficial-client
5c95e1ec6e43bd302399b63a1525ee7e61724155
[ "MIT" ]
1
2021-11-13T12:30:01.000Z
2021-11-13T12:30:01.000Z
from dataclasses import dataclass, field
from typing import List

from apischema import serializer, deserializer

from kinopoisk_unofficial.contract.response import Response
from kinopoisk_unofficial.model.film_sequels_and_prequels import FilmSequelOrPrequel


@dataclass(frozen=True)
class FilmSequelsAndPrequelsResponse(Response):
    """Immutable response wrapper holding a list of film sequels/prequels.

    The API returns a bare JSON array; the (de)serializer pair below makes
    apischema treat this wrapper as transparent: it serializes to — and
    deserializes from — the plain ``List[FilmSequelOrPrequel]``.
    """

    # Defaults to an empty list via default_factory (mutable default safe-guard).
    items: List[FilmSequelOrPrequel] = field(default_factory=list)


@serializer
def unwrap_film(response: FilmSequelsAndPrequelsResponse) -> List[FilmSequelOrPrequel]:
    """Serialize the wrapper as its bare item list."""
    return response.items


@deserializer
def wrap_film(items: List[FilmSequelOrPrequel]) -> FilmSequelsAndPrequelsResponse:
    """Deserialize a bare item list into the wrapper type."""
    return FilmSequelsAndPrequelsResponse(items)
29.391304
87
0.843195
66
676
8.515152
0.469697
0.122776
0.081851
0
0
0
0
0
0
0
0
0
0.096154
676
22
88
30.727273
0.919804
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.357143
0.142857
0.785714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
3
13dc69450e0418cc1dc8e0067cbe0eef9f450955
313
py
Python
tutorials/W2D5_GenerativeModels/solutions/W2D5_Tutorial1_Solution_f63c0e9f.py
justynaekert/course-content-dl
aa64d9feb1ae92ad4b7afaf13b13616b3a020c20
[ "CC-BY-4.0", "BSD-3-Clause" ]
473
2021-04-13T18:27:42.000Z
2022-03-28T14:14:35.000Z
tutorials/W2D5_GenerativeModels/solutions/W2D5_Tutorial1_Solution_f63c0e9f.py
justynaekert/course-content-dl
aa64d9feb1ae92ad4b7afaf13b13616b3a020c20
[ "CC-BY-4.0", "BSD-3-Clause" ]
399
2021-06-07T20:56:59.000Z
2022-01-26T23:05:06.000Z
tutorials/W2D5_GenerativeModels/solutions/W2D5_Tutorial1_Solution_f63c0e9f.py
justynaekert/course-content-dl
aa64d9feb1ae92ad4b7afaf13b13616b3a020c20
[ "CC-BY-4.0", "BSD-3-Clause" ]
170
2021-04-16T11:09:32.000Z
2022-03-31T12:13:52.000Z
""" An Autoencoder accepts input, compresses it, and recreates it. On the other hand, VAEs assume that the source data has some underlying distribution and attempts to find the distribution parameters. So, VAEs are similar to GANs (but note that GANs work differently, as we will see in the next tutorials). """;
44.714286
81
0.779553
50
313
4.88
0.8
0
0
0
0
0
0
0
0
0
0
0
0.166134
313
7
82
44.714286
0.934866
0.968051
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
13eac9e0b1176a2ae3a1b3f7c522f3f14fcefa3d
4,326
py
Python
python/ambassador/fetch/k8sobject.py
yawboateng/emissary
2a0c208e25dfbe3b27839c0044c21cde8979e214
[ "Apache-2.0" ]
null
null
null
python/ambassador/fetch/k8sobject.py
yawboateng/emissary
2a0c208e25dfbe3b27839c0044c21cde8979e214
[ "Apache-2.0" ]
null
null
null
python/ambassador/fetch/k8sobject.py
yawboateng/emissary
2a0c208e25dfbe3b27839c0044c21cde8979e214
[ "Apache-2.0" ]
1
2021-03-04T10:23:33.000Z
2021-03-04T10:23:33.000Z
from __future__ import annotations from typing import Any, Dict, Iterator, Optional import collections.abc import dataclasses import enum from ..config import Config @dataclasses.dataclass(frozen=True) class KubernetesGVK: """ Represents a Kubernetes resource type (API group, version and kind). """ api_version: str kind: str @property def api_group(self) -> Optional[str]: # These are backward-indexed to support apiVersion: v1, which has a # version but no group. try: return self.api_version.split('/', 1)[-2] except IndexError: return None @property def version(self) -> str: return self.api_version.split('/', 1)[-1] @property def domain(self) -> str: if self.api_group: return f'{self.kind.lower()}.{self.api_group}' else: return self.kind.lower() @classmethod def for_ambassador(cls, kind: str, version: str = 'v2') -> KubernetesGVK: if 'alpha' in version: return cls(f'x.getambassador.io/{version}', kind) else: return cls(f'getambassador.io/{version}', kind) @classmethod def for_knative_networking(cls, kind: str) -> KubernetesGVK: return cls('networking.internal.knative.dev/v1alpha1', kind) @enum.unique class KubernetesObjectScope (enum.Enum): CLUSTER = enum.auto() NAMESPACE = enum.auto() @dataclasses.dataclass(frozen=True) class KubernetesObjectKey: """ Represents a single Kubernetes resource by kind and name. """ gvk: KubernetesGVK namespace: Optional[str] name: str @property def kind(self) -> str: return self.gvk.kind @property def scope(self) -> KubernetesObjectScope: return KubernetesObjectScope.CLUSTER if self.namespace is None else KubernetesObjectScope.NAMESPACE @classmethod def from_object_reference(cls, ref: Dict[str, Any]) -> KubernetesObjectKey: return cls(KubernetesGVK('v1', ref['kind']), ref.get('namespace'), ref['name']) class KubernetesObject (collections.abc.Mapping): """ Represents a raw object from Kubernetes. 
""" def __init__(self, delegate: Dict[str, Any]) -> None: self.delegate = delegate try: self.gvk self.name except KeyError: raise ValueError('delegate is not a valid Kubernetes object') def __getitem__(self, key: str) -> Any: return self.delegate[key] def __iter__(self) -> Iterator[str]: return iter(self.delegate) def __len__(self) -> int: return len(self.delegate) @property def gvk(self) -> KubernetesGVK: return KubernetesGVK(self['apiVersion'], self['kind']) @property def kind(self) -> str: return self.gvk.kind @property def metadata(self) -> Dict[str, Any]: return self['metadata'] @property def namespace(self) -> str: val = self.metadata.get('namespace') if val == '_automatic_': val = Config.ambassador_namespace elif val is None: raise AttributeError(f'{self.__class__.__name__} {self.gvk.domain} {self.name} has no namespace (it is cluster-scoped)') return val @property def name(self) -> str: return self.metadata['name'] @property def key(self) -> KubernetesObjectKey: try: namespace: Optional[str] = self.namespace except AttributeError: namespace = None return KubernetesObjectKey(self.gvk, namespace, self.name) @property def scope(self) -> KubernetesObjectScope: return self.key.scope @property def generation(self) -> int: return self.metadata.get('generation', 1) @property def annotations(self) -> Dict[str, str]: return self.metadata.get('annotations', {}) @property def ambassador_id(self) -> str: return self.annotations.get('getambassador.io/ambassador-id', 'default') @property def labels(self) -> Dict[str, str]: return self.metadata.get('labels', {}) @property def spec(self) -> Dict[str, Any]: return self.get('spec', {}) @property def status(self) -> Dict[str, Any]: return self.get('status', {})
26.060241
132
0.625058
488
4,326
5.452869
0.239754
0.074408
0.034198
0.031943
0.170237
0.143931
0.084179
0.063886
0.03758
0.03758
0
0.003116
0.258206
4,326
165
133
26.218182
0.826114
0.059177
0
0.298246
0
0.008772
0.102908
0.045986
0
0
0
0
0
1
0.219298
false
0
0.052632
0.166667
0.605263
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
b91f5d29b76fad2ebbbc5065da0b638268b98c73
40,635
py
Python
nnef_tests/conversion/tf_pb_layer_test_cases.py
benmbark/NNEF-Tools
f9bcb3e043474d47f6a8a552abcc6d1069476072
[ "Apache-2.0" ]
1
2019-08-28T11:58:52.000Z
2019-08-28T11:58:52.000Z
nnef_tests/conversion/tf_pb_layer_test_cases.py
Acidburn0zzz/NNEF-Tools
f9bcb3e043474d47f6a8a552abcc6d1069476072
[ "Apache-2.0" ]
null
null
null
nnef_tests/conversion/tf_pb_layer_test_cases.py
Acidburn0zzz/NNEF-Tools
f9bcb3e043474d47f6a8a552abcc6d1069476072
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2018 The Khronos Group Inc. # Copyright (c) 2018 Au-Zone Technologies Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import os import sys import unittest import numpy as np import tensorflow as tf from nnef_tests.conversion.tf_pb_test_runner import TFPbTestRunner if not os.path.exists('nnef_tools') and os.path.exists('../../nnef_tools'): os.chdir('../..') # Activations Testing class TestActivations(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestActivations, self).__init__(*args, **kwargs) self.name = "testActivations/" # ActivationsSigmoid Test def sigmoid_network(self, x): z1 = tf.sigmoid(x, name="z1") return z1 def test_sigmoid(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + "/" x = tf.placeholder(tf.float32, shape=[None, 1, 1, 1], name='x') self.sigmoid_network(x) self._test_layer(output_name, "z1") # Activations_nn_Sigmoid Test def nn_sigmoid_network(self, x): z1 = tf.nn.sigmoid(x, name="z1") return z1 def test_nn_sigmoid(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 1, 1, 1], name='x') self.sigmoid_network(x) self._test_layer(output_name, "z1") # ActivationsTanh Test def tanh_network(self, x): z1 = tf.tanh(x, name="z1") return z1 def test_tanh(self): tf.reset_default_graph() output_name = self.name + 
sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 1, 1, 1], name='x') self.tanh_network(x) self._test_layer(output_name, "z1") # Activations_nn_Tanh Test def nn_tanh_network(self, x): z1 = tf.nn.tanh(x, name="z1") return z1 def test_nn_tanh(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 1, 1, 1], name='x') self.nn_tanh_network(x) self._test_layer(output_name, "z1") # Activations_nn_Elu Test def nn_elu_network(self, x): z1 = tf.nn.elu(x, name="z1") return z1 def test_nn_elu(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 1, 1, 1], name='x') self.nn_elu_network(x) self._test_layer(output_name, "z1") # Activations_nn_Relu Test def nn_relu_network(self, x): z1 = tf.nn.relu(x, name="z1") return z1 def test_nn_relu(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 1, 1, 1], name='x') self.nn_relu_network(x) self._test_layer(output_name, "z1") class TestBasicMath(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestBasicMath, self).__init__(*args, **kwargs) self.name = "testBasicMath/" # BasicMathAdd Test def add_network(self, x): y = tf.constant([1, 2], dtype=tf.float32, name="y") z1 = tf.add(x, y, name="z1") return z1 def test_add(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 2, 2, 2], name='x') self.add_network(x) self._test_layer(output_name, "z1") # BasicMathSub Test def sub_network(self, x): y = tf.constant([-1], dtype=tf.float32, name="y") z1 = tf.subtract(x, y, name="z1") return z1 def test_sub(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 2, 2, 2], 
name='x') self.sub_network(x) self._test_layer(output_name, "z1") # BasicMathMult Test def multiply_network(self, x): y = tf.constant([-1, 2], dtype=tf.float32, name="y") z1 = tf.multiply(x, y, name="z1") return z1 def test_multiply(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 2, 2, 2], name='x') self.multiply_network(x) self._test_layer(output_name, "z1") # BasicMathDiv Test def div_network(self, x): y = tf.constant([[-1], [-2], [-3], [-4]], dtype=tf.float32, name="y") z1 = tf.div(x, y, name="z1") return z1 def test_div(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 1], name='x') self.div_network(x) self._test_layer(output_name, "z1") # BasicMathMatMul Test def matmul_network(self, x): y = tf.constant([[-1], [-2], [-3], [-4]], dtype=tf.float32, name="y") z1 = tf.matmul(x, y, transpose_b=True, name="z1") z2 = tf.matmul(x, y, transpose_a=True, name="z2") return z1, z2 def test_matmul(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.matmul_network(x) self._test_layer(output_name, "z1,z2") # BasicMathNeg Test def neg_network(self, x): z1 = tf.negative(x, name="z1") return z1 def test_neg(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 1], name='x') self.neg_network(x) self._test_layer(output_name, "z1") # BasicMathBiasAdd Test def bias_add_network(self, x): z1 = tf.nn.bias_add(x, tf.constant([1.0]), name="z1") return z1 def test_bias_add(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 1], name='x') self.bias_add_network(x) self._test_layer(output_name, "z1") # 
BasicMathSin Test def sin_network(self, x): z1 = tf.sin(x, name="z1") return z1 def test_sin(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 1], name='x') self.sin_network(x) self._test_layer(output_name, "z1") # BasicMathCos Test def cos_network(self, x): z1 = tf.cos(x, name="z1") return z1 def test_cos(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 1], name='x') self.cos_network(x) self._test_layer(output_name, "z1") class TestComparisons(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestComparisons, self).__init__(*args, **kwargs) self.name = "testComparisons/" # Comparisons_Greater Test def greater_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") cond = tf.greater(x, y, name="cond") z1 = tf.where(cond, x, y, name="z1") return z1 def test_greater(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.greater_network(x) self._test_layer(output_name, "z1") # Comparisons_Greater_Equal Test def greater_equal_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") cond = tf.greater_equal(x, y, name="cond") z1 = tf.where(cond, x, y, name="z1") return z1 def test_greater_equal(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.greater_equal_network(x) self._test_layer(output_name, "z1") # Comparisons_Less Test def less_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") cond = tf.less(x, y, name="cond") z1 = tf.where(cond, x, y, name="z1") return z1 def test_less(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] 
+ '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.less_network(x) self._test_layer(output_name, "z1") # Comparisons_Less_Equal Test def less_equal_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") cond = tf.less_equal(x, y, name="cond") z1 = tf.where(cond, x, y, name="z1") return z1 def test_less_equal(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.less_equal_network(x) self._test_layer(output_name, "z1") # Comparisons_Equal Test def equal_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") cond = tf.equal(x, y, name="cond") z1 = tf.where(cond, x, y, name="z1") return z1 def test_equal(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.equal_network(x) self._test_layer(output_name, "z1") # Comparisons_Not_Equal Test def not_equal_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") cond = tf.not_equal(x, y, name="cond") z1 = tf.where(cond, x, y, name="z1") return z1 def test_not_equal(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.not_equal_network(x) self._test_layer(output_name, "z1") class TestConvolutions(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestConvolutions, self).__init__(*args, **kwargs) self.name = "testConvolutions/" # Convolutions_nn_conv1d Test def nn_conv1d_network(self, x): y = tf.constant(np.random.rand(2, 1, 5) * 2 - 1, dtype=tf.float32, name="y") z1 = tf.nn.conv1d(x, y, 2, 'SAME', name="z1") return z1 @unittest.skip("Reshape removes knowledge of data format") def test_nn_conv1d(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = 
tf.placeholder(tf.float32, shape=[None, 4, 1], name='x') self.nn_conv1d_network(x) self._test_layer(output_name, "z1/Squeeze") # Convolutions_nn_conv2d Test def nn_conv2d_network(self, x): y = tf.constant(np.ones([3, 3, 1, 4]) * 2 - 1, dtype=tf.float32, name="y") z1 = tf.nn.conv2d(x, y, [1, 2, 2, 1], 'VALID', name="z1") z2 = tf.nn.conv2d(x, y, strides=[1, 1, 1, 1], padding='SAME', dilations=[1, 2, 2, 1], name="z2") return z1, z2 def test_nn_conv2d(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 1], name='x') self.nn_conv2d_network(x) self._test_layer(output_name, "z1,z2") # Convolutions_nn_conv3d Test def nn_conv3d_network(self, x): y = tf.constant(np.ones([3, 3, 3, 1, 5]) * 2 - 1, dtype=tf.float32, name="y") z1 = tf.nn.conv3d(x, y, [1, 2, 2, 2, 1], 'VALID', name="z1") return z1 def test_nn_conv3d(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 5, 1], name='x') self.nn_conv3d_network(x) self._test_layer(output_name, "z1") # Convolutions_nn_convolution Test def nn_convolution_network(self, x1): x2 = tf.constant(np.ones([1, 5, 5, 3]), dtype=tf.float32) x3 = tf.constant(np.ones([1, 5, 3]), dtype=tf.float32) y1 = tf.constant(np.ones([3, 3, 3, 3, 6]) * 2 - 1, dtype=tf.float32, name="y1") y2 = tf.constant(np.ones([3, 3, 3, 6]) * 2 - 1, dtype=tf.float32, name="y2") y3 = tf.constant(np.ones([3, 3, 6]) * 2 - 1, dtype=tf.float32, name="y3") z1 = tf.nn.convolution(x1, y1, strides=[2, 2, 2], padding='VALID', name="z1") z2 = tf.nn.convolution(x2, y2, strides=[2, 2], padding='VALID', name="z2") z3 = tf.nn.convolution(x3, y3, strides=[2], padding='VALID', name="z3") return z1, z2, z3 @unittest.skip("Recreates Conv1D, 2D, and 3D, issue with only a single input node, not conversion.") def test_nn_convolution(self): tf.reset_default_graph() output_name = self.name + 
sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 5, 3], name='x') self.nn_convolution_network(x) self._test_layer(output_name, "z1,z2,z3/Squeeze", True) # Convolutions_nn_conv2d_transpose Test def nn_conv2d_transpose_network(self, x): y = tf.constant(np.ones([2, 2, 1, 3]) * 2 - 1, dtype=tf.float32, name="y") z1 = tf.nn.conv2d_transpose(x, y, output_shape=[1, 10, 10, 1], strides=[1, 2, 2, 1], padding='SAME', name="z1") return z1 def test_nn_conv2d_transpose(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 3], name='x') self.nn_conv2d_transpose_network(x) self._test_layer(output_name, "z1") # Convolutions_nn_conv3d_transpose Test def nn_conv3d_transpose_network(self, x): y = tf.constant(np.ones([2, 2, 2, 1, 3]) * 2 - 1, dtype=tf.float32, name="y") z1 = tf.nn.conv3d_transpose(x, y, output_shape=[1, 10, 10, 10, 1], strides=[1, 2, 2, 2, 1], padding='SAME', name="z1") return z1 def test_nn_conv3d_transpose(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 5, 3], name='x') self.nn_conv3d_transpose_network(x) self._test_layer(output_name, "z1") # Convolutions_nn_depthwise_conv2d Test def nn_depthwise_conv2d_network(self, x): y = tf.constant(np.ones([3, 3, 3, 5]) * 2 - 1, dtype=tf.float32, name="y") z1 = tf.nn.depthwise_conv2d(x, y, [1, 2, 2, 1], 'SAME', name="z1") return z1 def test_nn_depthwise_conv2d(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 3], name='x') self.nn_depthwise_conv2d_network(x) self._test_layer(output_name, "z1") # Convolutions_nn_separable_conv2d Test def nn_separable_conv2d_network(self, x): y = tf.constant(np.ones([3, 3, 1, 1]) * 2 - 1, dtype=tf.float32, name="y") c1 = tf.constant(np.ones([1, 1, 1, 1]), 
dtype=tf.float32, name="c1") z1 = tf.nn.separable_conv2d(x, y, c1, [1, 2, 2, 1], 'SAME', name="z1") return z1 def test_nn_separable_conv2d(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 5, 5, 1], name='x') self.nn_separable_conv2d_network(x) self._test_layer(output_name, "z1") class TestLogicals(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestLogicals, self).__init__(*args, **kwargs) self.name = "testLogicals/" # Logicals_logical_and Test def logical_and_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") log1 = tf.greater(x, y, name='log1') log2 = tf.less(x, y, name='log2') log_op = tf.logical_and(log1, log2, name="log_op") z1 = tf.where(log_op, x, y, name='z1') return z1 def test_logical_and(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.logical_and_network(x) self._test_layer(output_name, "z1") # Logicals_logical_or Test def logical_or_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") log1 = tf.greater(x, y, name='log1') log2 = tf.less(x, y, name='log2') log_op = tf.logical_or(log1, log2, name="log_op") z1 = tf.where(log_op, x, y, name='z1') return z1 def test_logical_or(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.logical_or_network(x) self._test_layer(output_name, "z1") # Logicals_logical_not Test def logical_not_network(self, x): y = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="y") log1 = tf.greater(x, y, name='log1') log_op = tf.logical_not(log1, name="log_op") z1 = tf.where(log_op, x, y, name='z1') return z1 def test_logical_not(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = 
tf.placeholder(tf.float32, shape=[4, 1], name='x') self.logical_not_network(x) self._test_layer(output_name, "z1") class TestMathFunc(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestMathFunc, self).__init__(*args, **kwargs) self.name = "testMathFunc/" # TestMathFunc_pow Test def pow_network(self, x): y = tf.constant([[-1], [-2], [-3], [-4]], dtype=tf.float32, name="y") z1 = tf.pow(x, y, name="z1") return z1 def test_pow(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.pow_network(x) self._test_layer(output_name, "z1") # TestMathFunc_abs Test def abs_network(self, x): z1 = tf.abs(x, name="z1") return z1 def test_abs(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.abs_network(x) self._test_layer(output_name, "z1") # TestMathFunc_sign Test def sign_network(self, x): z1 = tf.sign(x, name="z1") return z1 def test_sign(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.sign_network(x) self._test_layer(output_name, "z1") # TestMathFunc_exp Test def exp_network(self, x): z1 = tf.exp(x, name="z1") return z1 def test_exp(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.exp_network(x) self._test_layer(output_name, "z1") # TestMathFunc_log Test def log_network(self, x): z1 = tf.log(x, name="z1") return z1 def test_log(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.log_network(x) self._test_layer(output_name, "z1") # TestMathFunc_sqrt Test def sqrt_network(self, x): z1 = 
tf.sqrt(x, name="z1") return z1 def test_sqrt(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.sqrt_network(x) self._test_layer(output_name, "z1") # TestMathFunc_square Test def square_network(self, x): z1 = tf.square(x, name="z1") return z1 def test_square(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.square_network(x) self._test_layer(output_name, "z1") # TestMathFunc_floor Test def floor_network(self, x): z1 = tf.floor(x, name="z1") return z1 def test_floor(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.floor_network(x) self._test_layer(output_name, "z1") # TestMathFunc_ceil Test def ceil_network(self, x): z1 = tf.ceil(x, name="z1") return z1 def test_ceil(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.ceil_network(x) self._test_layer(output_name, "z1") # TestMathFunc_round Test def round_network(self, x): z1 = tf.round(x, name="z1") return z1 def test_round(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.round_network(x) self._test_layer(output_name, "z1") # TestMathFunc_minimum Test def minimum_network(self, x): y = tf.constant([[-1], [-2], [-3], [-4]], dtype=tf.float32, name="y") z1 = tf.minimum(x, y, name="z1") return z1 def test_minimum(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.minimum_network(x) self._test_layer(output_name, "z1") # 
TestMathFunc_maximum Test def maximum_network(self, x): y = tf.constant([[-1], [-2], [-3], [-4]], dtype=tf.float32, name="y") z1 = tf.maximum(x, y, name="z1") return z1 def test_maximum(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.maximum_network(x) self._test_layer(output_name, "z1") # TestMathFunc_reduce_sum Test def reduce_sum_network(self, x): z1 = tf.reduce_sum(x, name="z1") z2 = tf.reduce_sum(x, axis=[1, 1, 3], name="z2") z3 = tf.reduce_sum(x, axis=[0, 1], keepdims=True, name="z3") z4 = tf.reduce_sum(x, axis=[3], name="z4") return z1, z2, z3, z4 def test_reduce_sum(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 2], name='x') self.reduce_sum_network(x) self._test_layer(output_name, "z1,z2,z3,z4") # TestMathFunc_reduce_any Test def reduce_any_network(self, x): z1 = tf.reduce_any(x, name="z1") z2 = tf.reduce_any(x, axis=[1, 1, 3], name="z2") z3 = tf.reduce_any(x, axis=[0, 1], keepdims=True, name="z3") z4 = tf.reduce_any(x, axis=[3], name="z4") return z1, z2, z3, z4 def test_reduce_any(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.bool, shape=[None, 4, 4, 2], name='x') self.reduce_any_network(x) self._test_layer(output_name, "z1,z2,z3,z4") # TestMathFunc_reduce_all Test def reduce_all_network(self, x): z1 = tf.reduce_all(x, name="z1") z2 = tf.reduce_all(x, axis=[1, 1, 3], name="z2") z3 = tf.reduce_all(x, axis=[0, 1], keepdims=True, name="z3") z4 = tf.reduce_all(x, axis=[3], name="z4") return z1, z2, z3, z4 def test_reduce_all(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.bool, shape=[None, 4, 4, 2], name='x') self.reduce_all_network(x) self._test_layer(output_name, "z1,z2,z3,z4") # 
TestMathFunc_reduce_mean Test def reduce_mean_network(self, x): z1 = tf.reduce_mean(x, name="z1") z2 = tf.reduce_mean(x, axis=[1, 1, 3], name="z2") z3 = tf.reduce_mean(x, axis=[0, 1], keepdims=True, name="z3") z4 = tf.reduce_mean(x, axis=[3], name="z4") return z1, z2, z3, z4 def test_reduce_mean(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.reduce_mean_network(x) self._test_layer(output_name, "z1,z2,z3,z4") # TestMathFunc_reduce_max Test def reduce_max_network(self, x): z1 = tf.reduce_max(x, name="z1") z2 = tf.reduce_max(x, axis=[1, 1, 3], name="z2") z3 = tf.reduce_max(x, axis=[0, 1], keepdims=True, name="z3") z4 = tf.reduce_max(x, axis=[3], name="z4") return z1, z2, z3, z4 def test_reduce_max(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.reduce_max_network(x) self._test_layer(output_name, "z1,z2,z3,z4") class TestNormalization(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestNormalization, self).__init__(*args, **kwargs) self.name = "testNormalization/" # Normalization_nn_lrn Test def nn_lrn_network(self, x): z1 = tf.nn.lrn(x, name="z1") z2 = tf.nn.lrn(x, depth_radius=3, bias=2, alpha=2, beta=0.3, name="z2") return z1, z2 def test_nn_lrn(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_lrn_network(x) self._test_layer(output_name, "z1,z2") # Normalization_nn_local_response_normalization Test def nn_local_response_normalization_network(self, x): z1 = tf.nn.local_response_normalization(x, name="z1") z2 = tf.nn.local_response_normalization(x, depth_radius=3, bias=2, alpha=2, beta=0.3, name="z2") return z1, z2 def test_nn_local_response_normalization(self): tf.reset_default_graph() output_name = 
self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_local_response_normalization_network(x) self._test_layer(output_name, "z1,z2") # Normalization_nn_batch_normalization Test def nn_batch_normalization_network(self, x): y = tf.constant(np.ones([4]) * 10, dtype=tf.float32, name="y") z1 = tf.nn.batch_normalization(x, y, y, y, y, variance_epsilon=0.001, name="z1") return z1 def test_nn_batch_normalization(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_batch_normalization_network(x) self._test_layer(output_name, "z1/add_1") # Normalization_nn_fused_batch_norm Test def nn_fused_batch_norm_network(self, x): y = tf.constant(np.ones([4]) * 10, dtype=tf.float32, name="y") z1 = tf.nn.fused_batch_norm(x, y, y, y, y, is_training=False, name="z1") return z1 def test_nn_fused_batch_norm(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_fused_batch_norm_network(x) self._test_layer(output_name, "z1") # Normalization_nn_fused_batch_norm Test def nn_fused_batch_norm_network2(self, x): a = tf.constant(np.ones([4]) * 10, dtype=tf.float32, name="a") b = tf.constant(np.ones([4]) * 10, dtype=tf.float32, name="b") c = tf.constant(np.ones([4]) * 10, dtype=tf.float32, name="c") d = tf.constant(np.ones([4]) * 10, dtype=tf.float32, name="d") z1 = tf.nn.fused_batch_norm(x, a, b, c, d, is_training=False, name="z1") return z1 def test_nn_fused_batch_norm2(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_fused_batch_norm_network2(x) self._test_layer(output_name, "z1") # Normalization_nn_l2_normalize Test def nn_l2_normalize_network(self, x): z1 = 
tf.nn.l2_normalize(x, name="z1") return z1 def test_nn_l2_normalize(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_l2_normalize_network(x) self._test_layer(output_name, "z1") class TestOther(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestOther, self).__init__(*args, **kwargs) self.name = "testOther/" # Other_image_resize_images Test def image_resize_images_network(self, x): z1 = tf.image.resize_images(x, [2, 2], method=tf.image.ResizeMethod.AREA) z2 = tf.image.resize_images(x, [8, 8]) z3 = tf.image.resize_images(x, [2, 2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) z4 = tf.image.resize_images(x, [8, 8], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) z1 = tf.nn.relu(z1, name='z1') z2 = tf.nn.relu(z2, name='z2') z3 = tf.nn.relu(z3, name='z3') z4 = tf.nn.relu(z4, name='z4') return z1, z2, z3, z4 def test_image_resize_images(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 1], name='x') self.image_resize_images_network(x) self._test_layer(output_name, "z1,z2,z3,z4") def squeeze_network(self, x): z1 = tf.squeeze(x, name="z1") return z1 def test_squeeze(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[3, 1, 5, 1, 2, 1], name='x') self.squeeze_network(x) self._test_layer(output_name, "z1") class TestPooling(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestPooling, self).__init__(*args, **kwargs) self.name = "testPooling/" # Pooling_nn_max_pool Test def nn_max_pool_network(self, x): z1 = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 3, 3, 1], 'VALID', name="z1") z2 = tf.nn.max_pool(x, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', name="z2") return z1, z2 def test_nn_max_pool(self): tf.reset_default_graph() output_name = self.name + 
sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_max_pool_network(x) self._test_layer(output_name, "z1,z2") # Pooling_nn_max_pool_with_argmax Test def nn_max_pool_with_argmax_network(self, x): z1 = tf.nn.max_pool_with_argmax(x, [1, 2, 2, 1], [1, 3, 3, 1], 'SAME', name="z1") z2 = tf.nn.max_pool_with_argmax(x, [1, 1, 1, 1], [1, 1, 1, 1], 'VALID', name="z2") return z1, z2 def test_nn_max_pool_with_argmax(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_max_pool_with_argmax_network(x) self._test_layer(output_name, "z1,z2") # Pooling_nn_avg_pool Test def nn_avg_pool_network(self, x): z1 = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 3, 3, 1], 'SAME', name="z1") z2 = tf.nn.avg_pool(x, [1, 1, 1, 1], [1, 1, 1, 1], 'VALID', name="z2") return z1, z2 def test_nn_avg_pool(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_avg_pool_network(x) self._test_layer(output_name, "z1,z2") class TestSofts(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestSofts, self).__init__(*args, **kwargs) self.name = "testSofts/" # Softs_nn_softsign Test def nn_softsign_network(self, x): z1 = tf.nn.softsign(x, name="z1") return z1 @unittest.skip("softsign doesn't exist within NNEF currently") def test_nn_softsign(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[None, 4, 4, 4], name='x') self.nn_softsign_network(x) self._test_layer(output_name, "z1") # Softs_nn_softplus Test def nn_softplus_network(self, x): z1 = tf.nn.softplus(x, name="z1") return z1 def test_nn_softplus(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, 
shape=[None, 4, 4, 4], name='x') self.nn_softplus_network(x) self._test_layer(output_name, "z1") # Softs_nn_softmax Test def nn_softmax_network(self, x): z1 = tf.nn.softmax(x, name="z1") return z1 def test_nn_softmax(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4], name='x') self.nn_softmax_network(x) self._test_layer(output_name, "z1") class TestVarOps(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestVarOps, self).__init__(*args, **kwargs) self.name = "testVarOps/" # VarOps_concat Test def concat_network(self, x): y = tf.constant([[-1], [-2], [-3], [-4]], dtype=tf.float32, name="y") z1 = tf.concat([x, y], 0, name="z1") return z1 def test_concat(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.concat_network(x) self._test_layer(output_name, "z1") # VarOps_split Test def split_network(self, x): a, b = tf.split(x, 2, name="splitting") z1 = tf.add(a, b, 'z1') return z1 def test_split(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.split_network(x) self._test_layer(output_name, "z1") # VarOps_reshape Test def reshape_network(self, x): z1 = tf.reshape(x, [8, 4], name="z1") return z1 def test_reshape(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 2], name='x') self.reshape_network(x) self._test_layer(output_name, "z1") # VarOps_tile Test def tile_network(self, x): z1 = tf.tile(x, multiples=[1, 2, 3, 4], name="z1") return z1 def test_tile(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 2], name='x') self.tile_network(x) self._test_layer(output_name, 
"z1") # VarOps_pad Test def pad_network(self, x): z1 = tf.pad(x, paddings=[[0, 0], [3, 2], [0, 2], [1, 1]], mode="REFLECT", name="z1") return z1 def test_pad(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 2], name='x') self.pad_network(x) self._test_layer(output_name, "z1") def pad_network2(self, x): z1 = tf.pad(x, paddings=[[0, 0], [3, 2], [0, 2], [1, 1]], mode="SYMMETRIC", name="z1") return z1 def test_pad2(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 2], name='x') self.pad_network2(x) self._test_layer(output_name, "z1") def pad_network3(self, x): z1 = tf.pad(x, paddings=[[0, 0], [3, 2], [0, 2], [1, 1]], mode="CONSTANT", name="z1") return z1 def test_pad3(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 2], name='x') self.pad_network3(x) self._test_layer(output_name, "z1") # VarOps_transpose Test def transpose_network(self, x): z1 = tf.transpose(x, [1, 3, 2, 0], name="z1") return z1 def test_transpose(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[1, 4, 4, 2], name='x') self.transpose_network(x) self._test_layer(output_name, "z1") # Variable_where Test def where_network(self, x): y = tf.constant([[1], [1], [1], [1]], dtype=tf.float32, name='y') cond = tf.greater_equal(x, y, name='cond') z1 = tf.where(cond, x, y, name="z1") return z1 def test_where(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4, 1], name='x') self.where_network(x) self._test_layer(output_name, "z1") class TestVariables(TFPbTestRunner): def __init__(self, *args, **kwargs): super(TestVariables, self).__init__(*args, **kwargs) 
self.name = "testVariables/" # Variable_where Test def assign_network(self, x): t1 = tf.Variable(initial_value=[0.0, 0.0, 0.0, 0.0], name='t1') c1 = tf.constant([10.0, 20.0, 30.0, 40.0], dtype=tf.float32, name="c1") t1.assign(c1) z1 = tf.add(x, t1, name='z1') return z1 @unittest.skip("Frozen graph removes all assign nodes.") def test_assign(self): tf.reset_default_graph() output_name = self.name + sys._getframe().f_code.co_name[5:] + '/' x = tf.placeholder(tf.float32, shape=[4], name='x') self.assign_network(x) self._test_layer(output_name, "z1") if __name__ == '__main__': unittest.main()
35.060397
119
0.598622
6,064
40,635
3.789248
0.051451
0.063539
0.034946
0.057185
0.810601
0.779572
0.729306
0.675124
0.634868
0.56763
0
0.045159
0.240335
40,635
1,158
120
35.090674
0.699213
0.06017
0
0.493201
0
0
0.032044
0
0
0
0
0
0
1
0.195303
false
0
0.008653
0
0.309023
0.001236
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b9314f1ba4cbf04c24e8df5bb4514d79a269969e
129
py
Python
config.py
alsigna/netbox_vlan_exporter
de6c23fb4e320448994384672a2454e474fc5e91
[ "MIT" ]
null
null
null
config.py
alsigna/netbox_vlan_exporter
de6c23fb4e320448994384672a2454e474fc5e91
[ "MIT" ]
null
null
null
config.py
alsigna/netbox_vlan_exporter
de6c23fb4e320448994384672a2454e474fc5e91
[ "MIT" ]
null
null
null
NETBOX_URL = "http://127.0.0.1:8080" NETBOX_AUTH_TOKEN = "0123456789abcdef0123456789abcdef01234567" REPORT_NAME = "netbox_vlans"
32.25
62
0.806202
16
129
6.1875
0.8125
0
0
0
0
0
0
0
0
0
0
0.316667
0.069767
129
3
63
43
0.508333
0
0
0
0
0
0.565891
0.310078
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b94019486803b71e8e09a0a0e390d0dab8205fcf
704
py
Python
tests/test_formatText.py
TommasU/slash
47541104f8302908bfd33a9ba653707195b57903
[ "MIT" ]
null
null
null
tests/test_formatText.py
TommasU/slash
47541104f8302908bfd33a9ba653707195b57903
[ "MIT" ]
14
2021-11-09T19:41:16.000Z
2021-12-02T00:06:26.000Z
tests/test_formatText.py
TommasU/slash
47541104f8302908bfd33a9ba653707195b57903
[ "MIT" ]
null
null
null
""" Copyright (C) 2021 SE Slash - All Rights Reserved You may use, distribute and modify this code under the terms of the MIT license. You should have received a copy of the MIT license with this file. If not, please write to: secheaper@gmail.com """ from src.modules.formatter import formatSearchQuery, formatTitle def test_formatSearchQuery(): """ Checks the formatSearchQuery function """ assert formatSearchQuery("1 2") == "1+2" assert formatSearchQuery("A B") == "A+B" assert formatSearchQuery("ABC") == "ABC" def test_formatTitle(): """ Checks the formatTitle function """ assert formatTitle("0"*50) == "0"*40+"..." assert formatTitle("0"*5) == "0"*5
28.16
64
0.681818
93
704
5.139785
0.612903
0.144351
0.033473
0.062762
0
0
0
0
0
0
0
0.031634
0.191761
704
25
65
28.16
0.808436
0.443182
0
0
0
0
0.070621
0
0
0
0
0
0.625
1
0.25
true
0
0.125
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
0
0
0
3
b944c3d81efd9a04b31ff81764dd07657ef7b2ec
142
py
Python
myapp/models.py
thinkAmi-sandbox/Django_messages_framework_sample
b40ea10c591f89e1c7710c9b3655a48318d17117
[ "Unlicense" ]
null
null
null
myapp/models.py
thinkAmi-sandbox/Django_messages_framework_sample
b40ea10c591f89e1c7710c9b3655a48318d17117
[ "Unlicense" ]
null
null
null
myapp/models.py
thinkAmi-sandbox/Django_messages_framework_sample
b40ea10c591f89e1c7710c9b3655a48318d17117
[ "Unlicense" ]
null
null
null
from django.db import models class Item(models.Model): # 特に指定しない場合、blank=False(=入力必須) name = models.CharField('Name', max_length=255)
28.4
51
0.725352
20
142
5.1
0.85
0
0
0
0
0
0
0
0
0
0
0.02459
0.140845
142
5
51
28.4
0.811475
0.197183
0
0
0
0
0.035398
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
b946f3f71f0998bc7713f43026cd9e4e93db1a80
717
py
Python
tests/plugins/test_oneplusone.py
xcgx/streamlink
b635e0d9d0fe9363817a96ec7d31faefed95cb57
[ "BSD-2-Clause" ]
10
2017-04-10T18:25:41.000Z
2021-09-15T20:14:58.000Z
tests/plugins/test_oneplusone.py
xcgx/streamlink
b635e0d9d0fe9363817a96ec7d31faefed95cb57
[ "BSD-2-Clause" ]
9
2020-04-04T09:49:52.000Z
2020-04-21T01:52:02.000Z
tests/plugins/test_oneplusone.py
xcgx/streamlink
b635e0d9d0fe9363817a96ec7d31faefed95cb57
[ "BSD-2-Clause" ]
12
2022-01-30T23:34:18.000Z
2022-03-26T17:09:43.000Z
from streamlink.plugins.oneplusone import OnePlusOne from tests.plugins import PluginCanHandleUrl class TestPluginCanHandleUrlOnePlusOne(PluginCanHandleUrl): __plugin__ = OnePlusOne should_match = [ "https://1plus1.video/ru/tvguide/plusplus/online", "https://1plus1.video/tvguide/1plus1/online", "https://1plus1.video/tvguide/2plus2/online", "https://1plus1.video/tvguide/bigudi/online", "https://1plus1.video/tvguide/plusplus/online", "https://1plus1.video/tvguide/sport/online", "https://1plus1.video/tvguide/tet/online", "https://1plus1.video/tvguide/uniantv/online", ] should_not_match = [ "https://1plus1.video/", ]
32.590909
59
0.682008
73
717
6.60274
0.342466
0.205394
0.298755
0.319502
0.46888
0.182573
0.182573
0
0
0
0
0.037225
0.175732
717
21
60
34.142857
0.778342
0
0
0
0
0
0.503487
0
0
0
0
0
0
1
0
false
0
0.117647
0
0.352941
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b96b83ff649e865ebe246404d2aa0fa1418d3444
1,247
py
Python
mybitbank/libs/connections/walletconfig-template.py
zonedoutspace/mybitbank
85d28726117a3c1ca76be5772d30c9edae1df7f4
[ "MIT" ]
15
2015-08-29T12:35:59.000Z
2018-02-06T06:26:26.000Z
mybitbank/libs/connections/walletconfig-template.py
FireWalkerX/mybitbank
945e604e5fee3914c7c98a25c2c34831ba0ad946
[ "MIT" ]
null
null
null
mybitbank/libs/connections/walletconfig-template.py
FireWalkerX/mybitbank
945e604e5fee3914c7c98a25c2c34831ba0ad946
[ "MIT" ]
19
2015-02-03T21:32:51.000Z
2021-11-06T12:08:26.000Z
# -*- coding: utf-8 -*- config = [ { 'id': 1, 'rpcusername': "testuser", 'rpcpassword': "testnet", 'rpchost': "localhost", 'rpcport': "7000", 'name': 'Bitcoin (BTC)', 'symbol': "฿", 'currency': 'BTC', }, { 'id': 2, 'rpcusername': "testuser", 'rpcpassword': "testnet", 'rpchost': "localhost", 'rpcport': "7001", 'name': 'Litecoin (LTC)', 'symbol': "Ł", 'currency': 'LTC', }, { 'id': 3, 'rpcusername': "testuser", 'rpcpassword': "testnet", 'rpchost': "localhost", 'rpcport': "7002", 'name': 'Namecoin (NMC)', 'symbol': "ℕ", 'currency': 'NMC', }, { 'id': 4, 'rpcusername': "testuser", 'rpcpassword': "testnet", 'rpchost': "localhost", 'rpcport': "7003", 'name': 'PPcoin (PPC)', 'symbol': "Ᵽ", 'currency': 'PPC', }, { 'id': 5, 'rpcusername': "testuser", 'rpcpassword': "testnet", 'rpchost': "localhost", 'rpcport': "7003", 'name': 'Feathercoin (FTC)', 'symbol': "ƒ", 'currency': 'FTC', }, ]
23.092593
36
0.412991
89
1,247
5.797753
0.438202
0.184109
0.290698
0.358527
0.612403
0.612403
0.612403
0.263566
0.263566
0
0
0.033206
0.372093
1,247
53
37
23.528302
0.624521
0.01684
0
0.326923
0
0
0.416667
0
0
0
0
0
0
1
0
false
0.096154
0
0
0
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
b96f272894327295332b4ac3ce8b52eb7b3eebb1
1,395
py
Python
lf3py/api/render.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
null
null
null
lf3py/api/render.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
48
2020-12-19T13:47:26.000Z
2021-01-07T22:27:56.000Z
lf3py/api/render.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
null
null
null
from lf3py.api.errors import ApiError from lf3py.api.response import ErrorBody, Response from lf3py.api.symbols import IApiRender from lf3py.lang.error import stacktrace from lf3py.routing.errors import RouteMismatchError from lf3py.task.data import Result class ApiRender(IApiRender): def __init__(self, response: Response) -> None: self._response = response def ok(self, status: int = 200, body: Result = Result()) -> Response: return self.http_result(status, {}, body) def fail(self, error: Exception) -> Response: if isinstance(error, ApiError): return self.error_result(error.status, str(error), error) elif isinstance(error, RouteMismatchError): return self.error_result(404, '404 Data Not Found', error) else: return self.error_result(500, '500 Internal Server Error', error) def http_result(self, status: int, headers: dict, body: Result) -> Response: return Response(statusCode=status, headers={**self._response.headers, **headers}, body=body) def error_result(self, status: int, message: str, error: Exception) -> Response: return self.http_result(status, {}, self.build_error_body(status, message, error)) def build_error_body(self, status: int, message: str, error: Exception) -> Result: return ErrorBody(message=message, stacktrace=stacktrace(error))
43.59375
100
0.70681
173
1,395
5.601156
0.283237
0.055728
0.053664
0.065015
0.146543
0.146543
0.076367
0
0
0
0
0.01847
0.184946
1,395
31
101
45
0.833773
0
0
0
0
0
0.030824
0
0
0
0
0
0
1
0.25
false
0
0.25
0.166667
0.833333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
b97506007bec61bbd117a97b54941cc47d5f0524
188
py
Python
TropicalHazards_BI/utils.py
fga-eps-mds/2018.1-TropicalHazards-BI
1ceb034813f0062b9c3b4a1e2ec0963f6fe6fddc
[ "MIT" ]
8
2018-03-07T01:07:45.000Z
2018-03-14T00:56:51.000Z
TropicalHazards_BI/utils.py
fga-gpp-mds/2018.1-TropicalHazards-BI
1ceb034813f0062b9c3b4a1e2ec0963f6fe6fddc
[ "MIT" ]
245
2018-03-15T16:33:42.000Z
2018-06-28T23:41:35.000Z
TropicalHazards_BI/utils.py
fga-eps-mds/2018.1-TropicalHazards-BI
1ceb034813f0062b9c3b4a1e2ec0963f6fe6fddc
[ "MIT" ]
9
2018-08-27T18:35:30.000Z
2021-09-12T04:52:56.000Z
import pymongo def connect_mongo(engine=pymongo, host='mongo', port=27017): mongo_client = engine.MongoClient(host, port) mongo_db = mongo_client['main_db'] return mongo_db
20.888889
60
0.734043
26
188
5.076923
0.538462
0.166667
0
0
0
0
0
0
0
0
0
0.031646
0.159574
188
8
61
23.5
0.803797
0
0
0
0
0
0.06383
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
b98b0b13f67fcb8db10dcc4fdbdd003a78c9209a
660
py
Python
user_interface/show_money_textbox.py
talacounts/game_of_life
1297f181ddff65f8d31e63ad582ada41f63853df
[ "MIT" ]
null
null
null
user_interface/show_money_textbox.py
talacounts/game_of_life
1297f181ddff65f8d31e63ad582ada41f63853df
[ "MIT" ]
null
null
null
user_interface/show_money_textbox.py
talacounts/game_of_life
1297f181ddff65f8d31e63ad582ada41f63853df
[ "MIT" ]
null
null
null
from user_interface.textbox import Textbox from engine.player import Player from typing import Tuple, Dict, Any class Money_Textbox(Textbox): def __init__(self, x: int, y: int, width: int, height: int, player: Player): super(Money_Textbox, self).__init__(x, y, width, height, player.name) self.player = player self.title = self.player.name def _render(self): self.text = str(self.player.money) return super(Money_Textbox, self)._render() def draw(self, window): self.render() super(Textbox, self).draw(window) def on_focus(self): pass def on_unfocus(self): pass
26.4
80
0.65303
89
660
4.662921
0.382022
0.086747
0.081928
0.101205
0
0
0
0
0
0
0
0
0.239394
660
24
81
27.5
0.826693
0
0
0.111111
0
0
0
0
0
0
0
0
0
1
0.277778
false
0.111111
0.166667
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
b98e416fc874da5af37d442e92c74950e0d614b1
282
py
Python
tests/core/forms.py
timgates42/django-selenium
aec304c51dc4e3a5bc412dde892a54241c546ceb
[ "Apache-2.0" ]
26
2015-01-20T06:09:05.000Z
2022-01-05T10:44:50.000Z
tests/core/forms.py
timgates42/django-selenium
aec304c51dc4e3a5bc412dde892a54241c546ceb
[ "Apache-2.0" ]
4
2015-06-02T14:32:19.000Z
2020-03-24T08:27:36.000Z
tests/core/forms.py
timgates42/django-selenium
aec304c51dc4e3a5bc412dde892a54241c546ceb
[ "Apache-2.0" ]
9
2015-01-31T20:59:29.000Z
2021-01-05T21:58:27.000Z
from django import forms class SampleSearchForm(forms.Form): """Search form for test purposes""" query = forms.CharField(widget=forms.TextInput(attrs={'class': 'input-xlarge search-query', 'autocomplete': 'off'}))
35.25
95
0.574468
27
282
6
0.740741
0
0
0
0
0
0
0
0
0
0
0
0.304965
282
7
96
40.285714
0.826531
0.102837
0
0
0
0
0.182186
0
0
0
0
0
0
1
0
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
b9955b1fbc2831eb5492c78d3d4aed4fa8960f47
29,242
py
Python
tests/examples/test_ga_onemax.py
jorgetavares/pygenome
2b529ea55feff8c4a0214b37354d4d7c273202a3
[ "MIT" ]
1
2019-11-18T14:41:20.000Z
2019-11-18T14:41:20.000Z
tests/examples/test_ga_onemax.py
jorgetavares/pygenome
2b529ea55feff8c4a0214b37354d4d7c273202a3
[ "MIT" ]
null
null
null
tests/examples/test_ga_onemax.py
jorgetavares/pygenome
2b529ea55feff8c4a0214b37354d4d7c273202a3
[ "MIT" ]
null
null
null
from examples.ga_onemax import * stdout1 = """0 0.03125 0.040781302009688776 0.005561455106895243 1 0.029411764705882353 0.038091861610110966 0.00450622600604616 2 0.027777777777777776 0.03547228263920674 0.00378636872370032 3 0.027777777777777776 0.03383622027764222 0.003792657676482543 4 0.027777777777777776 0.03153144879877114 0.0024635362886572016 5 0.023809523809523808 0.030139024953537853 0.00243139455636538 6 0.023809523809523808 0.02894388940095638 0.0026576983008068868 7 0.023809523809523808 0.02740366108153792 0.001826310539891214 8 0.023255813953488372 0.026444187681938076 0.0015349653277309185 9 0.022727272727272728 0.026012870101612462 0.001608538168134231 10 0.022222222222222223 0.025314390354864127 0.0013064223948593403 11 0.022222222222222223 0.02475279874881244 0.0014170379402423956 12 0.02127659574468085 0.024026041106628093 0.0013427418981510168 13 0.02127659574468085 0.0233757082989196 0.0012006885907910165 14 0.020833333333333332 0.02285467855630095 0.0010185863389449473 15 0.02 0.022430398717967374 0.0008704333997032909 16 0.02 0.021960350829972216 0.0008949697776471712 17 0.02 0.021653716984652648 0.0007101590492949621 18 0.02 0.021357860050448662 0.000618545520306597 19 0.02 0.02111156184156859 0.0006393216238278883 fitness: 0.02 genotype: [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] """ stdout2 = """0 0.03125 0.040781302009688776 0.005561455106895243 1 0.029411764705882353 0.03805953402390407 0.004543443870432798 2 0.027777777777777776 0.03540714856298931 0.004003729039375085 3 0.027777777777777776 0.03338954942095523 0.0032194506876552006 4 0.025 0.031759630111261566 0.0024138303577230054 5 0.024390243902439025 0.030465830885796095 0.0024506760085341995 6 0.024390243902439025 0.029748169948644855 0.002240931141267259 7 0.023255813953488372 0.02878293987059667 0.0021429324656018814 8 0.023255813953488372 0.027502249635484855 0.0019123397613806427 9 0.023255813953488372 
0.02645550691596207 0.0018288473202157202 10 0.022222222222222223 0.025367100470720813 0.0015199281190212102 11 0.022222222222222223 0.02461907262781222 0.0013499923545325775 12 0.021739130434782608 0.024065322825332153 0.0011729776361822577 13 0.02127659574468085 0.023461332182942187 0.0010670316985843752 14 0.02127659574468085 0.02289507628617888 0.0009396105298584204 15 0.020833333333333332 0.022522016567904247 0.0008268338171416158 16 0.02040816326530612 0.022136339976635826 0.0007804385336199252 17 0.02 0.021817609951539876 0.0008327779489794365 18 0.02 0.02147309566758398 0.0007178798030896314 19 0.02 0.021207604712420763 0.0006741417655733425 fitness: 0.02 genotype: [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] """ stdout3 = """0 0.03125 0.040781302009688776 0.005561455106895243 1 0.03125 0.04076591739430416 0.0055657214195924745 2 0.03125 0.040622042750429514 0.005507164564763866 3 0.03125 0.04062719572949554 0.005525504386535372 4 0.03125 0.040572560348773205 0.0054984646902999194 5 0.03125 0.04034090492142615 0.005390384072367683 6 0.03125 0.0402046536026411 0.005419778941160164 7 0.03125 0.04013739327676008 0.0054623937675048004 8 0.03125 0.04007443031379712 0.005338017400020037 9 0.03125 0.04008458848257598 0.005319762685780308 10 0.03125 0.04010683542362159 0.005291563125455103 11 0.03125 0.0400226603394465 0.005272742309413064 12 0.03125 0.04005228996907613 0.0052641997698029625 13 0.03125 0.03998787773075085 0.005261292143217217 14 0.03125 0.03984664065698004 0.005316203083084664 15 0.03125 0.03988885010781018 0.005299466931565621 16 0.03125 0.03976902252160328 0.005410194445432538 17 0.03125 0.03968429308554145 0.005435280260514713 18 0.03125 0.03951577456702293 0.005206081471009931 19 0.03125 0.03935236039498807 0.005161212358951413 20 0.03125 0.03943844448969286 0.005138235191023557 21 0.03125 0.03921225401350238 0.005069181577556729 22 0.03125 0.039104984632319956 0.0050662190117403486 23 
0.03125 0.03912662965396498 0.005097826009881623 24 0.03125 0.039124976214811535 0.00504608772719245 25 0.03125 0.03907867991851524 0.005043793314564134 26 0.03125 0.03904662863646396 0.005037425492953105 27 0.03125 0.03907867991851524 0.005043793314564134 28 0.03125 0.03896815036186499 0.004962528399866003 29 0.03125 0.03900793816027348 0.004942315673450242 30 0.03125 0.03895114567739404 0.00488617726647651 31 0.03125 0.03895768754534895 0.00495978158921641 32 0.03125 0.0388843550359764 0.005016909124594712 33 0.03125 0.03876251595551663 0.0050627579498394104 34 0.029411764705882353 0.03871180601636856 0.00513031663950765 35 0.029411764705882353 0.03877376323220508 0.005091848287170193 36 0.029411764705882353 0.03876419872264057 0.005108817760699437 37 0.029411764705882353 0.0385806766181466 0.0051765377035178336 38 0.029411764705882353 0.03857604469093403 0.00516256817573713 39 0.029411764705882353 0.03840009933028102 0.004911792561082685 40 0.029411764705882353 0.038435941798657225 0.004981197894576194 41 0.029411764705882353 0.03853501357514836 0.004917322984771472 42 0.029411764705882353 0.038551071591206375 0.004878023381996317 43 0.029411764705882353 0.03850821444834923 0.004883928233132197 44 0.029411764705882353 0.03848615317628796 0.0049545125688823305 45 0.029411764705882353 0.0383247775149123 0.004630315762456711 46 0.029411764705882353 0.03835051678203087 0.004617491935241724 47 0.029411764705882353 0.03835225004238483 0.004622889754411538 48 0.029411764705882353 0.03829598890156486 0.00461175402246592 49 0.029411764705882353 0.038335183040759 0.004595201177906614 50 0.029411764705882353 0.03834498738176467 0.004631145416784666 51 0.029411764705882353 0.03824933877749539 0.0046539394757127055 52 0.029411764705882353 0.03820648163463824 0.004657352542172028 53 0.029411764705882353 0.03811561436985793 0.004638459235855089 54 0.029411764705882353 0.038042281860485375 0.004686336005487959 55 0.029411764705882353 0.03805401594480011 0.004728978747106032 56 
0.029411764705882353 0.037953681496639566 0.004665623840500757 57 0.029411764705882353 0.03790496354792162 0.004563527561674136 58 0.029411764705882353 0.037946760831098214 0.0045125666497372 59 0.029411764705882353 0.03786342749776488 0.0045200413120227495 60 0.029411764705882353 0.03787299200732939 0.004502766305273224 61 0.029411764705882353 0.03772479343171146 0.004577197733181666 62 0.029411764705882353 0.03756110295552098 0.004590515018741406 63 0.029411764705882353 0.0375312087756268 0.004581945112910876 64 0.02857142857142857 0.037414501408919436 0.004649577397769239 65 0.02857142857142857 0.03745978020419824 0.004664641840042881 66 0.02857142857142857 0.03744054943496747 0.004699542302834623 67 0.029411764705882353 0.037476425323784535 0.00468220417022404 68 0.029411764705882353 0.03743274716286499 0.00467600604283492 69 0.029411764705882353 0.037407749460447934 0.004685628116342981 70 0.029411764705882353 0.037317251722891374 0.0047513339193878245 71 0.029411764705882353 0.0371447649504046 0.004644532507889975 72 0.029411764705882353 0.03723060921366822 0.004606305036740161 73 0.029411764705882353 0.03719333234519001 0.004568490742026526 74 0.029411764705882353 0.037072499011856684 0.004570371800346553 75 0.029411764705882353 0.03725888669824436 0.00463564051398577 76 0.029411764705882353 0.037377996564413055 0.004572851829122976 77 0.029411764705882353 0.037349533566310586 0.004613310651506204 78 0.029411764705882353 0.037411568305764684 0.004586057330254607 79 0.029411764705882353 0.03731581914751553 0.0045754960654208286 80 0.029411764705882353 0.03728618951788589 0.004567604980281492 81 0.029411764705882353 0.03715757738927377 0.004466345151557656 82 0.029411764705882353 0.03718413767445474 0.0044624741750795074 83 0.029411764705882353 0.037248896144892726 0.004402375850256563 84 0.029411764705882353 0.037142274455737305 0.0043518420263156665 85 0.029411764705882353 0.03697717037183455 0.004176873824141705 86 0.029411764705882353 0.03691124526986941 
0.004183622694639944 87 0.029411764705882353 0.03684504480366894 0.004240558604486811 88 0.029411764705882353 0.03679626460782992 0.004306445175176725 89 0.029411764705882353 0.03680857987876588 0.004301569721001867 90 0.029411764705882353 0.03678527172037708 0.004314663123220094 91 0.029411764705882353 0.03669090939471609 0.004363215285960536 92 0.029411764705882353 0.0367041369079436 0.004362239421426741 93 0.029411764705882353 0.03664874409743641 0.004292859713942862 94 0.029411764705882353 0.036647250668523625 0.004343984692202518 95 0.029411764705882353 0.03642403638280934 0.004202931967059495 96 0.029411764705882353 0.036346617027970624 0.004207650344462092 97 0.029411764705882353 0.03613744536746657 0.004189380043502483 98 0.029411764705882353 0.036101821938141807 0.00420205885430731 99 0.029411764705882353 0.03602970655352642 0.004222773629600652 100 0.029411764705882353 0.03592388644770632 0.004060497274442531 101 0.029411764705882353 0.03595217999324124 0.004071462517889265 102 0.029411764705882353 0.03592819979184169 0.004078346489282658 103 0.029411764705882353 0.035853086669669736 0.004133926322621262 104 0.029411764705882353 0.03584505137280204 0.004094286794650818 105 0.029411764705882353 0.035880376696565505 0.004032672462065162 106 0.029411764705882353 0.03591315319426135 0.004063742414963224 107 0.029411764705882353 0.0357711077397159 0.00397503381350392 108 0.029411764705882353 0.035768356978344455 0.003962536919820571 109 0.029411764705882353 0.03567339676614286 0.003933717881361394 110 0.029411764705882353 0.035622114714860806 0.003930461569449365 111 0.029411764705882353 0.03558141467416077 0.003917447924549628 112 0.029411764705882353 0.03561374226036766 0.0038948442958828483 113 0.029411764705882353 0.03554190317990789 0.003848514458455151 114 0.029411764705882353 0.03561523568928045 0.0038381428516082675 115 0.029411764705882353 0.03556268571793181 0.0038805391979271028 116 0.029411764705882353 0.035620556088302176 0.003858883261254072 117 
0.029411764705882353 0.03558076828989369 0.0038498871382687747 118 0.029411764705882353 0.03547102134884809 0.003860076863019065 119 0.029411764705882353 0.035436682268388316 0.0038172229775300655 120 0.029411764705882353 0.03537120607791212 0.003856482710039714 121 0.029411764705882353 0.03528586318006923 0.0038617281680746883 122 0.029411764705882353 0.035275782534907936 0.0038709235452645437 123 0.029411764705882353 0.035280937374621596 0.0038435826251394536 124 0.02857142857142857 0.03525930650004955 0.0038534921506053663 125 0.027777777777777776 0.03522458427782733 0.003904764199816617 126 0.027777777777777776 0.035022273816896175 0.003832353761110754 127 0.027777777777777776 0.034827144152127044 0.003559835915024719 128 0.027777777777777776 0.03472864448776989 0.0036016624391633514 129 0.027777777777777776 0.03472156387774811 0.0035043721635600886 130 0.027777777777777776 0.03465311149679572 0.0035207979205571158 131 0.027777777777777776 0.0346293019729862 0.0035215917339726003 132 0.027777777777777776 0.03459457975076398 0.003571503846812583 133 0.027777777777777776 0.03453844264074756 0.0035871430778440278 134 0.027777777777777776 0.03458399774018197 0.0035785494716660954 135 0.027777777777777776 0.034554977701081284 0.0036064459984184467 136 0.027777777777777776 0.034235458984503746 0.00323394946738343 137 0.027777777777777776 0.03422396473163018 0.003235092545128326 138 0.027777777777777776 0.03420949428828043 0.003273869605456437 139 0.027777777777777776 0.03419403214927824 0.003254116560130402 140 0.027777777777777776 0.03415162391791005 0.0032745242857899013 141 0.027777777777777776 0.0342087126624988 0.003217240720355445 142 0.027777777777777776 0.0342087126624988 0.003217240720355445 143 0.027777777777777776 0.034191320780187565 0.003184752436255108 144 0.027777777777777776 0.034036864125730905 0.0032643911269782584 145 0.027777777777777776 0.03390271400166143 0.003218074845557746 146 0.027777777777777776 0.033818057917005345 0.0032457395603201866 
147 0.027777777777777776 0.03377779382286194 0.003268686844476118 148 0.027777777777777776 0.03374749079255891 0.003286663928290463 149 0.027777777777777776 0.033735324496674594 0.0032830776385959176 150 0.027777777777777776 0.033504161797481204 0.0031506200813113897 151 0.027777777777777776 0.03348332846414787 0.003158558769349625 152 0.027777777777777776 0.033526568748767464 0.0030999246087808903 153 0.027777777777777776 0.03346154349624222 0.00316030081196644 154 0.027777777777777776 0.03342091420225543 0.003178836757681714 155 0.027777777777777776 0.03336680164814288 0.003185387171859075 156 0.027777777777777776 0.03339245120415296 0.003201781315556209 157 0.02702702702702703 0.03342415938291997 0.003190911336279097 158 0.02702702702702703 0.033372281454582084 0.0032092216966512163 159 0.02702702702702703 0.033300852883153514 0.0032356813659525745 160 0.02702702702702703 0.03321699433973615 0.003255403901293605 161 0.027777777777777776 0.03326856314992565 0.0031931312795011164 162 0.027777777777777776 0.033162297655222016 0.0032588719730904867 163 0.027777777777777776 0.03317671334705706 0.0033321389733581017 164 0.027777777777777776 0.03309570037760596 0.0033620846391585702 165 0.027777777777777776 0.033067631672117896 0.003381712170645736 166 0.027777777777777776 0.03307771231727919 0.0033777791115661075 167 0.027777777777777776 0.03297205222169974 0.0033353899583604013 168 0.027777777777777776 0.03290062365027117 0.0033523395671306525 169 0.027777777777777776 0.03280394255359007 0.0033958866711734062 170 0.027777777777777776 0.032762145270413476 0.003400685321747771 171 0.027777777777777776 0.03276030523821292 0.0033929570661628797 172 0.027777777777777776 0.032687632812599315 0.0033452163657807653 173 0.027777777777777776 0.0325664847726526 0.0033138297659914503 174 0.02702702702702703 0.032502680144888024 0.0033544687941013283 175 0.02702702702702703 0.03239409983536958 0.0033327300870295787 176 0.02702702702702703 0.032357062798332545 
0.0033013589668393216 177 0.027777777777777776 0.032392057155907546 0.003273357815197512 178 0.027777777777777776 0.0323320824084328 0.003328726877076974 179 0.027777777777777776 0.03221470580261806 0.0032446462457340615 180 0.027777777777777776 0.03221342281141571 0.00323851735811994 181 0.027777777777777776 0.03207602735143961 0.003260356365938651 182 0.02702702702702703 0.032014407289819545 0.0032550525488685205 183 0.02702702702702703 0.03194672860544257 0.0032723496623143747 184 0.02702702702702703 0.031909352951368625 0.0032874624668255393 185 0.02702702702702703 0.031909352951368625 0.0032874624668255393 186 0.02702702702702703 0.03200194554396121 0.0033001618731900856 187 0.02702702702702703 0.031925825645260676 0.0033184375182541037 188 0.02702702702702703 0.03188055592446913 0.0033422610548507936 189 0.02702702702702703 0.03196196348749434 0.003345614146100431 190 0.02702702702702703 0.0319227099482408 0.0033463680631940817 191 0.02702702702702703 0.031987178159767836 0.0033174240280473624 192 0.02702702702702703 0.03201641485444571 0.003311614185069456 193 0.02702702702702703 0.03196085929889016 0.0033355688856440712 194 0.02702702702702703 0.03186499720463684 0.0031114635289667385 195 0.02702702702702703 0.03179047774167924 0.0030888021593992667 196 0.02702702702702703 0.03181795026915177 0.003135444443992817 197 0.02702702702702703 0.03177739591609741 0.0030785962722787432 198 0.02702702702702703 0.03175840261496465 0.0030841676144967778 199 0.02702702702702703 0.03170284705940909 0.003105262167325866 fitness: 0.02702702702702703 genotype: [1 1 1 0 1 0 1 1 1 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 0 1 1 1 0 1 1 1 1 0 0 0 1 1 1 1 1 0 1 1 0 0 1 1] """ stdout4 = """0 0.03125 0.040781302009688776 0.005561455106895243 1 0.03125 0.04073358980809726 0.005611323894099167 2 0.03125 0.04053184479385226 0.005618896584066156 3 0.03125 0.04052943212031889 0.005707430033203208 4 0.03125 0.04031885132049323 0.005557155703438686 5 0.03125 0.04017681326595518 
0.005550182390644313 6 0.03125 0.04007237029513291 0.005659834969094889 7 0.03125 0.03974061704954726 0.005675526261668921 8 0.03125 0.039619906293755856 0.005681785878437178 9 0.03125 0.03949760425033312 0.005732395162757329 10 0.03125 0.03957740110404302 0.005739659294499826 11 0.02857142857142857 0.03941847253261445 0.0058883086220128 12 0.02857142857142857 0.03946380220294412 0.005875490246256851 13 0.02857142857142857 0.03929806721329609 0.0059559672395683464 14 0.02857142857142857 0.0391270507297796 0.006101526459694825 15 0.02857142857142857 0.03888532756708457 0.006241677313710797 16 0.02857142857142857 0.038668706673044326 0.0063205121898296185 17 0.02857142857142857 0.03856764847198612 0.006393733325621937 18 0.02857142857142857 0.038311677123303065 0.006540601588919424 19 0.02857142857142857 0.038227021038646974 0.006610959070916546 20 0.02857142857142857 0.03792378604899895 0.006703191685388337 21 0.02857142857142857 0.03771059923581213 0.006825735384647775 22 0.02857142857142857 0.037652096883511114 0.006879646149613075 23 0.02857142857142857 0.03756052179193602 0.006806930000633567 24 0.02857142857142857 0.0372530984033113 0.006616244897318855 25 0.02857142857142857 0.037071472621685524 0.006700935418616673 26 0.027777777777777776 0.036894769026183265 0.00681638917178459 27 0.027777777777777776 0.03679332768336261 0.006737740324559595 28 0.027777777777777776 0.03656592636604193 0.006718795651361971 29 0.027777777777777776 0.03636299983863723 0.0067182066098195655 30 0.027777777777777776 0.03615974957582815 0.006784624195964068 31 0.027777777777777776 0.036001067461093515 0.006792736244762245 32 0.02631578947368421 0.03585705404487707 0.006886008196113005 33 0.02631578947368421 0.03571511934702132 0.006980966601862108 34 0.02631578947368421 0.03558208144338916 0.006975671272659737 35 0.02631578947368421 0.035523269641156305 0.007029830564216234 36 0.02631578947368421 0.035352630520894796 0.007109006244468943 37 0.02631578947368421 0.03525904015206289 
0.00716713853183638 38 0.02631578947368421 0.035003150427752115 0.007261733438238009 39 0.02631578947368421 0.0350878065124082 0.007235558192867908 40 0.02631578947368421 0.03506031966234071 0.007253354166028904 41 0.02631578947368421 0.03497496379956549 0.007248977861899415 42 0.02631578947368421 0.03481186508054571 0.006966668179409498 43 0.02631578947368421 0.034732893345652924 0.006993106579244648 44 0.02631578947368421 0.03457633876475624 0.0070796600767005675 45 0.02564102564102564 0.03445953473545221 0.007096635586395174 46 0.02564102564102564 0.03424201890301702 0.007115120569419857 47 0.02564102564102564 0.03400009280129056 0.007168165801585374 48 0.02564102564102564 0.033670742784440544 0.00711148149312442 49 0.025 0.033436412300110054 0.007189450666750213 50 0.025 0.033447997635998795 0.007187477871410633 51 0.025 0.03322871184413236 0.007249243312145341 52 0.025 0.0330933317271733 0.00725059006156153 53 0.025 0.03295272671814723 0.007308861341128651 54 0.025 0.03287930504178438 0.007366395988270292 55 0.025 0.03278447745557748 0.007406073280120626 56 0.025 0.032792687082208165 0.007401201055329391 57 0.025 0.03268001985812789 0.0073843136398220365 58 0.025 0.03230008721972339 0.007245850271969602 59 0.024390243902439025 0.03214398965874778 0.007246430111445265 60 0.024390243902439025 0.032015866004383844 0.0072681425037237355 61 0.024390243902439025 0.031883485452205894 0.00733690135935937 62 0.024390243902439025 0.031816880996164566 0.007376902910834809 63 0.024390243902439025 0.03146209328162075 0.007177437385924868 64 0.024390243902439025 0.030962607235877355 0.0068235487821863006 65 0.024390243902439025 0.030309716409687423 0.006138468195036998 66 0.024390243902439025 0.029989853666525578 0.0056202135596107525 67 0.024390243902439025 0.029909236021992244 0.0056570168868638054 68 0.024390243902439025 0.029714197729741806 0.005507420236007412 69 0.024390243902439025 0.029641209851875877 0.005540760460082632 70 0.024390243902439025 0.029590994643841448 
0.005565312649992668 71 0.024390243902439025 0.029540284704693373 0.00554356934473582 72 0.023809523809523808 0.0294674131318218 0.005574058368394143 73 0.023809523809523808 0.029395570937021352 0.005560822242024083 74 0.023809523809523808 0.029263551712821353 0.005507388169792296 75 0.023809523809523808 0.029057202506472146 0.0054044000172213495 76 0.023809523809523808 0.02892271087198051 0.005390781173143104 77 0.023809523809523808 0.028937998073724706 0.005386883261199939 78 0.023809523809523808 0.02888072253192904 0.005408732109380401 79 0.023809523809523808 0.02888038515001271 0.00540813338837751 80 0.023809523809523808 0.028692779962407523 0.005274723961496633 81 0.023809523809523808 0.02846658948621705 0.005154135225991897 82 0.023809523809523808 0.028378157592907106 0.005173921018975657 83 0.023809523809523808 0.028293672185841053 0.005178852024892103 84 0.023809523809523808 0.028122237340493163 0.004972342571789223 85 0.023809523809523808 0.02806820803646386 0.004994113363054957 86 0.023809523809523808 0.02806820803646386 0.004994113363054957 87 0.023809523809523808 0.02794674041499624 0.004918427003520906 88 0.023809523809523808 0.027886667587220818 0.004900505036397889 89 0.023809523809523808 0.0279223818729351 0.004892344487055345 90 0.023809523809523808 0.027903729472703757 0.004903536886326738 91 0.023809523809523808 0.027929532101447562 0.004909059012466715 92 0.023809523809523808 0.027844709312465177 0.004905805768405714 93 0.023809523809523808 0.027857217129851043 0.0048985714954845805 94 0.023809523809523808 0.027794784464477206 0.004915719554580067 95 0.023809523809523808 0.027662182081874816 0.004809379264527122 96 0.023809523809523808 0.027541684523877265 0.0048284192202885335 97 0.023809523809523808 0.027499010963501106 0.004810399870685516 98 0.023809523809523808 0.02735829801791011 0.004692016732470327 99 0.023809523809523808 0.027302208274320366 0.004678665596025819 100 0.023809523809523808 0.027162435766126806 0.004552932274250579 101 
0.023809523809523808 0.027151233123345216 0.004561946442992086 102 0.023255813953488372 0.026918788873964664 0.004289242904213266 103 0.023255813953488372 0.026900743958018447 0.004299114587977271 104 0.023255813953488372 0.026793565879179342 0.004118194101992821 105 0.023255813953488372 0.026737543470215758 0.004120300907841031 106 0.023255813953488372 0.02677100203810976 0.004163631467732847 107 0.023255813953488372 0.026721347292490583 0.004163033936033469 108 0.023255813953488372 0.02671581019393023 0.004167269243327534 109 0.023255813953488372 0.02657680780408536 0.00417039084574992 110 0.023255813953488372 0.026560934788212344 0.004165409033319495 111 0.023255813953488372 0.02656301848582848 0.004170603683923384 112 0.023255813953488372 0.026610493881282317 0.00421484207235389 113 0.023255813953488372 0.026628808899597334 0.004206602264090147 114 0.023255813953488372 0.026584159261924437 0.0041969238651043605 115 0.023255813953488372 0.02658536980169067 0.00419997458125932 116 0.023255813953488372 0.026472222992579425 0.004215769526466076 117 0.023255813953488372 0.02645352799928223 0.004222125778576367 118 0.023255813953488372 0.026320351341118257 0.004090358032511461 119 0.023255813953488372 0.026302639378284347 0.00409903815479723 120 0.023255813953488372 0.026229175108855644 0.004103421645792556 121 0.023255813953488372 0.026271679625962364 0.004087112723481286 122 0.023255813953488372 0.02626493198763578 0.004087591304364971 123 0.023255813953488372 0.0262463266092743 0.004093708892816499 124 0.023255813953488372 0.02623594508257042 0.0041031299227546085 125 0.023255813953488372 0.026190075366976547 0.0041089287510517845 126 0.023255813953488372 0.026135623494899216 0.004129086542967953 127 0.023255813953488372 0.026205905789783716 0.004115175203514505 128 0.023255813953488372 0.026193308357462128 0.0041182653342320494 129 0.023255813953488372 0.025839045910176425 0.003477341432928507 130 0.023255813953488372 0.025808446154974466 0.0034864631254768698 
131 0.023255813953488372 0.02573132158692534 0.0034937543640718286 132 0.023255813953488372 0.02578365360070166 0.0034924993721620577 133 0.023255813953488372 0.025728410740580746 0.0034834736685537 134 0.023255813953488372 0.025675772463750756 0.0035059864473457867 135 0.023255813953488372 0.02554115707913537 0.003262457391280839 136 0.023255813953488372 0.02549593744089248 0.0032624816665789286 137 0.023255813953488372 0.02547668198518003 0.003263268278986877 138 0.023255813953488372 0.025450604513980603 0.003262279428949987 139 0.023255813953488372 0.025469859969693056 0.003261646532635847 140 0.023255813953488372 0.025464052768763904 0.0032640796303695994 141 0.023255813953488372 0.025452708469274398 0.0032697583507197816 142 0.022727272727272728 0.02534261432019256 0.0032621280248064798 143 0.022727272727272728 0.025310961076684527 0.0032628251123270014 144 0.022727272727272728 0.025268708245875997 0.0032715108250115127 145 0.022727272727272728 0.025197252444862813 0.00319915986612111 146 0.022727272727272728 0.025157022302533968 0.003213736996065187 147 0.022727272727272728 0.02514619979171146 0.003220072617385478 148 0.022727272727272728 0.025114389734503608 0.003217563197988547 149 0.022727272727272728 0.025078504567039497 0.0032239688042125085 150 0.022727272727272728 0.02506685149143293 0.003227200421439853 151 0.022727272727272728 0.024913102015102807 0.0031401751832950456 152 0.022727272727272728 0.024918639113663165 0.0031377352236427146 153 0.022727272727272728 0.024871186566210617 0.003145316514677488 154 0.022727272727272728 0.0247821452525671 0.0031303927692320336 155 0.022727272727272728 0.024751797183663337 0.003131023043946763 156 0.022727272727272728 0.024734355323198228 0.0031344480686555398 157 0.022727272727272728 0.024764955078400184 0.0031348009257341713 158 0.022727272727272728 0.024731695654896857 0.0031473966182713883 159 0.022727272727272728 0.02466243158563279 0.0031346008819375867 160 0.022222222222222223 0.024615958814557814 
0.003141382574566584 161 0.022222222222222223 0.024519277717876715 0.003105415519295516 162 0.022222222222222223 0.0245028829133367 0.0031112353636597737 163 0.022222222222222223 0.024462887678219518 0.0031180004659155784 164 0.022222222222222223 0.02443615695636684 0.0031291288953815715 165 0.022222222222222223 0.02443135813761454 0.0031331297745607885 166 0.022222222222222223 0.024439337238616896 0.003140412149256389 167 0.022222222222222223 0.02440873748341493 0.0031368851853292674 168 0.022222222222222223 0.024364213531914237 0.003147253659261851 169 0.022222222222222223 0.02435811597093863 0.00314660657645012 170 0.022222222222222223 0.024308657976358682 0.0031600312059756512 171 0.022222222222222223 0.02425625852605793 0.00313504764815793 172 0.022222222222222223 0.024229292896993604 0.0031436219889505058 173 0.022222222222222223 0.024208134468858434 0.003151748218016044 174 0.022222222222222223 0.024202849056596275 0.0031537833257092194 175 0.022222222222222223 0.024149029002776223 0.0031473309832006865 176 0.022222222222222223 0.024154566101336577 0.0031462415617485957 177 0.022222222222222223 0.0241283571681509 0.0031565107346521915 178 0.022222222222222223 0.02413892799267521 0.0031526836436063556 179 0.022222222222222223 0.024123541570312046 0.0031598933565761657 180 0.022222222222222223 0.024094404041174515 0.0031592000755098874 181 0.022222222222222223 0.0239782424250129 0.0031276457946775855 182 0.022222222222222223 0.023934105037852257 0.003133833954361933 183 0.022222222222222223 0.02383551408632013 0.003095805615909798 184 0.022222222222222223 0.02374288449369054 0.0030588974624381 185 0.022222222222222223 0.02375974911265385 0.0030586455350572798 186 0.022222222222222223 0.023684625405951197 0.003048970855806743 187 0.021739130434782608 0.023674509075814646 0.003052852873040262 188 0.021739130434782608 0.023637175430315575 0.0030593530822678734 189 0.021739130434782608 0.023637395017491684 0.003060049731537424 190 0.021739130434782608 
0.023611640578693763 0.003067690138196303 191 0.021739130434782608 0.023567571121580824 0.0030682476009510212 192 0.021739130434782608 0.023530994171960397 0.003076225972762006 193 0.021739130434782608 0.023530994171960397 0.003076225972762006 194 0.021739130434782608 0.02352065825464774 0.003078868461455716 195 0.021739130434782608 0.02348593376225734 0.0030709428788565215 196 0.021739130434782608 0.02346522982850993 0.00307566702127991 197 0.021739130434782608 0.023272081629294998 0.002723000011150271 198 0.021739130434782608 0.02323116035531002 0.002732254763077608 199 0.021739130434782608 0.02323621086036053 0.0027308515244538496 fitness: 0.021739130434782608 genotype: [1 1 0 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1] """ def test_generational_no_elitism(capfd): generational_no_elitism() out, err = capfd.readouterr() assert out == stdout1 def test_generational_with_elitism(capfd): generational_with_elitism() out, err = capfd.readouterr() assert out == stdout2 def test_steady_state_no_elitism(capfd): steady_state_no_elitism() out, err = capfd.readouterr() assert out == stdout3 def test_steady_state_with_elitism(capfd): steady_state_with_elitism() out, err = capfd.readouterr() assert out == stdout4
60.66805
114
0.87983
3,373
29,242
7.620219
0.327898
0.012839
0.017391
0.021476
0.045364
0.045364
0.024472
0.024433
0.017663
0.017274
0
0.929382
0.071678
29,242
481
115
60.794179
0.017461
0
0
0.027719
0
0.008529
0.977943
0.107004
0
0
0
0
0.008529
1
0.008529
false
0
0.002132
0
0.010661
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b99921276754597cdf8d054e2188cd71742f335c
156
py
Python
BioSTEAM 1.x.x/build/lib/biorefineries/lipidcane/__init__.py
blsymens/Bioindustrial-Park
c1173646185d52f4b8d595ad088ade8e5216614d
[ "MIT" ]
18
2020-05-12T21:46:14.000Z
2022-03-14T00:35:35.000Z
build/lib/biosteam/biorefineries/lipidcane/__init__.py
lilanyu/biosteam
b025bbe138bfd0b016af58583792fb4f3ff9186e
[ "MIT" ]
24
2020-03-05T14:39:15.000Z
2022-03-25T22:24:50.000Z
build/lib/biosteam/biorefineries/lipidcane/__init__.py
lilanyu/biosteam
b025bbe138bfd0b016af58583792fb4f3ff9186e
[ "MIT" ]
9
2020-05-14T13:02:32.000Z
2022-03-27T19:41:07.000Z
# -*- coding: utf-8 -*- __all__ = [] from lazypkg import LazyPkg LazyPkg(__name__, ['tea', 'process_settings', 'species', 'system', 'utils', 'model'])
26
85
0.628205
17
156
5.235294
0.882353
0
0
0
0
0
0
0
0
0
0
0.007576
0.153846
156
6
86
26
0.666667
0.134615
0
0
0
0
0.313433
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
b9a985d8084a913277abdf8f286f77aa159531f9
432
py
Python
schema/listings.py
olubiyiontheweb/malliva
b212e6b359eed54c92533f0a02afe3c0042150e2
[ "MIT" ]
null
null
null
schema/listings.py
olubiyiontheweb/malliva
b212e6b359eed54c92533f0a02afe3c0042150e2
[ "MIT" ]
null
null
null
schema/listings.py
olubiyiontheweb/malliva
b212e6b359eed54c92533f0a02afe3c0042150e2
[ "MIT" ]
1
2021-07-19T12:15:52.000Z
2021-07-19T12:15:52.000Z
from enum import unique
from typing import Optional
from pydantic import BaseModel, Field
from pydantic.networks import EmailStr
from pydantic.types import FilePath, confloat, conlist


class Listing(BaseModel):
    """Pydantic schema for a marketplace listing.

    Fields:
        title: Listing title.
        price: Listing price; must be below 100000.0.
        posted_by: Identifier/name of the posting user.
        category: Optional category id, defaults to 1.
        description: Optional free-text description.
        listing_images: Up to 5 image file paths, or None.
        visible: Whether the listing is publicly visible (default True).
    """

    title: str
    price: confloat(lt=100000.0)
    posted_by: str
    category: Optional[int] = 1
    # Fix: was annotated `str = None`, which contradicts the annotation —
    # a None default requires an Optional type for the field to validate
    # consistently (and to satisfy static type checkers).
    description: Optional[str] = None
    listing_images: Optional[conlist(FilePath, max_items=5)] = None
    visible: bool = True
27
67
0.747685
57
432
5.614035
0.649123
0.1125
0
0
0
0
0
0
0
0
0
0.025424
0.180556
432
15
68
28.8
0.878531
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.384615
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
b9b7d7af2214c8b7e0cfdc316e09d2136e74eb54
626
py
Python
toolbox/utils/Generator.py
harsh-kakasaniya55/skift
79ccaf5398cfb7921599105607ad6688ece452d8
[ "MIT" ]
2
2021-08-14T16:03:48.000Z
2021-11-09T10:29:36.000Z
toolbox/utils/Generator.py
harsh-kakasaniya55/skift
79ccaf5398cfb7921599105607ad6688ece452d8
[ "MIT" ]
null
null
null
toolbox/utils/Generator.py
harsh-kakasaniya55/skift
79ccaf5398cfb7921599105607ad6688ece452d8
[ "MIT" ]
2
2020-10-13T14:25:30.000Z
2020-10-13T14:39:40.000Z
class Generator: def __init__(self): self.buffer = "" self.ident = 0 def push_ident(self): self.ident = self.ident + 1 def pop_ident(self): self.ident = self.ident - 1 def emit(self, *code): if (''.join(code) == ""): self.buffer += "\n" else: self.buffer += ' ' * self.ident + ''.join(code) + '\n' def emit_section(self, title): self.buffer += ' ' * self.ident + "/* --- " + title + " " + \ "-" * (69 - len(title) - 4 * self.ident) + " */\n\n" # nice def finalize(self): return self.buffer
26.083333
72
0.468051
71
626
4.028169
0.352113
0.251748
0.146853
0.199301
0.216783
0.216783
0.216783
0.216783
0
0
0
0.014742
0.34984
626
23
73
27.217391
0.687961
0.00639
0
0
0
0
0.045161
0
0
0
0
0
0
1
0.333333
false
0
0
0.055556
0.444444
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
b9becdb31c1eb04ebdc0c00d933e41b464670002
151
py
Python
Install_Textblob.py
Jeff-Winchell/Political_Tweet_Negativity
b1d769b1fd5f94932eb71e8ed4a9095174e6aed0
[ "MIT" ]
11
2020-03-09T23:13:44.000Z
2021-05-09T21:35:54.000Z
Install_Textblob.py
Jeff-Winchell/Political_Tweet_Negativity
b1d769b1fd5f94932eb71e8ed4a9095174e6aed0
[ "MIT" ]
null
null
null
Install_Textblob.py
Jeff-Winchell/Political_Tweet_Negativity
b1d769b1fd5f94932eb71e8ed4a9095174e6aed0
[ "MIT" ]
2
2020-03-11T00:24:51.000Z
2020-03-11T18:55:37.000Z
import sqlmlutils connection=sqlmlutils.ConnectionInfo(server="localhost",database="Test") sqlmlutils.SQLPackageManager(connection).install("textblob")
50.333333
72
0.854305
14
151
9.214286
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.019868
151
3
73
50.333333
0.871622
0
0
0
0
0
0.138158
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
6a2cc4c216d5bcc7990701e93f6dcb87f74ebbe1
252
py
Python
src/evaluation/create_datasets.py
iserh/data-augmentation
1e1e99177ff4256c68cafe043bd7e50d52bf669d
[ "MIT" ]
null
null
null
src/evaluation/create_datasets.py
iserh/data-augmentation
1e1e99177ff4256c68cafe043bd7e50d52bf669d
[ "MIT" ]
null
null
null
src/evaluation/create_datasets.py
iserh/data-augmentation
1e1e99177ff4256c68cafe043bd7e50d52bf669d
[ "MIT" ]
null
null
null
from utils.data import load_splitted_datasets, split_datasets DATASET = "MNIST" split_datasets(DATASET, reduce=500, others=False, seed=42) datasets, _ = load_splitted_datasets(DATASET, others=False) print(", ".join([str(len(ds)) for ds in datasets]))
36
61
0.77381
36
252
5.222222
0.638889
0.239362
0.212766
0
0
0
0
0
0
0
0
0.02193
0.095238
252
6
62
42
0.802632
0
0
0
0
0
0.027778
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0.2
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
6a300a6d97d763640c8e3144ea9adcc5a2e636eb
347
py
Python
stellar_model/model/horizon/account_data.py
MartinThoma/stellar-model
a21969f3d1aac1e95cb32747d54b72a30e21dda9
[ "Apache-2.0" ]
8
2021-04-25T09:58:39.000Z
2022-01-22T05:01:37.000Z
stellar_model/model/horizon/account_data.py
MartinThoma/stellar-model
a21969f3d1aac1e95cb32747d54b72a30e21dda9
[ "Apache-2.0" ]
8
2021-06-02T12:46:21.000Z
2021-11-29T01:23:42.000Z
stellar_model/model/horizon/account_data.py
MartinThoma/stellar-model
a21969f3d1aac1e95cb32747d54b72a30e21dda9
[ "Apache-2.0" ]
3
2021-07-21T03:58:05.000Z
2021-08-01T00:03:11.000Z
from typing import Optional from pydantic import BaseModel from pydantic import Field __all__ = ["AccountData"] class AccountData(BaseModel): """ Represents a single data object stored on by an account. """ value: str = Field(description="The key value for this data.") # TODO: add description sponsor: Optional[str]
19.277778
66
0.706052
43
347
5.604651
0.697674
0.099585
0.149378
0
0
0
0
0
0
0
0
0
0.213256
347
17
67
20.411765
0.882784
0.227666
0
0
0
0
0.154762
0
0
0
0
0.058824
0
1
0
false
0
0.428571
0
0.857143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
1
0
0
3
dbde7725bab1f35e4def65bc6176198b5ae6508c
686
py
Python
test/files/decrypt_bruteforce.py
evan-miller-snyk/juice-shop
2eca75477b0cb67f34c4faac7d73bdd048021c57
[ "MIT" ]
4
2021-05-19T14:18:11.000Z
2021-09-15T08:05:41.000Z
test/files/decrypt_bruteforce.py
evan-miller-snyk/juice-shop
2eca75477b0cb67f34c4faac7d73bdd048021c57
[ "MIT" ]
141
2021-03-07T03:51:33.000Z
2021-07-28T23:47:16.000Z
test/files/decrypt_bruteforce.py
evan-miller-snyk/juice-shop
2eca75477b0cb67f34c4faac7d73bdd048021c57
[ "MIT" ]
142
2021-01-07T16:38:28.000Z
2022-03-30T16:48:41.000Z
# Copyright (c) 2014-2021 Bjoern Kimminich. # SPDX-License-Identifier: MIT # Public Parameters N = 145906768007583323230186939349070635292401872375357164399581871019873438799005358938369571402670149802121818086292467422828157022922076746906543401224889672472407926969987100581290103199317858753663710862357656510507883714297115637342788911463535102712032765166518411726859837988672111837205085526346618740053 e = 65537 encrypted_chars = {} for char in [chr(i) for i in range(33,126)] + [' ', '\n', '\t']: c = pow(ord(char), e, N) encrypted_chars[str(c)] = char with open('announcement_encrypted.md', 'r') as fl: print "".join([encrypted_chars[f[:-1]] for f in fl.readlines()])
42.875
313
0.803207
61
686
8.967213
0.688525
0.076782
0
0
0
0
0
0
0
0
0
0.525641
0.090379
686
15
314
45.733333
0.350962
0.129738
0
0
0
0
0.052365
0.04223
0
0
0
0
0
0
null
null
0
0
null
null
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
e0363f2d8b8ee0a5a6386ee343a762ca1545c3da
12,801
py
Python
tests/components/notify/test_apns.py
TastyPi/home-assistant
aa1e4c564cb8660bf6b7637bc25317ee58869214
[ "MIT" ]
null
null
null
tests/components/notify/test_apns.py
TastyPi/home-assistant
aa1e4c564cb8660bf6b7637bc25317ee58869214
[ "MIT" ]
null
null
null
tests/components/notify/test_apns.py
TastyPi/home-assistant
aa1e4c564cb8660bf6b7637bc25317ee58869214
[ "MIT" ]
null
null
null
"""The tests for the APNS component.""" import unittest import os import homeassistant.components.notify as notify from homeassistant.core import State from homeassistant.components.notify.apns import ApnsNotificationService from tests.common import get_test_home_assistant from homeassistant.config import load_yaml_config_file from unittest.mock import patch from apns2.errors import Unregistered class TestApns(unittest.TestCase): """Test the APNS component.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() def test_apns_setup_full(self): """Test setup with all data.""" config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'sandbox': 'True', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } self.assertTrue(notify.setup(self.hass, config)) def test_apns_setup_missing_name(self): """Test setup with missing name.""" config = { 'notify': { 'platform': 'apns', 'sandbox': 'True', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } self.assertFalse(notify.setup(self.hass, config)) def test_apns_setup_missing_certificate(self): """Test setup with missing name.""" config = { 'notify': { 'platform': 'apns', 'topic': 'testapp.appname', 'name': 'test_app' } } self.assertFalse(notify.setup(self.hass, config)) def test_apns_setup_missing_topic(self): """Test setup with missing topic.""" config = { 'notify': { 'platform': 'apns', 'cert_file': 'test_app.pem', 'name': 'test_app' } } self.assertFalse(notify.setup(self.hass, config)) def test_register_new_device(self): """Test registering a new device with a name.""" config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('5678: {name: test 
device 2}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('apns', 'test_app', {'push_id': '1234', 'name': 'test device'}, blocking=True)) devices = {str(key): value for (key, value) in load_yaml_config_file(devices_path).items()} test_device_1 = devices.get('1234') test_device_2 = devices.get('5678') self.assertIsNotNone(test_device_1) self.assertIsNotNone(test_device_2) self.assertEqual('test device', test_device_1.get('name')) os.remove(devices_path) def test_register_device_without_name(self): """Test registering a without a name.""" config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('5678: {name: test device 2}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('apns', 'test_app', {'push_id': '1234'}, blocking=True)) devices = {str(key): value for (key, value) in load_yaml_config_file(devices_path).items()} test_device = devices.get('1234') self.assertIsNotNone(test_device) self.assertIsNone(test_device.get('name')) os.remove(devices_path) def test_update_existing_device(self): """Test updating an existing device.""" config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('1234: {name: test device 1}\n') out.write('5678: {name: test device 2}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('apns', 'test_app', {'push_id': '1234', 'name': 'updated device 1'}, blocking=True)) devices = {str(key): value for (key, value) in load_yaml_config_file(devices_path).items()} test_device_1 = devices.get('1234') test_device_2 = devices.get('5678') self.assertIsNotNone(test_device_1) self.assertIsNotNone(test_device_2) self.assertEqual('updated 
device 1', test_device_1.get('name')) os.remove(devices_path) def test_update_existing_device_with_tracking_id(self): """Test updating an existing device that has a tracking id.""" config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('1234: {name: test device 1, ' 'tracking_device_id: tracking123}\n') out.write('5678: {name: test device 2, ' 'tracking_device_id: tracking456}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('apns', 'test_app', {'push_id': '1234', 'name': 'updated device 1'}, blocking=True)) devices = {str(key): value for (key, value) in load_yaml_config_file(devices_path).items()} test_device_1 = devices.get('1234') test_device_2 = devices.get('5678') self.assertIsNotNone(test_device_1) self.assertIsNotNone(test_device_2) self.assertEqual('tracking123', test_device_1.get('tracking_device_id')) self.assertEqual('tracking456', test_device_2.get('tracking_device_id')) os.remove(devices_path) @patch('apns2.client.APNsClient') def test_send(self, mock_client): """Test updating an existing device.""" send = mock_client.return_value.send_notification config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('1234: {name: test device 1}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('notify', 'test_app', {'message': 'Hello', 'data': { 'badge': 1, 'sound': 'test.mp3', 'category': 'testing' } }, blocking=True)) self.assertTrue(send.called) self.assertEqual(1, len(send.mock_calls)) target = send.mock_calls[0][1][0] payload = send.mock_calls[0][1][1] self.assertEqual('1234', target) self.assertEqual('Hello', payload.alert) self.assertEqual(1, payload.badge) 
self.assertEqual('test.mp3', payload.sound) self.assertEqual('testing', payload.category) @patch('apns2.client.APNsClient') def test_send_when_disabled(self, mock_client): """Test updating an existing device.""" send = mock_client.return_value.send_notification config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('1234: {name: test device 1, disabled: True}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('notify', 'test_app', {'message': 'Hello', 'data': { 'badge': 1, 'sound': 'test.mp3', 'category': 'testing' } }, blocking=True)) self.assertFalse(send.called) @patch('apns2.client.APNsClient') def test_send_with_state(self, mock_client): """Test updating an existing device.""" send = mock_client.return_value.send_notification devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('1234: {name: test device 1, ' 'tracking_device_id: tracking123}\n') out.write('5678: {name: test device 2, ' 'tracking_device_id: tracking456}\n') notify_service = ApnsNotificationService( self.hass, 'test_app', 'testapp.appname', False, 'test_app.pem' ) notify_service.device_state_changed_listener( 'device_tracker.tracking456', State('device_tracker.tracking456', None), State('device_tracker.tracking456', 'home')) self.hass.block_till_done() notify_service.send_message(message='Hello', target='home') self.assertTrue(send.called) self.assertEqual(1, len(send.mock_calls)) target = send.mock_calls[0][1][0] payload = send.mock_calls[0][1][1] self.assertEqual('5678', target) self.assertEqual('Hello', payload.alert) @patch('apns2.client.APNsClient') def test_disable_when_unregistered(self, mock_client): """Test disabling a device when it is unregistered.""" send = mock_client.return_value.send_notification send.side_effect = Unregistered() 
config = { 'notify': { 'platform': 'apns', 'name': 'test_app', 'topic': 'testapp.appname', 'cert_file': 'test_app.pem' } } devices_path = self.hass.config.path('test_app_apns.yaml') with open(devices_path, 'w+') as out: out.write('1234: {name: test device 1}\n') notify.setup(self.hass, config) self.assertTrue(self.hass.services.call('notify', 'test_app', {'message': 'Hello'}, blocking=True)) devices = {str(key): value for (key, value) in load_yaml_config_file(devices_path).items()} test_device_1 = devices.get('1234') self.assertIsNotNone(test_device_1) self.assertEqual(True, test_device_1.get('disabled')) os.remove(devices_path)
35.756983
77
0.499336
1,256
12,801
4.899682
0.11465
0.042086
0.043224
0.042899
0.750894
0.734969
0.706045
0.663958
0.663958
0.663958
0
0.023421
0.382939
12,801
357
78
35.857143
0.755665
0.049606
0
0.675277
0
0
0.170804
0.014068
0
0
0
0
0.136531
1
0.051661
false
0
0.03321
0
0.088561
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e0407d1d36857931b1f0367d2b07386cccf4499c
273
py
Python
Mundo 1/ex006.py
adonaifariasdev/cursoemvideo-python3
1fd35e45b24c52013fa3bc98e723971db8e6b7d1
[ "MIT" ]
null
null
null
Mundo 1/ex006.py
adonaifariasdev/cursoemvideo-python3
1fd35e45b24c52013fa3bc98e723971db8e6b7d1
[ "MIT" ]
null
null
null
Mundo 1/ex006.py
adonaifariasdev/cursoemvideo-python3
1fd35e45b24c52013fa3bc98e723971db8e6b7d1
[ "MIT" ]
null
null
null
num = int(input('Digite um número: ')) dobro = num * 2 triplo = num * 3 raiz = num ** (1/2) print('O dobro de {} é igual a {}.'.format(num, dobro)) print('O triplo de {} é igual a {}.'.format(num, triplo)) print('A raiz quadrada de {} é igual a {:.2f}.'.format(num, raiz))
34.125
66
0.604396
48
273
3.4375
0.4375
0.054545
0.145455
0.163636
0.218182
0.218182
0
0
0
0
0
0.022321
0.179487
273
7
67
39
0.714286
0
0
0
0
0
0.410256
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
e053c2a5601d1937f810989064712a875ba8ccec
19
py
Python
tests/mixins/__init__.py
jhubert/pydocx-s3-images
e7c96b257c67db8043822292550c193db410a4e6
[ "Apache-2.0" ]
2
2016-04-17T02:45:33.000Z
2019-07-26T09:26:41.000Z
tests/mixins/__init__.py
jhubert/pydocx-s3-images
e7c96b257c67db8043822292550c193db410a4e6
[ "Apache-2.0" ]
3
2015-07-17T20:04:53.000Z
2015-07-18T19:24:40.000Z
tests/mixins/__init__.py
jhubert/pydocx-s3-images
e7c96b257c67db8043822292550c193db410a4e6
[ "Apache-2.0" ]
null
null
null
__author__ = 'geo'
9.5
18
0.684211
2
19
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0.157895
19
1
19
19
0.5625
0
0
0
0
0
0.157895
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e055c953df57798094fd6fd821663260ac5a142c
773
py
Python
src/models/shop/checkout_models.py
tassaron/flask-shop
371cc04fdee384fc27a1faf790b01039ce289e3d
[ "MIT" ]
1
2022-03-08T04:57:54.000Z
2022-03-08T04:57:54.000Z
src/models/shop/checkout_models.py
tassaron/muffin-shop
371cc04fdee384fc27a1faf790b01039ce289e3d
[ "MIT" ]
null
null
null
src/models/shop/checkout_models.py
tassaron/muffin-shop
371cc04fdee384fc27a1faf790b01039ce289e3d
[ "MIT" ]
null
null
null
from muffin_shop.helpers.main.plugins import db class Transaction(db.Model): """Represents a successful transaction -- a cart of products that someone bought""" id = db.Column(db.Integer, primary_key=True, nullable=False) uuid = db.Column(db.String(60), nullable=False) products = db.Column(db.String(1024), nullable=False) price = db.Column(db.Integer, nullable=True) shipping_address = db.Column(db.String(1024), nullable=True) phone_number = db.Column(db.String(11), nullable=True) email_address = db.Column(db.String(40), nullable=True) customer_name = db.Column(db.String(40), nullable=True) customer_uuid = db.Column(db.String(60), nullable=True) user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=True)
45.470588
87
0.721863
113
773
4.867257
0.389381
0.145455
0.181818
0.203636
0.443636
0.349091
0.247273
0.138182
0
0
0
0.027027
0.138422
773
16
88
48.3125
0.798799
0.099612
0
0
0
0
0.010145
0
0
0
0
0
0
1
0
false
0
0.083333
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
e059d4769d5b4c2c8dce061a02614f3d3cb7077e
126
py
Python
recipes/stages/_base_/models/classifiers/ti_classifier.py
openvinotoolkit/model_preparation_algorithm
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
[ "Apache-2.0" ]
null
null
null
recipes/stages/_base_/models/classifiers/ti_classifier.py
openvinotoolkit/model_preparation_algorithm
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
[ "Apache-2.0" ]
null
null
null
recipes/stages/_base_/models/classifiers/ti_classifier.py
openvinotoolkit/model_preparation_algorithm
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
[ "Apache-2.0" ]
null
null
null
_base_ = './classifier.py' model = dict( type='TaskIncrementalLwF', head=dict( type='TaskIncLwfHead' ) )
14
30
0.595238
11
126
6.636364
0.818182
0.219178
0
0
0
0
0
0
0
0
0
0
0.253968
126
8
31
15.75
0.776596
0
0
0
0
0
0.373016
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e0685bac0c7f6b8baaa44113111f2e477d9965d4
3,926
py
Python
tests/selenium/test_password_reset.py
mereckaj/CS4098-Group-E
0af1c68eeb69c48e798a2464fc8a4ba4d269f11a
[ "MIT" ]
2
2016-02-07T15:00:24.000Z
2016-02-07T22:28:54.000Z
tests/selenium/test_password_reset.py
mereckaj/CS4098-Group-E
0af1c68eeb69c48e798a2464fc8a4ba4d269f11a
[ "MIT" ]
6
2016-02-10T10:05:51.000Z
2016-02-14T17:59:05.000Z
tests/selenium/test_password_reset.py
mereckaj/CS4098-Group-E
0af1c68eeb69c48e798a2464fc8a4ba4d269f11a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException import unittest, time, re import random class TestEditor(unittest.TestCase): username = "test_user_" + str(random.randint(1, 10000000)) def setUp(self): self.driver = webdriver.Firefox() self.driver.maximize_window() self.driver.implicitly_wait(30) self.base_url = "http://localhost:8000" self.verificationErrors = [] self.accept_next_alert = True def is_element_present(self, how, what): try: self.driver.find_element(by=how, value=what) except NoSuchElementException as e: return False return True def is_alert_present(self): try: self.driver.switch_to_alert() except NoAlertPresentException as e: return False return True def close_alert_and_get_its_text(self): try: alert = self.driver.switch_to_alert() alert_text = alert.text if self.accept_next_alert: alert.accept() else: alert.dismiss() return alert_text finally: self.accept_next_alert = True def test_editor(self): driver = self.driver # Register, login and logout driver.get(self.base_url + "/login") driver.find_element_by_id("reg").click() driver.find_element_by_id("first_name").clear() driver.find_element_by_id("first_name").send_keys("test") driver.find_element_by_id("last_name").clear() driver.find_element_by_id("last_name").send_keys("user") driver.find_element_by_id("password").clear() driver.find_element_by_id("password").send_keys("Aa1aaaa!") driver.find_element_by_id("confirm").clear() driver.find_element_by_id("confirm").send_keys("Aa1aaaa!") driver.find_element_by_id("email").clear() driver.find_element_by_id("email").send_keys(self.username + "@example.com") driver.find_element_by_id("submit").click() self.assertEqual("http://localhost:8000/", driver.current_url) # Log out 
driver.find_element_by_xpath("//div[@id='bs-example-navbar-collapse-1']/ul[2]/li/a").click() driver.find_element_by_link_text("Logout").click() self.assertEqual("http://localhost:8000/login", driver.current_url) # Send password reset email driver.find_element_by_link_text("Forgot Password").click() self.assertEqual("http://localhost:8000/reset_password", driver.current_url) driver.find_element_by_id("email").send_keys(self.username + "@example.com"); driver.find_element_by_id("submit").click() driver.find_element_by_id("backToLogin").click() self.assertEqual("http://localhost:8000/login", driver.current_url) # Go to password reset page and change the password driver.get(self.base_url + "/test/reset/"+self.username + "@example.com") driver.find_element_by_id("password").clear() driver.find_element_by_id("password").send_keys("Ba1aaaa!") driver.find_element_by_id("confirm").clear() driver.find_element_by_id("confirm").send_keys("Ba1aaaa!") driver.find_element_by_id("submit").click() # After password reset we are logged in, logout driver.find_element_by_xpath("//div[@id='bs-example-navbar-collapse-1']/ul[2]/li/a").click() driver.find_element_by_link_text("Logout").click() self.assertEqual("http://localhost:8000/login", driver.current_url) # Test the new password driver.get(self.base_url + "/login") driver.find_element_by_id("email").clear() driver.find_element_by_id("email").send_keys(self.username + "@example.com") driver.find_element_by_id("password").clear() driver.find_element_by_id("password").send_keys("Ba1aaaa!") driver.find_element_by_id("submit").click() self.assertEqual("http://localhost:8000/", driver.current_url) def tearDown(self): self.driver.quit() self.assertEqual([], self.verificationErrors) if __name__ == "__main__": unittest.main()
39.26
94
0.758023
564
3,926
5.005319
0.219858
0.109812
0.186681
0.208643
0.628764
0.588381
0.525328
0.460857
0.455898
0.455898
0
0.013834
0.097809
3,926
99
95
39.656566
0.783173
0.050942
0
0.345679
0
0.024691
0.168908
0.027972
0
0
0
0
0.08642
1
0.074074
false
0.098765
0.098765
0
0.234568
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
e06e5c5e74abebd87462007167a1ee3bb577723f
196
py
Python
openiPrototype/openiPrototype/APIS/Activity/Nutrition/admin.py
OPENi-ict/ntua_demo
104118fbe1f54db35386ca96286317ceb64cb658
[ "Apache-2.0" ]
null
null
null
openiPrototype/openiPrototype/APIS/Activity/Nutrition/admin.py
OPENi-ict/ntua_demo
104118fbe1f54db35386ca96286317ceb64cb658
[ "Apache-2.0" ]
null
null
null
openiPrototype/openiPrototype/APIS/Activity/Nutrition/admin.py
OPENi-ict/ntua_demo
104118fbe1f54db35386ca96286317ceb64cb658
[ "Apache-2.0" ]
null
null
null
__author__ = 'mpetyx' from django.contrib import admin from .models import OpeniNutrition class NutritionAdmin(admin.ModelAdmin): pass admin.site.register(OpeniNutrition, NutritionAdmin)
16.333333
51
0.80102
21
196
7.285714
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.127551
196
11
52
17.818182
0.894737
0
0
0
0
0
0.030612
0
0
0
0
0
0
1
0
false
0.166667
0.333333
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
3
e07769c80ce823bc07c3aee5ffb41cccf6eaf9a0
373
py
Python
sdk/python/pulumi_openstack/__init__.py
Frassle/pulumi-openstack
6fc26edd7c42e7c3d65a01cf9384148cc56466e4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_openstack/__init__.py
Frassle/pulumi-openstack
6fc26edd7c42e7c3d65a01cf9384148cc56466e4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_openstack/__init__.py
Frassle/pulumi-openstack
6fc26edd7c42e7c3d65a01cf9384148cc56466e4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** # Make subpackages available: __all__ = ['blockstorage', 'compute', 'config', 'database', 'dns', 'firewall', 'identity', 'images', 'loadbalancer', 'networking', 'objectstorage', 'vpnaas']
53.285714
157
0.689008
46
373
5.5
0.934783
0
0
0
0
0
0
0
0
0
0
0.003125
0.142091
373
6
158
62.166667
0.7875
0.549598
0
0
1
0
0.607362
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
0ebb47236036485210c0675c73b1736444ddbee6
1,153
py
Python
asdf/tests/test_helpers.py
zanecodes/asdf
c1f6cf915409da5372c47ac725dc922b4bd52f7d
[ "BSD-3-Clause" ]
66
2020-06-24T14:10:32.000Z
2022-03-18T11:46:11.000Z
asdf/tests/test_helpers.py
zanecodes/asdf
c1f6cf915409da5372c47ac725dc922b4bd52f7d
[ "BSD-3-Clause" ]
179
2020-06-22T18:48:31.000Z
2022-03-31T22:52:19.000Z
asdf/tests/test_helpers.py
zanecodes/asdf
c1f6cf915409da5372c47ac725dc922b4bd52f7d
[ "BSD-3-Clause" ]
13
2020-07-21T16:11:20.000Z
2022-03-18T20:41:41.000Z
import pytest from asdf import types from asdf.exceptions import AsdfConversionWarning, AsdfWarning from asdf.tests.helpers import assert_roundtrip_tree def test_conversion_error(tmpdir): class FooType(types.CustomType): name = 'foo' def __init__(self, a, b): self.a = a self.b = b @classmethod def from_tree(cls, tree, ctx): raise TypeError("This allows us to test the failure") @classmethod def to_tree(cls, node, ctx): return dict(a=node.a, b=node.b) def __eq__(self, other): return self.a == other.a and self.b == other.b class FooExtension: @property def types(self): return [FooType] @property def tag_mapping(self): return [] @property def url_mapping(self): return [] foo = FooType(10, 'hello') tree = dict(foo=foo) with pytest.raises(AsdfConversionWarning): with pytest.warns(AsdfWarning, match="Unable to locate schema file"): assert_roundtrip_tree(tree, tmpdir, extensions=FooExtension())
24.531915
77
0.601908
136
1,153
4.970588
0.441176
0.035503
0.056213
0
0
0
0
0
0
0
0
0.0025
0.306158
1,153
46
78
25.065217
0.8425
0
0
0.212121
0
0
0.060711
0
0
0
0
0
0.060606
1
0.242424
false
0
0.121212
0.151515
0.606061
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
0ebdd76cab7e2c9082cb22dfbca3ca0515083ff2
615
py
Python
82/test_score.py
alehpineda/bitesofpy
bfd319a606cd0b7b9bfb85a3e8942872a2d43c48
[ "MIT" ]
null
null
null
82/test_score.py
alehpineda/bitesofpy
bfd319a606cd0b7b9bfb85a3e8942872a2d43c48
[ "MIT" ]
2
2020-09-24T11:25:29.000Z
2021-06-25T15:43:35.000Z
82/test_score.py
alehpineda/bitesofpy
bfd319a606cd0b7b9bfb85a3e8942872a2d43c48
[ "MIT" ]
null
null
null
from score import Score def test_enum_content(): assert list(Score) == [ Score.BEGINNER, Score.INTERMEDIATE, Score.ADVANCED, Score.CHEATED, ] def test_equality_comparison(): assert Score.BEGINNER is Score.BEGINNER assert Score.INTERMEDIATE is not Score.ADVANCED def test_str_using_thumbsup(): assert str(Score.BEGINNER) == "BEGINNER => 👍👍" assert str(Score.INTERMEDIATE) == "INTERMEDIATE => 👍👍👍" assert str(Score.ADVANCED) == "ADVANCED => 👍👍👍👍" assert str(Score.CHEATED) == "CHEATED => 👍" def test_average(): assert Score.average() == 2.5
22.777778
59
0.64878
73
615
5.493151
0.369863
0.069825
0.139651
0
0
0
0
0
0
0
0
0.004158
0.217886
615
26
60
23.653846
0.808732
0
0
0
0
0
0.099187
0
0
0
0
0
0.444444
1
0.222222
true
0
0.055556
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
0
0
0
3
0ec7e7d46aa94f9b558fb058ae57386a5e71d374
198
py
Python
tsdl/__init__.py
burgerdev/hostload
93142628bb32923c5e6f3a8b791488d72a5c9077
[ "MIT" ]
null
null
null
tsdl/__init__.py
burgerdev/hostload
93142628bb32923c5e6f3a8b791488d72a5c9077
[ "MIT" ]
null
null
null
tsdl/__init__.py
burgerdev/hostload
93142628bb32923c5e6f3a8b791488d72a5c9077
[ "MIT" ]
null
null
null
""" Module that combines the computation graph functionality of module lazyflow with the neural network capabilities of pylearn2 """ import logging logging.basicConfig(level=logging.INFO)
18
66
0.782828
24
198
6.458333
0.791667
0
0
0
0
0
0
0
0
0
0
0.006024
0.161616
198
10
67
19.8
0.927711
0.666667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
0ec9a7bc2bee1d105c5ad0de623a519001fee984
644
py
Python
film_details_searcher/film_details_searcher/models/search_result.py
tomaszkyc/film_details_searcher
e8e12bc739990324f6ab4110fdd363e6ef207f1a
[ "MIT" ]
null
null
null
film_details_searcher/film_details_searcher/models/search_result.py
tomaszkyc/film_details_searcher
e8e12bc739990324f6ab4110fdd363e6ef207f1a
[ "MIT" ]
null
null
null
film_details_searcher/film_details_searcher/models/search_result.py
tomaszkyc/film_details_searcher
e8e12bc739990324f6ab4110fdd363e6ef207f1a
[ "MIT" ]
null
null
null
from dataclasses import dataclass @dataclass(init=True, repr=True, eq=True, frozen=True) class SearchResult: details: dict @classmethod def parse(cls, title, link): if not isinstance(title, str) or not isinstance(link, str): raise TypeError('Link and title should be str type') title = SearchResult._parse_title(title) attr = {"title": title, "link": link} return cls(attr) def link(self): return self.details['link'] def title(self): return self.details['title'] @staticmethod def _parse_title(title): return title.replace(' - Filmweb', '')
25.76
67
0.63354
78
644
5.179487
0.461538
0.074257
0.074257
0.10396
0
0
0
0
0
0
0
0
0.251553
644
24
68
26.833333
0.838174
0
0
0
0
0
0.094721
0
0
0
0
0
0
1
0.222222
false
0
0.055556
0.166667
0.611111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
0ee17b9a780e7eb1f6f9a8f7885751076aff563c
62
py
Python
desafio.py
araujoajoao/PythonB
9263fc6a2f731f21fe35a5eaaddff5be94effe3b
[ "MIT" ]
null
null
null
desafio.py
araujoajoao/PythonB
9263fc6a2f731f21fe35a5eaaddff5be94effe3b
[ "MIT" ]
null
null
null
desafio.py
araujoajoao/PythonB
9263fc6a2f731f21fe35a5eaaddff5be94effe3b
[ "MIT" ]
null
null
null
nome = input('Qual seu nome?') print('Seja bem vindo' , nome)
20.666667
30
0.66129
10
62
4.1
0.8
0
0
0
0
0
0
0
0
0
0
0
0.16129
62
2
31
31
0.788462
0
0
0
0
0
0.451613
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
0ee9a99545021d45c878b5b8f4d1787073cfe638
164
py
Python
sso/contact/urls.py
uktrade/staff-sso
c23da74415befdaed60649a9a940b1ba8331581e
[ "MIT" ]
7
2018-07-30T16:18:52.000Z
2022-03-21T12:58:20.000Z
sso/contact/urls.py
uktrade/staff-sso
c23da74415befdaed60649a9a940b1ba8331581e
[ "MIT" ]
55
2017-06-26T12:49:01.000Z
2022-03-09T15:48:49.000Z
sso/contact/urls.py
uktrade/staff-sso
c23da74415befdaed60649a9a940b1ba8331581e
[ "MIT" ]
1
2020-05-28T07:17:26.000Z
2020-05-28T07:17:26.000Z
from django.urls import path

from .views import AccessDeniedView

# URL configuration: serve the "access denied" page at /access-denied/,
# reversible by callers via the route name "access-denied".
urlpatterns = [
    path("access-denied/", AccessDeniedView.as_view(), name="access-denied"),
]
18.222222
77
0.737805
19
164
6.315789
0.684211
0.2
0
0
0
0
0
0
0
0
0
0
0.134146
164
8
78
20.5
0.84507
0
0
0
0
0
0.164634
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
0ef4b1b3cdae2c0af8ec6755251a42b275c2d11c
245
py
Python
alien_rfid/__init__.py
sacherjj/alien_rfid
615e2baa79d61b6448b4be71267011bbb2bd44fe
[ "MIT" ]
1
2017-11-25T15:30:59.000Z
2017-11-25T15:30:59.000Z
alien_rfid/__init__.py
sacherjj/alien_rfid
615e2baa79d61b6448b4be71267011bbb2bd44fe
[ "MIT" ]
null
null
null
alien_rfid/__init__.py
sacherjj/alien_rfid
615e2baa79d61b6448b4be71267011bbb2bd44fe
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from .alien_serial import AlienReaderSerial from .alien_network import AlienReaderNetwork from .alien_tester import AlienReaderTester __author__ = """Joe Sacher""" __email__ = 'sacherjj@gmail.com' __version__ = '0.1.0'
24.5
45
0.763265
29
245
5.931034
0.758621
0.156977
0
0
0
0
0
0
0
0
0
0.018519
0.118367
245
9
46
27.222222
0.777778
0.085714
0
0
0
0
0.149321
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
0efe5a39ea152f4552ac310a6dd322c52d5d29db
447
py
Python
tkhtmlview/utils.py
Saadmairaj/tkhtmlview
83b4fedaee1634084cf8432081114a3ee4e9a1fc
[ "MIT" ]
null
null
null
tkhtmlview/utils.py
Saadmairaj/tkhtmlview
83b4fedaee1634084cf8432081114a3ee4e9a1fc
[ "MIT" ]
null
null
null
tkhtmlview/utils.py
Saadmairaj/tkhtmlview
83b4fedaee1634084cf8432081114a3ee4e9a1fc
[ "MIT" ]
null
null
null
import os


class RenderHTML:
    """Load an HTML file from disk at construction time and expose its markup."""

    def __init__(self, file):
        """Read *file* eagerly; raise FileNotFoundError if it does not exist.

        The original implementation checked os.path.exists() and then opened
        the file, which is a check-then-use race (TOCTOU). Opening directly
        and catching FileNotFoundError (EAFP) closes that window while
        keeping the same exception type and message for callers.
        """
        self._file = file
        try:
            # NOTE(review): reads with the platform default text encoding,
            # as the original did — consider encoding="utf-8" for HTML.
            with open(self._file, 'r') as f:
                self._html = f.read()
        except FileNotFoundError:
            raise FileNotFoundError(f"No such HTML file: {self._file}") from None

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._file)

    def get_html(self):
        """Return the loaded markup as a plain string."""
        return str(self._html)

    # str(instance) yields the raw HTML, identical to get_html().
    __str__ = get_html
21.285714
71
0.577181
59
447
3.898305
0.491525
0.173913
0.104348
0
0
0
0
0
0
0
0
0
0.297539
447
20
72
22.35
0.732484
0
0
0
0
0
0.089888
0
0
0
0
0
0
1
0.230769
false
0
0.076923
0.153846
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
16189a6ca9254aa4c683bf04ca7b507acf52074e
267
py
Python
internal_api/urls.py
LuisSanchez/Python-Sandbox
ef5b516f89c262bbf5daf8f57c0609c5f0b58141
[ "Unlicense" ]
1
2020-08-16T14:09:41.000Z
2020-08-16T14:09:41.000Z
internal_api/urls.py
LuisSanchez/pythonchallenge
ef5b516f89c262bbf5daf8f57c0609c5f0b58141
[ "Unlicense" ]
null
null
null
internal_api/urls.py
LuisSanchez/pythonchallenge
ef5b516f89c262bbf5daf8f57c0609c5f0b58141
[ "Unlicense" ]
null
null
null
from django.urls import path, include
from rest_framework.urlpatterns import format_suffix_patterns

from internal_api import views

# Route for the TMC-for-credit calculation endpoint.
urlpatterns = [
    path('calculateTMC/', views.CalculateTMCForCredit.as_view()),
]

# Wrap the routes so DRF format suffixes (e.g. ".json") are accepted.
urlpatterns = format_suffix_patterns(urlpatterns)
26.7
65
0.820225
31
267
6.83871
0.612903
0.113208
0.188679
0
0
0
0
0
0
0
0
0
0.104869
267
9
66
29.666667
0.887029
0
0
0
0
0
0.048689
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
164c135d9a969ddec32288e558228df1fcd98faf
2,068
py
Python
optimizedGPS/problems/PreSolver.py
mickael-grima/optimizedGPS
e82fd864d7e1a3ef1f7ae64db991cb33784d4df3
[ "Apache-2.0" ]
null
null
null
optimizedGPS/problems/PreSolver.py
mickael-grima/optimizedGPS
e82fd864d7e1a3ef1f7ae64db991cb33784d4df3
[ "Apache-2.0" ]
null
null
null
optimizedGPS/problems/PreSolver.py
mickael-grima/optimizedGPS
e82fd864d7e1a3ef1f7ae64db991cb33784d4df3
[ "Apache-2.0" ]
null
null
null
""" In this file, we introduce some algorithm in order to simplify the problem before solving it """ import sys from optimizedGPS.structure.DriversStructure import DriversStructure class PreSolver(object): def __init__(self, graph, drivers_graph, drivers_structure=None, horizon=sys.maxint): self.graph = graph self.drivers_graph = drivers_graph self.drivers_structure = drivers_structure or DriversStructure(graph, drivers_graph, horizon=horizon) def get_graph(self): return self.graph def get_drivers_graph(self): return self.drivers_graph def solve(self): raise NotImplementedError("Not implemented yet") def set_unreachable_edge_to_driver(self, driver, edge): self.drivers_structure.set_unreachable_edge_to_driver(driver, edge) def is_edge_reachable_by_driver(self, driver, edge): return self.drivers_structure.is_edge_reachable_by_driver(driver, edge) def iter_reachable_edges_for_driver(self, driver): for edge in self.graph.edges_iter(): if self.is_edge_reachable_by_driver(driver, edge): yield edge def map_reachable_edges_for_drivers(self): """ to each driver we associate the reachable edges :return: dict """ return { driver: list(self.iter_reachable_edges_for_driver(driver)) for driver in self.drivers_graph.get_all_drivers() } class HorizonPresolver(PreSolver): """ Compute the minimum horizon """ def solve(self): from optimizedGPS.problems.Heuristics import RealGPS problem = RealGPS(self.graph, self.drivers_graph) problem.solve() self.drivers_structure.horizon = problem.opt_simulator.get_maximum_ending_time() def get_horizon(self): return self.drivers_structure.horizon class SafetyIntervalsPresolver(PreSolver): """ Compute the smallest possible safety intervals """ def solve(self): self.drivers_structure.compute_optimal_safety_intervals()
30.411765
109
0.70648
248
2,068
5.629032
0.306452
0.078797
0.08596
0.036533
0.139685
0.047278
0.047278
0
0
0
0
0
0.218085
2,068
67
110
30.865672
0.863327
0.111219
0
0.081081
0
0
0.010747
0
0
0
0
0
0
1
0.297297
false
0
0.081081
0.108108
0.594595
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
165cfe426d47aee240986efc2a381f0e37a10aa8
857
py
Python
product_inventory.py
GTHEP/udemy-projects
fbe2b053273f1bd4e1606441146d8ba25ce18a7e
[ "MIT" ]
null
null
null
product_inventory.py
GTHEP/udemy-projects
fbe2b053273f1bd4e1606441146d8ba25ce18a7e
[ "MIT" ]
null
null
null
product_inventory.py
GTHEP/udemy-projects
fbe2b053273f1bd4e1606441146d8ba25ce18a7e
[ "MIT" ]
null
null
null
# Define a Product class. Objects should have 3 variables for price, code, and quantity
class Product:
    """A stock item: unit price, identifying code, and quantity on hand."""

    def __init__(self, price=0.00, code='aaaa', quantity=0):
        self.price = price
        self.code = code
        self.quantity = quantity

    def __repr__(self):
        return f'Product({self.price!r}, {self.code!r}, {self.quantity!r})'

    def __str__(self):
        return f'The product code is: {self.code}'


# Define an inventory class and a function for calculating the total value of the inventory.
class Inventory:
    """An ordered collection of Product objects."""

    def __init__(self):
        self.products_list = []

    def add_product(self, product):
        """Append *product* and return the (mutated) backing list."""
        self.products_list.append(product)
        return self.products_list

    def total_value(self):
        """Sum price * quantity across every stored product."""
        total = 0
        for item in self.products_list:
            total += item.price * item.quantity
        return total
30.607143
93
0.647608
114
857
4.675439
0.350877
0.090056
0.120075
0.071295
0
0
0
0
0
0
0
0.007862
0.257876
857
27
94
31.740741
0.830189
0.205368
0
0
0
0.058824
0.137574
0.034024
0
0
0
0
0
1
0.352941
false
0
0
0.176471
0.705882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
1667c587d74c07f99442c6be392f0bdba276f2a8
153
py
Python
ML_CW1/assgn_1_part_2/2_neural_network/plot_cost.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
ML_CW1/assgn_1_part_2/2_neural_network/plot_cost.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
ML_CW1/assgn_1_part_2/2_neural_network/plot_cost.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt
import os


def plot_cost(cost, ax1):
    """Plot a training cost curve on the given axes.

    :param cost: sequence of cost values, one per training iteration.
    :param ax1: matplotlib Axes object; axis labels are set in place.
    """
    ax1.set_xlabel('Iterations')
    ax1.set_ylabel('Cost')
    ax1.plot(cost)
17
32
0.686275
23
153
4.434783
0.608696
0.156863
0
0
0
0
0
0
0
0
0
0.03252
0.196078
153
8
33
19.125
0.796748
0
0
0
0
0
0.091503
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
1673481d8faf2586e15b425141d105d4e55835f9
1,269
py
Python
plyer/platforms/macosx/storagepath.py
Sires0/plyer
b68d19ff1d5589722086fc303e23c372f34f3897
[ "MIT" ]
3
2020-07-17T16:23:14.000Z
2021-05-25T21:00:49.000Z
plyer/platforms/macosx/storagepath.py
Sires0/plyer
b68d19ff1d5589722086fc303e23c372f34f3897
[ "MIT" ]
1
2021-05-25T20:54:22.000Z
2021-05-26T20:22:30.000Z
plyer/platforms/macosx/storagepath.py
Sires0/plyer
b68d19ff1d5589722086fc303e23c372f34f3897
[ "MIT" ]
1
2019-05-17T09:45:00.000Z
2019-05-17T09:45:00.000Z
'''
MacOS X Storage Path
--------------------
'''

import os.path

from plyer.facades import StoragePath
from plyer.platforms.macosx.libs import osx_paths

# Directory constants (NSSearchPathDirectory enumeration)
NSApplicationDirectory = 1
NSDocumentDirectory = 9
NSDownloadsDirectory = 15
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSPicturesDirectory = 19


class OSXStoragePath(StoragePath):
    """StoragePath facade backed by NSSearchPathDirectory lookups."""

    def _get_home_dir(self):
        # Expand "~" for the current user.
        return os.path.expanduser('~')

    def _get_external_storage_dir(self):
        # macOS has no "external storage" concept in this facade; a message
        # string is returned instead of a path.
        return 'Method not implemented for current platform.'

    def _get_root_dir(self):
        return '/'

    def _get_documents_dir(self):
        return osx_paths.NSIterateSearchPaths(NSDocumentDirectory)

    def _get_downloads_dir(self):
        return osx_paths.NSIterateSearchPaths(NSDownloadsDirectory)

    def _get_videos_dir(self):
        return osx_paths.NSIterateSearchPaths(NSMoviesDirectory)

    def _get_music_dir(self):
        return osx_paths.NSIterateSearchPaths(NSMusicDirectory)

    def _get_pictures_dir(self):
        return osx_paths.NSIterateSearchPaths(NSPicturesDirectory)

    def _get_application_dir(self):
        return osx_paths.NSIterateSearchPaths(NSApplicationDirectory)


def instance():
    # Factory hook used by plyer's facade loader.
    return OSXStoragePath()
25.38
69
0.748621
131
1,269
6.984733
0.427481
0.059016
0.127869
0.104918
0.268852
0.268852
0
0
0
0
0
0.00947
0.167849
1,269
49
70
25.897959
0.857008
0.077226
0
0
0
0
0.039553
0
0
0
0
0
0
1
0.333333
false
0
0.1
0.333333
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
16749ac6d04f6d3b17186662c06bc43d5c7cf45d
237
py
Python
link/forms.py
tamirOK/short-link-project
b7c2db7eae0d82978b78ae08e8c89a8d9af95dd3
[ "MIT" ]
null
null
null
link/forms.py
tamirOK/short-link-project
b7c2db7eae0d82978b78ae08e8c89a8d9af95dd3
[ "MIT" ]
null
null
null
link/forms.py
tamirOK/short-link-project
b7c2db7eae0d82978b78ae08e8c89a8d9af95dd3
[ "MIT" ]
null
null
null
from django import forms


class UrlForm(forms.Form):
    """Form for submitting a URL to be shortened."""

    # The long URL entered by the user.
    url = forms.URLField(widget=forms.URLInput(
        attrs={'placeholder': 'Paste your link here...'}))
    # Populated server-side with the generated short URL; hidden from the user.
    short_url = forms.CharField(widget=forms.HiddenInput(), required=False)
29.625
75
0.708861
29
237
5.758621
0.758621
0.095808
0
0
0
0
0
0
0
0
0
0
0.151899
237
7
76
33.857143
0.830846
0
0
0
0
0
0.14346
0
0
0
0
0
0
1
0
false
0
0.2
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
16a9f0f8fd88065cd8283268d76385dc5bb8b515
231
py
Python
tpe/PS2000aRapidMode.py
markomanninen/tandempiercerexperiment
876d912224b6cc51812515df0987d651004a5a68
[ "MIT" ]
null
null
null
tpe/PS2000aRapidMode.py
markomanninen/tandempiercerexperiment
876d912224b6cc51812515df0987d651004a5a68
[ "MIT" ]
null
null
null
tpe/PS2000aRapidMode.py
markomanninen/tandempiercerexperiment
876d912224b6cc51812515df0987d651004a5a68
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms. # Copyright (C) 2021-2022 Marko Manninen # # PS2000A RAPID MODE to retrieve data from four channels with a trigger.
28.875
72
0.714286
35
231
4.714286
0.942857
0.121212
0
0
0
0
0
0
0
0
0
0.09375
0.168831
231
7
73
33
0.765625
0.939394
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
16bc41afaa327a11169eb7a2eaa66244baa049f9
210
py
Python
src/deploy/builder/builder/urls.py
werelaxe/drapo
5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b
[ "MIT" ]
null
null
null
src/deploy/builder/builder/urls.py
werelaxe/drapo
5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b
[ "MIT" ]
null
null
null
src/deploy/builder/builder/urls.py
werelaxe/drapo
5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b
[ "MIT" ]
null
null
null
from django.conf.urls import url, include
from django.contrib import admin

# Root URL configuration: the Django admin plus the "stacks" app,
# mounted under its own namespace for reverse() lookups.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^stacks/', include(('stacks.urls', 'stacks'), namespace='stacks')),
]
26.25
77
0.680952
28
210
5.107143
0.5
0.13986
0
0
0
0
0
0
0
0
0
0
0.133333
210
7
78
30
0.785714
0
0
0
0
0
0.180952
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
16c44703fa386b06e7879dd377c889dce8db0575
128
py
Python
.history/ClassFiles/Control Flow/ForLoopElseStatement_20210101222647.py
minefarmer/Comprehensive-Python
f97b9b83ec328fc4e4815607e6a65de90bb8de66
[ "Unlicense" ]
null
null
null
.history/ClassFiles/Control Flow/ForLoopElseStatement_20210101222647.py
minefarmer/Comprehensive-Python
f97b9b83ec328fc4e4815607e6a65de90bb8de66
[ "Unlicense" ]
null
null
null
.history/ClassFiles/Control Flow/ForLoopElseStatement_20210101222647.py
minefarmer/Comprehensive-Python
f97b9b83ec328fc4e4815607e6a65de90bb8de66
[ "Unlicense" ]
null
null
null
''' For Loop and Else statemenr Used to specify a block of code to execute when the loop is finished. ''' for x in range
32
69
0.695313
23
128
3.869565
0.869565
0
0
0
0
0
0
0
0
0
0
0
0.25
128
4
70
32
0.927083
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
bc6b2e86b894f46b1a246157810c53685e0f4936
702
py
Python
src/optimizer/__init__.py
OnurUner/DeepSide
dffb7ddc1d1bde36bbf5abb6eac107d39985c57a
[ "MIT" ]
4
2019-11-26T14:21:33.000Z
2022-03-17T16:53:59.000Z
src/optimizer/__init__.py
OnurUner/DeepSide
dffb7ddc1d1bde36bbf5abb6eac107d39985c57a
[ "MIT" ]
1
2020-06-04T10:42:48.000Z
2022-03-08T17:47:13.000Z
src/optimizer/__init__.py
OnurUner/DeepSide
dffb7ddc1d1bde36bbf5abb6eac107d39985c57a
[ "MIT" ]
1
2022-01-12T13:39:26.000Z
2022-01-12T13:39:26.000Z
import torch

from .adamw import AdamW


def init_optim(optim, params, lr, weight_decay):
    """Create an optimizer instance by name.

    :param optim: one of 'adam', 'adamw', 'amsgrad', 'sgd', 'rmsprop'.
    :param params: iterable of parameters (or param groups) to optimize.
    :param lr: learning rate.
    :param weight_decay: L2 penalty coefficient.
    :raises KeyError: if *optim* is not a supported optimizer name.
    """
    builders = {
        'adam': lambda: torch.optim.Adam(params, lr=lr, weight_decay=weight_decay),
        'adamw': lambda: AdamW(params, lr=lr, weight_decay=weight_decay),
        'amsgrad': lambda: torch.optim.Adam(params, lr=lr, weight_decay=weight_decay, amsgrad=True),
        'sgd': lambda: torch.optim.SGD(params, lr=lr, momentum=0.9, weight_decay=weight_decay),
        'rmsprop': lambda: torch.optim.RMSprop(params, lr=lr, momentum=0.9, weight_decay=weight_decay),
    }
    if optim not in builders:
        raise KeyError("Unsupported optimizer: {}".format(optim))
    return builders[optim]()
39
90
0.683761
97
702
4.824742
0.28866
0.258547
0.106838
0.235043
0.527778
0.527778
0.508547
0.508547
0.508547
0.401709
0
0.00703
0.189459
702
17
91
41.294118
0.815466
0
0
0
0
0
0.07265
0
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.533333
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
bc757eb1f822232cfb09a6425b4c7f2d6227e841
2,600
py
Python
pages/login_page.py
and-buk/ui-moodle-test
ad6fd85ecc517c903cf1893472f6ab4287f64da6
[ "MIT" ]
null
null
null
pages/login_page.py
and-buk/ui-moodle-test
ad6fd85ecc517c903cf1893472f6ab4287f64da6
[ "MIT" ]
null
null
null
pages/login_page.py
and-buk/ui-moodle-test
ad6fd85ecc517c903cf1893472f6ab4287f64da6
[ "MIT" ]
null
null
null
from selenium.webdriver.remote.webelement import WebElement

import conftest
from locators.login_page_locators import LoginPageLocators
from models.auth import AuthData
from pages.base_page import BasePage


class LoginPage(BasePage):
    """Page Object with the attributes and methods needed to authenticate a user."""

    def is_auth(self) -> bool:
        """Check whether the user is successfully logged in."""
        self.find_element(LoginPageLocators.FORM)
        element = self.find_elements(LoginPageLocators.USER_BUTTON)
        if len(element) > 0:
            return True
        return False

    def is_exit_confirm_button(self) -> bool:
        """Check for the button that confirms logging out of the user account."""
        self.find_element(LoginPageLocators.FORM)
        element = self.find_elements(LoginPageLocators.EXIT_CONFIRM)
        if len(element) > 0:
            return True
        return False

    def email_input(self) -> WebElement:
        """Locate the email input field."""
        return self.find_element(LoginPageLocators.LOGIN)

    def password_input(self) -> WebElement:
        """Locate the password input field."""
        return self.find_element(LoginPageLocators.PASSWORD)

    def submit_button(self) -> WebElement:
        # Locate the login form submit button.
        return self.find_element(LoginPageLocators.SUBMIT)

    def user_menu(self) -> WebElement:
        # Locate the logged-in user's menu control.
        return self.find_element(LoginPageLocators.USER_MENU)

    def exit(self) -> WebElement:
        """Locate the button for logging out of the user account."""
        return self.find_element(LoginPageLocators.EXIT)

    def exit_confirm(self) -> WebElement:
        """Locate the button confirming logout from the user account."""
        return self.find_element(LoginPageLocators.EXIT_CONFIRM)

    def auth(self, data: AuthData) -> None:
        """Log in with *data*, first ending any already-authenticated session."""
        if self.is_exit_confirm_button():
            self.click_element(self.exit_confirm())
        elif self.is_auth():
            self.click_element(self.user_menu())
            self.click_element(self.exit())
        self.fill_element(self.email_input(), data.login)
        self.fill_element(self.password_input(), data.password)
        # Log message text is part of runtime behavior and kept as-is
        # (Russian: "Login: ... Password: ...").
        conftest.logger.info(f"Логин:'{data.login}' Пароль: {data.password}")
        self.click_element(self.submit_button())

    def auth_login_error(self) -> str:
        """Locate the authorization error message on the web page."""
        return self.find_element(LoginPageLocators.LOGIN_ERROR).text
40
106
0.701923
294
2,600
6.054422
0.316327
0.049438
0.075843
0.161798
0.472472
0.400562
0.325843
0.224719
0.224719
0.183146
0
0.000966
0.203462
2,600
64
107
40.625
0.858522
0.210769
0
0.190476
0
0
0.021934
0
0
0
0
0
0
1
0.238095
false
0.095238
0.119048
0.047619
0.642857
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
3
bc8553768694096b77d50da789e7dc122e42d447
21,327
py
Python
tests/test_main.py
kylejohnson514/cel-python
c79a1b1a60cd5210b679bcb05f37c99387607241
[ "Apache-2.0" ]
null
null
null
tests/test_main.py
kylejohnson514/cel-python
c79a1b1a60cd5210b679bcb05f37c99387607241
[ "Apache-2.0" ]
null
null
null
tests/test_main.py
kylejohnson514/cel-python
c79a1b1a60cd5210b679bcb05f37c99387607241
[ "Apache-2.0" ]
null
null
null
# SPDX-Copyright: Copyright (c) Capital One Services, LLC # SPDX-License-Identifier: Apache-2.0 # Copyright 2020 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. """ Pure Python implementation of CEL. Test the main CLI. Python >= 3.9 preserves order of arguments defined in :mod:`argparse`. Python < 3.9 alphabetizes the arguments. This makes string comparisons challenging in expected results. """ import argparse import io import sys from unittest.mock import Mock, call, sentinel from pytest import * import celpy import celpy.__main__ from celpy import celtypes @fixture def mock_os_environ(monkeypatch): monkeypatch.setitem(celpy.__main__.os.environ, "OS_ENV_VAR", "3.14") def test_arg_type_value(mock_os_environ): """GIVEN arg values; WHEN parsing; THEN correct interpretation.""" assert celpy.__main__.arg_type_value("name:int=42") == ( "name", celtypes.IntType, 42, ) assert celpy.__main__.arg_type_value("OS_ENV_VAR") == ( "OS_ENV_VAR", celtypes.StringType, "3.14", ) assert celpy.__main__.arg_type_value("OS_ENV_VAR:double") == ( "OS_ENV_VAR", celtypes.DoubleType, 3.14, ) with raises(argparse.ArgumentTypeError): celpy.__main__.arg_type_value("name:type:value") def test_get_options(): """GIVEN verbose settings; WHEN parsing; THEN correct interpretation.""" options = celpy.__main__.get_options(["--arg", "name:int=42", "-n", "355./113."]) assert options.arg == [("name", celtypes.IntType, 42)] assert options.null_input assert options.expr == "355./113." 
assert options.verbose == 0 options = celpy.__main__.get_options(["-v", "-n", '"hello world"']) assert options.null_input assert options.expr == '"hello world"' assert options.verbose == 1 options = celpy.__main__.get_options(["-vv", ".doc.field * 42"]) assert not options.null_input assert options.expr == ".doc.field * 42" assert options.verbose == 2 def test_arg_type_bad(capsys): """GIVEN invalid arg values; WHEN parsing; THEN correct interpretation.""" with raises(SystemExit) as exc_info: options = celpy.__main__.get_options( ["--arg", "name:nope=42", "-n", "355./113."] ) assert exc_info.value.args == (2,) out, err = capsys.readouterr() assert err.splitlines() == [ "usage: celpy [-h] [-v] [-a ARG] [-n] [-s] [-i] [--json-package NAME]", " [--json-document NAME] [-b] [-f FORMAT]", " [expr]", "celpy: error: argument -a/--arg: arg name:nope=42 type name not in ['int', " "'uint', 'double', 'bool', 'string', 'bytes', 'list', 'map', 'null_type', " "'single_duration', 'single_timestamp', 'int64_value', 'uint64_value', " "'double_value', 'bool_value', 'string_value', 'bytes_value', 'number_value', " "'null_value']", ] def test_arg_value_bad(capsys): """GIVEN invalid arg values; WHEN parsing; THEN correct interpretation.""" with raises(SystemExit) as exc_info: options = celpy.__main__.get_options( ["--arg", "name:int=nope", "-n", "355./113."] ) assert exc_info.value.args == (2,) out, err = capsys.readouterr() assert err.splitlines() == [ "usage: celpy [-h] [-v] [-a ARG] [-n] [-s] [-i] [--json-package NAME]", " [--json-document NAME] [-b] [-f FORMAT]", " [expr]", "celpy: error: argument -a/--arg: arg name:int=nope value invalid for the supplied type", ] def test_arg_combo_bad(capsys): """GIVEN invalid arg combinations; WHEN parsing; THEN correct interpretation.""" error_prefix = [ "usage: celpy [-h] [-v] [-a ARG] [-n] [-s] [-i] [--json-package NAME]", " [--json-document NAME] [-b] [-f FORMAT]", " [expr]", ] with raises(SystemExit) as exc_info: options = 
celpy.__main__.get_options( ["-i", "-n", "355./113."] ) assert exc_info.value.args == (2,) out, err = capsys.readouterr() assert err.splitlines() == error_prefix + [ "celpy: error: Interactive mode and an expression provided", ] with raises(SystemExit) as exc_info: options = celpy.__main__.get_options( ["-n"] ) assert exc_info.value.args == (2,) out, err = capsys.readouterr() assert err.splitlines() == error_prefix + [ "celpy: error: No expression provided", ] with raises(SystemExit) as exc_info: options = celpy.__main__.get_options( ["-n", "--json-document=_", "--json-package=_"] ) assert exc_info.value.args == (2,) out, err = capsys.readouterr() assert err.splitlines() == error_prefix + [ "celpy: error: Either use --json-package or --json-document, not both", ] @fixture def mock_cel_environment(monkeypatch): mock_runner = Mock(evaluate=Mock(return_value=str(sentinel.OUTPUT))) mock_env = Mock( compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner) ) mock_env_class = Mock(return_value=mock_env) monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class) return mock_env_class def test_main_0(mock_cel_environment, caplog, capsys): """GIVEN null-input AND expression; WHEN eval; THEN correct internal object use.""" argv = ["--null-input", '"Hello world! I\'m " + name + "."'] status = celpy.__main__.main(argv) assert status == 0 assert mock_cel_environment.mock_calls == [call(package=None, annotations=None)] env = mock_cel_environment.return_value assert env.compile.mock_calls == [call('"Hello world! 
I\'m " + name + "."')] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [call({})] assert caplog.messages == [] out, err = capsys.readouterr() assert out == '"sentinel.OUTPUT"\n' assert err == "" def test_main_1(mock_cel_environment, caplog, capsys): """GIVEN null-input AND arg AND expression; WHEN eval; THEN correct internal object use.""" argv = [ "--arg", "name:string=CEL", "--null-input", '"Hello world! I\'m " + name + "."', ] status = celpy.__main__.main(argv) assert status == 0 assert mock_cel_environment.mock_calls == [ call(package=None, annotations={"name": celtypes.StringType}) ] env = mock_cel_environment.return_value assert env.compile.mock_calls == [call('"Hello world! I\'m " + name + "."')] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [call({"name": "CEL"})] assert caplog.messages == [] out, err = capsys.readouterr() assert out == '"sentinel.OUTPUT"\n' assert err == "" def test_main_pipe(mock_cel_environment, caplog, capsys): """GIVEN JSON AND expression; WHEN eval; THEN correct internal object use.""" argv = ['"Hello world! I\'m " + name + "."'] sys.stdin = io.StringIO('{"name": "CEL"}\n') status = celpy.__main__.main(argv) sys.stdin = sys.__stdin__ assert status == 0 assert mock_cel_environment.mock_calls == [call(package="jq", annotations=None)] env = mock_cel_environment.return_value assert env.compile.mock_calls == [call('"Hello world! 
I\'m " + name + "."')] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [ call( { "jq": celtypes.MapType( {celtypes.StringType("name"): celtypes.StringType("CEL")} ) } ) ] assert caplog.messages == [] out, err = capsys.readouterr() assert out == '"sentinel.OUTPUT"\n' assert err == "" def test_main_0_non_boolean(mock_cel_environment, caplog, capsys): """ GIVEN null-input AND boolean option and AND non-bool expr WHEN eval THEN correct internal object use. """ argv = ["-bn", '"Hello world! I\'m " + name + "."'] status = celpy.__main__.main(argv) assert status == 2 assert mock_cel_environment.mock_calls == [call(package=None, annotations=None)] env = mock_cel_environment.return_value assert env.compile.mock_calls == [call('"Hello world! I\'m " + name + "."')] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [call({})] assert caplog.messages == [ "Expected celtypes.BoolType, got <class 'str'> = 'sentinel.OUTPUT'" ] out, err = capsys.readouterr() assert out == "" assert err == "" @fixture def mock_cel_environment_false(monkeypatch): mock_runner = Mock(evaluate=Mock(return_value=celtypes.BoolType(False))) mock_env = Mock( compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner) ) mock_env_class = Mock(return_value=mock_env) monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class) return mock_env_class def test_main_0_boolean(mock_cel_environment_false, caplog, capsys): """ GIVEN null-input AND boolean option AND false expr WHEN eval THEN correct internal object use. 
""" argv = ["-bn", "2 == 1"] status = celpy.__main__.main(argv) assert status == 1 assert mock_cel_environment_false.mock_calls == [ call(package=None, annotations=None) ] env = mock_cel_environment_false.return_value assert env.compile.mock_calls == [call("2 == 1")] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [call({})] assert caplog.messages == [] out, err = capsys.readouterr() assert out == "" assert err == "" @fixture def mock_cel_environment_integer(monkeypatch): mock_runner = Mock(evaluate=Mock(return_value=celtypes.IntType(3735928559))) mock_env = Mock( compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner) ) mock_env_class = Mock(return_value=mock_env) monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class) return mock_env_class def test_main_slurp_int_format(mock_cel_environment_integer, caplog, capsys): """ GIVEN JSON AND slurp option AND formatted output AND int expr WHEN eval THEN correct internal object use. 
""" argv = ["-s", "-f", "#8x", "339629869*11"] sys.stdin = io.StringIO('{"name": "CEL"}\n') status = celpy.__main__.main(argv) sys.stdin = sys.__stdin__ assert status == 0 assert mock_cel_environment_integer.mock_calls == [ call(package='jq', annotations=None) ] env = mock_cel_environment_integer.return_value assert env.compile.mock_calls == [call("339629869*11")] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [ call({'jq': celtypes.MapType({celtypes.StringType('name'): celtypes.StringType('CEL')})}) ] assert caplog.messages == [] out, err = capsys.readouterr() assert out == "0xdeadbeef\n" assert err == "" @fixture def mock_cel_environment_bool(monkeypatch): mock_runner = Mock(evaluate=Mock(return_value=celtypes.BoolType(False))) mock_env = Mock( compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner) ) mock_env_class = Mock(return_value=mock_env) monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class) return mock_env_class def test_main_slurp_bool_status(mock_cel_environment_bool, caplog, capsys): """ GIVEN JSON AND slurp option AND formatted output AND int expr WHEN eval THEN correct internal object use. 
""" argv = ["-s", "-b", '.name == "not CEL"'] sys.stdin = io.StringIO('{"name": "CEL"}\n') status = celpy.__main__.main(argv) sys.stdin = sys.__stdin__ assert status == 1 assert mock_cel_environment_bool.mock_calls == [ call(package='jq', annotations=None) ] env = mock_cel_environment_bool.return_value assert env.compile.mock_calls == [call('.name == "not CEL"')] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [ call({'jq': celtypes.MapType({celtypes.StringType('name'): celtypes.StringType('CEL')})}) ] assert caplog.messages == [] out, err = capsys.readouterr() assert out == "false\n" assert err == "" def test_main_0_int_format(mock_cel_environment_integer, caplog, capsys): """ GIVEN slurp option AND formatted output AND int expr WHEN eval THEN correct internal object use. """ argv = ["-n", "-f", "#8x", "339629869*11"] status = celpy.__main__.main(argv) assert status == 0 assert mock_cel_environment_integer.mock_calls == [ call(package=None, annotations=None) ] env = mock_cel_environment_integer.return_value assert env.compile.mock_calls == [call("339629869*11")] assert env.program.mock_calls == [call(sentinel.AST)] prgm = env.program.return_value assert prgm.evaluate.mock_calls == [call({})] assert caplog.messages == [] out, err = capsys.readouterr() assert out == "0xdeadbeef\n" assert err == "" def test_main_verbose(mock_cel_environment, caplog, capsys): """GIVEN verbose AND expression; WHEN eval; THEN correct log output.""" argv = ["-v", "[2, 4, 5].map(x, x/2)"] status = celpy.__main__.main(argv) assert status == 0 assert mock_cel_environment.mock_calls == [call(annotations=None, package="jq")] assert caplog.messages == ["Expr: '[2, 4, 5].map(x, x/2)'"] out, err = capsys.readouterr() assert out == "" assert err == "" def test_main_very_verbose(mock_cel_environment, caplog, capsys): """GIVEN very verbose AND expression; WHEN eval; THEN correct log output.""" argv = ["-vv", "[2, 4, 5].map(x, 
x/2)"] status = celpy.__main__.main(argv) assert status == 0 assert mock_cel_environment.mock_calls == [call(annotations=None, package="jq")] expected_namespace = argparse.Namespace( verbose=2, arg=None, null_input=False, slurp=False, interactive=False, package='jq', document=None, boolean=False, format=None, expr='[2, 4, 5].map(x, x/2)' ) assert caplog.messages == [ str(expected_namespace), "Expr: '[2, 4, 5].map(x, x/2)'", ] out, err = capsys.readouterr() assert out == "" assert err == "" @fixture def mock_cel_environment_syntax_error(monkeypatch): mock_runner = Mock(evaluate=Mock(return_value=str(sentinel.OUTPUT))) mock_env = Mock( compile=Mock(side_effect=celpy.CELParseError((sentinel.arg0, sentinel.arg1))), cel_parser=Mock(error_text=Mock(return_value=sentinel.Formatted_Error)), ) mock_env_class = Mock(return_value=mock_env) monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class) return mock_env_class def test_main_parse_error(mock_cel_environment_syntax_error, caplog, capsys): """GIVEN syntax error; WHEN eval; THEN correct stderr output.""" argv = ["-n", "[nope++]"] status = celpy.__main__.main(argv) assert status == 1 assert mock_cel_environment_syntax_error.mock_calls == [ call(package=None, annotations=None) ] expected_namespace = argparse.Namespace( verbose=0, arg=None, null_input=True, slurp=False, interactive=False, package='jq', document=None, boolean=False, format=None, expr='[nope++]' ) assert caplog.messages == [ str(expected_namespace), "Expr: '[nope++]'", ] out, err = capsys.readouterr() assert out == "" assert err == "sentinel.Formatted_Error\n" @fixture def mock_cel_environment_eval_error(monkeypatch): mock_runner = Mock( evaluate=Mock(side_effect=celpy.CELEvalError((sentinel.arg0, sentinel.arg1))) ) mock_env = Mock( compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner), cel_parser=Mock(error_text=Mock(return_value=sentinel.Formatted_Error)), ) mock_env_class = Mock(return_value=mock_env) 
monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class) return mock_env_class def test_main_0_eval_error(mock_cel_environment_eval_error, caplog, capsys): """GIVEN null input AND bad expression; WHEN eval; THEN correct stderr output.""" argv = ["-n", "2 / 0"] status = celpy.__main__.main(argv) assert status == 2 assert mock_cel_environment_eval_error.mock_calls == [ call(package=None, annotations=None) ] expected_namespace = argparse.Namespace( verbose=0, arg=None, null_input=True, slurp=False, interactive=False, package='jq', document=None, boolean=False, format=None, expr='2 / 0' ) assert caplog.messages == [ str(expected_namespace), "Expr: '2 / 0'", ] out, err = capsys.readouterr() assert out == "" assert err == "sentinel.Formatted_Error\n" def test_main_pipe_eval_error(mock_cel_environment_eval_error, caplog, capsys): """GIVEN piped input AND bad expression; WHEN eval; THEN correct stderr output.""" argv = [".json.field / 0"] sys.stdin = io.StringIO('{"name": "CEL"}\n') status = celpy.__main__.main(argv) sys.stdin = sys.__stdin__ assert status == 0 assert mock_cel_environment_eval_error.mock_calls == [ call(package="jq", annotations=None) ] expected_namespace = argparse.Namespace( verbose=0, arg=None, null_input=False, slurp=False, interactive=False, package='jq', document=None, boolean=False, format=None, expr='.json.field / 0' ) assert caplog.messages == [ str(expected_namespace), "Expr: '.json.field / 0'", "Encountered (sentinel.arg0, sentinel.arg1) on document '{\"name\": \"CEL\"}\\n'", ] out, err = capsys.readouterr() assert out == "null\n" assert err == "" def test_main_pipe_json_error(mock_cel_environment_eval_error, caplog, capsys): """GIVEN piped input AND bad expression; WHEN eval; THEN correct stderr output.""" argv = [".json.field / 0"] sys.stdin = io.StringIO('nope, not json\n') status = celpy.__main__.main(argv) sys.stdin = sys.__stdin__ assert status == 3 assert mock_cel_environment_eval_error.mock_calls == [ call(package="jq", 
annotations=None) ] expected_namespace = argparse.Namespace( verbose=0, arg=None, null_input=False, slurp=False, interactive=False, package='jq', document=None, boolean=False, format=None, expr='.json.field / 0' ) assert caplog.messages == [ str(expected_namespace), "Expr: '.json.field / 0'", "Expecting value: line 1 column 1 (char 0) on document 'nope, not json\\n'", ] out, err = capsys.readouterr() assert out == "" assert err == "" def test_main_repl(monkeypatch, capsys): mock_repl = Mock() mock_repl_class = Mock(return_value=mock_repl) monkeypatch.setattr(celpy.__main__, 'CEL_REPL', mock_repl_class) argv = ["-i"] status = celpy.__main__.main(argv) assert status == 0 assert mock_repl_class.mock_calls == [ call() ] assert mock_repl.cmdloop.mock_calls == [ call() ] def test_repl_class_good_interaction(capsys): """ If any print() is added for debugging, this test is likely to break. """ c = celpy.__main__.CEL_REPL() c.preloop() assert c.state == {} r_0 = c.onecmd("set pi 355./113.") assert not r_0 r_1 = c.onecmd("show") assert not r_1 r_2 = c.onecmd("pi * 2.") assert not r_2 r_2 = c.onecmd("quit") assert r_2 out, err = capsys.readouterr() lines = out.splitlines() assert lines[0].startswith("3.14159") assert lines[1].startswith("{'pi': DoubleType(3.14159") assert lines[2].startswith("6.28318") assert c.state == {"pi": celpy.celtypes.DoubleType(355./113.)} def test_repl_class_bad_interaction(capsys): c = celpy.__main__.CEL_REPL() c.preloop() c.onecmd("set a pi ++ nope | not & proper \\ CEL") c.onecmd("this! isn't! valid!!") out, err = capsys.readouterr() lines = err.splitlines() assert ( lines[0] == "ERROR: <input>:1:5 Unexpected token Token(PLUS, '+') at line 1, column 5." ) assert ( lines[4] == "ERROR: <input>:1:5 Unexpected token Token(BANG, '!') at line 1, column 5." ) assert c.state == {}
35.309603
100
0.646176
2,713
21,327
4.856985
0.106893
0.022312
0.057373
0.035061
0.782652
0.748653
0.735069
0.693329
0.674357
0.643242
0
0.015616
0.213298
21,327
603
101
35.368159
0.769758
0.115581
0
0.563559
0
0.019068
0.149563
0.00279
0
0
0.001073
0
0.279661
1
0.061441
false
0
0.016949
0
0.091102
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bc89a354a2a6594f9bc9c5455d66778c3366f09c
531
py
Python
tsheets/repos/project.py
meilinger/api_python
c5ddd4f448ae1622ddd29a6166fee5db43df11f9
[ "MIT" ]
null
null
null
tsheets/repos/project.py
meilinger/api_python
c5ddd4f448ae1622ddd29a6166fee5db43df11f9
[ "MIT" ]
null
null
null
tsheets/repos/project.py
meilinger/api_python
c5ddd4f448ae1622ddd29a6166fee5db43df11f9
[ "MIT" ]
null
null
null
from tsheets.repository import Repository from datetime import date, datetime import tsheets.models as models class Project(Repository): pass Project.add_me_to_subcls() Project.add_url("/reports/project") Project.add_model(models.Project) Project.add_actions([u'report']) Project.filter("start_date", date) Project.filter("end_date", date) Project.filter("user_ids", [int]) Project.filter("group_ids", [int]) Project.filter("jobcode_ids", [int]) Project.filter("jobcode_type", str) Project.filter("customfielditems", dict)
25.285714
41
0.779661
73
531
5.506849
0.452055
0.226368
0.097015
0.141791
0.129353
0
0
0
0
0
0
0
0.079096
531
20
42
26.55
0.822086
0
0
0
0
0
0.180791
0
0
0
0
0
0
1
0
true
0.0625
0.1875
0
0.25
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
3
bca00d8fc3ccc6ce2beaad47e1bcf8b71543752a
190
py
Python
uqcsbot/scripts/cookbook.py
ashfordneil/uqcsbot
af47d8c4fcdf7cba6c6d411e838b17628fedf9af
[ "MIT" ]
null
null
null
uqcsbot/scripts/cookbook.py
ashfordneil/uqcsbot
af47d8c4fcdf7cba6c6d411e838b17628fedf9af
[ "MIT" ]
null
null
null
uqcsbot/scripts/cookbook.py
ashfordneil/uqcsbot
af47d8c4fcdf7cba6c6d411e838b17628fedf9af
[ "MIT" ]
null
null
null
from uqcsbot import bot, Command @bot.on_command("cookbook") def handle_cookbook(command: Command): bot.post_message(command.channel, "https://github.com/UQComputingSociety/cookbook")
27.142857
87
0.784211
24
190
6.083333
0.666667
0.136986
0
0
0
0
0
0
0
0
0
0
0.089474
190
6
88
31.666667
0.843931
0
0
0
0
0
0.284211
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
bcabd634d307a4a80edda02638defef1cb1c906f
349
py
Python
core/admin.py
bm0/book_catalog
29e1e3584d40dd75770a030df02d50093365b561
[ "MIT" ]
null
null
null
core/admin.py
bm0/book_catalog
29e1e3584d40dd75770a030df02d50093365b561
[ "MIT" ]
9
2020-02-11T21:41:20.000Z
2022-01-13T00:33:21.000Z
core/admin.py
bm0/book_catalog
29e1e3584d40dd75770a030df02d50093365b561
[ "MIT" ]
null
null
null
from django.contrib import admin from core import models # Register your models here. class BookInline(admin.TabularInline): model = models.Book extra = 1 class AuthorAdmin(admin.ModelAdmin): inlines = (BookInline,) admin.site.register(models.Author, AuthorAdmin) admin.site.register(models.Book) admin.site.register(models.Tag)
17.45
47
0.759312
44
349
6.022727
0.522727
0.101887
0.192453
0.260377
0
0
0
0
0
0
0
0.003344
0.143266
349
19
48
18.368421
0.882943
0.074499
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.2
0
0.7
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
bcb2a7c6fc81d1d604b350eba07fc014bab23961
308
py
Python
src/python/WMQuality/Emulators/DefaultConfig.py
tslazarova/WMCore
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
[ "Apache-2.0" ]
1
2015-02-05T13:43:46.000Z
2015-02-05T13:43:46.000Z
src/python/WMQuality/Emulators/DefaultConfig.py
tslazarova/WMCore
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
[ "Apache-2.0" ]
1
2016-10-13T14:57:35.000Z
2016-10-13T14:57:35.000Z
src/python/WMQuality/Emulators/DefaultConfig.py
juztas/WMCore
f7e830a573d50fb1d7240797f18d809f994b934d
[ "Apache-2.0" ]
null
null
null
''' Created on Feb 15, 2010 ''' from WMCore.Configuration import Configuration from WMCore.WMBase import getWMBASE import os.path config = Configuration() config.section_("Emulator") config.Emulator.PhEDEx = True config.Emulator.DBSReader = True config.Emulator.ReqMgr = True config.Emulator.SiteDB = True
20.533333
46
0.788961
39
308
6.205128
0.538462
0.231405
0.223141
0
0
0
0
0
0
0
0
0.021978
0.113636
308
14
47
22
0.864469
0.074675
0
0
0
0
0.028986
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
bcb3d0ab36966c7d7efe30fa0b524245b69781a5
24,152
py
Python
bnc/commands/cmd_spot.py
mpetrinidev/bnb-cli
d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9
[ "MIT" ]
5
2021-04-02T20:49:19.000Z
2021-06-11T06:22:24.000Z
bnc/commands/cmd_spot.py
mpetrinidev/bnb-cli
d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9
[ "MIT" ]
2
2021-06-13T20:22:09.000Z
2021-06-14T23:09:14.000Z
bnc/commands/cmd_spot.py
mpetrinidev/bnc-cli
d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9
[ "MIT" ]
null
null
null
import click from ..builder import LimitOrderBuilder from ..builder import MarketOrderBuilder from ..builder import StopLossLimitBuilder from ..builder import TakeProfitLimitBuilder from ..builder import LimitMakerBuilder from ..builder import CancelOrderBuilder from ..builder import OpenOrdersBuilder from ..builder import OrderStatusBuilder from ..builder import Builder from ..builder import AllOrderBuilder from ..builder import MyTradesBuilder from ..builder import NewOcoOrderBuilder from ..builder import CancelOcoOrderBuilder from ..builder import OcoOrderBuilder from ..builder import AllOcoOrderBuilder from ..environment import pass_environment from ..decorators import coro from ..decorators import new_order_options from ..decorators import check_credentials from ..utils.api_time import get_timestamp from ..validation.val_spot import validate_recv_window from ..validation.val_spot import validate_side from ..validation.val_spot import validate_time_in_force from ..validation.val_spot import validate_new_order_resp_type @click.group(short_help="Spot Account/Trade operations") @check_credentials def cli(): """Spot Account/Trade operations""" @cli.group("new_order", short_help="Send in a new limit, " "market, stop_loss, stop_loss_limit, " "take_profit, take_profit_limit " "or limit_maker order") def new_order(): """ Send in a new limit, market, stop_loss, stop_loss_limit, take_profit, take_profit_limit or limit_maker order """ @new_order.command("limit", short_help="Send in a new limit order") @new_order_options([{'name': '-tif', 'attrs': {'required': True}}, {'name': '-q', 'attrs': {'required': True}}, {'name': '-p', 'attrs': {'required': True}}, {'name': '-qoq', 'exclude': True}, {'name': '-sp', 'exclude': True}]) @coro async def limit(symbol, side, time_in_force, quantity, price, new_client_order_id, iceberg_qty, recv_window, new_order_resp_type): """Send in a new limit order""" payload = { 'symbol': symbol, 'side': side, 'type': "LIMIT", 'timeInForce': 
time_in_force, 'price': price, 'quantity': quantity, 'newOrderRespType': new_order_resp_type, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = LimitOrderBuilder(endpoint='api/v3/order', payload=payload, method='POST') \ .add_optional_params_to_payload(new_client_order_id=new_client_order_id, iceberg_qty=iceberg_qty) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @new_order.command("market", short_help="Send in a new market order") @new_order_options([{'name': '-tif', 'exclude': True}, {'name': '-p', 'exclude': True}, {'name': '-sp', 'exclude': True}, {'name': '-iq', 'exclude': True}]) @pass_environment @coro async def market(ctx, symbol, side, quantity, quote_order_qty, new_client_order_id, recv_window, new_order_resp_type): """Send in a new market order""" if quantity is None and quote_order_qty is None: ctx.log('Either --quantity (-q) or --quote_order_qty (-qoq) must be sent.') return payload = { 'symbol': symbol, 'side': side, 'type': "MARKET", 'newOrderRespType': new_order_resp_type, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = MarketOrderBuilder(endpoint='api/v3/order', payload=payload, method='POST') \ .add_optional_params_to_payload(quantity=quantity, quote_order_qty=quote_order_qty, new_client_order_id=new_client_order_id) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @new_order.command("stop_loss_limit", short_help="Send in a new stop_loss_limit order") @new_order_options([{'name': '-tif', 'attrs': {'required': True}}, {'name': '-q', 'attrs': {'required': True}}, {'name': '-p', 'attrs': {'required': True}}, {'name': '-sp', 'attrs': {'required': True}}, {'name': '-nort', 'attrs': {'default': "ACK"}}, {'name': '-qoq', 'exclude': True}]) @coro async def stop_loss_limit(symbol, side, time_in_force, quantity, price, new_client_order_id, stop_price, iceberg_qty, recv_window, new_order_resp_type): """Send in a new stop_loss_limit 
order""" payload = { 'symbol': symbol, 'side': side, 'type': "STOP_LOSS_LIMIT", 'timeInForce': time_in_force, 'quantity': quantity, 'price': price, 'stopPrice': stop_price, 'newOrderRespType': new_order_resp_type, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = StopLossLimitBuilder(endpoint='api/v3/order', payload=payload, method='POST') \ .add_optional_params_to_payload(new_client_order_id=new_client_order_id, iceberg_qty=iceberg_qty) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @new_order.command("take_profit_limit", short_help="Send in a new take_profit_limit order") @new_order_options([{'name': '-tif', 'attrs': {'required': True}}, {'name': '-q', 'attrs': {'required': True}}, {'name': '-p', 'attrs': {'required': True}}, {'name': '-sp', 'attrs': {'required': True}}, {'name': '-nort', 'attrs': {'default': "ACK"}}, {'name': '-qoq', 'exclude': True}]) @coro async def take_profit_limit(symbol, side, time_in_force, quantity, price, new_client_order_id, stop_price, iceberg_qty, recv_window, new_order_resp_type): """Send in a new take_profit_limit order""" payload = { 'symbol': symbol, 'side': side, 'type': "TAKE_PROFIT_LIMIT", 'timeInForce': time_in_force, 'quantity': quantity, 'price': price, 'stopPrice': stop_price, 'newOrderRespType': new_order_resp_type, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = TakeProfitLimitBuilder(endpoint='api/v3/order', payload=payload, method='POST') \ .add_optional_params_to_payload(new_client_order_id=new_client_order_id, iceberg_qty=iceberg_qty) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @new_order.command("limit_maker", short_help="Send in a new limit_maker order") @new_order_options([{'name': '-q', 'attrs': {'required': True}}, {'name': '-p', 'attrs': {'required': True}}, {'name': '-tif', 'exclude': True}, {'name': '-qoq', 'exclude': True}, {'name': '-sp', 'exclude': True}, {'name': '-nort', 
'attrs': {'default': "ACK"}}]) @coro async def limit_maker(symbol, side, quantity, price, new_client_order_id, iceberg_qty, recv_window, new_order_resp_type): """Send in a new limit_maker order""" payload = { 'symbol': symbol, 'side': side, 'type': "LIMIT_MAKER", 'quantity': quantity, 'price': price, 'newOrderRespType': new_order_resp_type, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = LimitMakerBuilder(endpoint='api/v3/order', payload=payload, method='POST') \ .add_optional_params_to_payload(new_client_order_id=new_client_order_id, iceberg_qty=iceberg_qty) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("cancel_order", short_help='Cancel an active order') @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-oid", "--order_id", type=click.types.INT) @click.option("-ocoid", "--orig_client_order_id", type=click.types.STRING) @click.option("-ncoid", "--new_client_order_id", type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @pass_environment @coro async def cancel_order(ctx, symbol, order_id, orig_client_order_id, new_client_order_id, recv_window): """ Cancel an active order Either orderId or origClientOrderId must be sent. """ if order_id is None and orig_client_order_id is None: ctx.log('Either --order_id (-oid) or --orig_client_order_id (-ocoid) must be sent.') return payload = { 'symbol': symbol, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = CancelOrderBuilder(endpoint='api/v3/order', payload=payload, method='DELETE') \ .add_optional_params_to_payload(order_id=order_id, orig_client_order_id=orig_client_order_id, new_client_order_id=new_client_order_id) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("cancel_all_orders", short_help='Cancels all active orders on a symbol. 
' 'This includes OCO orders.') @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro async def cancel_all_orders(symbol, recv_window): """ Cancels all active orders on a symbol. This includes OCO orders. """ payload = { 'symbol': symbol, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = Builder(endpoint='api/v3/openOrders', payload=payload, method='DELETE').set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("account_info", short_help="Get current account information") @click.option("--query", type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro async def account_info(recv_window, query): """Get current account information""" payload = { 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = Builder(endpoint='api/v3/account', payload=payload) \ .set_security() await builder.send_http_req() builder.handle_response().filter(query).generate_output() @cli.command("order_status", short_help="Check an order's status") @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-oid", "--order_id", type=click.types.INT) @click.option("-ocoid", "--orig_client_order_id", type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro @pass_environment async def order_status(ctx, symbol, order_id, orig_client_order_id, recv_window): """ Check an order's status Notes: Either --order_id (-oid) or --orig_client_order_id (-ocoid) must be sent. For some historical orders cummulativeQuoteQty will be < 0, meaning the data is not available at this time. 
""" if order_id is None and orig_client_order_id is None: ctx.log('Either --order_id (-oid) or --orig_client_order_id (-ocoid) must be sent.') return payload = { 'symbol': symbol, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = OrderStatusBuilder(endpoint='api/v3/order', payload=payload) \ .add_optional_params_to_payload(order_id=order_id, orig_client_order_id=orig_client_order_id) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("open_orders", short_help="Get all open orders on a symbol. Careful when accessing this with no symbol.") @click.option("-sy", "--symbol", type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro async def open_orders(symbol, recv_window): """ Get all open orders on a symbol. Careful when accessing this with no symbol. Weight: 1 for a single symbol. 40 when the symbol parameter is omitted. """ payload = { 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = OpenOrdersBuilder(endpoint='api/v3/openOrders', payload=payload) \ .add_optional_params_to_payload(symbol=symbol) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("all_orders", short_help="Get all account orders; active, canceled, or filled.") @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-oid", "--order_id", type=click.types.INT) @click.option("-st", "--start_time", type=click.types.INT) @click.option("-et", "--end_time", type=click.types.INT) @click.option("-l", "--limit", default=500, show_default=True, type=click.types.IntRange(1, 1000)) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @click.option("--query", type=click.types.STRING) @coro async def all_orders(symbol, order_id, start_time, end_time, limit, recv_window, query): 
""" Get all account orders; active, canceled, or filled. Weight: 5 with symbol. NOTES: If orderId is set, it will get orders >= that orderId. Otherwise most recent orders are returned. For some historical orders cummulativeQuoteQty will be < 0, meaning the data is not available at this time. """ payload = { 'symbol': symbol, 'limit': limit, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = AllOrderBuilder(endpoint='api/v3/allOrders', payload=payload) \ .add_optional_params_to_payload(order_id=order_id, start_time=start_time, end_time=end_time) \ .set_security() await builder.send_http_req() builder.handle_response().filter(query).generate_output() @cli.command("new_oco_order", short_help="Send in a new OCO") @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-lcoid", "--list_client_order_id", type=click.types.STRING) @click.option("-si", "--side", required=True, callback=validate_side, type=click.types.STRING) @click.option("-q", "--quantity", required=True, type=click.types.FLOAT) @click.option("-limcoid", "--limit_client_order_id", type=click.types.STRING) @click.option("-p", "--price", required=True, type=click.types.FLOAT) @click.option("-liq", "--limit_iceberg_qty", type=click.types.FLOAT) @click.option("-scoid", "--stop_client_order_id", type=click.types.STRING) @click.option("-sp", "--stop_price", required=True, type=click.types.FLOAT) @click.option("-slp", "--stop_limit_price", type=click.types.FLOAT) @click.option("-siq", "--stop_iceberg_qty", type=click.types.FLOAT) @click.option("-sltif", "--stop_limit_time_in_force", callback=validate_time_in_force, type=click.types.STRING) @click.option("-nort", "--new_order_resp_type", default="FULL", callback=validate_new_order_resp_type, type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro @pass_environment async def new_oco_order(ctx, symbol, 
list_client_order_id, side, quantity, limit_client_order_id, price, limit_iceberg_qty, stop_client_order_id, stop_price, stop_limit_price, stop_iceberg_qty, stop_limit_time_in_force, new_order_resp_type, recv_window): """ Send in a new OCO. Other Info: Price Restrictions: SELL: Limit Price > Last Price > Stop Price BUY: Limit Price < Last Price < Stop Price Quantity Restrictions: Both legs must have the same quantity ICEBERG quantities however do not have to be the same. Order Rate Limit OCO counts as 2 orders against the order rate limit. """ if stop_limit_price is not None and stop_limit_time_in_force is None: ctx.log('--stop_limit_time_in_force (-sltif) is required when you sent --stop_limit_price (-slp).') return payload = { 'symbol': symbol, 'side': side, 'quantity': quantity, 'price': price, 'stopPrice': stop_price, 'newOrderRespType': new_order_resp_type, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = NewOcoOrderBuilder(endpoint='api/v3/order/oco', method='POST', payload=payload) \ .add_optional_params_to_payload(list_client_order_id=list_client_order_id, limit_client_order_id=limit_client_order_id, limit_iceberg_qty=limit_iceberg_qty, stop_client_order_id=stop_client_order_id, stop_limit_price=stop_limit_price, stop_iceberg_qty=stop_iceberg_qty, stop_limit_time_in_force=stop_limit_time_in_force) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("cancel_oco_order", short_help="Cancel an entire Order List.") @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-olid", "--order_list_id", type=click.types.INT) @click.option("-lcoid", "--list_client_order_id", type=click.types.STRING) @click.option("-ncoid", "--new_client_order_id", type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro @pass_environment async def cancel_oco_order(ctx, symbol, 
order_list_id, list_client_order_id, new_client_order_id, recv_window): """ Cancel an entire Order List. Weight: 1 Additional notes: Canceling an individual leg will cancel the entire OCO """ if order_list_id is None and list_client_order_id is None: ctx.log('Either --order_list_id (-olid) or --list_client_order_id (-lcoid) must be sent.') return payload = { 'symbol': symbol, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = CancelOcoOrderBuilder(endpoint='api/v3/orderList', method='DELETE', payload=payload) \ .add_optional_params_to_payload(order_list_id=order_list_id, list_client_order_id=list_client_order_id, new_client_order_id=new_client_order_id) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("oco_order", short_help="Retrieves a specific OCO based on provided optional parameters.") @click.option("-olid", "--order_list_id", type=click.types.INT) @click.option("-lcoid", "--list_client_order_id", type=click.types.STRING) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro @pass_environment async def oco_order(ctx, order_list_id, list_client_order_id, recv_window): """ Retrieves a specific OCO based on provided optional parameters Weight: 1 """ if order_list_id is None and list_client_order_id is None: ctx.log('Either --order_list_id (-olid) or --list_client_order_id (-lcoid) must be sent.') return payload = { 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = OcoOrderBuilder(endpoint='api/v3/orderList', payload=payload) \ .add_optional_params_to_payload(order_list_id=order_list_id, list_client_order_id=list_client_order_id) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("all_oco_orders", short_help="Retrieves all OCO based on provided optional parameters.") @click.option("-fid", "--from_id", type=click.types.INT) @click.option("-st", 
"--start_time", type=click.types.INT) @click.option("-et", "--end_time", type=click.types.INT) @click.option("-l", "--limit", default=500, show_default=True, type=click.types.IntRange(1, 1000)) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro async def all_oco_orders(from_id, start_time, end_time, limit, recv_window): """ Retrieves all OCO based on provided optional parameters. Weight: 10 """ payload = { 'limit': limit, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = AllOcoOrderBuilder(endpoint='api/v3/allOrderList', payload=payload) \ .add_optional_params_to_payload(from_id=from_id, start_time=start_time, end_time=end_time) \ .set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("open_oco_orders") @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @coro async def open_oco_orders(recv_window): """ Weight: 2 """ payload = { 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = Builder(endpoint='api/v3/openOrderList', payload=payload).set_security() await builder.send_http_req() builder.handle_response().generate_output() @cli.command("my_trades", short_help="Get trades for a specific account and symbol.") @click.option("-sy", "--symbol", required=True, type=click.types.STRING) @click.option("-st", "--start_time", type=click.types.INT) @click.option("-et", "--end_time", type=click.types.INT) @click.option("-fid", "--from_id", type=click.types.INT) @click.option("-l", "--limit", default=500, show_default=True, type=click.types.IntRange(1, 1000)) @click.option("-rw", "--recv_window", default=5000, show_default=True, callback=validate_recv_window, type=click.types.INT) @click.option("--query", type=click.types.STRING) @coro async def my_trades(symbol, start_time, end_time, from_id, limit, recv_window, query): """ Get trades for a 
specific account and symbol. Weight: 5 NOTES: If fromId is set, it will get id >= that fromId. Otherwise most recent trades are returned. """ payload = { 'symbol': symbol, 'limit': limit, 'recvWindow': recv_window, 'timestamp': get_timestamp() } builder = MyTradesBuilder(endpoint='api/v3/myTrades', payload=payload) \ .add_optional_params_to_payload(start_time=start_time, end_time=end_time, from_id=from_id) \ .set_security() await builder.send_http_req() builder.handle_response().filter(query).generate_output()
37.974843
118
0.643756
2,937
24,152
5.031324
0.083418
0.036476
0.052785
0.028761
0.803478
0.765311
0.732828
0.698653
0.623943
0.595249
0
0.005451
0.225282
24,152
635
119
38.034646
0.784298
0.005714
0
0.641379
0
0
0.178094
0.016386
0
0
0
0
0
1
0.004598
false
0.016092
0.057471
0
0.075862
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bcd427dcbdaa32b0723d00aff850fffc96b3dee6
798
py
Python
metadata/r21/corrections_muon.py
gordonwatts/func-adl-types-atlas
9d135371d4e21d69373a8e1611ea8118cf2fff7f
[ "MIT" ]
null
null
null
metadata/r21/corrections_muon.py
gordonwatts/func-adl-types-atlas
9d135371d4e21d69373a8e1611ea8118cf2fff7f
[ "MIT" ]
1
2022-02-23T17:56:48.000Z
2022-02-23T17:56:48.000Z
metadata/r21/corrections_muon.py
gordonwatts/func-adl-types-atlas
9d135371d4e21d69373a8e1611ea8118cf2fff7f
[ "MIT" ]
null
null
null
muon_container = '{{calib.muon_collection}}' from MuonAnalysisAlgorithms.MuonAnalysisSequence import makeMuonAnalysisSequence muonSequence = makeMuonAnalysisSequence('mc', workingPoint='{{calib.muon_working_point}}.{{calib.muon_isolation}}', postfix = '{{calib.muon_working_point}}_{{calib.muon_isolation}}') muonSequence.configure( inputName = muon_container, outputName = muon_container + 'Calib_{{calib.muon_working_point}}{{calib.muon_isolation}}_%SYS%' ) calibrationAlgSeq += muonSequence print( muonSequence ) # For debugging output_muon_container = "{{calib.muon_collection}}Calib_{{calib.muon_working_point}}{{calib.muon_isolation}}_%SYS%" # Output muon_collection = {{calib.muon_collection}}Calib_{{calib.muon_working_point}}{{calib.muon_isolation}}_{{ sys_error }}
88.666667
182
0.779449
83
798
7.108434
0.313253
0.198305
0.135593
0.177966
0.511864
0.435593
0.435593
0.30339
0.30339
0.223729
0
0
0.082707
798
9
183
88.666667
0.806011
0.172932
0
0
0
0
0.43465
0.431611
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.125
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bce07cffbf017b46b5e17f9c30ce5e93930c9965
146
py
Python
pdip/dependency/scopes.py
ahmetcagriakca/pdip
c4c16d5666a740154cabdc6762cd44d98b7bdde8
[ "MIT" ]
2
2021-12-09T21:07:46.000Z
2021-12-11T22:18:01.000Z
pdip/dependency/scopes.py
fmuyilmaz/pdip
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
[ "MIT" ]
null
null
null
pdip/dependency/scopes.py
fmuyilmaz/pdip
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
[ "MIT" ]
3
2021-11-15T00:47:00.000Z
2021-12-17T11:35:45.000Z
class IDependency: pass class IScoped(IDependency): pass def __del__(self): pass class ISingleton(IDependency): pass
10.428571
30
0.650685
15
146
6.066667
0.533333
0.494505
0
0
0
0
0
0
0
0
0
0
0.280822
146
13
31
11.230769
0.866667
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0.125
false
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
bce639b9308c7ddf03261bd417404c48063eddf2
3,079
py
Python
workshops/hash_table_tests/test_hash_table.py
PetkoAndreev/Python-OOP
2cc3094940cdf078f0ee60be938e883f843766e4
[ "MIT" ]
1
2021-05-27T07:59:17.000Z
2021-05-27T07:59:17.000Z
workshops/hash_table_tests/test_hash_table.py
PetkoAndreev/Python-OOP
2cc3094940cdf078f0ee60be938e883f843766e4
[ "MIT" ]
null
null
null
workshops/hash_table_tests/test_hash_table.py
PetkoAndreev/Python-OOP
2cc3094940cdf078f0ee60be938e883f843766e4
[ "MIT" ]
null
null
null
import unittest from python_oop.workshops.hash_table.hashtable import HashTable class TestHashTable(unittest.TestCase): def setUp(self): self.hash_table = HashTable() def test_attributes(self): self.assertEqual(4, len(self.hash_table.keys)) self.assertEqual(4, len(self.hash_table.values)) self.assertEqual(4, self.hash_table.max_capacity) def test_add_with_available_space(self): self.hash_table.add("Test_key1", "Value1") self.assertEqual(1, self.hash_table.actual_length) self.assertEqual(4, self.hash_table.max_capacity) self.assertEqual("Value1", self.hash_table["Test_key1"]) def test_add_with_no_available_space_resizes(self): for num in range(1, self.hash_table.max_capacity + 1): self.hash_table.add(f"Test_key{num}", f"Value{num}") self.assertEqual(4, self.hash_table.actual_length) self.assertEqual(4, self.hash_table.max_capacity) # Overload the dict and it should resize self.hash_table.add("Test_key5", "Value5") self.assertEqual(5, self.hash_table.actual_length) self.assertEqual(8, self.hash_table.max_capacity) self.assertIn("Test_key5", self.hash_table.keys) def test_value_is_replaced_when_key_exists(self): self.hash_table.add("Test_key", "Value") self.assertEqual("Value", self.hash_table["Test_key"]) self.hash_table["Test_key"] = "New_value" self.assertEqual("New_value", self.hash_table["Test_key"]) def test_get_with_existing_key(self): self.hash_table.add("Test_key", "Value") self.assertEqual("Value", self.hash_table.get("Test_key")) def test_get_with_not_existing_key(self): self.hash_table.add("Test_key", "Value") self.assertIsNone(self.hash_table.get("not existing key")) def test_get_with_not_existing_key_with_default_value(self): self.hash_table.add("Test_key", "Value") self.assertEqual("DEFAULT", self.hash_table.get("not existing key", "DEFAULT")) def test_representation(self): self.hash_table.add("Test_key", "Value") self.assertEqual("{Test_key: Value}", str(self.hash_table)) def test_collision_set_next_available_index(self): self.hash_table["name"] = 
"Peter" self.assertEqual(1, self.hash_table.keys.index("name")) # collision with index 1 self.hash_table["age"] = 25 self.assertEqual(2, self.hash_table.keys.index("age")) def test_collision_set_next_available_index_at_0(self): self.hash_table["name"] = "Peter" self.assertEqual(1, self.hash_table.keys.index("name")) # collision with index 1 self.hash_table["age"] = 25 self.assertEqual(2, self.hash_table.keys.index("age")) self.hash_table["work"] = "Some title" self.assertEqual(3, self.hash_table.keys.index("work")) # Go back to index 0, because no other available self.hash_table["eyes color"] = "blue" self.assertEqual(0, self.hash_table.keys.index("eyes color"))
42.763889
87
0.685287
431
3,079
4.63109
0.206497
0.18487
0.260521
0.076653
0.639279
0.58517
0.516032
0.398297
0.352705
0.352705
0
0.012764
0.185775
3,079
72
88
42.763889
0.783406
0.042546
0
0.296296
0
0
0.117188
0
0
0
0
0
0.425926
1
0.203704
false
0
0.037037
0
0.259259
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
3
4c07b902f82357f9ace056f54188bb1218f7311b
441
py
Python
poradnia/cases/migrations/0013_auto_20150312_2326.py
efefre/poradnia
8bf9c88888d538cf4d1224431355c850d31ef252
[ "MIT" ]
23
2015-07-20T01:10:52.000Z
2021-01-12T10:05:48.000Z
poradnia/cases/migrations/0013_auto_20150312_2326.py
efefre/poradnia
8bf9c88888d538cf4d1224431355c850d31ef252
[ "MIT" ]
710
2015-07-12T13:19:14.000Z
2022-03-29T12:38:18.000Z
poradnia/cases/migrations/0013_auto_20150312_2326.py
efefre/poradnia
8bf9c88888d538cf4d1224431355c850d31ef252
[ "MIT" ]
20
2015-07-21T00:45:34.000Z
2021-01-31T12:48:18.000Z
from django.db import migrations class Migration(migrations.Migration): dependencies = [("cases", "0012_auto_20150311_0434")] operations = [ migrations.RemoveField(model_name="case", name="created_by"), migrations.RemoveField(model_name="case", name="created_on"), migrations.RemoveField(model_name="case", name="modified_by"), migrations.RemoveField(model_name="case", name="modified_on"), ]
31.5
70
0.70068
48
441
6.208333
0.458333
0.281879
0.348993
0.402685
0.624161
0.624161
0.624161
0
0
0
0
0.043127
0.15873
441
13
71
33.923077
0.760108
0
0
0
0
0
0.195011
0.052154
0
0
0
0
0
1
0
false
0
0.111111
0
0.444444
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4c165617da31b79e7ec5cf5d3f2b09d8274a1ea6
766
py
Python
core-python/Core_Python/regexpkg/Regex_As_6.py
theumang100/tutorials-1
497f54c2adb022c316530319a168fca1c007d4b1
[ "MIT" ]
9
2020-04-23T05:24:19.000Z
2022-02-17T16:37:51.000Z
core-python/Core_Python/regexpkg/Regex_As_6.py
theumang100/tutorials-1
497f54c2adb022c316530319a168fca1c007d4b1
[ "MIT" ]
5
2020-10-01T05:08:37.000Z
2020-10-12T03:18:10.000Z
core-python/Core_Python/regexpkg/Regex_As_6.py
theumang100/tutorials-1
497f54c2adb022c316530319a168fca1c007d4b1
[ "MIT" ]
9
2020-04-28T14:06:41.000Z
2021-10-19T18:32:28.000Z
''' The multi_vowel_words function returns all words with 3 or more consecutive vowels (a, e, i, o, u). Fill in the regular expression to do that. ''' import re def multi_vowel_words(text): pattern = r"\w+[aeiou]{3,}\w+" result = re.findall(pattern, text) return result print(multi_vowel_words("Life is beautiful")) # ['beautiful'] print(multi_vowel_words("Obviously, the queen is courageous and gracious.")) # ['Obviously', 'queen', 'courageous', 'gracious'] print(multi_vowel_words("The rambunctious children had to sit quietly and await their delicious dinner.")) # ['rambunctious', 'quietly', 'delicious'] print(multi_vowel_words("The order of a data queue is First In First Out (FIFO)")) # ['queue'] print(multi_vowel_words("Hello world!")) # []
33.304348
106
0.720627
112
766
4.803571
0.5625
0.130112
0.195167
0.185874
0.085502
0
0
0
0
0
0
0.00303
0.138381
766
23
107
33.304348
0.812121
0.340731
0
0
0
0
0.460285
0
0
0
0
0
0
1
0.1
false
0
0.1
0
0.3
0.5
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
4c42725681a18a31adef25eb006292f20583c1d0
1,261
py
Python
alipay/aop/api/response/ZhimaCreditEpSceneFulfillmentlistSyncResponse.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/response/ZhimaCreditEpSceneFulfillmentlistSyncResponse.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/response/ZhimaCreditEpSceneFulfillmentlistSyncResponse.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import simplejson as json from alipay.aop.api.response.AlipayResponse import AlipayResponse from alipay.aop.api.domain.FulfillmentResult import FulfillmentResult class ZhimaCreditEpSceneFulfillmentlistSyncResponse(AlipayResponse): def __init__(self): super(ZhimaCreditEpSceneFulfillmentlistSyncResponse, self).__init__() self._fulfillment_result_list = None @property def fulfillment_result_list(self): return self._fulfillment_result_list @fulfillment_result_list.setter def fulfillment_result_list(self, value): if isinstance(value, list): self._fulfillment_result_list = list() for i in value: if isinstance(i, FulfillmentResult): self._fulfillment_result_list.append(i) else: self._fulfillment_result_list.append(FulfillmentResult.from_alipay_dict(i)) def parse_response_content(self, response_content): response = super(ZhimaCreditEpSceneFulfillmentlistSyncResponse, self).parse_response_content(response_content) if 'fulfillment_result_list' in response: self.fulfillment_result_list = response['fulfillment_result_list']
38.212121
118
0.725615
127
1,261
6.866142
0.338583
0.21445
0.264908
0.172018
0.135321
0
0
0
0
0
0
0.000993
0.201427
1,261
32
119
39.40625
0.864945
0.033307
0
0
0
0
0.037798
0.037798
0
0
0
0
0
1
0.173913
false
0
0.130435
0.043478
0.391304
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4c43dcfb24b6b24a9256199ec20f8b4104053af5
157
py
Python
Development/05/after/pluralsight.plugin.xbmc/addon.py
enriqueescobar-askida/Kinito.RaspberryPi
90ecc8920e7a149fcfdb9e153da04bb5743e73d9
[ "MIT" ]
null
null
null
Development/05/after/pluralsight.plugin.xbmc/addon.py
enriqueescobar-askida/Kinito.RaspberryPi
90ecc8920e7a149fcfdb9e153da04bb5743e73d9
[ "MIT" ]
null
null
null
Development/05/after/pluralsight.plugin.xbmc/addon.py
enriqueescobar-askida/Kinito.RaspberryPi
90ecc8920e7a149fcfdb9e153da04bb5743e73d9
[ "MIT" ]
null
null
null
import os import sys import xbmcplugin import xbmcaddon import xbmcgui print 'Hello XBMC Addon!!!!' dialog = xbmcgui.Dialog() dialog.ok('Hello XBMC','Hi!')
15.7
29
0.751592
22
157
5.363636
0.590909
0.152542
0
0
0
0
0
0
0
0
0
0
0.127389
157
10
29
15.7
0.861314
0
0
0
0
0
0.208861
0
0
0
0
0
0
0
null
null
0
0.625
null
null
0.125
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
3
4c549d2eb8f7e679791a9409b8bae0a82683cd21
3,388
py
Python
keras/regularizers.py
bjerva/keras
8d34e8a362f516b09a6e9f52f07264c9f1e088a9
[ "MIT" ]
null
null
null
keras/regularizers.py
bjerva/keras
8d34e8a362f516b09a6e9f52f07264c9f1e088a9
[ "MIT" ]
null
null
null
keras/regularizers.py
bjerva/keras
8d34e8a362f516b09a6e9f52f07264c9f1e088a9
[ "MIT" ]
1
2020-06-11T02:11:58.000Z
2020-06-11T02:11:58.000Z
from __future__ import absolute_import from . import backend as K from .utils.generic_utils import get_from_module import warnings class Regularizer(object): def __call__(self, x): return 0 def get_config(self): return {'name': self.__class__.__name__} def set_param(self, _): warnings.warn('The `set_param` method on regularizers is deprecated. ' 'It no longer does anything, ' 'and it will be removed after 06/2017.') def set_layer(self, _): warnings.warn('The `set_layer` method on regularizers is deprecated. ' 'It no longer does anything, ' 'and it will be removed after 06/2017.') class EigenvalueRegularizer(Regularizer): """Regularizer based on the eignvalues of a weight matrix. Only available for tensors of rank 2. # Arguments k: Float; modulates the amount of regularization to apply. """ def __init__(self, k): self.k = k def __call__(self, x): if K.ndim(x) != 2: raise ValueError('EigenvalueRegularizer ' 'is only available for tensors of rank 2.') covariance = K.dot(K.transpose(x), x) dim1, dim2 = K.eval(K.shape(covariance)) # Power method for approximating the dominant eigenvector: power = 9 # Number of iterations of the power method. o = K.ones([dim1, 1]) # Initial values for the dominant eigenvector. main_eigenvect = K.dot(covariance, o) for n in range(power - 1): main_eigenvect = K.dot(covariance, main_eigenvect) covariance_d = K.dot(covariance, main_eigenvect) # The corresponding dominant eigenvalue: main_eigenval = (K.dot(K.transpose(covariance_d), main_eigenvect) / K.dot(K.transpose(main_eigenvect), main_eigenvect)) # Multiply by the given regularization gain. regularization = (main_eigenval ** 0.5) * self.k return K.sum(regularization) class L1L2Regularizer(Regularizer): """Regularizer for L1 and L2 regularization. # Arguments l1: Float; L1 regularization factor. l2: Float; L2 regularization factor. 
""" def __init__(self, l1=0., l2=0.): self.l1 = K.cast_to_floatx(l1) self.l2 = K.cast_to_floatx(l2) def __call__(self, x): regularization = 0 if self.l1: regularization += K.sum(self.l1 * K.abs(x)) if self.l2: regularization += K.sum(self.l2 * K.square(x)) return regularization def get_config(self): return {'name': self.__class__.__name__, 'l1': float(self.l1), 'l2': float(self.l2)} # Aliases. WeightRegularizer = L1L2Regularizer ActivityRegularizer = L1L2Regularizer def l1(l=0.01): return L1L2Regularizer(l1=l) def l2(l=0.01): return L1L2Regularizer(l2=l) def l1l2(l1=0.01, l2=0.01): return L1L2Regularizer(l1=l1, l2=l2) def activity_l1(l=0.01): return L1L2Regularizer(l1=l) def activity_l2(l=0.01): return L1L2Regularizer(l2=l) def activity_l1l2(l1=0.01, l2=0.01): return L1L2Regularizer(l1=l1, l2=l2) def get(identifier, kwargs=None): return get_from_module(identifier, globals(), 'regularizer', instantiate=True, kwargs=kwargs)
28.233333
78
0.622491
430
3,388
4.737209
0.290698
0.011782
0.02651
0.070692
0.326951
0.259205
0.259205
0.22975
0.22975
0.126657
0
0.046029
0.275384
3,388
119
79
28.470588
0.783707
0.160272
0
0.220588
0
0
0.115564
0.007513
0
0
0
0
0
1
0.235294
false
0
0.058824
0.147059
0.514706
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4c5fa90c4afdbcc9667fb7a515e895de31cf9fe4
222
py
Python
core/urls.py
svhenrique/weather-project
ec0ff168aac51245f7967a15a298363b2f352caf
[ "MIT" ]
null
null
null
core/urls.py
svhenrique/weather-project
ec0ff168aac51245f7967a15a298363b2f352caf
[ "MIT" ]
null
null
null
core/urls.py
svhenrique/weather-project
ec0ff168aac51245f7967a15a298363b2f352caf
[ "MIT" ]
null
null
null
from django.contrib import admin from django.urls import path from . import views urlpatterns = [ path('', views.WeatherView.as_view(), name='index'), path('delete/<pk>/', views.delete_location, name='delete') ]
22.2
62
0.702703
29
222
5.310345
0.586207
0.12987
0
0
0
0
0
0
0
0
0
0
0.144144
222
9
63
24.666667
0.810526
0
0
0
0
0
0.103604
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
4c61cfae0eb5a4902458deffb59c73120a3d70d3
615
py
Python
miss/collections.py
Changaco/python-miss
899df25615b5eee72307e80b6de43daffa144464
[ "CC0-1.0" ]
1
2020-04-09T16:42:40.000Z
2020-04-09T16:42:40.000Z
miss/collections.py
Changaco/python-miss
899df25615b5eee72307e80b6de43daffa144464
[ "CC0-1.0" ]
null
null
null
miss/collections.py
Changaco/python-miss
899df25615b5eee72307e80b6de43daffa144464
[ "CC0-1.0" ]
null
null
null
""" This module is a version of collections consistent in both python 2 and 3. It is only made of re-exports from builtin modules, with one exception. Unlike the builtin UserDict, the one in this module derives from dict, which means that existing code using `isintance(obj, dict)` won't break. """ from collections import * from .six import PY3 if PY3: class UserDict(UserDict, dict): pass else: import UserDict as _UserDict class UserDict(_UserDict.DictMixin, dict): __contains__ = dict.__contains__ __iter__ = dict.__iter__ from UserList import * from UserString import *
27.954545
80
0.733333
87
615
4.977011
0.62069
0.046189
0.096998
0
0
0
0
0
0
0
0
0.00818
0.204878
615
21
81
29.285714
0.877301
0.473171
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.090909
0.454545
0
0.818182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
4c65763f68254cedc5a4c579af6d4f044c4a2bef
598
py
Python
Github_User_Details/functions.py
Frans06/code-n-stitch
996a2bf8474bd200f3f1f89d1b8c7e9b9615117a
[ "MIT" ]
50
2020-09-19T16:40:21.000Z
2022-02-05T05:48:42.000Z
Github_User_Details/functions.py
Frans06/code-n-stitch
996a2bf8474bd200f3f1f89d1b8c7e9b9615117a
[ "MIT" ]
266
2020-09-25T17:24:04.000Z
2021-11-29T07:17:57.000Z
Github_User_Details/functions.py
Frans06/code-n-stitch
996a2bf8474bd200f3f1f89d1b8c7e9b9615117a
[ "MIT" ]
113
2020-09-26T10:28:11.000Z
2021-10-15T06:58:53.000Z
import requests from io import BytesIO from PIL import Image def get_user_image(url): response = requests.get(url) img_file = BytesIO(response.content) return Image.open(img_file) def get_user_details(username): res = requests.get(f'https://api.github.com/users/{username}') return res.json() if res.status_code == 200 else None def replace_space(string, idx): return string[:idx] + '\n' + string[idx+1:] if idx > -1 else string def insert_new_line(string): return replace_space( replace_space(string, string.find(' ', 53)), string.find(' ', 106) )
23
74
0.688963
88
598
4.545455
0.511364
0.09
0.05
0
0
0
0
0
0
0
0
0.02045
0.182274
598
25
75
23.92
0.797546
0
0
0
0
0
0.071906
0
0
0
0
0
0
1
0.25
false
0
0.1875
0.125
0.6875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4c68a2c948adfa51d3f649c50ec2387ae5eda33c
79
py
Python
pset6/hello.py
D3adsec9ja/cs50x-works
9cdc6f4007315324ca69f4756bb3e26e1eaa6929
[ "BSD-3-Clause" ]
null
null
null
pset6/hello.py
D3adsec9ja/cs50x-works
9cdc6f4007315324ca69f4756bb3e26e1eaa6929
[ "BSD-3-Clause" ]
null
null
null
pset6/hello.py
D3adsec9ja/cs50x-works
9cdc6f4007315324ca69f4756bb3e26e1eaa6929
[ "BSD-3-Clause" ]
null
null
null
#ask for name prompt = input("What is your name?\n") print(f"hello, {prompt}")
19.75
38
0.670886
14
79
3.785714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.139241
79
4
39
19.75
0.779412
0.151899
0
0
0
0
0.522388
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
4c7062a9ab479cf67e965873ee7335528d52354c
425
py
Python
examples/stats/multi_ess.py
papamarkou/eeyore
4cd9b5a619cd095035aa93f348d1c937629aa8a3
[ "MIT" ]
6
2020-04-22T18:56:46.000Z
2021-09-09T15:57:48.000Z
examples/stats/multi_ess.py
papamarkou/eeyore
4cd9b5a619cd095035aa93f348d1c937629aa8a3
[ "MIT" ]
19
2019-11-14T21:22:21.000Z
2020-10-31T16:18:36.000Z
examples/stats/multi_ess.py
scidom/eeyore
4cd9b5a619cd095035aa93f348d1c937629aa8a3
[ "MIT" ]
null
null
null
# Compute multivariate ESS using multi_ess function based on eeyore # %% Load packages import numpy as np import torch from eeyore.stats import multi_ess # %% Read chains chains = torch.as_tensor(np.genfromtxt('chain01.csv', delimiter=',')) # %% Compute multivariate ESS using INSE MC covariance estimation ess_val = multi_ess(chains) print('Multivariate ESS using INSE MC covariance estimation: {}'.format(ess_val))
22.368421
81
0.764706
59
425
5.40678
0.542373
0.141066
0.188088
0.169279
0.288401
0.288401
0.288401
0
0
0
0
0.005495
0.143529
425
18
82
23.611111
0.870879
0.378824
0
0
0
0
0.262548
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
4c718a74f1d85fa92faa6ea708fdd0f72b919d59
1,286
py
Python
rts/python/memory.py
Snektron/futhark
ca9a33d511ba30b27409aef46e5df92556ab2e8b
[ "ISC" ]
2
2022-01-02T16:21:11.000Z
2022-01-09T09:49:43.000Z
rts/python/memory.py
q60/futhark
a9421d922778281ac8a84e66497c340290c1e23b
[ "ISC" ]
null
null
null
rts/python/memory.py
q60/futhark
a9421d922778281ac8a84e66497c340290c1e23b
[ "ISC" ]
null
null
null
# Start of memory.py. import ctypes as ct def addressOffset(x, offset, bt): return ct.cast(ct.addressof(x.contents)+int(offset), ct.POINTER(bt)) def allocateMem(size): return ct.cast((ct.c_byte * max(0,size))(), ct.POINTER(ct.c_byte)) # Copy an array if its is not-None. This is important for treating # Numpy arrays as flat memory, but has some overhead. def normaliseArray(x): if (x.base is x) or (x.base is None): return x else: return x.copy() def unwrapArray(x): return normaliseArray(x).ctypes.data_as(ct.POINTER(ct.c_byte)) def createArray(x, shape, t): # HACK: np.ctypeslib.as_array may fail if the shape contains zeroes, # for some reason. if any(map(lambda x: x == 0, shape)): return np.ndarray(shape, dtype=t) else: return np.ctypeslib.as_array(x, shape=shape).view(t) def indexArray(x, offset, bt): return addressOffset(x, offset*ct.sizeof(bt), bt)[0] def writeScalarArray(x, offset, v): ct.memmove(ct.addressof(x.contents)+int(offset)*ct.sizeof(v), ct.addressof(v), ct.sizeof(v)) # An opaque Futhark value. class opaque(object): def __init__(self, desc, *payload): self.data = payload self.desc = desc def __repr__(self): return "<opaque Futhark value of type {}>".format(self.desc) # End of memory.py.
27.956522
94
0.692846
212
1,286
4.136792
0.40566
0.031927
0.023945
0.034208
0.107184
0.070696
0.070696
0
0
0
0
0.002791
0.164075
1,286
45
95
28.577778
0.813023
0.205288
0
0.074074
0
0
0.032544
0
0
0
0
0
0
1
0.333333
false
0
0.037037
0.185185
0.740741
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
d5d16d785cb5f660a5d909ee212c35e5226a5fa4
1,302
py
Python
src/compas_occ/geometry/curves/__init__.py
lottilotte/compas_occ
0e6cd8fb90802995993fca82a65e26157bf193fd
[ "MIT" ]
null
null
null
src/compas_occ/geometry/curves/__init__.py
lottilotte/compas_occ
0e6cd8fb90802995993fca82a65e26157bf193fd
[ "MIT" ]
null
null
null
src/compas_occ/geometry/curves/__init__.py
lottilotte/compas_occ
0e6cd8fb90802995993fca82a65e26157bf193fd
[ "MIT" ]
null
null
null
from .nurbs import OCCNurbsCurve try: from compas.geometry import NurbsCurve except ImportError: pass else: from compas.plugins import plugin @plugin(category='factories', requires=['compas_occ']) def new_nurbscurve(cls, *args, **kwargs): # for _, value in inspect.getmembers(OCCNurbsCurve): # if inspect.isfunction(value): # if hasattr(value, '__isabstractmethod__'): # raise Exception('Abstract method not implemented: {}'.format(value)) return super(NurbsCurve, OCCNurbsCurve).__new__(OCCNurbsCurve) @plugin(category='factories', requires=['compas_occ']) def new_nurbscurve_from_parameters(*args, **kwargs): return OCCNurbsCurve.from_parameters(*args, **kwargs) @plugin(category='factories', requires=['compas_occ']) def new_nurbscurve_from_points(*args, **kwargs): return OCCNurbsCurve.from_points(*args, **kwargs) @plugin(category='factories', requires=['compas_occ']) def new_nurbscurve_from_interpolation(cls, *args, **kwargs): return OCCNurbsCurve.from_interpolation(*args, **kwargs) @plugin(category='factories', requires=['compas_occ']) def new_nurbscurve_from_step(cls, *args, **kwargs): return OCCNurbsCurve.from_step(*args, **kwargs)
38.294118
90
0.691244
137
1,302
6.343066
0.328467
0.103567
0.132336
0.178366
0.533947
0.457998
0.375144
0.375144
0.375144
0.310702
0
0
0.180492
1,302
33
91
39.454545
0.814433
0.165899
0
0.227273
0
0
0.087882
0
0
0
0
0
0
1
0.227273
false
0.045455
0.181818
0.227273
0.636364
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
3
910f839332d16c3c4978656718541c320899c1c7
22,938
bzl
Python
third_party/repositories/scala_2_11.bzl
wiwa/rules_scala
3dd5d8110d56cfc19722532866cbfc039a6a9612
[ "Apache-2.0" ]
null
null
null
third_party/repositories/scala_2_11.bzl
wiwa/rules_scala
3dd5d8110d56cfc19722532866cbfc039a6a9612
[ "Apache-2.0" ]
null
null
null
third_party/repositories/scala_2_11.bzl
wiwa/rules_scala
3dd5d8110d56cfc19722532866cbfc039a6a9612
[ "Apache-2.0" ]
null
null
null
artifacts = { "io_bazel_rules_scala_scala_library": { "artifact": "org.scala-lang:scala-library:2.11.12", "sha256": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce", }, "io_bazel_rules_scala_scala_compiler": { "artifact": "org.scala-lang:scala-compiler:2.11.12", "sha256": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0", }, "io_bazel_rules_scala_scala_reflect": { "artifact": "org.scala-lang:scala-reflect:2.11.12", "sha256": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04", }, "io_bazel_rules_scala_scalatest": { "artifact": "org.scalatest:scalatest_2.11:3.2.9", "sha256": "45affb34dd5b567fa943a7e155118ae6ab6c4db2fd34ca6a6c62ea129a1675be", }, "io_bazel_rules_scala_scalatest_compatible": { "artifact": "org.scalatest:scalatest-compatible:jar:3.2.9", "sha256": "7e5f1193af2fd88c432c4b80ce3641e4b1d062f421d8a0fcc43af9a19bb7c2eb", }, "io_bazel_rules_scala_scalatest_core": { "artifact": "org.scalatest:scalatest-core_2.11:3.2.9", "sha256": "003cb40f78cbbffaf38203b09c776d06593974edf1883a933c1bbc0293a2f280", }, "io_bazel_rules_scala_scalatest_featurespec": { "artifact": "org.scalatest:scalatest-featurespec_2.11:3.2.9", "sha256": "41567216bbd338625e77cd74ca669c88f59ff2da8adeb362657671bb43c4e462", }, "io_bazel_rules_scala_scalatest_flatspec": { "artifact": "org.scalatest:scalatest-flatspec_2.11:3.2.9", "sha256": "3e89091214985782ff912559b7eb1ce085f6117db8cff65663e97325dc264b91", }, "io_bazel_rules_scala_scalatest_freespec": { "artifact": "org.scalatest:scalatest-freespec_2.11:3.2.9", "sha256": "7c3e26ac0fa165263e4dac5dd303518660f581f0f8b0c20ba0b8b4a833ac9b9e", }, "io_bazel_rules_scala_scalatest_funsuite": { "artifact": "org.scalatest:scalatest-funsuite_2.11:3.2.9", "sha256": "dc2100fe45b577c464f01933d8e605c3364dbac9ba24cd65222a5a4f3000717c", }, "io_bazel_rules_scala_scalatest_funspec": { "artifact": "org.scalatest:scalatest-funspec_2.11:3.2.9", "sha256": "6ed2de364aacafcb3390144501ed4e0d24b7ff1431e8b9e6503d3af4bc160196", }, 
"io_bazel_rules_scala_scalatest_matchers_core": { "artifact": "org.scalatest:scalatest-matchers-core_2.11:3.2.9", "sha256": "06eb7b5f3a8e8124c3a92e5c597a75ccdfa3fae022bc037770327d8e9c0759b4", }, "io_bazel_rules_scala_scalatest_shouldmatchers": { "artifact": "org.scalatest:scalatest-shouldmatchers_2.11:3.2.9", "sha256": "444545c33a3af8d7a5166ea4766f376a5f2c209854c7eb630786c8cb3f48a706", }, "io_bazel_rules_scala_scalatest_mustmatchers": { "artifact": "org.scalatest:scalatest-mustmatchers_2.11:3.2.9", "sha256": "b0ba6b9db7a2d1a4f7a3cf45b034b65481e31da8748abc2f2750cf22619d5a45", }, "io_bazel_rules_scala_scalactic": { "artifact": "org.scalactic:scalactic_2.11:3.2.9", "sha256": "97b439fe61d1c655a8b29cdab8182b15b41b2308923786a348fc7b9f8f72b660", }, "io_bazel_rules_scala_scala_xml": { "artifact": "org.scala-lang.modules:scala-xml_2.11:1.2.0", "sha256": "eaddac168ef1e28978af768706490fa4358323a08964c25fa1027c52238e3702", }, "io_bazel_rules_scala_scala_parser_combinators": { "artifact": "org.scala-lang.modules:scala-parser-combinators_2.11:1.1.2", "sha256": "3e0889e95f5324da6420461f7147cb508241ed957ac5cfedc25eef19c5448f26", }, "org_scalameta_common": { "artifact": "org.scalameta:common_2.11:4.3.0", "sha256": "6330798bcbd78d14d371202749f32efda0465c3be5fd057a6055a67e21335ba0", "deps": [ "@com_lihaoyi_sourcecode", "@io_bazel_rules_scala_scala_library", ], }, "org_scalameta_fastparse": { "artifact": "org.scalameta:fastparse_2.11:1.0.1", "sha256": "49ecc30a4b47efc0038099da0c97515cf8f754ea631ea9f9935b36ca7d41b733", "deps": [ "@com_lihaoyi_sourcecode", "@io_bazel_rules_scala_scala_library", "@org_scalameta_fastparse_utils", ], }, "org_scalameta_fastparse_utils": { "artifact": "org.scalameta:fastparse-utils_2.11:1.0.1", "sha256": "93f58db540e53178a686621f7a9c401307a529b68e051e38804394a2a86cea94", "deps": [ "@com_lihaoyi_sourcecode", "@io_bazel_rules_scala_scala_library", ], }, "org_scala_lang_modules_scala_collection_compat": { "artifact": 
"org.scala-lang.modules:scala-collection-compat_2.11:2.1.2", "sha256": "e9667b8b7276aeb42599f536fe4d7caab06eabc55e9995572267ad60c7a11c8b", "deps": [ "@io_bazel_rules_scala_scala_library", ], }, "org_scalameta_parsers": { "artifact": "org.scalameta:parsers_2.11:4.3.0", "sha256": "724382abfac27b32dec6c21210562bc7e1b09b5268ccb704abe66dcc8844beeb", "deps": [ "@io_bazel_rules_scala_scala_library", "@org_scalameta_trees", ], }, "org_scalameta_scalafmt_core": { "artifact": "org.scalameta:scalafmt-core_2.11:2.3.2", "sha256": "6bf391e0e1d7369fda83ddaf7be4d267bf4cbccdf2cc31ff941999a78c30e67f", "deps": [ "@com_geirsson_metaconfig_core", "@com_geirsson_metaconfig_typesafe_config", "@io_bazel_rules_scala_scala_library", "@io_bazel_rules_scala_scala_reflect", "@org_scalameta_scalameta", "@org_scala_lang_modules_scala_collection_compat", ], }, "org_scalameta_scalameta": { "artifact": "org.scalameta:scalameta_2.11:4.3.0", "sha256": "94fe739295447cd3ae877c279ccde1def06baea02d9c76a504dda23de1d90516", "deps": [ "@io_bazel_rules_scala_scala_library", "@org_scala_lang_scalap", "@org_scalameta_parsers", ], }, "org_scalameta_trees": { "artifact": "org.scalameta:trees_2.11:4.3.0", "sha256": "d24d5d63d8deafe646d455c822593a66adc6fdf17c8373754a3834a6e92a8a72", "deps": [ "@com_thesamet_scalapb_scalapb_runtime", "@io_bazel_rules_scala_scala_library", "@org_scalameta_common", "@org_scalameta_fastparse", ], }, "org_typelevel_paiges_core": { "artifact": "org.typelevel:paiges-core_2.11:0.2.4", "sha256": "aa66fbe0457ca5cb5b9e522d4cb873623bb376a2e1ff58c464b5194c1d87c241", "deps": [ "@io_bazel_rules_scala_scala_library", ], }, "com_typesafe_config": { "artifact": "com.typesafe:config:1.3.3", "sha256": "b5f1d6071f1548d05be82f59f9039c7d37a1787bd8e3c677e31ee275af4a4621", }, "org_scala_lang_scalap": { "artifact": "org.scala-lang:scalap:2.11.12", "sha256": "a6dd7203ce4af9d6185023d5dba9993eb8e80584ff4b1f6dec574a2aba4cd2b7", "deps": [ "@io_bazel_rules_scala_scala_compiler", ], }, 
"com_thesamet_scalapb_lenses": { "artifact": "com.thesamet.scalapb:lenses_2.11:0.9.0", "sha256": "f4809760edee6abc97a7fe9b7fd6ae5fe1006795b1dc3963ab4e317a72f1a385", "deps": [ "@io_bazel_rules_scala_scala_library", ], }, "com_thesamet_scalapb_scalapb_runtime": { "artifact": "com.thesamet.scalapb:scalapb-runtime_2.11:0.9.0", "sha256": "ab1e449a18a9ce411eb3fec31bdbca5dd5fae4475b1557bb5e235a7b54738757", "deps": [ "@com_google_protobuf_protobuf_java", "@com_lihaoyi_fastparse", "@com_thesamet_scalapb_lenses", "@io_bazel_rules_scala_scala_library", ], }, "com_lihaoyi_fansi": { "artifact": "com.lihaoyi:fansi_2.11:0.2.5", "sha256": "1ff0a8304f322c1442e6bcf28fab07abf3cf560dd24573dbe671249aee5fc488", "deps": [ "@com_lihaoyi_sourcecode", "@io_bazel_rules_scala_scala_library", ], }, "com_lihaoyi_fastparse": { "artifact": "com.lihaoyi:fastparse_2.11:2.1.2", "sha256": "5c5d81f90ada03ac5b21b161864a52558133951031ee5f6bf4d979e8baa03628", "deps": [ "@com_lihaoyi_sourcecode", ], }, "com_lihaoyi_pprint": { "artifact": "com.lihaoyi:pprint_2.11:0.5.3", "sha256": "fb5e4921e7dff734d049e752a482d3a031380d3eea5caa76c991312dee9e6991", "deps": [ "@com_lihaoyi_fansi", "@com_lihaoyi_sourcecode", "@io_bazel_rules_scala_scala_library", ], }, "com_lihaoyi_sourcecode": { "artifact": "com.lihaoyi:sourcecode_2.11:0.1.7", "sha256": "33516d7fd9411f74f05acfd5274e1b1889b7841d1993736118803fc727b2d5fc", "deps": [ "@io_bazel_rules_scala_scala_library", ], }, "com_google_protobuf_protobuf_java": { "artifact": "com.google.protobuf:protobuf-java:3.10.0", "sha256": "161d7d61a8cb3970891c299578702fd079646e032329d6c2cabf998d191437c9", }, "com_geirsson_metaconfig_core": { "artifact": "com.geirsson:metaconfig-core_2.11:0.9.4", "sha256": "5d5704a1f1c4f74aed26248eeb9b577274d570b167cec0bf51d2908609c29118", "deps": [ "@com_lihaoyi_pprint", "@io_bazel_rules_scala_scala_library", "@org_typelevel_paiges_core", "@org_scala_lang_modules_scala_collection_compat", ], }, "com_geirsson_metaconfig_typesafe_config": { "artifact": 
"com.geirsson:metaconfig-typesafe-config_2.11:0.9.4", "sha256": "52d2913640f4592402aeb2f0cec5004893d02acf26df4aa1cf8d4dcb0d2b21c7", "deps": [ "@com_geirsson_metaconfig_core", "@com_typesafe_config", "@io_bazel_rules_scala_scala_library", "@org_scala_lang_modules_scala_collection_compat", ], }, "io_bazel_rules_scala_org_openjdk_jmh_jmh_core": { "artifact": "org.openjdk.jmh:jmh-core:1.20", "sha256": "1688db5110ea6413bf63662113ed38084106ab1149e020c58c5ac22b91b842ca", }, "io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_asm": { "artifact": "org.openjdk.jmh:jmh-generator-asm:1.20", "sha256": "2dd4798b0c9120326310cda3864cc2e0035b8476346713d54a28d1adab1414a5", }, "io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_reflection": { "artifact": "org.openjdk.jmh:jmh-generator-reflection:1.20", "sha256": "57706f7c8278272594a9afc42753aaf9ba0ba05980bae0673b8195908d21204e", }, "io_bazel_rules_scala_org_ows2_asm_asm": { "artifact": "org.ow2.asm:asm:6.1.1", "sha256": "dd3b546415dd4bade2ebe3b47c7828ab0623ee2336604068e2d81023f9f8d833", }, "io_bazel_rules_scala_net_sf_jopt_simple_jopt_simple": { "artifact": "net.sf.jopt-simple:jopt-simple:4.6", "sha256": "3fcfbe3203c2ea521bf7640484fd35d6303186ea2e08e72f032d640ca067ffda", }, "io_bazel_rules_scala_org_apache_commons_commons_math3": { "artifact": "org.apache.commons:commons-math3:3.6.1", "sha256": "1e56d7b058d28b65abd256b8458e3885b674c1d588fa43cd7d1cbb9c7ef2b308", }, "io_bazel_rules_scala_junit_junit": { "artifact": "junit:junit:4.12", "sha256": "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a", }, "io_bazel_rules_scala_org_hamcrest_hamcrest_core": { "artifact": "org.hamcrest:hamcrest-core:1.3", "sha256": "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9", }, "io_bazel_rules_scala_org_specs2_specs2_common": { "artifact": "org.specs2:specs2-common_2.11:4.4.1", "sha256": "52d7c0da58725606e98c6e8c81d2efe632053520a25da9140116d04a4abf9d2c", "deps": [ "@io_bazel_rules_scala_org_specs2_specs2_fp", ], }, 
"io_bazel_rules_scala_org_specs2_specs2_core": { "artifact": "org.specs2:specs2-core_2.11:4.4.1", "sha256": "8e95cb7e347e7a87e7a80466cbd88419ece1aaacb35c32e8bd7d299a623b31b9", "deps": [ "@io_bazel_rules_scala_org_specs2_specs2_common", "@io_bazel_rules_scala_org_specs2_specs2_matcher", ], }, "io_bazel_rules_scala_org_specs2_specs2_fp": { "artifact": "org.specs2:specs2-fp_2.11:4.4.1", "sha256": "e43006fdd0726ffcd1e04c6c4d795176f5f765cc787cc09baebe1fcb009e4462", }, "io_bazel_rules_scala_org_specs2_specs2_matcher": { "artifact": "org.specs2:specs2-matcher_2.11:4.4.1", "sha256": "448e5ab89d4d650d23030fdbee66a010a07dcac5e4c3e73ef5fe39ca1aace1cd", "deps": [ "@io_bazel_rules_scala_org_specs2_specs2_common", ], }, "io_bazel_rules_scala_org_specs2_specs2_junit": { "artifact": "org.specs2:specs2-junit_2.11:4.4.1", "sha256": "a8549d52e87896624200fe35ef7b841c1c698a8fb5d97d29bf082762aea9bb72", "deps": [ "@io_bazel_rules_scala_org_specs2_specs2_core", ], }, "scala_proto_rules_scalapb_plugin": { "artifact": "com.thesamet.scalapb:compilerplugin_2.11:0.9.7", "sha256": "2d6793fa2565953ef2b5094fc37fae4933f3c42e4cb4048d54e7f358ec104a87", }, "scala_proto_rules_protoc_bridge": { "artifact": "com.thesamet.scalapb:protoc-bridge_2.11:0.7.14", "sha256": "314e34bf331b10758ff7a780560c8b5a5b09e057695a643e33ab548e3d94aa03", }, "scala_proto_rules_scalapb_runtime": { "artifact": "com.thesamet.scalapb:scalapb-runtime_2.11:0.9.7", "sha256": "5131033e9536727891a38004ec707a93af1166cb8283c7db711c2c105fbf289e", }, "scala_proto_rules_scalapb_runtime_grpc": { "artifact": "com.thesamet.scalapb:scalapb-runtime-grpc_2.11:0.9.7", "sha256": "24d19df500ce6450d8f7aa72a9bad675fa4f3650f7736d548aa714058f887e23", }, "scala_proto_rules_scalapb_lenses": { "artifact": "com.thesamet.scalapb:lenses_2.11:0.9.7", "sha256": "f8e3b526ceac998652b296014e9ab4c0ab906a40837dd1dfcf6948b6f5a1a8bf", }, "scala_proto_rules_scalapb_fastparse": { "artifact": "com.lihaoyi:fastparse_2.11:2.1.2", "sha256": 
"5c5d81f90ada03ac5b21b161864a52558133951031ee5f6bf4d979e8baa03628", }, "scala_proto_rules_grpc_core": { "artifact": "io.grpc:grpc-core:1.24.0", "sha256": "8fc900625a9330b1c155b5423844d21be0a5574fe218a63170a16796c6f7880e", }, "scala_proto_rules_grpc_api": { "artifact": "io.grpc:grpc-api:1.24.0", "sha256": "553978366e04ee8ddba64afde3b3cf2ac021a2f3c2db2831b6491d742b558598", }, "scala_proto_rules_grpc_stub": { "artifact": "io.grpc:grpc-stub:1.24.0", "sha256": "eaa9201896a77a0822e26621b538c7154f00441a51c9b14dc9e1ec1f2acfb815", }, "scala_proto_rules_grpc_protobuf": { "artifact": "io.grpc:grpc-protobuf:1.24.0", "sha256": "88cd0838ea32893d92cb214ea58908351854ed8de7730be07d5f7d19025dd0bc", }, "scala_proto_rules_grpc_netty": { "artifact": "io.grpc:grpc-netty:1.24.0", "sha256": "8478333706ba442a354c2ddb8832d80a5aef71016e8a9cf07e7bf6e8c298f042", }, "scala_proto_rules_grpc_context": { "artifact": "io.grpc:grpc-context:1.24.0", "sha256": "1f0546e18789f7445d1c5a157010a11bc038bbb31544cdb60d9da3848efcfeea", }, "scala_proto_rules_perfmark_api": { "artifact": "io.perfmark:perfmark-api:0.17.0", "sha256": "816c11409b8a0c6c9ce1cda14bed526e7b4da0e772da67c5b7b88eefd41520f9", }, "scala_proto_rules_guava": { "artifact": "com.google.guava:guava:26.0-android", "sha256": "1d044ebb866ef08b7d04e998b4260c9b52fab6e6d6b68d207859486bb3686cd5", }, "scala_proto_rules_google_instrumentation": { "artifact": "com.google.instrumentation:instrumentation-api:0.3.0", "sha256": "671f7147487877f606af2c7e39399c8d178c492982827305d3b1c7f5b04f1145", }, "scala_proto_rules_netty_codec": { "artifact": "io.netty:netty-codec:4.1.32.Final", "sha256": "dbd6cea7d7bf5a2604e87337cb67c9468730d599be56511ed0979aacb309f879", }, "scala_proto_rules_netty_codec_http": { "artifact": "io.netty:netty-codec-http:4.1.32.Final", "sha256": "db2c22744f6a4950d1817e4e1a26692e53052c5d54abe6cceecd7df33f4eaac3", }, "scala_proto_rules_netty_codec_socks": { "artifact": "io.netty:netty-codec-socks:4.1.32.Final", "sha256": 
"fe2f2e97d6c65dc280623dcfd24337d8a5c7377049c120842f2c59fb83d7408a", }, "scala_proto_rules_netty_codec_http2": { "artifact": "io.netty:netty-codec-http2:4.1.32.Final", "sha256": "4d4c6cfc1f19efb969b9b0ae6cc977462d202867f7dcfee6e9069977e623a2f5", }, "scala_proto_rules_netty_handler": { "artifact": "io.netty:netty-handler:4.1.32.Final", "sha256": "07d9756e48b5f6edc756e33e8b848fb27ff0b1ae087dab5addca6c6bf17cac2d", }, "scala_proto_rules_netty_buffer": { "artifact": "io.netty:netty-buffer:4.1.32.Final", "sha256": "8ac0e30048636bd79ae205c4f9f5d7544290abd3a7ed39d8b6d97dfe3795afc1", }, "scala_proto_rules_netty_transport": { "artifact": "io.netty:netty-transport:4.1.32.Final", "sha256": "175bae0d227d7932c0c965c983efbb3cf01f39abe934f5c4071d0319784715fb", }, "scala_proto_rules_netty_resolver": { "artifact": "io.netty:netty-resolver:4.1.32.Final", "sha256": "9b4a19982047a95ea4791a7ad7ad385c7a08c2ac75f0a3509cc213cb32a726ae", }, "scala_proto_rules_netty_common": { "artifact": "io.netty:netty-common:4.1.32.Final", "sha256": "cc993e660f8f8e3b033f1d25a9e2f70151666bdf878d460a6508cb23daa696dc", }, "scala_proto_rules_netty_handler_proxy": { "artifact": "io.netty:netty-handler-proxy:4.1.32.Final", "sha256": "10d1081ed114bb0e76ebbb5331b66a6c3189cbdefdba232733fc9ca308a6ea34", }, "scala_proto_rules_opencensus_api": { "artifact": "io.opencensus:opencensus-api:0.22.1", "sha256": "62a0503ee81856ba66e3cde65dee3132facb723a4fa5191609c84ce4cad36127", }, "scala_proto_rules_opencensus_impl": { "artifact": "io.opencensus:opencensus-impl:0.22.1", "sha256": "9e8b209da08d1f5db2b355e781b9b969b2e0dab934cc806e33f1ab3baed4f25a", }, "scala_proto_rules_disruptor": { "artifact": "com.lmax:disruptor:3.4.2", "sha256": "f412ecbb235c2460b45e63584109723dea8d94b819c78c9bfc38f50cba8546c0", }, "scala_proto_rules_opencensus_impl_core": { "artifact": "io.opencensus:opencensus-impl-core:0.22.1", "sha256": "04607d100e34bacdb38f93c571c5b7c642a1a6d873191e25d49899668514db68", }, 
"scala_proto_rules_opencensus_contrib_grpc_metrics": { "artifact": "io.opencensus:opencensus-contrib-grpc-metrics:0.22.1", "sha256": "3f6f4d5bd332c516282583a01a7c940702608a49ed6e62eb87ef3b1d320d144b", }, "io_bazel_rules_scala_mustache": { "artifact": "com.github.spullara.mustache.java:compiler:0.8.18", "sha256": "ddabc1ef897fd72319a761d29525fd61be57dc25d04d825f863f83cc89000e66", }, "io_bazel_rules_scala_guava": { "artifact": "com.google.guava:guava:21.0", "sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480", }, "libthrift": { "artifact": "org.apache.thrift:libthrift:0.10.0", "sha256": "8591718c1884ac8001b4c5ca80f349c0a6deec691de0af720c5e3bc3a581dada", }, "io_bazel_rules_scala_scrooge_core": { "artifact": "com.twitter:scrooge-core_2.11:21.2.0", "sha256": "d6cef1408e34b9989ea8bc4c567dac922db6248baffe2eeaa618a5b354edd2bb", }, "io_bazel_rules_scala_scrooge_generator": { "artifact": "com.twitter:scrooge-generator_2.11:21.2.0", "sha256": "87094f01df2c0670063ab6ebe156bb1a1bcdabeb95bc45552660b030287d6acb", "runtime_deps": [ "@io_bazel_rules_scala_guava", "@io_bazel_rules_scala_mustache", "@io_bazel_rules_scala_scopt", ], }, "io_bazel_rules_scala_util_core": { "artifact": "com.twitter:util-core_2.11:21.2.0", "sha256": "31c33d494ca5a877c1e5b5c1f569341e1d36e7b2c8b3fb0356fb2b6d4a3907ca", }, "io_bazel_rules_scala_util_logging": { "artifact": "com.twitter:util-logging_2.11:21.2.0", "sha256": "f3b62465963fbf0fe9860036e6255337996bb48a1a3f21a29503a2750d34f319", }, "io_bazel_rules_scala_javax_annotation_api": { "artifact": "javax.annotation:javax.annotation-api:1.3.2", "sha256": "e04ba5195bcd555dc95650f7cc614d151e4bcd52d29a10b8aa2197f3ab89ab9b", }, "io_bazel_rules_scala_scopt": { "artifact": "com.github.scopt:scopt_2.11:4.0.0-RC2", "sha256": "956dfc89d3208e4a6d8bbfe0205410c082cee90c4ce08be30f97c044dffc3435", }, # test only "com_twitter__scalding_date": { "testonly": True, "artifact": "com.twitter:scalding-date_2.11:0.17.0", "sha256": 
"bf743cd6d224a4568d6486a2b794143e23145d2afd7a1d2de412d49e45bdb308", }, "org_typelevel__cats_core": { "testonly": True, "artifact": "org.typelevel:cats-core_2.11:0.9.0", "sha256": "3fda7a27114b0d178107ace5c2cf04e91e9951810690421768e65038999ffca5", }, "com_google_guava_guava_21_0_with_file": { "testonly": True, "artifact": "com.google.guava:guava:21.0", "sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480", }, "com_github_jnr_jffi_native": { "testonly": True, "artifact": "com.github.jnr:jffi:jar:native:1.2.17", "sha256": "4eb582bc99d96c8df92fc6f0f608fd123d278223982555ba16219bf8be9f75a9", }, "org_apache_commons_commons_lang_3_5": { "testonly": True, "artifact": "org.apache.commons:commons-lang3:3.5", "sha256": "8ac96fc686512d777fca85e144f196cd7cfe0c0aec23127229497d1a38ff651c", }, "org_springframework_spring_core": { "testonly": True, "artifact": "org.springframework:spring-core:5.1.5.RELEASE", "sha256": "f771b605019eb9d2cf8f60c25c050233e39487ff54d74c93d687ea8de8b7285a", }, "org_springframework_spring_tx": { "testonly": True, "artifact": "org.springframework:spring-tx:5.1.5.RELEASE", "sha256": "666f72b73c7e6b34e5bb92a0d77a14cdeef491c00fcb07a1e89eb62b08500135", "deps": [ "@org_springframework_spring_core", ], }, "com_google_guava_guava_21_0": { "testonly": True, "artifact": "com.google.guava:guava:21.0", "sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480", "deps": [ "@org_springframework_spring_core", ], }, "org_spire_math_kind_projector": { "testonly": True, "artifact": "org.spire-math:kind-projector_2.11:0.9.10", "sha256": "897460d4488b7dd6ac9198937d6417b36cc6ec8ab3693fdf2c532652f26c4373", }, }
44.888454
85
0.687375
1,969
22,938
7.645505
0.131031
0.02976
0.051016
0.072273
0.346154
0.219012
0.152318
0.137837
0.103693
0.082769
0
0.25661
0.18877
22,938
510
86
44.976471
0.552397
0.000392
0
0.194882
0
0.003937
0.716884
0.640686
0
0
0
0
0
1
0
false
0
0
0
0
0.005906
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
911039f1b6fa87987ed1926db40c9eecf549491c
355
py
Python
src/modules/__init__.py
ljgago/matebot
9540912b3a8159cfdabd362044f277ec57cb6aaa
[ "MIT" ]
6
2020-12-04T21:01:20.000Z
2022-02-15T22:03:11.000Z
src/modules/__init__.py
ljgago/matebot
9540912b3a8159cfdabd362044f277ec57cb6aaa
[ "MIT" ]
5
2021-03-05T01:41:21.000Z
2021-04-20T19:22:47.000Z
src/modules/__init__.py
ljgago/matebot
9540912b3a8159cfdabd362044f277ec57cb6aaa
[ "MIT" ]
4
2020-12-04T16:13:34.000Z
2021-01-25T00:23:18.000Z
# -*- coding: utf-8 -*- from .help import Help from .welcome import Welcome from .faq import FAQ from .events import Events from .scheduler import Scheduler from .polls import Polls from .search import Search from .newMembers import NewMembers from .info import Info from .mentorships import Mentorship # from .mentionNewMembers import MentionNewMembers
25.357143
50
0.797183
47
355
6.021277
0.361702
0
0
0
0
0
0
0
0
0
0
0.003289
0.143662
355
13
51
27.307692
0.927632
0.197183
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
911e0aca4da1197da593c4874b8d3f08e745e271
189
py
Python
note_server/note/urls.py
tokibito/note-app-django-vue-javascript
62ae8a761a2d44107d6cc1d10575c2e8099312ae
[ "MIT" ]
33
2018-03-17T08:06:29.000Z
2020-03-17T02:41:35.000Z
note_server/note/urls.py
tokibito/note-app-django-vue-javascript
62ae8a761a2d44107d6cc1d10575c2e8099312ae
[ "MIT" ]
null
null
null
note_server/note/urls.py
tokibito/note-app-django-vue-javascript
62ae8a761a2d44107d6cc1d10575c2e8099312ae
[ "MIT" ]
1
2019-06-12T01:20:56.000Z
2019-06-12T01:20:56.000Z
from rest_framework.routers import DefaultRouter from .views import PageViewSet router = DefaultRouter() router.register(r'page', PageViewSet, base_name='page') urlpatterns = router.urls
23.625
55
0.809524
23
189
6.565217
0.695652
0
0
0
0
0
0
0
0
0
0
0
0.100529
189
7
56
27
0.888235
0
0
0
0
0
0.042328
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
912550fbf7c0ed30991f5583bdd234d955b63182
104
py
Python
nptimelapse/extensions.py
olus2000/nptimelapse
8623a55310b30921a76d1c071df0636869a2a806
[ "MIT" ]
1
2021-09-22T11:57:17.000Z
2021-09-22T11:57:17.000Z
nptimelapse/extensions.py
olus2000/nptimelapse
8623a55310b30921a76d1c071df0636869a2a806
[ "MIT" ]
2
2021-12-23T09:30:14.000Z
2021-12-28T09:42:34.000Z
nptimelapse/extensions.py
olus2000/nptimelapse
8623a55310b30921a76d1c071df0636869a2a806
[ "MIT" ]
1
2021-12-20T16:24:54.000Z
2021-12-20T16:24:54.000Z
from flask_sqlalchemy import SQLAlchemy from celery import Celery db = SQLAlchemy() celery = Celery()
14.857143
39
0.788462
13
104
6.230769
0.461538
0
0
0
0
0
0
0
0
0
0
0
0.153846
104
6
40
17.333333
0.920455
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
91490cdce355de5623a2281046322a46c761620a
199
py
Python
tests/unit_tests/utils/test_handlers/__init__.py
openjusticebaltimore/ResponseBot
e224fe5251190f2f4a8901afbd622c411601e86e
[ "Apache-2.0" ]
16
2016-05-11T00:04:18.000Z
2020-07-21T20:32:11.000Z
tests/unit_tests/utils/test_handlers/__init__.py
openjusticebaltimore/ResponseBot
e224fe5251190f2f4a8901afbd622c411601e86e
[ "Apache-2.0" ]
6
2016-05-10T03:34:17.000Z
2016-06-14T02:42:45.000Z
tests/unit_tests/utils/test_handlers/__init__.py
openjusticebaltimore/ResponseBot
e224fe5251190f2f4a8901afbd622c411601e86e
[ "Apache-2.0" ]
6
2016-05-09T20:17:43.000Z
2020-08-15T04:43:45.000Z
from responsebot.handlers import BaseTweetHandler, register_handler @register_handler class HandlerClassInInit(BaseTweetHandler): def on_tweet(self, tweet): print('HandlerClassInInit')
24.875
67
0.798995
19
199
8.210526
0.736842
0.192308
0
0
0
0
0
0
0
0
0
0
0.130653
199
7
68
28.428571
0.901734
0
0
0
0
0
0.090452
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
e67d231afb870f1f084df8ebe882acd8b7ebbc6d
3,260
py
Python
tf2tags/migrations/0004_auto_20180119_2102.py
DrDos0016/tf2tags
ce539a8e0dfc56d37f4efd31a0b4924f7e6e9983
[ "MIT" ]
11
2018-01-31T08:09:37.000Z
2020-04-26T08:52:43.000Z
tf2tags/migrations/0004_auto_20180119_2102.py
DrDos0016/tf2tags
ce539a8e0dfc56d37f4efd31a0b4924f7e6e9983
[ "MIT" ]
null
null
null
tf2tags/migrations/0004_auto_20180119_2102.py
DrDos0016/tf2tags
ce539a8e0dfc56d37f4efd31a0b4924f7e6e9983
[ "MIT" ]
2
2020-09-02T14:37:52.000Z
2021-03-09T17:03:18.000Z
# Generated by Django 2.0.1 on 2018-01-19 21:02 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('tf2tags', '0003_auto_20160802_0059'), ] operations = [ migrations.RemoveField( model_name='users', name='admin', ), migrations.RemoveField( model_name='users', name='bonus_comments', ), migrations.RemoveField( model_name='users', name='bonus_submitted', ), migrations.RemoveField( model_name='users', name='last_visit', ), migrations.AlterField( model_name='bans', name='begins', field=models.DateTimeField(auto_now_add=True, verbose_name='Ban Start'), ), migrations.AlterField( model_name='bans', name='ends', field=models.DateTimeField(verbose_name='Ban End'), ), migrations.AlterField( model_name='news', name='author', field=models.CharField(default='Dr. Dos', max_length=50), ), migrations.AlterField( model_name='news', name='profile', field=models.CharField(default='id/dr_dos', max_length=50), ), migrations.AlterField( model_name='submissions', name='color', field=models.CharField(default='FFD700', max_length=6), ), migrations.AlterField( model_name='submissions', name='desc', field=models.CharField(db_index=True, default='', max_length=200, verbose_name='description'), ), migrations.AlterField( model_name='submissions', name='filter', field=models.CharField(default='', max_length=50), ), migrations.AlterField( model_name='submissions', name='ip', field=models.GenericIPAddressField(default=''), ), migrations.AlterField( model_name='submissions', name='keywords', field=models.CharField(db_index=True, default='', max_length=500), ), migrations.AlterField( model_name='submissions', name='paint', field=models.CharField(default='', max_length=6), ), migrations.AlterField( model_name='submissions', name='particles', field=models.CharField(default='', max_length=50), ), migrations.AlterField( model_name='submissions', name='prefix', field=models.CharField(default='', max_length=50), ), 
migrations.AlterField( model_name='submissions', name='user', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='tf2tags.Users'), ), migrations.AlterField( model_name='votes', name='user', field=models.ForeignKey(blank=True, default=0, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tf2tags.Users'), ), ]
32.277228
136
0.55184
299
3,260
5.866221
0.284281
0.09236
0.199544
0.231471
0.63683
0.606043
0.403079
0.315279
0.315279
0.140251
0
0.025815
0.322699
3,260
100
137
32.6
0.768569
0.013804
0
0.617021
1
0
0.113601
0.007158
0
0
0
0
0
1
0
false
0
0.021277
0
0.053191
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e683864a1b4704f9c8763e03394da222d7e4c6cd
234
py
Python
classes/cloud/S3.py
includeamin/ffmpeg-cloud
f24453fe2369cbde047570f3cde93a0b47781d79
[ "Apache-2.0" ]
1
2020-05-19T13:50:45.000Z
2020-05-19T13:50:45.000Z
classes/cloud/S3.py
includeamin/ffmpeg-cloud
f24453fe2369cbde047570f3cde93a0b47781d79
[ "Apache-2.0" ]
null
null
null
classes/cloud/S3.py
includeamin/ffmpeg-cloud
f24453fe2369cbde047570f3cde93a0b47781d79
[ "Apache-2.0" ]
null
null
null
from classes.cloud.BaseCloud import BaseCloud class S3(BaseCloud): def __init__(self): pass def upload(self, path: str, file_name: str): pass def download(self, file_name: str, path: str): pass
18
50
0.636752
31
234
4.612903
0.548387
0.097902
0.153846
0
0
0
0
0
0
0
0
0.005848
0.269231
234
12
51
19.5
0.830409
0
0
0.375
0
0
0
0
0
0
0
0
0
1
0.375
false
0.375
0.125
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
e69787cad15ce9692d2aafc33114d792d4093f76
2,102
py
Python
app/models.py
NderituMwanu/personal-blog-website
c0fb19bb008c1fc7142203799f272e4834781342
[ "MIT" ]
null
null
null
app/models.py
NderituMwanu/personal-blog-website
c0fb19bb008c1fc7142203799f272e4834781342
[ "MIT" ]
null
null
null
app/models.py
NderituMwanu/personal-blog-website
c0fb19bb008c1fc7142203799f272e4834781342
[ "MIT" ]
null
null
null
from flask_login import UserMixin from . import db from werkzeug.security import generate_password_hash,check_password_hash from datetime import datetime class Subscribe: __tablename__ = 'subscriptions' blogname = db.Column(db.String(255)) class Quote: def __init__(self,author,quote): self.author = author self.quote = quote class Role(db.Model): __tablename__ = 'roles' id = db.Column(db.Integer,primary_key = True) name = db.Column(db.String(255)) users = db.relationship('User',backref = 'role',lazy="dynamic") def __repr__(self): return f'User {self.name}' class Post(db.Model): __tablename__ = 'post' id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(100)) username = db.Column(db.String(20)) content = db.Column(db.Text) date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) def __repr__(self): return f"Post('{self.title}', '{self.date_posted}', '{self.content}', '{self.user_id}')" class Comment(db.Model): __tablename__='comment' comment = db.Column(db.Text, primary_key=True) def __repr__(self): return f'comment{self.comment}' class User(UserMixin,db.Model): __tablename_ = 'users' id = db.Column(db.Integer,primary_key = True) email = db.Column(db.String(200)) username = db.Column(db.String(255)) image_file = db.Column(db.String(20), nullable=False, default='default.jpg') role_id = db.Column(db.Integer,db.ForeignKey('roles.id')) pass_secure = db.Column(db.String(255)) posts = db.relationship('Post', backref='author',lazy=True) @property def password(self): raise AttributeError('You cannot read the password attribute') @password.setter def password(self, password): self.pass_secure = generate_password_hash(password) def verify_password(self,password): return check_password_hash(self.pass_secure,password) def __repr__(self): return f'User {self.username}'
26.948718
96
0.683159
280
2,102
4.910714
0.275
0.093091
0.116364
0.093091
0.274182
0.154909
0.154909
0.072
0
0
0
0.012806
0.182683
2,102
78
97
26.948718
0.787544
0
0
0.134615
1
0.019231
0.122682
0.019971
0
0
0
0
0
1
0.153846
false
0.173077
0.076923
0.096154
0.884615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
e6aef3ae686c2f2c9c168ae763787a91a6223e10
2,571
py
Python
tests/parsers/test_voc_parsers.py
lee00286/icevision
e696481fc01bd1b9411bcb7ea1853d9e47bb4d04
[ "Apache-2.0" ]
null
null
null
tests/parsers/test_voc_parsers.py
lee00286/icevision
e696481fc01bd1b9411bcb7ea1853d9e47bb4d04
[ "Apache-2.0" ]
null
null
null
tests/parsers/test_voc_parsers.py
lee00286/icevision
e696481fc01bd1b9411bcb7ea1853d9e47bb4d04
[ "Apache-2.0" ]
null
null
null
from icevision.all import * def test_voc_annotation_parser(samples_source, voc_class_map): annotation_parser = parsers.voc( annotations_dir=samples_source / "voc/Annotations", images_dir=samples_source / "voc/JPEGImages", class_map=voc_class_map, ) records = annotation_parser.parse(data_splitter=SingleSplitSplitter())[0] assert len(records) == 2 record = records[0] expected = { "imageid": 0, "filepath": samples_source / "voc/JPEGImages/2007_000063.jpg", "width": 500, "height": 375, "labels": [voc_class_map.get_name(k) for k in ["dog", "chair"]], "bboxes": [BBox.from_xyxy(123, 115, 379, 275), BBox.from_xyxy(75, 1, 428, 375)], } assert record == expected record = records[1] expected = { "imageid": 1, "filepath": samples_source / "voc/JPEGImages/2011_003353.jpg", "height": 500, "width": 375, "labels": [voc_class_map.get_name("person")], "bboxes": [BBox.from_xyxy(130, 45, 375, 470)], } assert record == expected def test_voc_mask_parser(samples_source): mask_parser = parsers.VocMaskParser( masks_dir=samples_source / "voc/SegmentationClass" ) records = mask_parser.parse(data_splitter=SingleSplitSplitter())[0] record = records[0] expected = { "imageid": 0, "masks": [ VocMaskFile(samples_source / "voc/SegmentationClass/2007_000063.png"), ], } assert record == expected def test_voc_combined_parser(samples_source, voc_class_map): annotation_parser = parsers.VocXmlParser( annotations_dir=samples_source / "voc/Annotations", images_dir=samples_source / "voc/JPEGImages", class_map=voc_class_map, ) mask_parser = parsers.VocMaskParser( masks_dir=samples_source / "voc/SegmentationClass" ) combined_parser = parsers.CombinedParser(annotation_parser, mask_parser) records = combined_parser.parse(data_splitter=SingleSplitSplitter())[0] assert len(records) == 1 record = records[0] expected = { "imageid": 0, "filepath": samples_source / "voc/JPEGImages/2007_000063.jpg", "width": 500, "height": 375, "labels": [voc_class_map.get_name(k) for k in ["dog", "chair"]], "bboxes": 
[BBox.from_xyxy(123, 115, 379, 275), BBox.from_xyxy(75, 1, 428, 375)], "masks": [ VocMaskFile(samples_source / "voc/SegmentationClass/2007_000063.png") ], } assert record == expected
31.740741
88
0.637495
292
2,571
5.369863
0.243151
0.116071
0.132653
0.072704
0.804847
0.783163
0.711097
0.693878
0.693878
0.55102
0
0.06802
0.233761
2,571
80
89
32.1375
0.727919
0
0
0.567164
0
0
0.162194
0.080124
0
0
0
0
0.089552
1
0.044776
false
0
0.014925
0
0.059701
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e6c11e93c300e462b3fc027a82967eef75c6b6cc
91
py
Python
Code/venv/src/Metadata/apps.py
WebMetadataRetrieval/15-Web_Metadata_Retrieval
96a9c8869ccc5429c4de9e97d37705bbb9e19c5b
[ "Apache-2.0" ]
null
null
null
Code/venv/src/Metadata/apps.py
WebMetadataRetrieval/15-Web_Metadata_Retrieval
96a9c8869ccc5429c4de9e97d37705bbb9e19c5b
[ "Apache-2.0" ]
38
2021-04-01T18:09:31.000Z
2021-05-08T17:27:03.000Z
Code/venv/src/Metadata/apps.py
WebMetadataRetrieval/15-Web_Metadata_Retrieval
96a9c8869ccc5429c4de9e97d37705bbb9e19c5b
[ "Apache-2.0" ]
3
2021-04-16T07:25:11.000Z
2022-01-29T10:24:33.000Z
from django.apps import AppConfig class MetadataConfig(AppConfig): name = 'Metadata'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.164835
91
5
34
18.2
0.907895
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
e6d5577badec9b3b208a5f47344825e2d8d9c956
373
py
Python
tests/test.py
zhreshold/decord-distro
e531e606a76b6949d87707493ef9b9b0a6a5b676
[ "MIT" ]
null
null
null
tests/test.py
zhreshold/decord-distro
e531e606a76b6949d87707493ef9b9b0a6a5b676
[ "MIT" ]
2
2020-04-28T07:56:25.000Z
2020-10-01T20:39:36.000Z
tests/test.py
zhreshold/decord-distro
e531e606a76b6949d87707493ef9b9b0a6a5b676
[ "MIT" ]
null
null
null
import unittest import sys class DecordTest(unittest.TestCase): """ Simple functionality tests. """ def test_import(self): """ Test that the cv2 module can be imported. """ import decord def test_video_capture(self): import decord as dc cap = dc.VideoReader("SampleVideo_1280x720_1mb.mp4") self.assertTrue(len(cap))
21.941176
60
0.656836
45
373
5.333333
0.711111
0.058333
0
0
0
0
0
0
0
0
0
0.035587
0.246649
373
16
61
23.3125
0.818505
0.187668
0
0
0
0
0.096886
0.096886
0
0
0
0
0.111111
1
0.222222
false
0
0.555556
0
0.888889
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
3
e6da3997507cf70d600304b6451e9ae23c5aeb9e
277
py
Python
output/models/nist_data/atomic/unsigned_byte/schema_instance/nistschema_sv_iv_atomic_unsigned_byte_pattern_1_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/nist_data/atomic/unsigned_byte/schema_instance/nistschema_sv_iv_atomic_unsigned_byte_pattern_1_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/nist_data/atomic/unsigned_byte/schema_instance/nistschema_sv_iv_atomic_unsigned_byte_pattern_1_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
from output.models.nist_data.atomic.unsigned_byte.schema_instance.nistschema_sv_iv_atomic_unsigned_byte_pattern_1_xsd.nistschema_sv_iv_atomic_unsigned_byte_pattern_1 import NistschemaSvIvAtomicUnsignedBytePattern1 __all__ = [ "NistschemaSvIvAtomicUnsignedBytePattern1", ]
46.166667
213
0.902527
31
277
7.354839
0.612903
0.184211
0.236842
0.175439
0.350877
0.350877
0.350877
0.350877
0
0
0
0.015209
0.050542
277
5
214
55.4
0.851711
0
0
0
0
0
0.144404
0.144404
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e6f17dc39a4d0da815a20a12fdb579f2e0de6351
118
py
Python
web_scraping/ec2files/ec2file122.py
nikibhatt/Groa
fc2d4ae87cb825e6d54a0831c72be16541eebe61
[ "MIT" ]
1
2020-04-08T19:44:30.000Z
2020-04-08T19:44:30.000Z
web_scraping/ec2files/ec2file122.py
cmgospod/Groa
31b3624bfe61e772b55f8175b4e95d63c9e67966
[ "MIT" ]
null
null
null
web_scraping/ec2files/ec2file122.py
cmgospod/Groa
31b3624bfe61e772b55f8175b4e95d63c9e67966
[ "MIT" ]
1
2020-09-12T07:07:41.000Z
2020-09-12T07:07:41.000Z
from scraper import * s = Scraper(start=217404, end=219185, max_iter=30, scraper_instance=122) s.scrape_letterboxd()
39.333333
73
0.779661
18
118
4.944444
0.833333
0
0
0
0
0
0
0
0
0
0
0.160377
0.101695
118
3
74
39.333333
0.679245
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
e6f31829da6a0250fa1d861b0803a35547e94c81
156
py
Python
src/Controllers/testRecommendation.py
quicksloth/source-code-recommendation-server
2b2315d15146af0e362cda3a4f791df5a064d8c7
[ "Apache-2.0" ]
3
2019-01-24T22:07:49.000Z
2021-03-19T04:17:16.000Z
src/Controllers/testRecommendation.py
quicksloth/source-code-recommendation-server
2b2315d15146af0e362cda3a4f791df5a064d8c7
[ "Apache-2.0" ]
3
2017-08-27T18:01:07.000Z
2017-09-16T13:46:20.000Z
src/Controllers/testRecommendation.py
quicksloth/source-code-recommendation-server
2b2315d15146af0e362cda3a4f791df5a064d8c7
[ "Apache-2.0" ]
null
null
null
from EvaluatorController import EvaluatorController t = EvaluatorController(complex_network=None) print(t.init_get_recommendation_code_with_mocked_data())
31.2
56
0.884615
18
156
7.277778
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.057692
156
4
57
39
0.891156
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
e6f8a35776c6170403c587266f56594e96a0e3cd
548
py
Python
extras/mas_integrador_loco/creador_funciones.py
ivigilante/curso-python-itba
57ce47a5202c3d4fe3783fea52f550cfec957331
[ "MIT" ]
null
null
null
extras/mas_integrador_loco/creador_funciones.py
ivigilante/curso-python-itba
57ce47a5202c3d4fe3783fea52f550cfec957331
[ "MIT" ]
null
null
null
extras/mas_integrador_loco/creador_funciones.py
ivigilante/curso-python-itba
57ce47a5202c3d4fe3783fea52f550cfec957331
[ "MIT" ]
null
null
null
contador = 0 file = open("funciones_matematicas.py","w") funcs = {} def print_contador(): print(contador) def aumentar_contador(): global contador contador += 1 def crear_funcion(): ecuacion = input("Ingrese ecuacion: ") def agregar_funcion(): f = open("funciones_matematicas.py","w") ecuacion = input("Ingrese ecuacion: ") f.write("funcs = \{}\n") f.write("def f1(x):\n") f.write("\treturn "+ecuacion) f.close() def test_func(): f = open("funciones_matematicas.py","w") f.write("def f():\n") f.write("\tprint(\"hola\")") f.close()
22.833333
43
0.664234
77
548
4.623377
0.402597
0.08427
0.202247
0.219101
0.233146
0.157303
0
0
0
0
0
0.006263
0.125912
548
24
44
22.833333
0.736952
0
0
0.272727
0
0
0.300546
0.131148
0
0
0
0
0
1
0.227273
false
0
0
0
0.227273
0.136364
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
fc2ac70e03072c992c5cf985107fe4ac1b1f9bb9
117
py
Python
agc/012/a/answer.py
TakuyaNoguchi/atcoder
d079402e6fe9c9aaf3a6fc9272331ee71fc497da
[ "MIT" ]
null
null
null
agc/012/a/answer.py
TakuyaNoguchi/atcoder
d079402e6fe9c9aaf3a6fc9272331ee71fc497da
[ "MIT" ]
null
null
null
agc/012/a/answer.py
TakuyaNoguchi/atcoder
d079402e6fe9c9aaf3a6fc9272331ee71fc497da
[ "MIT" ]
null
null
null
N = int(input()) a = sorted(map(int, input().split()), reverse=True) print(sum([a[n] for n in range(1, N * 2, 2)]))
23.4
51
0.581197
23
117
2.956522
0.695652
0.235294
0
0
0
0
0
0
0
0
0
0.030303
0.153846
117
4
52
29.25
0.656566
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fc32dccc3dd354fc5943d68255ceb5f42f5aecf4
123
py
Python
config/settings/test.py
farhanmasud/django-tailwind-starter-template
ad7324b8807b523e6e8b93026a7622e14a779abb
[ "MIT" ]
2
2020-03-11T17:16:29.000Z
2020-03-12T02:06:43.000Z
config/settings/test.py
farhanmasud/django-tailwind-starter-template
ad7324b8807b523e6e8b93026a7622e14a779abb
[ "MIT" ]
4
2021-03-19T01:06:29.000Z
2021-09-22T18:49:49.000Z
config/settings/test.py
farhanmasud/django-tailwind-starter-template
ad7324b8807b523e6e8b93026a7622e14a779abb
[ "MIT" ]
null
null
null
from .base import * # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ["*"]
17.571429
65
0.707317
18
123
4.777778
0.944444
0
0
0
0
0
0
0
0
0
0
0
0.186992
123
6
66
20.5
0.86
0.512195
0
0
0
0
0.017241
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fc4fdfce77c0f8a50cda697d91bdda382b77deba
75
py
Python
management/pycom_functions/in_every_program/measure_loop.py
AaltoIIC/OSEMA
38740e3dcfeb72a6d87e25190e6a73f6c60b199b
[ "MIT" ]
2
2021-04-03T13:19:32.000Z
2022-01-03T00:38:55.000Z
management/pycom_functions/in_every_program/measure_loop.py
AaltoIIC/OSEMA
38740e3dcfeb72a6d87e25190e6a73f6c60b199b
[ "MIT" ]
null
null
null
management/pycom_functions/in_every_program/measure_loop.py
AaltoIIC/OSEMA
38740e3dcfeb72a6d87e25190e6a73f6c60b199b
[ "MIT" ]
null
null
null
def measure_loop(i2c): print("Start measurement") m = Measure(i2c)
18.75
30
0.666667
10
75
4.9
0.8
0
0
0
0
0
0
0
0
0
0
0.033333
0.2
75
3
31
25
0.783333
0
0
0
0
0
0.226667
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
fc6fac5dd0dfba23b9ce2ac84f655682a4330cd8
685
py
Python
examples/duplo/paths.py
dgerod/behavior_tree_learning
71da80c91ecd48fd5da377f83604b62112ba9629
[ "Apache-2.0" ]
7
2022-02-09T12:51:51.000Z
2022-03-19T14:40:16.000Z
examples/duplo/paths.py
dgerod/behavior_tree_learning
71da80c91ecd48fd5da377f83604b62112ba9629
[ "Apache-2.0" ]
2
2022-02-03T10:54:41.000Z
2022-02-15T10:32:03.000Z
examples/duplo/paths.py
dgerod/behavior_tree_learning
71da80c91ecd48fd5da377f83604b62112ba9629
[ "Apache-2.0" ]
null
null
null
import os import sys _this_file_path = os.path.abspath(__file__) _PACKAGE_DIRECTORY = os.path.dirname(os.path.dirname(os.path.dirname(_this_file_path))) _EXAMPLES_DIRECTORY = os.path.dirname(os.path.dirname(_this_file_path)) _CURRENT_EXAMPLE_DIRECTORY = os.path.dirname(_this_file_path) def add_modules_to_path(): sys.path.append(os.path.normpath(_PACKAGE_DIRECTORY)) sys.path.append(os.path.normpath(_EXAMPLES_DIRECTORY)) def get_example_directory(): return _CURRENT_EXAMPLE_DIRECTORY def get_outputs_directory(): return os.path.join(_CURRENT_EXAMPLE_DIRECTORY, 'results') def get_log_directory(): return os.path.join(_CURRENT_EXAMPLE_DIRECTORY, 'logs')
26.346154
87
0.79854
98
685
5.122449
0.265306
0.131474
0.155378
0.131474
0.561753
0.561753
0.404382
0.342629
0.151394
0
0
0
0.093431
685
25
88
27.4
0.808374
0
0
0
0
0
0.016058
0
0
0
0
0
0
1
0.266667
false
0
0.133333
0.2
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
fc7bb7dc0fa84e8ab9ca3d0fc0ab32a0f401a8a2
87
py
Python
apps/shortener/services/__init__.py
ShAlireza/Yektanet
9e638395b85346b7536cf422c514ae7762faa9b4
[ "MIT" ]
null
null
null
apps/shortener/services/__init__.py
ShAlireza/Yektanet
9e638395b85346b7536cf422c514ae7762faa9b4
[ "MIT" ]
null
null
null
apps/shortener/services/__init__.py
ShAlireza/Yektanet
9e638395b85346b7536cf422c514ae7762faa9b4
[ "MIT" ]
null
null
null
from .short_url_service import ShortenedURLService __all__ = ('ShortenedURLService',)
21.75
50
0.827586
8
87
8.25
0.875
0
0
0
0
0
0
0
0
0
0
0
0.091954
87
3
51
29
0.835443
0
0
0
0
0
0.218391
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fc85ce2a147eac19d64df138386351c2106bd558
12,149
py
Python
usaspending_api/accounts/migrations/0001_initial.py
g4brielvs/usaspending-api
bae7da2c204937ec1cdf75c052405b13145728d5
[ "CC0-1.0" ]
217
2016-11-03T17:09:53.000Z
2022-03-10T04:17:54.000Z
usaspending_api/accounts/migrations/0001_initial.py
g4brielvs/usaspending-api
bae7da2c204937ec1cdf75c052405b13145728d5
[ "CC0-1.0" ]
622
2016-09-02T19:18:23.000Z
2022-03-29T17:11:01.000Z
usaspending_api/accounts/migrations/0001_initial.py
g4brielvs/usaspending-api
bae7da2c204937ec1cdf75c052405b13145728d5
[ "CC0-1.0" ]
93
2016-09-07T20:28:57.000Z
2022-02-25T00:25:27.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-10-11 16:17 from __future__ import unicode_literals import django.db.models.deletion from django.db import migrations, models from usaspending_api.common.helpers.generic_helper import FY_PG_FUNCTION_DEF class Migration(migrations.Migration): initial = True dependencies = [ ('submissions', '0001_initial'), ('references', '0001_initial'), ] operations = [ migrations.CreateModel( name='AppropriationAccountBalances', fields=[ ('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)), ('appropriation_account_balances_id', models.AutoField(primary_key=True, serialize=False)), ('budget_authority_unobligated_balance_brought_forward_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('adjustments_to_unobligated_balance_brought_forward_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('budget_authority_appropriated_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('borrowing_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('contract_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('spending_authority_from_offsetting_collections_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('other_budgetary_resources_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('total_budgetary_resources_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('deobligations_recoveries_refunds_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('unobligated_balance_cpe', models.DecimalField(decimal_places=2, max_digits=23)), 
('status_of_budgetary_resources_total_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('obligations_incurred_total_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('drv_appropriation_availability_period_start_date', models.DateField(blank=True, null=True)), ('drv_appropriation_availability_period_end_date', models.DateField(blank=True, null=True)), ('drv_appropriation_account_expired_status', models.TextField(blank=True, null=True)), ('drv_obligations_unpaid_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('drv_other_obligated_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('reporting_period_start', models.DateField(blank=True, null=True)), ('reporting_period_end', models.DateField(blank=True, null=True)), ('last_modified_date', models.DateField(blank=True, null=True)), ('certified_date', models.DateField(blank=True, null=True)), ('create_date', models.DateTimeField(auto_now_add=True, null=True)), ('update_date', models.DateTimeField(auto_now=True, null=True)), ('final_of_fy', models.BooleanField(db_index=True, default=False)), ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')), ], options={ 'db_table': 'appropriation_account_balances', 'managed': True, }, ), migrations.CreateModel( name='AppropriationAccountBalancesQuarterly', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)), ('budget_authority_unobligated_balance_brought_forward_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('adjustments_to_unobligated_balance_brought_forward_cpe', models.DecimalField(decimal_places=2, max_digits=23)), 
('budget_authority_appropriated_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('borrowing_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('contract_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('spending_authority_from_offsetting_collections_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('other_budgetary_resources_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('total_budgetary_resources_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('deobligations_recoveries_refunds_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('unobligated_balance_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('status_of_budgetary_resources_total_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('obligations_incurred_total_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)), ('create_date', models.DateTimeField(auto_now_add=True, null=True)), ('update_date', models.DateTimeField(auto_now=True, null=True)), ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')), ], options={ 'db_table': 'appropriation_account_balances_quarterly', 'managed': True, }, ), migrations.CreateModel( name='BudgetAuthority', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('agency_identifier', models.TextField(db_index=True)), ('fr_entity_code', models.TextField(db_index=True, null=True)), ('year', models.IntegerField()), ('amount', models.BigIntegerField(null=True)), ], options={ 'db_table': 'budget_authority', }, ), migrations.CreateModel( name='FederalAccount', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('agency_identifier', models.TextField(db_index=True)), ('main_account_code', models.TextField(db_index=True)), ('account_title', models.TextField()), ('federal_account_code', models.TextField(null=True)), ], options={ 'db_table': 'federal_account', 'managed': True, }, ), migrations.CreateModel( name='TreasuryAppropriationAccount', fields=[ ('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)), ('treasury_account_identifier', models.AutoField(primary_key=True, serialize=False)), ('tas_rendering_label', models.TextField(blank=True, null=True)), ('allocation_transfer_agency_id', models.TextField(blank=True, null=True)), ('agency_id', models.TextField()), ('beginning_period_of_availability', models.TextField(blank=True, null=True)), ('ending_period_of_availability', models.TextField(blank=True, null=True)), ('availability_type_code', models.TextField(blank=True, null=True)), ('availability_type_code_description', models.TextField(blank=True, null=True)), ('main_account_code', models.TextField()), ('sub_account_code', models.TextField()), ('account_title', models.TextField(blank=True, null=True)), ('reporting_agency_id', models.TextField(blank=True, null=True)), ('reporting_agency_name', models.TextField(blank=True, null=True)), ('budget_bureau_code', models.TextField(blank=True, null=True)), ('budget_bureau_name', models.TextField(blank=True, null=True)), ('fr_entity_code', models.TextField(blank=True, null=True)), ('fr_entity_description', models.TextField(blank=True, null=True)), ('budget_function_code', models.TextField(blank=True, null=True)), ('budget_function_title', models.TextField(blank=True, null=True)), ('budget_subfunction_code', models.TextField(blank=True, null=True)), ('budget_subfunction_title', models.TextField(blank=True, 
null=True)), ('drv_appropriation_availability_period_start_date', models.DateField(blank=True, null=True)), ('drv_appropriation_availability_period_end_date', models.DateField(blank=True, null=True)), ('drv_appropriation_account_expired_status', models.TextField(blank=True, null=True)), ('create_date', models.DateTimeField(auto_now_add=True, null=True)), ('update_date', models.DateTimeField(auto_now=True, null=True)), ('internal_start_date', models.DateField(blank=True, null=True)), ('internal_end_date', models.DateField(blank=True, null=True)), ('awarding_toptier_agency', models.ForeignKey(help_text='The toptier agency object associated with the ATA', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='tas_ata', to='references.ToptierAgency')), ('federal_account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='accounts.FederalAccount')), ('funding_toptier_agency', models.ForeignKey(help_text='The toptier agency object associated with the AID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='tas_aid', to='references.ToptierAgency')), ], options={ 'db_table': 'treasury_appropriation_account', 'managed': True, }, ), migrations.AlterUniqueTogether( name='federalaccount', unique_together=set([('agency_identifier', 'main_account_code')]), ), migrations.AlterUniqueTogether( name='budgetauthority', unique_together=set([('agency_identifier', 'fr_entity_code', 'year')]), ), migrations.AddField( model_name='appropriationaccountbalancesquarterly', name='treasury_account_identifier', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.TreasuryAppropriationAccount'), ), migrations.AddField( model_name='appropriationaccountbalances', name='treasury_account_identifier', field=models.ForeignKey(db_column='treasury_account_identifier', on_delete=django.db.models.deletion.CASCADE, related_name='account_balances', to='accounts.TreasuryAppropriationAccount'), ), 
migrations.RunSQL(sql=[FY_PG_FUNCTION_DEF]), ]
70.225434
240
0.656844
1,292
12,149
5.878483
0.151703
0.058986
0.05688
0.064911
0.79605
0.766162
0.739171
0.691639
0.612245
0.589072
0
0.011471
0.217878
12,149
172
241
70.633721
0.787834
0.005597
0
0.533742
1
0
0.291605
0.190263
0
0
0
0
0
1
0
false
0
0.02454
0
0.04908
0
0
0
0
null
0
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fc9dc3621394219b868a359293cec16d7b2cf071
278
py
Python
hash_dict/__init__.py
Cologler/hash-dict-python
bf483cb2365ae9545c207830e6b726b6712c5de1
[ "MIT" ]
null
null
null
hash_dict/__init__.py
Cologler/hash-dict-python
bf483cb2365ae9545c207830e6b726b6712c5de1
[ "MIT" ]
null
null
null
hash_dict/__init__.py
Cologler/hash-dict-python
bf483cb2365ae9545c207830e6b726b6712c5de1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com> # ---------- # # ---------- from .hash_dict import HashDict from .hash_set import HashSet from .comparer import ( IEqualityComparer, ObjectComparer, AnyComparer, StringComparers, )
17.375
56
0.640288
28
278
6.285714
0.821429
0.090909
0
0
0
0
0
0
0
0
0
0.039301
0.176259
278
15
57
18.533333
0.729258
0.352518
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.428571
0
0.428571
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
5d95f58ecb354c99d577464809c6a84b8fbb5592
251
py
Python
Projetos Python/pythonexercicios/des008.py
Moyses-Nunes/Projetos-Python
71ae170fb0d7be6afea18608bca630b57b9f0dff
[ "MIT" ]
null
null
null
Projetos Python/pythonexercicios/des008.py
Moyses-Nunes/Projetos-Python
71ae170fb0d7be6afea18608bca630b57b9f0dff
[ "MIT" ]
null
null
null
Projetos Python/pythonexercicios/des008.py
Moyses-Nunes/Projetos-Python
71ae170fb0d7be6afea18608bca630b57b9f0dff
[ "MIT" ]
null
null
null
largura = float(input('Digite a largura da parede:')) altura = float(input('Digite a altura da parede:')) area = altura * largura qntd = area / 2 print('A área da parede é:{} m^2'.format(area)) print('São necessários {} litros de tinta'.format(qntd))
35.857143
56
0.697211
40
251
4.375
0.525
0.137143
0.182857
0.194286
0
0
0
0
0
0
0
0.009302
0.143426
251
6
57
41.833333
0.804651
0
0
0
0
0
0.446215
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3