hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
6dec948a6fe6c2e2c800839b880bad85b5a6dfd3
96
py
Python
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/ed448.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/ed448.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/ed448.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/a1/1f/a3/e2319c5949c6c568b51b2807c5565b82448e287b11eb2d44dcb7fac183
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.4375
0
96
1
96
96
0.458333
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
096fcd42c6d9f08d5d089e2c6170449d67a02488
4,290
py
Python
deprecated/examples/simnet_bow/py_reader_generator.py
hutuxian/FleetX
843c7aa33f5a14680becf058a3aaf0327eefafd4
[ "Apache-2.0" ]
170
2020-08-12T12:07:01.000Z
2022-03-07T02:38:26.000Z
deprecated/examples/simnet_bow/py_reader_generator.py
hutuxian/FleetX
843c7aa33f5a14680becf058a3aaf0327eefafd4
[ "Apache-2.0" ]
195
2020-08-13T03:22:15.000Z
2022-03-30T07:40:25.000Z
deprecated/examples/simnet_bow/py_reader_generator.py
hutuxian/FleetX
843c7aa33f5a14680becf058a3aaf0327eefafd4
[ "Apache-2.0" ]
67
2020-08-14T02:07:46.000Z
2022-03-28T10:05:33.000Z
#!/usr/bin/python # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # There are 13 integer features and 26 categorical features import random def combination(x, y): res = [[[xi, yi] for yi in y] for xi in x] return res[0] def get_one_data(file_list, sample_rate): for file in file_list: contents = [] with open(file, "r") as fin: for q in fin.readlines(): """query_ids, pos_title_ids, neg_title_ids, label""" one_data = q.split(";")[:-1] if len(one_data) < 4: print("data format error!, please check!", q) continue label = int(one_data[0]) pos_title_num, neg_title_num = int(one_data[1].split(" ")[0]), int(one_data[1].split(" ")[1]) query_ids = [int(x) for x in one_data[2].split(" ")] if pos_title_num + neg_title_num != len(one_data) - 3: print("data format error, pos_title_num={}, neg_title_num={}, one_data={}" .format(pos_title_num, neg_title_num, len(one_data))) continue for x in range(pos_title_num): pos_title_ids = [ int(i) for i in one_data[3+x].split(" ")] for y in range(neg_title_num): if random.random() > sample_rate: continue neg_title_ids = [int(i) for i in one_data[3+pos_title_num+y].split(" ")] yield [query_ids, pos_title_ids, neg_title_ids, [label]] fin.close() def get_batch_reader(file_list, batch_size=128, sample_rate=0.02, trainer_id=1): def batch_reader(): res = [] idx = 0 for i in get_one_data(file_list, sample_rate): res.append(i) idx += 1 if len(res) >= batch_size: yield res res = [] return batch_reader 
def get_infer_data(file_list, sample_rate): for file in file_list: contents = [] with open(file, "r") as fin: for q in fin.readlines(): """query_ids, pos_title_ids, neg_title_ids, label""" one_data = q.split(";")[:-1] if len(one_data) < 4: print("data format error!, please check!",q) continue label = int(one_data[0]) pos_title_num, neg_title_num = int(one_data[1].split(" ")[0]), int(one_data[1].split(" ")[1]) query_ids = [int(x) for x in one_data[2].split(" ")] if pos_title_num + neg_title_num != len(one_data) - 3: print("data format error, pos_title_num={}, neg_title_num={}, one_data={}" .format(pos_title_num,neg_title_num,len(one_data))) continue for x in range(pos_title_num): pos_title_ids = [int(i) for i in one_data[3 + x].split(" ")] for y in range(neg_title_num): if random.random() > sample_rate: continue neg_title_ids = [int(i) for i in one_data[3 + pos_title_num + y].split(" ")] yield [query_ids, pos_title_ids, neg_title_ids] fin.close() def get_infer_batch_reader(file_list, batch_size=128, sample_rate=0.02, trainer_id=1): def batch_reader(): res = [] idx = 0 for i in get_infer_data(file_list, sample_rate): res.append(i) idx += 1 if len(res) >= batch_size: yield res res = [] return batch_reader
39
109
0.545221
584
4,290
3.773973
0.22774
0.076225
0.059891
0.050817
0.715517
0.715517
0.715517
0.705535
0.705535
0.703267
0
0.018895
0.346154
4,290
109
110
39.357798
0.766845
0.15338
0
0.837838
0
0
0.060364
0
0
0
0
0
0
1
0.094595
false
0
0.013514
0
0.148649
0.054054
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
09992545064487f403e07c0d5592b982f3bafe39
212
py
Python
projetos/admin.py
siqueiralex/SistemaComissaoVoluntario
1a4ba8fd37d5182156e7bb4c4204845df2707fd1
[ "MIT" ]
null
null
null
projetos/admin.py
siqueiralex/SistemaComissaoVoluntario
1a4ba8fd37d5182156e7bb4c4204845df2707fd1
[ "MIT" ]
null
null
null
projetos/admin.py
siqueiralex/SistemaComissaoVoluntario
1a4ba8fd37d5182156e7bb4c4204845df2707fd1
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import * # Register your models here. admin.site.register(Cronograma) admin.site.register(Atividade) admin.site.register(EfetivoDiario) admin.site.register(Unidade)
23.555556
34
0.816038
28
212
6.178571
0.5
0.208092
0.393064
0
0
0
0
0
0
0
0
0
0.084906
212
9
35
23.555556
0.891753
0.122642
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
099ad8518dad1d6a3df7458747dcb5b82bba5c36
42
py
Python
piecewise/monitor/__init__.py
jtbish/piecewise
d27e095e298698d83901bfd1236e0e0669261136
[ "MIT" ]
3
2019-12-04T02:28:08.000Z
2022-02-02T13:13:36.000Z
piecewise/monitor/__init__.py
jtbish/piecewise
d27e095e298698d83901bfd1236e0e0669261136
[ "MIT" ]
5
2020-01-15T04:33:36.000Z
2021-03-26T14:58:47.000Z
piecewise/monitor/__init__.py
jtbish/piecewise
d27e095e298698d83901bfd1236e0e0669261136
[ "MIT" ]
1
2020-06-04T08:55:27.000Z
2020-06-04T08:55:27.000Z
from .monitor import Monitor, NullMonitor
21
41
0.833333
5
42
7
0.8
0
0
0
0
0
0
0
0
0
0
0
0.119048
42
1
42
42
0.945946
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
09b13a36abe13835dfcebaecd4290e8a9855957c
180
py
Python
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/user/views.py
cmeadows/fbone-marrow
0c69bcafbe21c48641cc9759f2a959b9b7881ce3
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/user/views.py
cmeadows/fbone-marrow
0c69bcafbe21c48641cc9759f2a959b9b7881ce3
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/user/views.py
cmeadows/fbone-marrow
0c69bcafbe21c48641cc9759f2a959b9b7881ce3
[ "BSD-3-Clause" ]
1
2020-04-25T14:01:26.000Z
2020-04-25T14:01:26.000Z
from flask import Blueprint, render_template user = Blueprint('user', __name__, url_prefix='/user') @user.route('/') def index(): return render_template('user/index.html')
18
54
0.716667
23
180
5.304348
0.652174
0.229508
0.295082
0
0
0
0
0
0
0
0
0
0.127778
180
9
55
20
0.77707
0
0
0
0
0
0.138889
0
0
0
0
0
0
1
0.2
false
0
0.2
0.2
0.6
0.4
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
6
09b58c244a12073f6fbe4f057a9cdb4f58a173fa
98
py
Python
zipline/modelling/classifier.py
michaeljohnbennett/zipline
29321af1b472d72b759a71c9f5ba341109fc0e6d
[ "Apache-2.0" ]
1
2015-10-22T03:38:19.000Z
2015-10-22T03:38:19.000Z
zipline/modelling/classifier.py
michaeljohnbennett/zipline
29321af1b472d72b759a71c9f5ba341109fc0e6d
[ "Apache-2.0" ]
null
null
null
zipline/modelling/classifier.py
michaeljohnbennett/zipline
29321af1b472d72b759a71c9f5ba341109fc0e6d
[ "Apache-2.0" ]
null
null
null
""" classifier.py """ from zipline.modelling.term import Term class Classifier(Term): pass
9.8
39
0.704082
12
98
5.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0.173469
98
9
40
10.888889
0.851852
0.132653
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
116897ffba2caba35db0ce442d80a7df19e49b10
11,412
py
Python
psydac/api/tests/test_api_feec_3d.py
mayuri-dhote/psydac
01ddbe2d049a599684c45060912d01c2658160a3
[ "MIT" ]
20
2019-07-30T12:37:57.000Z
2022-03-09T11:35:04.000Z
psydac/api/tests/test_api_feec_3d.py
mayuri-dhote/psydac
01ddbe2d049a599684c45060912d01c2658160a3
[ "MIT" ]
98
2019-04-01T16:32:27.000Z
2022-03-21T19:30:35.000Z
psydac/api/tests/test_api_feec_3d.py
mayuri-dhote/psydac
01ddbe2d049a599684c45060912d01c2658160a3
[ "MIT" ]
7
2019-10-03T03:49:47.000Z
2022-03-01T09:11:49.000Z
# -*- coding: UTF-8 -*- from sympde.topology import Mapping from sympde.calculus import grad, dot from sympde.calculus import laplace from sympde.topology import ScalarFunctionSpace from sympde.topology import elements_of from sympde.topology import NormalVector from sympde.topology import Cube, Derham from sympde.topology import Union from sympde.expr import BilinearForm, LinearForm, integral from sympde.expr import Norm from sympde.expr import find, EssentialBC from psydac.fem.basic import FemField from psydac.api.discretization import discretize from psydac.feec.pull_push import push_3d_hcurl, push_3d_hdiv from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL, PSYDAC_BACKEND_NUMBA from psydac.linalg.utilities import array_to_stencil from psydac.linalg.iterative_solvers import cg from mpi4py import MPI import pytest import numpy as np import scipy as sc #=============================================================================== def splitting_integrator_scipy(e0, b0, M1, M2, CURL, dt, niter): CURL_T = CURL.T M1_solver = sc.sparse.linalg.splu(M1) def M1CM2_dot(b): y1 = M2.dot(b) y2 = CURL_T.dot(y1) return M1_solver.solve(y2) e_history = [e0] b_history = [b0] for ts in range(niter): b = b_history[ts] e = e_history[ts] b_new = b - dt * CURL.dot(e) e_new = e + dt * M1CM2_dot(b_new) b_history.append(b_new) e_history.append(e_new) return e_history, b_history def splitting_integrator_stencil(e0, b0, M1, M2, CURL, dt, niter): CURL_T = CURL.transpose() def M1CM2_dot(b): y1 = M2.dot(b) y2 = CURL_T.dot(y1) return cg(M1, y2, tol=1e-12)[0] e_history = [e0] b_history = [b0] for ts in range(niter): b = b_history[ts] e = e_history[ts] b_new = b - dt * CURL.dot(e) e_new = e + dt * M1CM2_dot(b_new) b_history.append(b_new) e_history.append(e_new) return e_history, b_history def evaluation_all_times(fields, x, y, z): ak_value = np.empty(len(fields), dtype = 'float') for i in range(len(fields)): ak_value[i] = fields[i](x,y,z) return ak_value 
#================================================================================== def run_maxwell_3d_scipy(logical_domain, mapping, e_ex, b_ex, ncells, degree, periodic, dt, niter): domain = mapping(logical_domain) derham = Derham(domain) u0, v0 = elements_of(derham.V0, names='u0, v0') u1, v1 = elements_of(derham.V1, names='u1, v1') u2, v2 = elements_of(derham.V2, names='u2, v2') u3, v3 = elements_of(derham.V3, names='u3, v3') a0 = BilinearForm((u0, v0), integral(domain, u0*v0)) a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1))) a2 = BilinearForm((u2, v2), integral(domain, dot(u2, v2))) a3 = BilinearForm((u3, v3), integral(domain, u3*v3)) #============================================================================== # Discrete objects: Psydac domain_h = discretize(domain, ncells=ncells, comm=MPI.COMM_WORLD) derham_h = discretize(derham, domain_h, degree=degree, periodic=periodic) a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) a2_h = discretize(a2, domain_h, (derham_h.V2, derham_h.V2), backend=PSYDAC_BACKEND_GPYCCEL) # StencilMatrix objects M1 = a1_h.assemble().tosparse().tocsc() M2 = a2_h.assemble().tosparse().tocsr() # Diff operators GRAD, CURL, DIV = derham_h.derivatives_as_matrices # Porjectors P0, P1, P2, P3 = derham_h.projectors(nquads=[5,5,5]) CURL = CURL.transform(lambda block: block.tokronstencil().tostencil()).tomatrix().tosparse().tocsr() # initial conditions e0_1 = lambda x, y, z: e_ex[0](0, x, y, z) e0_2 = lambda x, y, z: e_ex[1](0, x, y, z) e0_3 = lambda x, y, z: e_ex[2](0, x, y, z) e0 = (e0_1, e0_2, e0_3) b0_1 = lambda x, y, z : b_ex[0](0, x, y, z) b0_2 = lambda x, y, z : b_ex[1](0, x, y, z) b0_3 = lambda x, y, z : b_ex[2](0, x, y, z) b0 = (b0_1, b0_2, b0_3) # project initial conditions e0_coeff = P1(e0).coeffs b0_coeff = P2(b0).coeffs # time integrator e_history, b_history = splitting_integrator_scipy(e0_coeff.toarray(), b0_coeff.toarray(), M1, M2, CURL, dt, niter) # study of fields b_history = 
[array_to_stencil(bi, derham_h.V2.vector_space) for bi in b_history] b_fields = [FemField(derham_h.V2, bi).fields for bi in b_history] bx_fields = [bi[0] for bi in b_fields] by_fields = [bi[1] for bi in b_fields] bz_fields = [bi[2] for bi in b_fields] bx_value_fun = lambda x, y, z: evaluation_all_times(bx_fields, x, y, z) by_value_fun = lambda x, y, z: evaluation_all_times(by_fields, x, y, z) bz_value_fun = lambda x, y, z: evaluation_all_times(bz_fields, x, y, z) x,y,z = derham_h.V0.breaks x, y = 0.5, 0.5 b_values_0 = [] for zi in z: b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping) b_values_0.append(b_value_phys[0]) b_values_0 = np.array(b_values_0) time_array = np.linspace(0, dt*niter, niter + 1) tt, zz = np.meshgrid(time_array, z) b_ex_values_0 = b_ex[0](tt, x, y, zz) error = abs(b_values_0-b_ex_values_0).max() return error #================================================================================== def run_maxwell_3d_stencil(logical_domain, mapping, e_ex, b_ex, ncells, degree, periodic, dt, niter): domain = mapping(logical_domain) derham = Derham(domain) u0, v0 = elements_of(derham.V0, names='u0, v0') u1, v1 = elements_of(derham.V1, names='u1, v1') u2, v2 = elements_of(derham.V2, names='u2, v2') u3, v3 = elements_of(derham.V3, names='u3, v3') a0 = BilinearForm((u0, v0), integral(domain, u0*v0)) a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1))) a2 = BilinearForm((u2, v2), integral(domain, dot(u2, v2))) a3 = BilinearForm((u3, v3), integral(domain, u3*v3)) #============================================================================== # Discrete objects: Psydac domain_h = discretize(domain, ncells=ncells, comm=MPI.COMM_WORLD) derham_h = discretize(derham, domain_h, degree=degree, periodic=periodic) a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) a2_h = discretize(a2, domain_h, (derham_h.V2, derham_h.V2), backend=PSYDAC_BACKEND_GPYCCEL) # StencilMatrix objects M1 = 
a1_h.assemble() M2 = a2_h.assemble() # Diff operators GRAD, CURL, DIV = derham_h.derivatives_as_matrices # Porjectors P0, P1, P2, P3 = derham_h.projectors(nquads=[5,5,5]) # initial conditions e0_1 = lambda x, y, z: e_ex[0](0, x, y, z) e0_2 = lambda x, y, z: e_ex[1](0, x, y, z) e0_3 = lambda x, y, z: e_ex[2](0, x, y, z) e0 = (e0_1, e0_2, e0_3) b0_1 = lambda x, y, z : b_ex[0](0, x, y, z) b0_2 = lambda x, y, z : b_ex[1](0, x, y, z) b0_3 = lambda x, y, z : b_ex[2](0, x, y, z) b0 = (b0_1, b0_2, b0_3) # project initial conditions e0_coeff = P1(e0).coeffs b0_coeff = P2(b0).coeffs # time integrator e_history, b_history = splitting_integrator_stencil(e0_coeff, b0_coeff, M1, M2, CURL, dt, niter) # study of fields b_fields = [FemField(derham_h.V2, bi).fields for bi in b_history] bx_fields = [bi[0] for bi in b_fields] by_fields = [bi[1] for bi in b_fields] bz_fields = [bi[2] for bi in b_fields] bx_value_fun = lambda x, y, z: evaluation_all_times(bx_fields, x, y, z) by_value_fun = lambda x, y, z: evaluation_all_times(by_fields, x, y, z) bz_value_fun = lambda x, y, z: evaluation_all_times(bz_fields, x, y, z) x,y,z = derham_h.V0.breaks x, y = 0.5, 0.5 b_values_0 = [] for zi in z: b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping) b_values_0.append(b_value_phys[0]) b_values_0 = np.array(b_values_0) time_array = np.linspace(0, dt*niter, niter + 1) tt, zz = np.meshgrid(time_array, z) b_ex_values_0 = b_ex[0](tt, x, y, zz) error = abs(b_values_0-b_ex_values_0).max() return error ############################################################################### # SERIAL TESTS ############################################################################### #============================================================================== # 3D Maxwell's equations with "Collela" map #============================================================================== def test_maxwell_3d_1(): class CollelaMapping3D(Mapping): _expressions = {'x': 'k1*(x1 + 
eps*sin(2.*pi*x1)*sin(2.*pi*x2))', 'y': 'k2*(x2 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))', 'z': 'k3*x3'} _ldim = 3 _pdim = 3 M = CollelaMapping3D('M', k1=1, k2=1, k3=1, eps=0.1) logical_domain = Cube('C', bounds1=(0, 1), bounds2=(0, 1), bounds3=(0, 1)) # exact solution e_ex_0 = lambda t, x, y, z: 0 e_ex_1 = lambda t, x, y, z: -np.cos(2*np.pi*t-2*np.pi*z) e_ex_2 = lambda t, x, y, z: 0 e_ex = (e_ex_0, e_ex_1, e_ex_2) b_ex_0 = lambda t, x, y, z : np.cos(2*np.pi*t-2*np.pi*z) b_ex_1 = lambda t, x, y, z : 0 b_ex_2 = lambda t, x, y, z : 0 b_ex = (b_ex_0, b_ex_1, b_ex_2) #space parameters ncells = [2**4, 2**3, 2**5] degree = [2, 2, 2] periodic = [True, True, True] #time parameters dt = 0.5*1/max(ncells) niter = 10 T = dt*niter error = run_maxwell_3d_scipy(logical_domain, M, e_ex, b_ex, ncells, degree, periodic, dt, niter) assert abs(error - 0.04294761712765949) < 1e-9 def test_maxwell_3d_2(): class CollelaMapping3D(Mapping): _expressions = {'x': 'k1*(x1 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))', 'y': 'k2*(x2 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))', 'z': 'k3*x3'} _ldim = 3 _pdim = 3 M = CollelaMapping3D('M', k1=1, k2=1, k3=1, eps=0.1) logical_domain = Cube('C', bounds1=(0, 1), bounds2=(0, 1), bounds3=(0, 1)) # exact solution e_ex_0 = lambda t, x, y, z: 0 e_ex_1 = lambda t, x, y, z: -np.cos(2*np.pi*t-2*np.pi*z) e_ex_2 = lambda t, x, y, z: 0 e_ex = (e_ex_0, e_ex_1, e_ex_2) b_ex_0 = lambda t, x, y, z : np.cos(2*np.pi*t-2*np.pi*z) b_ex_1 = lambda t, x, y, z : 0 b_ex_2 = lambda t, x, y, z : 0 b_ex = (b_ex_0, b_ex_1, b_ex_2) #space parameters ncells = [7, 7, 7] degree = [2, 2, 2] periodic = [True, True, True] #time parameters dt = 0.5*1/max(ncells) niter = 2 T = dt*niter error = run_maxwell_3d_stencil(logical_domain, M, e_ex, b_ex, ncells, degree, periodic, dt, niter) assert abs(error - 0.24586986658559362) < 1e-9 #============================================================================== # CLEAN UP SYMPY NAMESPACE 
#============================================================================== def teardown_module(): from sympy.core import cache cache.clear_cache() def teardown_function(): from sympy.core import cache cache.clear_cache()
32.69914
118
0.572906
1,806
11,412
3.41196
0.129568
0.018825
0.025316
0.02629
0.777345
0.774911
0.767121
0.759007
0.746349
0.737098
0
0.054451
0.224325
11,412
348
119
32.793103
0.641663
0.106642
0
0.744186
0
0.018605
0.0237
0.0128
0
0
0
0
0.009302
1
0.051163
false
0
0.106977
0
0.227907
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fec208c22d9d1041117b1d183e1d441e6bf6d6c9
40
py
Python
lightRaven/agent/__init__.py
M0gician/lightRaven
edcaed1ffbfab95064fc2719f2e3f79375ce6f04
[ "MIT" ]
1
2020-12-16T07:41:44.000Z
2020-12-16T07:41:44.000Z
lightRaven/agent/__init__.py
M0gician/lightRaven
edcaed1ffbfab95064fc2719f2e3f79375ce6f04
[ "MIT" ]
null
null
null
lightRaven/agent/__init__.py
M0gician/lightRaven
edcaed1ffbfab95064fc2719f2e3f79375ce6f04
[ "MIT" ]
null
null
null
from .cem_seldonian import CEMSeldonian
20
39
0.875
5
40
6.8
1
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
3a343af5749652da771adbdd16f972a23748ed7b
47
py
Python
pybabblesdk/blockchain/__init__.py
mosaicnetworks/pybabblesdk
6fe09cbe02ed8dc674aa849723bad5336a9b9017
[ "MIT" ]
3
2019-04-24T19:42:37.000Z
2020-06-09T03:36:04.000Z
pybabblesdk/blockchain/__init__.py
mosaicnetworks/pybabblesdk
6fe09cbe02ed8dc674aa849723bad5336a9b9017
[ "MIT" ]
null
null
null
pybabblesdk/blockchain/__init__.py
mosaicnetworks/pybabblesdk
6fe09cbe02ed8dc674aa849723bad5336a9b9017
[ "MIT" ]
null
null
null
from pybabblesdk.blockchain.block import Block
23.5
46
0.87234
6
47
6.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.085106
47
1
47
47
0.953488
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
28cf36595d374834e4f9baeeee590a8bb7fc9e14
166
py
Python
arxiv_html/renders/tasks.py
arXiv/arxiv-readability
20dac4540aaf689b2ab8fdababf51e89e645f077
[ "Apache-2.0", "MIT" ]
19
2019-01-02T16:39:10.000Z
2022-02-11T12:50:27.000Z
arxiv_html/renders/tasks.py
cul-it/arxiv-readability
20dac4540aaf689b2ab8fdababf51e89e645f077
[ "Apache-2.0", "MIT" ]
2
2018-11-12T17:09:14.000Z
2018-11-12T17:10:07.000Z
arxiv_html/renders/tasks.py
cul-it/arxiv-readability
20dac4540aaf689b2ab8fdababf51e89e645f077
[ "Apache-2.0", "MIT" ]
7
2019-01-10T22:02:01.000Z
2020-12-06T16:28:22.000Z
from ..celery import app from .engrafo import run_engrafo @app.task(bind=True) def run_engrafo_task(self, *args, **kwargs): return run_engrafo(*args, **kwargs)
20.75
44
0.73494
25
166
4.72
0.56
0.254237
0
0
0
0
0
0
0
0
0
0
0.13253
166
7
45
23.714286
0.819444
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
6
e92ca4ce1f3afd72b1e63f43b5e858e59256b930
23,959
py
Python
blowdrycss/unit_tests/test_BreakpointParser.py
acnagy/test-blowdrycss
bd9603dc87dc304b811213e3e6c3c97afa7f5966
[ "MIT" ]
null
null
null
blowdrycss/unit_tests/test_BreakpointParser.py
acnagy/test-blowdrycss
bd9603dc87dc304b811213e3e6c3c97afa7f5966
[ "MIT" ]
null
null
null
blowdrycss/unit_tests/test_BreakpointParser.py
acnagy/test-blowdrycss
bd9603dc87dc304b811213e3e6c3c97afa7f5966
[ "MIT" ]
null
null
null
# python 2 from __future__ import absolute_import # builtins from unittest import TestCase, main # plugins from cssutils.css import Property # custom from blowdrycss.breakpointparser import BreakpointParser from blowdrycss_settings import px_to_em __author__ = 'chad nelson' __project__ = 'blowdrycss' class TestBreakpointParser(TestCase): def test_set_breakpoint_key(self): valid_css_classes = [ 'inline-small-up', 'inline-giant-down-i', 'green-xxsmall-only', 'padding-10-large-up', 'xsmall-down', 'medium-only', 'giant-up', 'giant-only-i', 'display-large-down', ] names = ['display', 'display', 'color', 'padding', 'display', 'display', 'display', 'display', 'display', ] values = ['inherit', 'inherit', 'green', '10', 'inherit', 'inherit', 'inherit', 'inherit', 'inherit', ] priorities = ['', 'important', '', '', '', '', '', 'important', ''] expected = ['-small', '-giant', '-xxsmall', '-large', '-xsmall', '-medium', '-giant', '-giant', '-large', ] for i, css_class in enumerate(valid_css_classes): css_property = Property(name=names[i], value=values[i], priority=priorities[i]) breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) breakpoint_parser.set_breakpoint_key() self.assertEqual(breakpoint_parser.breakpoint_key, expected[i]) def test_set_breakpoint_key_ValueError(self): invalid_css_classes = ['inline-small', 'inline-down', 'custom-class', '-xsmall-', '-xxlarge-up'] names = ['display', 'display', 'padding', 'invalid', 'invalid', ] values = ['inherit', 'inherit', '10', 'invalid', 'invalid', ] for i, css_class in enumerate(invalid_css_classes): css_property = Property(name=names[i], value=values[i], priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) self.assertFalse(breakpoint_parser.is_breakpoint) def test_set_limit_key(self): valid_css_classes = ['inline-small-up', 'inline-giant-down-i', 'green-xxsmall-only', 'padding-10-large-up', ] names = ['display', 'display', 'color', 'padding', ] values 
= ['inline', 'inline', 'green', '10', ] priorities = ['', 'important', '', '', ] expected = ['-up', '-down', '-only', '-up', ] for i, css_class in enumerate(valid_css_classes): css_property = Property(name=names[i], value=values[i], priority=priorities[i]) breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) breakpoint_parser.set_limit_key() self.assertEqual(breakpoint_parser.limit_key, expected[i]) def test_set_limit_key_ValueError(self): invalid_css_classes = ['inline-small-', 'inline-downward', '-only-', 'custom-class', '-up-', ] names = ['display', 'display', 'color', 'padding', 'invalid', ] values = ['inline', 'inline', 'green', '10', 'invalid', ] for i, css_class in enumerate(invalid_css_classes): css_property = Property(name=names[i], value=values[i], priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) self.assertFalse(breakpoint_parser.is_breakpoint) def test_set_custom_breakpoint_key_Valid(self): valid_css_classes = ( 'padding-25-820-up', 'display-480-down', 'margin-5-2-5-2-1000-up', 'display-960-up-i', 'display-3_2rem-down' ) names = ['padding', 'display', 'margin', 'display', 'display', ] values = ['25', 'none', '5-2-5-2', 'none', 'none', ] priorities = ['', '', '', 'important', '', ] limit_key = ('-up', '-down', '-up', '-up', '-down', ) breakpoint = ('-820', '-480', '-1000', '-960', '-3_2rem', ) converted_breakpoint = (px_to_em('820'), px_to_em('480'), px_to_em('1000'), px_to_em('960'), '3.2rem', ) for i, css_class in enumerate(valid_css_classes): css_property = Property(name=names[i], value=values[i], priority=priorities[i]) breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) self.assertTrue(breakpoint_parser.is_breakpoint, msg=breakpoint_parser.css_class) self.assertEqual( breakpoint_parser.breakpoint_dict['custom'][limit_key[i]], converted_breakpoint[i], msg=converted_breakpoint[i] + ' dict: ' + 
breakpoint_parser.breakpoint_dict['custom'][limit_key[i]] ) self.assertEqual( breakpoint_parser.breakpoint_dict['custom']['breakpoint'], breakpoint[i], msg=breakpoint[i] + ' dict: ' + breakpoint_parser.breakpoint_dict['custom']['breakpoint'] ) def test_set_custom_breakpoint_key_Invalid(self): invalid_css_classes = ( '-820-up', '480-down', 'margin-5-2-5-2-1000-', 'display-960-i', 'display-3_2rem', ) names = ['padding', 'display', 'margin', 'display', 'display', ] values = ['25', 'none', '5-2-5-2', 'none', 'none', ] priorities = ['', '', '', 'important', '', ] limit_key = ('-up', '-down', '-up', '-up', '-down', ) breakpoint = None for i, css_class in enumerate(invalid_css_classes): css_property = Property(name=names[i], value=values[i], priority=priorities[i]) breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) self.assertFalse(breakpoint_parser.is_breakpoint, msg=breakpoint_parser.css_class) self.assertEqual( breakpoint_parser.breakpoint_dict['custom'][limit_key[i]], breakpoint, msg=str(breakpoint) + ' dict: ' + str(breakpoint_parser.breakpoint_dict['custom'][limit_key[i]]) ) self.assertEqual( breakpoint_parser.breakpoint_dict['custom']['breakpoint'], breakpoint, msg=str(breakpoint) + ' dict: ' + str(breakpoint_parser.breakpoint_dict['custom']['breakpoint']) ) def test_set_custom_breakpoint_key_ONLY(self): invalid_css_class = 'display-3_2rem-920-only' name = 'display' value = 'none' priority = '' limit_key = '-only' css_property = Property(name=name, value=value, priority=priority) breakpoint_parser = BreakpointParser(css_class=invalid_css_class, css_property=css_property) self.assertFalse(breakpoint_parser.is_breakpoint, msg=breakpoint_parser.css_class) try: should_not_exist = breakpoint_parser.breakpoint_dict['custom'][limit_key] self.assertTrue(False, msg=should_not_exist) except KeyError: self.assertTrue(True) def test_strip_breakpoint_limit(self): valid_css_classes = [ 'inline-small-up', 'inline-giant-down', 
'green-xxsmall-only', 'padding-10-large-up', 'xlarge-only', 'large-down', 'xsmall-up', 'padding-25-820-up', 'display-480-down', 'margin-5-2-5-2-1000-up', 'display-960-up-i', 'display-3_2rem-down' ] names = [ 'display', 'display', 'color', 'padding', 'display', 'display', 'display', 'padding', 'display', 'margin', 'display', 'display', ] values = [ 'inline', 'inline', 'green', '10', 'none', 'none', 'none', '25', 'none', '5-2-5-2', 'none', 'none', ] expected = [ 'inline', 'inline', 'green', 'padding-10', '', '', '', 'padding-25', 'display', 'margin-5-2-5-2', 'display-i', 'display', ] for i, css_class in enumerate(valid_css_classes): css_property = Property(name=names[i], value=values[i], priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) clean_css_class = breakpoint_parser.strip_breakpoint_limit() self.assertEqual(clean_css_class, expected[i]) def test_is_display_True(self): valid_css_classes = ('display-small-down', 'medium-only', 'giant-up', 'display-720-up', 'display-369-down') names = ('display', 'display', 'display', 'display', 'display', ) values = ('none', 'none', 'none', 'none', 'none', ) for i, css_class in enumerate(valid_css_classes): css_property = Property(name=names[i], value=values[i], priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) self.assertTrue(breakpoint_parser.is_display(), msg=css_class) def test_is_display_False(self): invalid_css_classes = ( 'inline-small-up', 'inline-giant-down', 'green-xxsmall-only', 'padding-10-large-up', 'padding-25-820-up', 'margin-5-2-5-2-1000-up', ) names = ('display', 'display', 'color', 'padding', 'padding', 'margin', ) values = ('inline', 'inline', 'green', '10', '25', '5-2-5-2', ) for i, css_class in enumerate(invalid_css_classes): css_property = Property(name=names[i], value=values[i], priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) 
self.assertFalse(breakpoint_parser.is_display(), msg=css_class) def test_css_for_only_display(self): css_class = 'display-large-only' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 45.0625em) {\n' + '\t.display-large-only {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' + '@media only screen and (min-width: 64.0em) {\n' + '\t.display-large-only {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_only() self.assertEqual(css, expected) def test_css_for_only_display_shorthand(self): css_class = 'large-only' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 45.0625em) {\n' + '\t.large-only {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' + '@media only screen and (min-width: 64.0em) {\n' + '\t.large-only {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_only() self.assertEqual(css, expected) def test_css_for_only_display_shorthand_important(self): css_class = 'large-only-i' name = 'display' value = 'none' priority = 'important' expected = ( '@media only screen and (max-width: 45.0625em) {\n' + '\t.large-only-i {\n' + '\t\tdisplay: none !important;\n' + '\t}\n' + '}\n\n' + '@media only screen and (min-width: 64.0em) {\n' + '\t.large-only-i {\n' + '\t\tdisplay: none !important;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority=priority) breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_only() self.assertEqual(css, expected, msg=css) def test_css_for_only_general_usage(self): css_class = 'padding-100-large-only' name = 'padding' value = px_to_em('100') 
expected = ( '@media only screen and (min-width: 45.0625em) and (max-width: 64.0em) {\n' + '\t.padding-100-large-only {\n' + '\t\tpadding: 6.25em;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_only() self.assertEqual(css, expected) def test_css_for_only_general_usage_important(self): css_class = 'padding-100-large-only-i' name = 'padding' value = px_to_em('100') priority = 'important' expected = ( '@media only screen and (min-width: 45.0625em) and (max-width: 64.0em) {\n' + '\t.padding-100-large-only-i {\n' + '\t\tpadding: 6.25em !important;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority=priority) breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_only() self.assertEqual(css, expected) def test_css_for_only_wrong_limit_key(self): css_class = 'padding-100-large-only' name = 'padding' value = px_to_em('100') expected = '' css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) breakpoint_parser.limit_key = '-up' # Change to WRONG LIMIT KEY css = breakpoint_parser.css_for_only() self.assertEqual(css, expected) def test_css_for_down_display(self): css_class = 'display-medium-down' name = 'display' value = 'none' expected = ( '@media only screen and (min-width: 45.0em) {\n' + '\t.display-medium-down {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_down() self.assertEqual(css, expected) def test_css_for_down_display_shorthand(self): css_class = 'medium-down' name = 'display' value = 'none' expected = ( '@media only screen and (min-width: 45.0em) {\n' + 
'\t.medium-down {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_down() self.assertEqual(css, expected) def test_css_for_down_display_custom(self): css_class = 'display-369-down' name = 'display' value = 'none' expected = ( '@media only screen and (min-width: 23.0625em) {\n' + '\t.display-369-down {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_down() self.assertEqual(css, expected) def test_css_for_down_general_usage(self): css_class = 'padding-100-medium-down' name = 'padding' value = px_to_em('100') expected = ( '@media only screen and (max-width: 45.0em) {\n' + '\t.padding-100-medium-down {\n' + '\t\tpadding: 6.25em;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_down() self.assertEqual(css, expected) def test_css_for_down_wrong_limit_key(self): css_class = 'padding-100-medium-down' name = 'padding' value = px_to_em('100') expected = '' css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) breakpoint_parser.limit_key = '-only' # Change to WRONG LIMIT KEY. 
css = breakpoint_parser.css_for_down() self.assertEqual(css, expected) def test_css_for_up_display(self): css_class = 'display-small-up' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 15.0625em) {\n' + '\t.display-small-up {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_up() self.assertEqual(css, expected) def test_css_for_up_display_shorthand(self): css_class = 'small-up' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 15.0625em) {\n' + '\t.small-up {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_up() self.assertEqual(css, expected) def test_css_for_up_display_custom(self): css_class = 'display-720-up' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 45.0em) {\n' + '\t.display-720-up {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_up() self.assertEqual(css, expected) def test_css_for_up_general_usage(self): css_class = 'padding-100-small-up' name = 'padding' value = px_to_em('100') expected = ( '@media only screen and (min-width: 15.0625em) {\n' + '\t.padding-100-small-up {\n' + '\t\tpadding: 6.25em;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.css_for_up() self.assertEqual(css, expected) def test_css_for_up_wrong_limit_key(self): css_class = 'padding-100-small-up' name = 'padding' 
value = px_to_em('100') expected = '' css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) breakpoint_parser.limit_key = '-only' # Change to WRONG LIMIT KEY css = breakpoint_parser.css_for_up() self.assertEqual(css, expected) # build_media_query def test_build_media_query_only_display(self): css_class = 'display-large-only' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 45.0625em) {\n' + '\t.display-large-only {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' + '@media only screen and (min-width: 64.0em) {\n' + '\t.display-large-only {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) def test_build_media_query_only_general_usage(self): css_class = 'padding-100-large-only' name = 'padding' value = px_to_em('100') expected = ( '@media only screen and (min-width: 45.0625em) and (max-width: 64.0em) {\n' + '\t.padding-100-large-only {\n' + '\t\tpadding: 6.25em;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) def test_build_media_query_down_display(self): css_class = 'display-medium-down' name = 'display' value = 'none' expected = ( '@media only screen and (min-width: 45.0em) {\n' + '\t.display-medium-down {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) def test_build_media_query_down_general_usage(self): 
css_class = 'padding-100-medium-down' name = 'padding' value = px_to_em('100') expected = ( '@media only screen and (max-width: 45.0em) {\n' + '\t.padding-100-medium-down {\n' + '\t\tpadding: 6.25em;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) def test_build_media_query_up_display(self): css_class = 'display-small-up' name = 'display' value = 'none' expected = ( '@media only screen and (max-width: 15.0625em) {\n' + '\t.display-small-up {\n' + '\t\tdisplay: none;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) def test_build_media_query_up_general_usage(self): css_class = 'padding-100-small-up' name = 'padding' value = px_to_em('100') expected = ( '@media only screen and (min-width: 15.0625em) {\n' + '\t.padding-100-small-up {\n' + '\t\tpadding: 6.25em;\n' + '\t}\n' + '}\n\n' ) css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) def test_build_media_query_invalid_limit_key(self): css_class = 'padding-100-small-up' name = 'padding' value = px_to_em('100') expected = '' css_property = Property(name=name, value=value, priority='') breakpoint_parser = BreakpointParser(css_class=css_class, css_property=css_property) breakpoint_parser.limit_key = 'invalid_key' css = breakpoint_parser.build_media_query() self.assertEqual(css, expected) if __name__ == '__main__': main()
42.937276
120
0.588589
2,781
23,959
4.841064
0.046027
0.062987
0.053109
0.056377
0.895194
0.863701
0.838743
0.823739
0.816311
0.797371
0
0.023065
0.267123
23,959
557
121
43.014363
0.743664
0.005384
0
0.694611
0
0.005988
0.213803
0.023131
0
0
0
0
0.077844
1
0.065868
false
0
0.031936
0
0.0998
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
aaf350bea740d89fc42cf4f9697f8847e91e4497
143
py
Python
interface/backend/static/views.py
rahit/django-skeleton
b9d36da546307e3e28899a6e5054cc3f2229184d
[ "MIT" ]
1
2017-10-09T10:12:47.000Z
2017-10-09T10:12:47.000Z
interface/backend/static/views.py
rahit/django-skeleton
b9d36da546307e3e28899a6e5054cc3f2229184d
[ "MIT" ]
null
null
null
interface/backend/static/views.py
rahit/django-skeleton
b9d36da546307e3e28899a6e5054cc3f2229184d
[ "MIT" ]
null
null
null
from django.conf import settings from django.shortcuts import render def home(request): return render(request, '../frontend/index.html')
20.428571
52
0.762238
19
143
5.736842
0.736842
0.183486
0
0
0
0
0
0
0
0
0
0
0.132867
143
6
53
23.833333
0.879032
0
0
0
0
0
0.153846
0.153846
0
0
0
0
0
1
0.25
false
0
0.5
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
c9733ec60ea5acdcecfae5d12fddd4cfd198270b
17,097
py
Python
tests/test_config_pools.py
discogestalt/stingray
db55e0cd279cb8be8cf9716bc3e1e7f73cda2c1d
[ "Apache-2.0" ]
null
null
null
tests/test_config_pools.py
discogestalt/stingray
db55e0cd279cb8be8cf9716bc3e1e7f73cda2c1d
[ "Apache-2.0" ]
null
null
null
tests/test_config_pools.py
discogestalt/stingray
db55e0cd279cb8be8cf9716bc3e1e7f73cda2c1d
[ "Apache-2.0" ]
1
2019-05-28T20:16:59.000Z
2019-05-28T20:16:59.000Z
import pytest from stingray.apiclient import StingrayAPIClientError from stingray.config.pools import Pools, Pool import config_pool_responses as cpr stingray_args = dict( host='stingray', port='9070', user='admin', password='admin', api_version='5.2', ) stingray_args_old = dict( host='stingray', port='9070', user='admin', password='admin', api_version='2.0', ) pools_base = 'https://stingray:9070/api/tm/5.2/config/active/pools/' stats_base = 'https://stingray:9070/api/tm/5.2/status/local_tm/statistics/' pools_base_old = 'https://stingray:9070/api/tm/2.0/config/active/pools/' stats_base_old = 'https://stingray:9070/api/tm/2.0/status/local_tm/statistics/' pytest_plugins = "pytest-responses" def base_response(responses): responses.add( responses.GET, pools_base, json=cpr.list_pools, ) def base_response_old(responses): responses.add( responses.GET, pools_base_old, json=cpr.list_pools_old, ) class TestStingrayPools(object): def test_config_pools_init(self, responses): base_response(responses) pools = Pools(**stingray_args) assert isinstance(pools, Pools) assert len(pools.pools) == 3 assert pools.pools['Pool1'] == "/api/tm/5.2/config/active/pools/Pool1" def test_config_pools_init_old(self, responses): base_response_old(responses) pools = Pools(**stingray_args_old) assert isinstance(pools, Pools) assert pools.api_version == "2.0" def test_config_pools_get(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) pools = Pools(**stingray_args) pool = pools.get('Pool1') node1 = pool.properties['basic']['nodes_table'][0] assert isinstance(pool, Pool) assert node1['node'] == "10.0.0.1:8000" assert node1['state'] == "active" assert node1['state'] == pool.nodes['10.0.0.1:8000']['state'] def test_config_pools_get_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_old ) pools = Pools(**stingray_args_old) assert pools.api_version == 
"2.0" pool = pools.get('Pool1') assert isinstance(pool, Pool) assert pool.api_version == "2.0" node1 = pool.properties['basic']['nodes'][0] assert node1 == "10.0.0.1:8000" assert pool.nodes[node1]['state'] == "active" def test_config_pools_add(self, responses): base_response(responses) responses.add( responses.PUT, '{0}Pool4'.format(pools_base), json=cpr.get_pool ) pools = Pools(**stingray_args) new_pool = pools.add('Pool4', nodes=['10.0.0.1:8000']) assert isinstance(new_pool, Pool) assert new_pool.nodes.get('10.0.0.1:8000', False) assert 'Pool4' in pools.pools def test_config_pools_add_old(self, responses): base_response_old(responses) responses.add( responses.PUT, '{0}Pool4'.format(pools_base_old), json=cpr.get_pool_old ) pools = Pools(**stingray_args_old) new_pool = pools.add('Pool4', nodes=['10.0.0.1:8000', '10.0.0.2:8000']) assert isinstance(new_pool, Pool) assert new_pool.nodes.get('10.0.0.1:8000', False) assert new_pool.properties['basic']['nodes'] == ['10.0.0.1:8000', '10.0.0.2:8000'] assert 'Pool4' in pools.pools def test_config_pools_add_no_nodes(self, responses): base_response(responses) with pytest.raises( StingrayAPIClientError, match="No nodes specified, cannot create pool"): pools = Pools(**stingray_args) pools.add('Pool5') def test_config_pools_add_bad_arg(self, responses): base_response(responses) with pytest.raises( StingrayAPIClientError, match="Nodes must be specified as a list"): pools = Pools(**stingray_args) pools.add('Pool5', '10.0.0.2:8000') def test_config_pools_delete(self, responses): base_response(responses) responses.add( responses.DELETE, '{0}Pool1'.format(pools_base), status=204 ) pools = Pools(**stingray_args) del_response = pools.delete('Pool1') assert del_response['success'] == "Resource has been removed" assert 'Pool1' not in pools.pools class TestStingrayPool(object): def test_config_pool_nodes_status(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) 
responses.add( responses.GET, '{0}nodes/node/10.0.0.1:8000'.format(stats_base), json=cpr.nodes_status ) pools = Pools(**stingray_args) pool = pools.get('Pool1') nodes_status = pool.nodes_status() assert nodes_status['10.0.0.1:8000']['state'] == "active" assert nodes_status['10.0.0.1:8000']['health'] == "alive" def test_config_pool_nodes_status_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_old ) responses.add( responses.GET, '{0}nodes/node/10.0.0.1:8000'.format(stats_base_old), json=cpr.nodes_status ) responses.add( responses.GET, '{0}nodes/node/10.0.0.2:8000'.format(stats_base_old), json=cpr.nodes_status ) pools = Pools(**stingray_args_old) pool = pools.get('Pool1') nodes_status = pool.nodes_status() assert nodes_status['10.0.0.1:8000']['state'] == "active" assert nodes_status['10.0.0.2:8000']['health'] == "alive" def test_config_pool_add_node(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base), json=cpr.two_nodes ) responses.add( responses.GET, '{0}nodes/node/10.0.0.1:8000'.format(stats_base), json=cpr.nodes_status ) responses.add( responses.GET, '{0}nodes/node/10.0.0.2:8000'.format(stats_base), json=cpr.nodes_status ) pools = Pools(**stingray_args) pool = pools.get('Pool1') assert len(pool.nodes) == 1 nodes_status = pool.add_node('10.0.0.2:8000') assert '10.0.0.2:8000' in nodes_status assert nodes_status['10.0.0.1:8000']['state'] == "active" assert nodes_status['10.0.0.2:8000']['state'] == "active" assert len(pool.nodes) == 2 assert len(pool.properties['basic']['nodes_table']) == 2 def test_config_pool_add_node_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_old ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_add_old ) for i 
in range(1, 4): responses.add( responses.GET, '{0}nodes/node/10.0.0.{1}:8000'.format(stats_base_old, i), json=cpr.nodes_status ) pools = Pools(**stingray_args_old) pool = pools.get('Pool1') assert len(pool.nodes) == 2 nodes_status = pool.add_node('10.0.0.3:8000') assert nodes_status['10.0.0.3:8000']['state'] == "active" assert '10.0.0.3:8000' in pool.properties['basic']['nodes'] assert len(pool.nodes) == 3 assert len(pool.properties['load_balancing']['node_weighting']) == 3 def test_config_pool_drain_node(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base), json=cpr.drain_node ) responses.add( responses.GET, '{0}nodes/node/10.0.0.1:8000'.format(stats_base), json=cpr.nodes_status ) pools = Pools(**stingray_args) pool = pools.get('Pool1') assert pool.nodes['10.0.0.1:8000']['state'] == "active" nodes_status = pool.drain_node('10.0.0.1:8000') assert nodes_status['10.0.0.1:8000']['state'] == "draining" def test_config_pool_drain_node_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_add_old ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base_old), json=cpr.drain_node_old ) for i in range(1, 4): responses.add( responses.GET, '{0}nodes/node/10.0.0.{1}:8000'.format(stats_base_old, i), json=cpr.nodes_status ) pools = Pools(**stingray_args_old) pool = pools.get('Pool1') assert pool.nodes['10.0.0.1:8000']['state'] == "active" nodes_status = pool.drain_node('10.0.0.1:8000') assert nodes_status['10.0.0.1:8000']['state'] == "draining" assert '10.0.0.1:8000' in pool.properties['basic']['draining'] def test_config_pool_drain_node_bad(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) pools = Pools(**stingray_args) pool = pools.get('Pool1') with pytest.raises( StingrayAPIClientError, 
match="Node bad_node is not a member of this pool"): pool.drain_node('bad_node') def test_config_pool_disable_node(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.drain_node ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base), json=cpr.disable_node ) pools = Pools(**stingray_args) pool = pools.get('Pool1') assert pool.nodes['10.0.0.1:8000']['state'] == "draining" nodes_status = pool.disable_node('10.0.0.1:8000') assert nodes_status['10.0.0.1:8000']['state'] == "disabled" def test_config_pool_disable_node_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.drain_node_old ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base_old), json=cpr.disable_node_old ) for i in range(2, 4): responses.add( responses.GET, '{0}nodes/node/10.0.0.{1}:8000'.format(stats_base_old, i), json=cpr.nodes_status ) pools = Pools(**stingray_args_old) pool = pools.get('Pool1') assert pool.nodes['10.0.0.1:8000']['state'] == "draining" nodes_status = pool.disable_node('10.0.0.1:8000') assert nodes_status['10.0.0.1:8000']['state'] == "disabled" assert '10.0.0.1:8000' in pool.properties['basic']['disabled'] def test_config_pool_disable_node_bad(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.drain_node ) pools = Pools(**stingray_args) pool = pools.get('Pool1') with pytest.raises( StingrayAPIClientError, match="Node bad_node is not a member of this pool"): pool.disable_node('bad_node') def test_config_pool_enable_node(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.disable_node ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) responses.add( responses.GET, '{0}nodes/node/10.0.0.1:8000'.format(stats_base), json=cpr.nodes_status ) pools = Pools(**stingray_args) pool = pools.get('Pool1') 
assert pool.nodes['10.0.0.1:8000']['state'] == "disabled" nodes_status = pool.enable_node('10.0.0.1:8000') assert nodes_status['10.0.0.1:8000']['state'] == "active" assert nodes_status['10.0.0.1:8000']['health'] == "alive" def test_config_pool_enable_node_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.disable_node_old ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_add_old ) for i in range(1, 4): responses.add( responses.GET, '{0}nodes/node/10.0.0.{1}:8000'.format(stats_base_old, i), json=cpr.nodes_status ) pools = Pools(**stingray_args_old) pool = pools.get('Pool1') assert pool.nodes['10.0.0.1:8000']['state'] == "disabled" assert '10.0.0.1:8000' in pool.properties['basic']['disabled'] nodes_status = pool.enable_node('10.0.0.1:8000') assert nodes_status['10.0.0.1:8000']['state'] == "active" assert pool.nodes['10.0.0.1:8000']['state'] == "active" assert '10.0.0.1:8000' in pool.properties['basic']['nodes'] def test_config_pool_enable_node_bad(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.disable_node ) pools = Pools(**stingray_args) pool = pools.get('Pool1') with pytest.raises( StingrayAPIClientError, match="Node bad_node is not a member of this pool"): pool.enable_node('bad_node') def test_config_pool_delete_node(self, responses): base_response(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base), json=cpr.two_nodes ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base), json=cpr.get_pool ) responses.add( responses.GET, '{0}nodes/node/10.0.0.1:8000'.format(stats_base), json=cpr.nodes_status ) pools = Pools(**stingray_args) pool = pools.get('Pool1') assert len(pool.nodes) == 2 nodes_status = pool.delete_node('10.0.0.2:8000') assert '10.0.0.2:8000' not in nodes_status assert nodes_status['10.0.0.1:8000']['state'] == "active" assert len(pool.nodes) == 1 assert 
len(pool.properties['basic']['nodes_table']) == 1 def test_config_pool_delete_node_old(self, responses): base_response_old(responses) responses.add( responses.GET, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_add_old ) responses.add( responses.PUT, '{0}Pool1'.format(pools_base_old), json=cpr.get_pool_old ) for i in range(1, 3): responses.add( responses.GET, '{0}nodes/node/10.0.0.{1}:8000'.format(stats_base_old, i), json=cpr.nodes_status ) pools = Pools(**stingray_args_old) pool = pools.get('Pool1') assert len(pool.nodes) == 3 assert len(pool.properties['basic']['nodes']) == 3 assert '10.0.0.3:8000' in pool.properties['basic']['nodes'] nodes_status = pool.delete_node('10.0.0.3:8000') assert sorted(nodes_status.keys()) == ['10.0.0.1:8000', '10.0.0.2:8000'] assert len(pool.nodes) == 2 assert len(pool.properties['basic']['nodes']) == 2 assert '10.0.0.3:8000' not in pool.nodes assert '10.0.0.3:8000' not in pool.properties['basic']['nodes']
30.260177
90
0.567409
2,133
17,097
4.368026
0.052508
0.021895
0.029194
0.026296
0.914779
0.864334
0.833745
0.773318
0.744231
0.725019
0
0.06594
0.297479
17,097
564
91
30.31383
0.709766
0
0
0.683628
0
0
0.147745
0.023279
0
0
0
0
0.152655
1
0.057522
false
0.004425
0.00885
0
0.070796
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a30f7b07498ba34789452b8e6180953841011a57
5,857
py
Python
authors/apps/notification/test/test_notifications.py
andela/ah-backend-prime
0708463d4565a4977a5a5dcb839f1dfed52fdc90
[ "BSD-3-Clause" ]
1
2019-09-19T14:30:05.000Z
2019-09-19T14:30:05.000Z
authors/apps/notification/test/test_notifications.py
e-ian/authors-haven-frontend
05829c8088ca49ef2cf0863dc87ec55b44b13534
[ "BSD-3-Clause" ]
22
2019-03-25T16:10:53.000Z
2022-03-11T23:44:21.000Z
authors/apps/notification/test/test_notifications.py
e-ian/authors-haven-frontend
05829c8088ca49ef2cf0863dc87ec55b44b13534
[ "BSD-3-Clause" ]
6
2019-03-25T09:39:39.000Z
2021-03-11T23:54:12.000Z
from django.urls import reverse from django.shortcuts import get_object_or_404 from rest_framework import status from authors.apps.authentication.tests.test_data import VALID_USER_DATA from authors.apps.articles.tests.test_data import VALID_ARTICLE from authors.apps.notification.test.base import NotificationBaseTest from authors.apps.notification.test.test_data import VALID_USER_DATA_2 from authors.apps.notification.models import Notification class TestNotifications(NotificationBaseTest): def test_get_all_notifications(self): token = self.create_user(VALID_USER_DATA) token2 = self.create_user(VALID_USER_DATA_2) self.client.post( reverse('follow-profile', kwargs={'username': 'anyatijude'}), HTTP_AUTHORIZATION=token2 ) response = self.client.post( reverse('articles'), data=VALID_ARTICLE, format='json', HTTP_AUTHORIZATION=token ) self.client.post( reverse('favorite-article', kwargs={'slug': response.data['article']['slug']}), format='json' ) self.client.post(reverse( 'comments', kwargs={'slug': response.data['article']['slug'], 'id': 0}), data={"body": "the was lms was fine"}, format='json') response = self.client.get( self.notification_url, format='json', HTTP_AUTHORIZATION=token2 ) self.assertEquals(response.status_code, status.HTTP_200_OK) def test_get_single_unread_notification(self): """ test that returns all the unread notifications """ token = self.create_user(VALID_USER_DATA) token2 = self.create_user(VALID_USER_DATA_2) self.client.post( reverse('follow-profile', kwargs={'username': 'anyatijude'}), HTTP_AUTHORIZATION=token2 ) response_data = self.client.post( reverse('articles'), data=VALID_ARTICLE, format='json', HTTP_AUTHORIZATION=token ) self.client.post( reverse('favorite-article', kwargs={'slug': response_data.data['article']['slug']}), format='json' ) self.client.post(reverse( 'comments', kwargs={'slug': response_data.data['article']['slug'], 'id': 0}), data={"body": "the was lms was fine"}, format='json') notifications = get_object_or_404(Notification) 
response = self.client.get( reverse('notification-detail', kwargs={'pk': notifications.pk}), format='json', HTTP_AUTHORIZATION=token2 ) self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.data, response.data) def test_get_unread_notifications(self): """ test that returns all the unread notifications """ token = self.create_user(VALID_USER_DATA) token2 = self.create_user(VALID_USER_DATA_2) self.client.post( reverse('follow-profile', kwargs={'username': 'anyatijude'}), HTTP_AUTHORIZATION=token2 ) response_data = self.client.post( reverse('articles'), data=VALID_ARTICLE, format='json', HTTP_AUTHORIZATION=token ) self.client.post( reverse('favorite-article', kwargs={'slug': response_data.data['article']['slug']}), format='json' ) self.client.post(reverse( 'comments', kwargs={'slug': response_data.data['article']['slug'], 'id': 0}), data={"body": "the was lms was fine"}, format='json') response = self.client.get( reverse('notifications-unread'), format='json', HTTP_AUTHORIZATION=token2 ) self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.data, response.data) def test_single_notifications_not_found(self): """ test that returns all the unread notifications """ token = self.create_user(VALID_USER_DATA) token2 = self.create_user(VALID_USER_DATA_2) self.client.post( reverse('follow-profile', kwargs={'username': 'anyatijude'}), HTTP_AUTHORIZATION=token2 ) response_data = self.client.post( reverse('articles'), data=VALID_ARTICLE, format='json', HTTP_AUTHORIZATION=token ) self.client.post( reverse('favorite-article', kwargs={'slug': response_data.data['article']['slug']}), format='json' ) self.client.post(reverse( 'comments', kwargs={'slug': response_data.data['article']['slug'], 'id': 0}), data={"body": "the was lms was fine"}, format='json') response = self.client.get( reverse('notification-detail', kwargs={'pk': 50}), format='json', HTTP_AUTHORIZATION=token2 ) self.assertEquals(response.status_code, 
status.HTTP_404_NOT_FOUND) self.assertEquals(response.data['error'], 'notifications not available') def test_in_app_and_email_notification_disabled(self): token = self.create_user(VALID_USER_DATA) response = self.client.put(reverse('notification-disable'), HTTP_AUTHORIZATION=token, format='json' ) self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.data['error'], "disabled notifications app")
38.032468
91
0.58938
596
5,857
5.607383
0.137584
0.062837
0.067026
0.100539
0.802513
0.763914
0.747756
0.747756
0.735787
0.711849
0
0.010651
0.29469
5,857
153
92
38.281046
0.798354
0.023903
0
0.676471
0
0
0.12272
0
0
0
0
0
0.066176
1
0.036765
false
0
0.058824
0
0.102941
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a317fce5b37f463e15876a2aad89571583d1bf2d
56
py
Python
testing_editable_dots/main.py
AndydeCleyre/1576
3e0bb69c32ca343e4b21e19e44aead526efa09b6
[ "WTFPL" ]
null
null
null
testing_editable_dots/main.py
AndydeCleyre/1576
3e0bb69c32ca343e4b21e19e44aead526efa09b6
[ "WTFPL" ]
null
null
null
testing_editable_dots/main.py
AndydeCleyre/1576
3e0bb69c32ca343e4b21e19e44aead526efa09b6
[ "WTFPL" ]
1
2022-02-17T02:59:50.000Z
2022-02-17T02:59:50.000Z
def main(): import ruamel.yaml print("success")
14
22
0.625
7
56
5
1
0
0
0
0
0
0
0
0
0
0
0
0.232143
56
3
23
18.666667
0.813953
0
0
0
0
0
0.125
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
a342e6e0a74b8f0762677cdcad953a4a9a65832c
230
py
Python
app/wrapper/pynubank_wrapper/providers/__init__.py
brunoanhaia/budget-planner
edb030591fd8425e1e4132f869693bb10b941771
[ "MIT" ]
null
null
null
app/wrapper/pynubank_wrapper/providers/__init__.py
brunoanhaia/budget-planner
edb030591fd8425e1e4132f869693bb10b941771
[ "MIT" ]
2
2022-03-02T14:10:53.000Z
2022-03-17T22:56:25.000Z
app/wrapper/pynubank_wrapper/providers/__init__.py
brunoanhaia/budget-planner
edb030591fd8425e1e4132f869693bb10b941771
[ "MIT" ]
null
null
null
import pathlib import sys sys.path.append(str(pathlib.Path(__file__).parent)) from .database_provider import DatabaseProvider from .cache_data_provider import CacheDataProvider from .nubank_api_provider import NuBankApiProvider
25.555556
51
0.86087
29
230
6.517241
0.62069
0.222222
0
0
0
0
0
0
0
0
0
0
0.082609
230
8
52
28.75
0.895735
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.833333
0
0.833333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a36304a10ade9cff91ec6a08271ad7f1a3698961
3,241
py
Python
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QBrush.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
1
2020-04-20T02:27:20.000Z
2020-04-20T02:27:20.000Z
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QBrush.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
null
null
null
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QBrush.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
null
null
null
# encoding: utf-8 # module PySide.QtGui # from C:\Python27\lib\site-packages\PySide\QtGui.pyd # by generator 1.147 # no doc # imports import PySide.QtCore as __PySide_QtCore import Shiboken as __Shiboken class QBrush(__Shiboken.Object): # no doc def color(self, *args, **kwargs): # real signature unknown pass def gradient(self, *args, **kwargs): # real signature unknown pass def isOpaque(self, *args, **kwargs): # real signature unknown pass def matrix(self, *args, **kwargs): # real signature unknown pass def setColor(self, *args, **kwargs): # real signature unknown pass def setMatrix(self, *args, **kwargs): # real signature unknown pass def setStyle(self, *args, **kwargs): # real signature unknown pass def setTexture(self, *args, **kwargs): # real signature unknown pass def setTextureImage(self, *args, **kwargs): # real signature unknown pass def setTransform(self, *args, **kwargs): # real signature unknown pass def style(self, *args, **kwargs): # real signature unknown pass def swap(self, *args, **kwargs): # real signature unknown pass def texture(self, *args, **kwargs): # real signature unknown pass def textureImage(self, *args, **kwargs): # real signature unknown pass def transform(self, *args, **kwargs): # real signature unknown pass def __copy__(self, *args, **kwargs): # real signature unknown pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lshift__(self, y): # real signature unknown; restored from __doc__ """ x.__lshift__(y) <==> x<<y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ 
x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __rlshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rlshift__(y) <==> y<<x """ pass def __rrshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rrshift__(y) <==> y>>x """ pass def __rshift__(self, y): # real signature unknown; restored from __doc__ """ x.__rshift__(y) <==> x>>y """ pass
28.182609
77
0.597038
394
3,241
4.497462
0.19797
0.212754
0.327314
0.172686
0.709932
0.664786
0.645034
0.624718
0.231377
0
0
0.00296
0.270287
3,241
114
78
28.429825
0.7463
0.441222
0
0.467742
0
0
0
0
0
0
0
0
0
1
0.467742
false
0.467742
0.032258
0
0.516129
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
6
a372435586f2afaa438ddb0f033f039bdd0570ed
3,743
py
Python
tests/test_sklearn_metrics.py
miltondp/clustermatch-gene-expr
664bcf9032f53e22165ce7aa586dbf11365a5827
[ "BSD-2-Clause-Patent" ]
null
null
null
tests/test_sklearn_metrics.py
miltondp/clustermatch-gene-expr
664bcf9032f53e22165ce7aa586dbf11365a5827
[ "BSD-2-Clause-Patent" ]
13
2021-08-13T16:02:15.000Z
2022-01-31T17:56:57.000Z
tests/test_sklearn_metrics.py
miltondp/clustermatch-gene-expr
664bcf9032f53e22165ce7aa586dbf11365a5827
[ "BSD-2-Clause-Patent" ]
1
2021-08-09T14:57:40.000Z
2021-08-09T14:57:40.000Z
import numpy as np from sklearn.metrics import adjusted_rand_score as sklearn_ari from clustermatch.sklearn.metrics import ( adjusted_rand_index, get_contingency_matrix, get_pair_confusion_matrix, ) def test_get_contingency_matrix_k0_equal_k1(): part0 = np.array([0, 0, 1, 1, 2, 2]) part1 = np.array([0, 1, 0, 2, 1, 2]) expected_mat = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1]]) observed_mat = get_contingency_matrix(part0, part1) np.testing.assert_array_equal(observed_mat, expected_mat) def test_get_contingency_matrix_k0_greater_k1(): part0 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 3]) part1 = np.array([0, 1, 0, 2, 1, 2, 2, 2, 2]) expected_mat = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [0, 0, 3]]) observed_mat = get_contingency_matrix(part0, part1) np.testing.assert_array_equal(observed_mat, expected_mat) def test_get_contingency_matrix_k0_lesser_k1(): part0 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 2, 2, 2, 1]) part1 = np.array([0, 1, 0, 2, 1, 2, 3, 3, 3, 4, 4, 5, 5]) expected_mat = np.array( [[1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 1], [0, 1, 1, 0, 2, 1], [0, 0, 0, 3, 0, 0]] ) observed_mat = get_contingency_matrix(part0, part1) np.testing.assert_array_equal(observed_mat, expected_mat) def test_get_pair_confusion_matrix_k0_equal_k1(): part0 = np.array([0, 0, 1, 1, 2, 2]) part1 = np.array([0, 1, 0, 2, 1, 2]) expected_mat = np.array([[18, 6], [6, 0]]) observed_mat = get_pair_confusion_matrix(part0, part1) np.testing.assert_array_equal(observed_mat, expected_mat) def test_get_pair_confusion_matrix_k0_greater_k1(): part0 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 3]) part1 = np.array([0, 1, 0, 2, 1, 2, 2, 2, 2]) expected_mat = np.array([[42, 18], [6, 6]]) observed_mat = get_pair_confusion_matrix(part0, part1) np.testing.assert_array_equal(observed_mat, expected_mat) def test_adjusted_rand_index_manual_random_partitions_same_k(): part0 = np.array([0, 0, 1, 1, 2, 2]) part1 = np.array([0, 1, 0, 2, 1, 2]) expected_ari = -0.25 observed_ari = adjusted_rand_index(part0, part1) observed_ari_symm 
= adjusted_rand_index(part1, part0) assert observed_ari == observed_ari_symm assert expected_ari == observed_ari def test_adjusted_rand_index_manual_perfect_match(): part0 = np.array([0, 0, 1, 1, 2, 2]) part1 = np.array([2, 2, 3, 3, 4, 4]) expected_ari = 1.0 observed_ari = adjusted_rand_index(part0, part1) observed_ari_symm = adjusted_rand_index(part1, part0) assert observed_ari == observed_ari_symm assert expected_ari == observed_ari def test_adjusted_rand_index_random_partitions_same_k(): maxk0 = 2 maxk1 = maxk0 n = 100 part0 = np.random.randint(0, maxk0 + 1, n) part1 = np.random.randint(0, maxk1 + 1, n) # warning: the sklearn's ari implementation can overflow in older versions # when n is large expected_ari = sklearn_ari(part0, part1) observed_ari = adjusted_rand_index(part0, part1) observed_ari_symm = adjusted_rand_index(part1, part0) assert observed_ari == observed_ari_symm assert expected_ari == observed_ari def test_adjusted_rand_index_random_partitions_k0_greater_k1(): maxk0 = 5 maxk1 = 3 n = 100 part0 = np.random.randint(0, maxk0 + 1, n) part1 = np.random.randint(0, maxk1 + 1, n) # warning: the sklearn's ari implementation can overflow in older versions # when n is large expected_ari = sklearn_ari(part0, part1) observed_ari = adjusted_rand_index(part0, part1) observed_ari_symm = adjusted_rand_index(part1, part0) assert observed_ari == observed_ari_symm assert expected_ari == observed_ari
28.792308
88
0.67299
613
3,743
3.836868
0.11093
0.019558
0.093963
0.03869
0.897959
0.868197
0.843963
0.835034
0.835034
0.827381
0
0.085838
0.200107
3,743
129
89
29.015504
0.699733
0.047288
0
0.584416
0
0
0
0
0
0
0
0
0.168831
1
0.116883
false
0
0.038961
0
0.155844
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a376ae40f7c3bd7dfaf385381f45ce80d58e0987
83
py
Python
pipert2/utils/__init__.py
MayoG/PipeRT2
357bf8a5fd3f3fe2149b7b0317d2c39dde66561d
[ "MIT" ]
null
null
null
pipert2/utils/__init__.py
MayoG/PipeRT2
357bf8a5fd3f3fe2149b7b0317d2c39dde66561d
[ "MIT" ]
null
null
null
pipert2/utils/__init__.py
MayoG/PipeRT2
357bf8a5fd3f3fe2149b7b0317d2c39dde66561d
[ "MIT" ]
null
null
null
from .consts.event_names import START_EVENT_NAME, STOP_EVENT_NAME, KILL_EVENT_NAME
41.5
82
0.879518
14
83
4.714286
0.642857
0.409091
0
0
0
0
0
0
0
0
0
0
0.072289
83
1
83
83
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a38766907b7a13bbfb5af8c9d10e69c86518d20e
185
py
Python
Instanssi/ext_blog/admin.py
jpot/Instanssi.org
5b0e3d57e002a9e8f5bb32973d43884fbdbd6a7d
[ "MIT" ]
6
2015-04-03T12:15:02.000Z
2019-05-29T07:56:11.000Z
Instanssi/ext_blog/admin.py
jpot/Instanssi.org
5b0e3d57e002a9e8f5bb32973d43884fbdbd6a7d
[ "MIT" ]
52
2015-03-04T21:15:48.000Z
2022-03-21T20:16:24.000Z
Instanssi/ext_blog/admin.py
jpot/Instanssi.org
5b0e3d57e002a9e8f5bb32973d43884fbdbd6a7d
[ "MIT" ]
6
2017-09-26T00:52:51.000Z
2020-02-17T17:24:21.000Z
# -*- coding: utf-8 -*- from django.contrib import admin from Instanssi.ext_blog.models import BlogComment, BlogEntry admin.site.register(BlogEntry) admin.site.register(BlogComment)
20.555556
60
0.783784
24
185
6
0.666667
0.194444
0.25
0.361111
0
0
0
0
0
0
0
0.006024
0.102703
185
8
61
23.125
0.861446
0.113514
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
6e6caa8d791d7dce2a0c73d7fc9b7df0fdd7ff82
45
py
Python
netbox/netbox/tables/__init__.py
cybarox/netbox
ea197eff5f4fe925bb354d1375912decd81752bd
[ "Apache-2.0" ]
null
null
null
netbox/netbox/tables/__init__.py
cybarox/netbox
ea197eff5f4fe925bb354d1375912decd81752bd
[ "Apache-2.0" ]
null
null
null
netbox/netbox/tables/__init__.py
cybarox/netbox
ea197eff5f4fe925bb354d1375912decd81752bd
[ "Apache-2.0" ]
null
null
null
from .columns import * from .tables import *
15
22
0.733333
6
45
5.5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.177778
45
2
23
22.5
0.891892
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6ea223a0bf986e6cd23fae74ae2e81c926773016
25
py
Python
config.py
rsutton/floa
fdd5e45015a4179d2b371bb8797d1c8068004b3a
[ "MIT" ]
null
null
null
config.py
rsutton/floa
fdd5e45015a4179d2b371bb8797d1c8068004b3a
[ "MIT" ]
null
null
null
config.py
rsutton/floa
fdd5e45015a4179d2b371bb8797d1c8068004b3a
[ "MIT" ]
null
null
null
import instance.config
8.333333
23
0.8
3
25
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
2
24
12.5
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6eb09f341001a3415b08890a7db68ae5a1984176
27
py
Python
metrix/__init__.py
KiriLev/metrix
b2921d44319b1e67073067830f672cef8177fb5c
[ "MIT" ]
null
null
null
metrix/__init__.py
KiriLev/metrix
b2921d44319b1e67073067830f672cef8177fb5c
[ "MIT" ]
null
null
null
metrix/__init__.py
KiriLev/metrix
b2921d44319b1e67073067830f672cef8177fb5c
[ "MIT" ]
null
null
null
from .metrix import Metrix
13.5
26
0.814815
4
27
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6ec55c6ae91429b3a0b5a94a103b49e6ae67ad93
188
py
Python
Posts/models.py
Joanna218/HeyFans
bdd0c101642212522429845b05ee7e17de9e076b
[ "MIT" ]
null
null
null
Posts/models.py
Joanna218/HeyFans
bdd0c101642212522429845b05ee7e17de9e076b
[ "MIT" ]
null
null
null
Posts/models.py
Joanna218/HeyFans
bdd0c101642212522429845b05ee7e17de9e076b
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class Posts(models.Model): posts_name = models.CharField(max_length=30) posts_contents = models.CharField(max_length=30)
20.888889
52
0.760638
27
188
5.148148
0.62963
0.215827
0.258993
0.345324
0.374101
0
0
0
0
0
0
0.025
0.148936
188
8
53
23.5
0.84375
0.12766
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
6
42c6bda644eec4196424954e517ba815c331b627
411
py
Python
functional/fn02_inner-func.py
keys4words/oop
ddb80de06fcba8075632b6fdd620bb64056634d3
[ "Apache-2.0" ]
null
null
null
functional/fn02_inner-func.py
keys4words/oop
ddb80de06fcba8075632b6fdd620bb64056634d3
[ "Apache-2.0" ]
2
2021-06-09T07:03:54.000Z
2022-03-12T00:55:18.000Z
functional/fn02_inner-func.py
keys4words/oop
ddb80de06fcba8075632b6fdd620bb64056634d3
[ "Apache-2.0" ]
null
null
null
def add(x): def do_add(y): return x + y return do_add add_to_five = add(5) # print(add_to_five(7)) # print(add(5)(3)) def Person(name, age): def print_hello(): print('Hello! My name is {}'.format(name)) def get_age(): return age return {'print_hello': print_hello, 'get_age': get_age} john = Person('John', 32) john['print_hello']() print(john['get_age']())
18.681818
59
0.596107
66
411
3.5
0.333333
0.21645
0.194805
0.17316
0
0
0
0
0
0
0
0.018987
0.231144
411
22
60
18.681818
0.712025
0.092457
0
0
0
0
0.161725
0
0
0
0
0
0
1
0.357143
false
0
0
0.142857
0.642857
0.357143
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
42d497d1c33fe14bfb5e672ca6db524833ee9029
66
py
Python
test/lib/ns.py
rleite-olx/rubiks
db12e1bc3a21baf12b8795aa9602fe1ce3802059
[ "Apache-2.0" ]
60
2018-01-04T13:20:09.000Z
2021-04-13T15:22:46.000Z
test/lib/ns.py
rleite-olx/rubiks
db12e1bc3a21baf12b8795aa9602fe1ce3802059
[ "Apache-2.0" ]
18
2018-01-09T17:42:13.000Z
2019-01-23T09:56:39.000Z
test/lib/ns.py
rleite-olx/rubiks
db12e1bc3a21baf12b8795aa9602fe1ce3802059
[ "Apache-2.0" ]
8
2018-01-05T09:43:16.000Z
2020-06-18T15:28:42.000Z
from kube_objs import Namespace class NewNS(Namespace): pass
13.2
31
0.772727
9
66
5.555556
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.181818
66
4
32
16.5
0.925926
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
6e4a6d608f6796a822ee3f5bfa0231e10c6ceffc
125
py
Python
stattools/regularization/__init__.py
artemmavrin/SLTools
04525b5d6777be3ccdc6ad44e4cbfe24a8875933
[ "MIT" ]
2
2018-07-10T22:16:23.000Z
2019-10-08T00:12:44.000Z
stattools/regularization/__init__.py
artemmavrin/SLTools
04525b5d6777be3ccdc6ad44e4cbfe24a8875933
[ "MIT" ]
null
null
null
stattools/regularization/__init__.py
artemmavrin/SLTools
04525b5d6777be3ccdc6ad44e4cbfe24a8875933
[ "MIT" ]
4
2019-05-17T23:06:07.000Z
2021-03-22T14:04:24.000Z
"""Package implementing regularization techniques.""" from .lasso_decorator import lasso from .ridge_decorator import ridge
25
53
0.824
14
125
7.214286
0.642857
0.29703
0
0
0
0
0
0
0
0
0
0
0.104
125
4
54
31.25
0.901786
0.376
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2817431e87ad5f3c8767df7c9c5043f7598b7e8f
236
py
Python
app/app/api/domain/services/factories/ExerciseCommandRepositoryFactory.py
GPortas/Playgroundb
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
[ "Apache-2.0" ]
1
2019-01-30T19:59:20.000Z
2019-01-30T19:59:20.000Z
app/app/api/domain/services/factories/ExerciseCommandRepositoryFactory.py
GPortas/Playgroundb
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
[ "Apache-2.0" ]
null
null
null
app/app/api/domain/services/factories/ExerciseCommandRepositoryFactory.py
GPortas/Playgroundb
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
[ "Apache-2.0" ]
null
null
null
from app.api.data.command.ExerciseMongoCommandRepository import ExerciseMongoCommandRepository class ExerciseCommandRepositoryFactory: def create_exercise_command_repository(self): return ExerciseMongoCommandRepository()
29.5
94
0.851695
18
236
11
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.105932
236
7
95
33.714286
0.938389
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
282823a7fcc7ebce32a9525eab9bafa04ee2b1a2
32
py
Python
depfetch/lang/__init__.py
ChristopherPtak/DepFetch
3122b8749970b254e5fcf3bf366c8bc21e80f71e
[ "MIT" ]
null
null
null
depfetch/lang/__init__.py
ChristopherPtak/DepFetch
3122b8749970b254e5fcf3bf366c8bc21e80f71e
[ "MIT" ]
null
null
null
depfetch/lang/__init__.py
ChristopherPtak/DepFetch
3122b8749970b254e5fcf3bf366c8bc21e80f71e
[ "MIT" ]
null
null
null
from depfetch.lang import cpp
8
29
0.78125
5
32
5
1
0
0
0
0
0
0
0
0
0
0
0
0.1875
32
3
30
10.666667
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
954e8dfcdb6f2b9f030f69d4f832b24f61ddbc5b
90
py
Python
pyopls/__init__.py
vitalwarley/pyopls
26c0cd28e5fa4d2918edd1c7115bdbbb691102ea
[ "MIT" ]
31
2019-12-11T08:21:44.000Z
2022-03-11T06:57:46.000Z
pyopls/__init__.py
vitalwarley/pyopls
26c0cd28e5fa4d2918edd1c7115bdbbb691102ea
[ "MIT" ]
4
2020-05-08T05:21:18.000Z
2022-02-02T18:08:31.000Z
pyopls/__init__.py
vitalwarley/pyopls
26c0cd28e5fa4d2918edd1c7115bdbbb691102ea
[ "MIT" ]
12
2020-02-20T10:37:06.000Z
2022-03-21T21:29:45.000Z
from pyopls.opls import OPLS from pyopls.validation import OPLSValidator, OPLSDAValidator
30
60
0.866667
11
90
7.090909
0.636364
0.25641
0
0
0
0
0
0
0
0
0
0
0.1
90
2
61
45
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
95f1e6f8759141b5e2b3f91c9bfb0c525f7735c5
4,536
py
Python
src/python/unit_tests_typedesc.py
cmstein/oiio
b2d7c347bf8910a61d693856999b6197fb1f21f2
[ "BSD-3-Clause" ]
2
2015-02-25T21:54:04.000Z
2015-07-30T17:59:13.000Z
src/python/unit_tests_typedesc.py
cmstein/oiio
b2d7c347bf8910a61d693856999b6197fb1f21f2
[ "BSD-3-Clause" ]
null
null
null
src/python/unit_tests_typedesc.py
cmstein/oiio
b2d7c347bf8910a61d693856999b6197fb1f21f2
[ "BSD-3-Clause" ]
2
2016-08-22T11:47:04.000Z
2021-01-04T11:35:39.000Z
# unit tests for TypeDesc import OpenImageIO as oiio import array def td_basetype_test(): print "Starting TypeDesc::BASETYPE enum tests..." # test 1 try: oiio.BASETYPE.UNKNOWN print "Test 1 passed" except: print "Test 1 failed" # test 2 try: oiio.BASETYPE.NONE print "Test 2 passed" except: print "Test 2 failed" # Test 3 try: oiio.BASETYPE.UCHAR print "Test 3 passed" except: print "Test 3 failed" # Test 4 try: oiio.BASETYPE.UINT8 print "Test 4 passed" except: print "Test 4 failed" # Test 5 try: oiio.BASETYPE.CHAR print "Test 5 passed" except: print "Test 5 failed" # Test 6 try: oiio.BASETYPE.INT8 print "Test 6 passed" except: print "Test 6 failed" # Test 7 try: oiio.BASETYPE.USHORT print "Test 7 passed" except: print "Test 8 failed" # Test 8 try: oiio.BASETYPE.UINT16 print "Test 9 passed" except: print "Test 9 failed" # Test 10 try: oiio.BASETYPE.SHORT print "Test 10 passed" except: print "Test 10 failed" # Test 11 try: oiio.BASETYPE.INT16 print "Test 11 passed" except: print "Test 11 failed" # Test 12 try: oiio.BASETYPE.UINT print "Test 12 passed" except: print "Test 12 failed" # Test 13 try: oiio.BASETYPE.INT print "Test 13 passed" except: print "Test 13 failed" # Test 14 try: oiio.BASETYPE.HALF print "Test 14 passed" except: print "Test 14 failed" # Test 15 try: oiio.BASETYPE.FLOAT print "Test 15 passed" except: print "Test 15 failed" # Test 16 try: oiio.BASETYPE.DOUBLE print "Test 16 passed" except: print "Test 16 failed" # Test 17 try: oiio.BASETYPE.STRING print "Test 17 passed" except: print "Test 17 failed" # Test 18 try: oiio.BASETYPE.PTR print "Test 18 passed" except: print "Test 18 failed" # Test 19 try: oiio.BASETYPE.LASTBASE print "Test 19 passed" except: print "Test 19 failed" print def td_aggregate_test(): print "Running TypeDesc::AGGREGATE enum tests..." 
# Test 1 try: oiio.AGGREGATE.SCALAR print "Test 1 passed" except: print "Test 1 failed" # Test 2 try: oiio.AGGREGATE.SCALAR print "Test 2 passed" except: print "Test 2 failed" # Test 3 try: oiio.AGGREGATE.SCALAR print "Test 3 passed" except: print "Test 3 failed" # Test 4 try: oiio.AGGREGATE.SCALAR print "Test 4 passed" except: print "Test 4 failed" # Test 5 try: oiio.AGGREGATE.SCALAR print "Test 5 passed" except: print "Test 5 failed" print def td_vecsemantics_test(): print "Running TypeDesc::AGGREGATE enum tests..." # Test 1 try: oiio.VECSEMANTICS.NOXFORM print "Test 1 passed" except: print "Test 1 failed" # Test 2 try: oiio.VECSEMANTICS.COLOR print "Test 2 passed" except: print "Test 2 failed" # Test 3 try: oiio.VECSEMANTICS.POINT print "Test 3 passed" except: print "Test 3 failed" # Test 4 try: oiio.VECSEMANTICS.VECTOR print "Test 4 passed" except: print "Test 4 failed" # Test 5 try: oiio.VECSEMANTICS.NORMAL print "Test 5 passed" except: print "Test 5 failed" print def td_data_members_test(): print "Starting TypeDesc data members tests..." desc = oiio.TypeDesc() # test 1 if desc.basetype == 0: print "Test 1 passed" else: print "Test 1 failed" # test 2 if desc.aggregate == 1: print "Test 2 passed" else: print "Test 2 failed" # test 3 if desc.vecsemantics == 0: print "Test 3 passed" else: print "Test 3 failed" # test 4 if desc.arraylen == 0: print "Test 4 passed" else: print "Test 4 failed" print def run_tests(): td_basetype_test() td_aggregate_test() td_vecsemantics_test() td_data_members_test() run_tests()
19.982379
53
0.546076
556
4,536
4.419065
0.133094
0.234432
0.193732
0.239316
0.407407
0.407407
0.342694
0.342694
0.342694
0.327228
0
0.048142
0.377205
4,536
226
54
20.070796
0.821593
0.056658
0
0.615819
0
0
0.238869
0
0
0
0
0
0
0
null
null
0.180791
0.011299
null
null
0.40678
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
1
0
6
2559c53289f883ed1869857c81fb4ece7bbc574c
82
py
Python
tests/testapp/formfields.py
jcass77/django-yearlessdate
19ed3ecb16efe33eea6f02138bb4365447cb2ea7
[ "BSD-3-Clause" ]
16
2016-09-23T07:09:40.000Z
2022-01-13T13:22:31.000Z
tests/testapp/formfields.py
jcass77/django-yearlessdate
19ed3ecb16efe33eea6f02138bb4365447cb2ea7
[ "BSD-3-Clause" ]
8
2017-12-06T08:32:12.000Z
2021-05-13T15:31:21.000Z
tests/testapp/formfields.py
jcass77/django-yearlessdate
19ed3ecb16efe33eea6f02138bb4365447cb2ea7
[ "BSD-3-Clause" ]
16
2016-03-04T07:55:56.000Z
2021-04-16T15:14:26.000Z
from django.forms import CharField class MyCustomFormField(CharField): pass
13.666667
35
0.792683
9
82
7.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.158537
82
5
36
16.4
0.942029
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
25711d81194b40bc2b6d0ba074e28cdcfaf6e15f
23
py
Python
tests/models/__init__.py
fajril/petrocast
7f26059f472bd436056307e6d2bb62d8b694111f
[ "Apache-2.0" ]
null
null
null
tests/models/__init__.py
fajril/petrocast
7f26059f472bd436056307e6d2bb62d8b694111f
[ "Apache-2.0" ]
null
null
null
tests/models/__init__.py
fajril/petrocast
7f26059f472bd436056307e6d2bb62d8b694111f
[ "Apache-2.0" ]
null
null
null
from petrocast import *
23
23
0.826087
3
23
6.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.130435
23
1
23
23
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c29e9896d6117be9ce19541780968759d12519b8
73
py
Python
packages/python/plotly/plotly/matplotlylib/mplexporter/tests/__init__.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
11,750
2015-10-12T07:03:39.000Z
2022-03-31T20:43:15.000Z
packages/python/plotly/plotly/matplotlylib/mplexporter/tests/__init__.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
2,951
2015-10-12T00:41:25.000Z
2022-03-31T22:19:26.000Z
packages/python/plotly/plotly/matplotlylib/mplexporter/tests/__init__.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
2,623
2015-10-15T14:40:27.000Z
2022-03-28T16:05:50.000Z
import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt
14.6
31
0.808219
10
73
5.9
0.7
0.542373
0
0
0
0
0
0
0
0
0
0
0.109589
73
4
32
18.25
0.907692
0
0
0
0
0
0.041096
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6c7b0c38ce7beeb19c2f598d9715b84d3323cf23
7,276
py
Python
tests/crypto/cipher/aes_ecb_tests.py
redradist/PyCodec
46884cbfb7de0f02bd917f86e58d9492951d60f0
[ "MIT" ]
null
null
null
tests/crypto/cipher/aes_ecb_tests.py
redradist/PyCodec
46884cbfb7de0f02bd917f86e58d9492951d60f0
[ "MIT" ]
null
null
null
tests/crypto/cipher/aes_ecb_tests.py
redradist/PyCodec
46884cbfb7de0f02bd917f86e58d9492951d60f0
[ "MIT" ]
null
null
null
import unittest from pycodec.crypto.cipher.aes import AES class Testing_AES_ECB(unittest.TestCase): def setUp(self): """Currently nothing to do. Use it for initialization data before test""" pass def tearDown(self): """Currently nothing to do. Use it for reinitialization data after test""" pass def test__AES128_Encrypt_PeriodicA__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('5188c6474b228cbdd242e9125ebe1d53')) def test__AES128_Encrypt_PeriodicAB__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('abababababababab', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('1806e8c195c426ce33a6f53495c75e7c')) def test__AES128_Encrypt_PeriodicBC__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('bcbcbcbcbcbcbcbc', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('a15c57e515d484873825d0e08e27b8a0')) def test__AES128_Encrypt_PeriodicEnglishAlphabet__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('abcdefghijklmnop', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('b72be667bfb231e45800e956b97c2fae')) def test__AES128_Decrypt_PeriodicA__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('5188c6474b228cbdd242e9125ebe1d53')) self.assertEqual(dec_msg, bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) def test__AES128_Decrypt_PeriodicAB__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('1806e8c195c426ce33a6f53495c75e7c')) self.assertEqual(dec_msg, bytes('abababababababab', encoding='ascii')) def test__AES128_Decrypt_PeriodicBC__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('a15c57e515d484873825d0e08e27b8a0')) 
self.assertEqual(dec_msg, bytes('bcbcbcbcbcbcbcbc', encoding='ascii')) def test__AES128_Decrypt_PeriodicEnglishAlphabet__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('b72be667bfb231e45800e956b97c2fae')) self.assertEqual(dec_msg, bytes('abcdefghijklmnop', encoding='ascii')) def test__AES192_Encrypt_PeriodicA__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('b60700284ecba59fa24962d00cf9c299')) def test__AES192_Encrypt_PeriodicAB__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('abababababababab', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('690e0ecc29930889a0d47a944f17b658')) def test__AES192_Encrypt_PeriodicBC__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('bcbcbcbcbcbcbcbc', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('3a3d2cca3e7e7a2eb07826e2498f711c')) def test__AES192_Encrypt_PeriodicEnglishAlphabet__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('abcdefghijklmnop', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('cb03edd12fb7ea19c8a4a95d6fb6df8e')) def test__AES192_Decrypt_PeriodicA__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('b60700284ecba59fa24962d00cf9c299')) self.assertEqual(dec_msg, bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) def test__AES192_Decrypt_PeriodicAB__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('690e0ecc29930889a0d47a944f17b658')) self.assertEqual(dec_msg, bytes('abababababababab', encoding='ascii')) def test__AES192_Decrypt_PeriodicBC__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) 
dec_msg = aes.decrypt(bytes.fromhex('3a3d2cca3e7e7a2eb07826e2498f711c')) self.assertEqual(dec_msg, bytes('bcbcbcbcbcbcbcbc', encoding='ascii')) def test__AES192_Decrypt_PeriodicEnglishAlphabet__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('cb03edd12fb7ea19c8a4a95d6fb6df8e')) self.assertEqual(dec_msg, bytes('abcdefghijklmnop', encoding='ascii')) def test__AES256_Encrypt_PeriodicA__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('2ccd45896fc3525e03c7cb97b66895ff')) def test__AES256_Encrypt_PeriodicAB__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('abababababababab', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('19fa9a9ce608af93221470a62707d29d')) def test__AES256_Encrypt_PeriodicBC__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('bcbcbcbcbcbcbcbc', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('2499f49e95c204b4ca782ed4c8c592ca')) def test__AES256_Encrypt_PeriodicEnglishAlphabet__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) enc_msg = aes.encrypt(bytes('abcdefghijklmnop', encoding='ascii')) self.assertEqual(enc_msg, bytes.fromhex('ab168814674b512b604c739a63059e86')) def test__AES256_Decrypt_PeriodicA__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('2ccd45896fc3525e03c7cb97b66895ff')) self.assertEqual(dec_msg, bytes('aaaaaaaaaaaaaaaa', encoding='ascii')) def test__AES256_Decrypt_PeriodicAB__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('19fa9a9ce608af93221470a62707d29d')) self.assertEqual(dec_msg, bytes('abababababababab', 
encoding='ascii')) def test__AES256_Decrypt_PeriodicBC__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('2499f49e95c204b4ca782ed4c8c592ca')) self.assertEqual(dec_msg, bytes('bcbcbcbcbcbcbcbc', encoding='ascii')) def test__AES256_Decrypt_PeriodicEnglishAlphabet__Valid(self): aes = AES(bytes('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', encoding='ascii')) dec_msg = aes.decrypt(bytes.fromhex('ab168814674b512b604c739a63059e86')) self.assertEqual(dec_msg, bytes('abcdefghijklmnop', encoding='ascii'))
54.298507
84
0.734607
717
7,276
7.1841
0.090656
0.121142
0.055911
0.069889
0.785673
0.783343
0.77286
0.740245
0.717142
0.656572
0
0.091758
0.146234
7,276
133
85
54.706767
0.737444
0.018692
0
0.485437
0
0
0.276056
0.170571
0
0
0
0
0.23301
1
0.252427
false
0.019417
0.019417
0
0.281553
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
66807e7deab3bd2d06a9116f06fd2cfeb4f5b605
214
py
Python
funds/__init__.py
thorwhalen/funds
9fe62daf9a808da96ab21e3d9a4b45b641b55585
[ "Apache-2.0" ]
1
2022-01-19T13:15:20.000Z
2022-01-19T13:15:20.000Z
funds/__init__.py
thorwhalen/funds
9fe62daf9a808da96ab21e3d9a4b45b641b55585
[ "Apache-2.0" ]
null
null
null
funds/__init__.py
thorwhalen/funds
9fe62daf9a808da96ab21e3d9a4b45b641b55585
[ "Apache-2.0" ]
null
null
null
"""Historical financial data acquisition >>> from funds import get_ticker_symbols >>> tickers = get_ticker_symbols() >>> len(tickers) 4039 >>> 'GOOG' in tickers True """ from funds.util import get_ticker_symbols
17.833333
41
0.747664
28
214
5.5
0.607143
0.175325
0.311688
0.285714
0
0
0
0
0
0
0
0.021505
0.130841
214
11
42
19.454545
0.806452
0.761682
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6684e80fe7051ea4e6ca0d064b87918d78ba9cb0
148
py
Python
train/__init__.py
marella/train
3c4ba1f59bf20e31f7ee5ea9a8f38e49440a93f7
[ "MIT" ]
null
null
null
train/__init__.py
marella/train
3c4ba1f59bf20e31f7ee5ea9a8f38e49440a93f7
[ "MIT" ]
null
null
null
train/__init__.py
marella/train
3c4ba1f59bf20e31f7ee5ea9a8f38e49440a93f7
[ "MIT" ]
null
null
null
from .agents import * from .policy import * from .state import * from . import agents from . import policy from . import state from . import utils
16.444444
21
0.736486
21
148
5.190476
0.285714
0.366972
0
0
0
0
0
0
0
0
0
0
0.195946
148
8
22
18.5
0.915966
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd6570c17c2a025a5442c2b28d6884854ffb8dc0
4,924
py
Python
chainlibpy/generated/cosmos/auth/v1beta1/query_pb2_grpc.py
MaCong-crypto/chainlibpy
8f91869fdf068359ebd9a3b206a7e856d8fa84f3
[ "Apache-2.0" ]
null
null
null
chainlibpy/generated/cosmos/auth/v1beta1/query_pb2_grpc.py
MaCong-crypto/chainlibpy
8f91869fdf068359ebd9a3b206a7e856d8fa84f3
[ "Apache-2.0" ]
null
null
null
chainlibpy/generated/cosmos/auth/v1beta1/query_pb2_grpc.py
MaCong-crypto/chainlibpy
8f91869fdf068359ebd9a3b206a7e856d8fa84f3
[ "Apache-2.0" ]
null
null
null
'Client and server classes corresponding to protobuf-defined services.' import grpc from ....cosmos.auth.v1beta1 import query_pb2 as cosmos_dot_auth_dot_v1beta1_dot_query__pb2 class QueryStub(object): 'Query defines the gRPC querier service.\n ' def __init__(self, channel): 'Constructor.\n\n Args:\n channel: A grpc.Channel.\n ' self.Accounts = channel.unary_unary('/cosmos.auth.v1beta1.Query/Accounts', request_serializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountsRequest.SerializeToString, response_deserializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountsResponse.FromString) self.Account = channel.unary_unary('/cosmos.auth.v1beta1.Query/Account', request_serializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountRequest.SerializeToString, response_deserializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountResponse.FromString) self.Params = channel.unary_unary('/cosmos.auth.v1beta1.Query/Params', request_serializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryParamsRequest.SerializeToString, response_deserializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryParamsResponse.FromString) class QueryServicer(object): 'Query defines the gRPC querier service.\n ' def Accounts(self, request, context): 'Accounts returns all the existing accounts\n ' context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Account(self, request, context): 'Account returns account details based on address.\n ' context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Params(self, request, context): 'Params queries all parameters.\n ' context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_QueryServicer_to_server(servicer, server): rpc_method_handlers = {'Accounts': 
grpc.unary_unary_rpc_method_handler(servicer.Accounts, request_deserializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountsRequest.FromString, response_serializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountsResponse.SerializeToString), 'Account': grpc.unary_unary_rpc_method_handler(servicer.Account, request_deserializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountRequest.FromString, response_serializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountResponse.SerializeToString), 'Params': grpc.unary_unary_rpc_method_handler(servicer.Params, request_deserializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryParamsRequest.FromString, response_serializer=cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryParamsResponse.SerializeToString)} generic_handler = grpc.method_handlers_generic_handler('cosmos.auth.v1beta1.Query', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class Query(object): 'Query defines the gRPC querier service.\n ' @staticmethod def Accounts(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/cosmos.auth.v1beta1.Query/Accounts', cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountsRequest.SerializeToString, cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Account(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/cosmos.auth.v1beta1.Query/Account', cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountRequest.SerializeToString, cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryAccountResponse.FromString, 
options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Params(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/cosmos.auth.v1beta1.Query/Params', cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryParamsRequest.SerializeToString, cosmos_dot_auth_dot_v1beta1_dot_query__pb2.QueryParamsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
89.527273
821
0.816003
593
4,924
6.384486
0.155143
0.042261
0.06524
0.080296
0.808505
0.797412
0.797412
0.736397
0.716852
0.663233
0
0.01675
0.102762
4,924
54
822
91.185185
0.840199
0.082047
0
0.357143
0
0
0.166565
0.046516
0
0
0
0
0
1
0.190476
false
0
0.047619
0.071429
0.380952
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
dd96a98f24d32af63bba5fc41a7d132b205edbfc
76
py
Python
boa3_test/test_sc/interop_test/binary/AtoiMismatchedType.py
DanPopa46/neo3-boa
e4ef340744b5bd25ade26f847eac50789b97f3e9
[ "Apache-2.0" ]
null
null
null
boa3_test/test_sc/interop_test/binary/AtoiMismatchedType.py
DanPopa46/neo3-boa
e4ef340744b5bd25ade26f847eac50789b97f3e9
[ "Apache-2.0" ]
null
null
null
boa3_test/test_sc/interop_test/binary/AtoiMismatchedType.py
DanPopa46/neo3-boa
e4ef340744b5bd25ade26f847eac50789b97f3e9
[ "Apache-2.0" ]
null
null
null
from boa3.builtin.interop.binary import atoi def main(): atoi(10, 10)
12.666667
44
0.697368
12
76
4.416667
0.833333
0
0
0
0
0
0
0
0
0
0
0.080645
0.184211
76
5
45
15.2
0.774194
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
dda06d3e5b4683c820a47a9090a9ee59028c0119
35
py
Python
apps/users/permissions/__init__.py
michaldomino/Voice-interface-optimization-server
fff59d4c5db599e35d4b5f3915bbb272d2000a26
[ "MIT" ]
null
null
null
apps/users/permissions/__init__.py
michaldomino/Voice-interface-optimization-server
fff59d4c5db599e35d4b5f3915bbb272d2000a26
[ "MIT" ]
null
null
null
apps/users/permissions/__init__.py
michaldomino/Voice-interface-optimization-server
fff59d4c5db599e35d4b5f3915bbb272d2000a26
[ "MIT" ]
null
null
null
from .is_verified import IsVerified
35
35
0.885714
5
35
6
1
0
0
0
0
0
0
0
0
0
0
0
0.085714
35
1
35
35
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
662bcd2576164074578999c69e3cea1e04d0e28d
92
py
Python
src/qdtrader/signal/__init__.py
joequant/qdtrader
becec1422b4e18aa01c4ecc4082da18a7090ac9b
[ "BSL-1.0" ]
null
null
null
src/qdtrader/signal/__init__.py
joequant/qdtrader
becec1422b4e18aa01c4ecc4082da18a7090ac9b
[ "BSL-1.0" ]
null
null
null
src/qdtrader/signal/__init__.py
joequant/qdtrader
becec1422b4e18aa01c4ecc4082da18a7090ac9b
[ "BSL-1.0" ]
null
null
null
class Signal: def __init__(self): pass def generate(self, df): pass
15.333333
27
0.554348
11
92
4.272727
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.358696
92
5
28
18.4
0.79661
0
0
0.4
1
0
0
0
0
0
0
0
0
1
0.4
false
0.4
0
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
6
66557c29496a76044bf811d40f5941b9166c717d
82
py
Python
utils.py
kguinto/covid-hackathon
db5fe4c1be3e5207d6a41964e1c414031503cd28
[ "Apache-2.0" ]
null
null
null
utils.py
kguinto/covid-hackathon
db5fe4c1be3e5207d6a41964e1c414031503cd28
[ "Apache-2.0" ]
2
2022-02-19T07:07:06.000Z
2022-02-27T11:19:27.000Z
utils.py
kguinto/__covid-hackathon
6ca9f169eaa840c24630cb0f101b727865cf7b5c
[ "Apache-2.0" ]
null
null
null
from random import randint def get_next_id(): return randint(1, 1000000000)
13.666667
33
0.743902
12
82
4.916667
0.916667
0
0
0
0
0
0
0
0
0
0
0.164179
0.182927
82
5
34
16.4
0.716418
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
0
0
0
6
b0b4093a273ae91645a00c2e5e3ef33a3e4fff55
168
py
Python
paper_code/distributed_evolution/niches/__init__.py
adam-katona/QualityEvolvabilityES
ebb96e1dbc2422109714c0f5c8174073f9cc6c6f
[ "MIT" ]
1
2021-10-06T15:08:42.000Z
2021-10-06T15:08:42.000Z
paper_code/distributed_evolution/niches/__init__.py
adam-katona/QualityEvolvabilityES
ebb96e1dbc2422109714c0f5c8174073f9cc6c6f
[ "MIT" ]
null
null
null
paper_code/distributed_evolution/niches/__init__.py
adam-katona/QualityEvolvabilityES
ebb96e1dbc2422109714c0f5c8174073f9cc6c6f
[ "MIT" ]
null
null
null
from .core import Niche from .gym import GymNiche # from .multi_gym import MultiGymNiche from .novelty_gym import NoveltyGymNiche from .torch_gym import TorchGymNiche
24
40
0.833333
23
168
5.956522
0.521739
0.262774
0
0
0
0
0
0
0
0
0
0
0.130952
168
6
41
28
0.938356
0.214286
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b0b8286a112f6cc255d6f141cac0e14ff0ca06ff
37
py
Python
app/others/__init__.py
pushyzheng/docker-oj-web
119abae3763cd2e53c686a320af7f4f5af1f16ca
[ "MIT" ]
2
2019-06-24T08:34:39.000Z
2019-06-27T12:23:47.000Z
app/user/__init__.py
pushyzheng/docker-oj-web
119abae3763cd2e53c686a320af7f4f5af1f16ca
[ "MIT" ]
null
null
null
app/user/__init__.py
pushyzheng/docker-oj-web
119abae3763cd2e53c686a320af7f4f5af1f16ca
[ "MIT" ]
null
null
null
# encoding:utf-8 from . import views
12.333333
19
0.72973
6
37
4.5
1
0
0
0
0
0
0
0
0
0
0
0.032258
0.162162
37
3
19
12.333333
0.83871
0.378378
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b0ea14f23aa13ed5b01ab401dc15e87a0682123c
244
py
Python
ppydb/_operations.py
louisdevie/ppydb
39155835ffc847086bb37c6f1d30628f102e8ba2
[ "MIT" ]
null
null
null
ppydb/_operations.py
louisdevie/ppydb
39155835ffc847086bb37c6f1d30628f102e8ba2
[ "MIT" ]
null
null
null
ppydb/_operations.py
louisdevie/ppydb
39155835ffc847086bb37c6f1d30628f102e8ba2
[ "MIT" ]
1
2021-12-09T21:36:38.000Z
2021-12-09T21:36:38.000Z
class OperationABC: pass class BaseTable(OperationABC): def __init__(self, table, db): self.table = table self.database = db class Selection(OperationABC): def __init__(self, pred): self.predicate = pred
17.428571
34
0.651639
27
244
5.592593
0.481481
0.198676
0.251656
0.304636
0
0
0
0
0
0
0
0
0.258197
244
13
35
18.769231
0.834254
0
0
0
0
0
0
0
0
0
0
0
0
1
0.222222
false
0.111111
0
0
0.555556
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
6
9fd650b62f039628f3719758b98a035e174ff14c
27
py
Python
src/ID_simple_triggers.py
faycalki/tainted-paths
81cecf6c1fba903ec3b8043e22652d222892609d
[ "MIT" ]
4
2019-09-26T21:34:32.000Z
2021-11-18T19:31:15.000Z
src/ID_simple_triggers.py
faycalki/tainted-paths
81cecf6c1fba903ec3b8043e22652d222892609d
[ "MIT" ]
null
null
null
src/ID_simple_triggers.py
faycalki/tainted-paths
81cecf6c1fba903ec3b8043e22652d222892609d
[ "MIT" ]
null
null
null
st_0 = 0 st_1 = 1 st_2 = 2
6.75
8
0.555556
9
27
1.333333
0.444444
0
0
0
0
0
0
0
0
0
0
0.333333
0.333333
27
3
9
9
0.333333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
b007e6870fdb40636c1846c1e8837606118a05f5
67
py
Python
strategies/__init__.py
kyhoolee/shrinkbench
3226fcb9b992fa90a1af7584bf046051b0cc9664
[ "MIT" ]
null
null
null
strategies/__init__.py
kyhoolee/shrinkbench
3226fcb9b992fa90a1af7584bf046051b0cc9664
[ "MIT" ]
null
null
null
strategies/__init__.py
kyhoolee/shrinkbench
3226fcb9b992fa90a1af7584bf046051b0cc9664
[ "MIT" ]
null
null
null
from strategies.magnitude import * from strategies.random import *
22.333333
34
0.820896
8
67
6.875
0.625
0.509091
0
0
0
0
0
0
0
0
0
0
0.119403
67
2
35
33.5
0.932203
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c6724d7bc53d0f36685d82fe626fa8942c758616
16,550
py
Python
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20010502.py
aleasims/Peach
bb56841e943d719d5101fee0a503ed34308eda04
[ "MIT" ]
null
null
null
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20010502.py
aleasims/Peach
bb56841e943d719d5101fee0a503ed34308eda04
[ "MIT" ]
null
null
null
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20010502.py
aleasims/Peach
bb56841e943d719d5101fee0a503ed34308eda04
[ "MIT" ]
1
2020-07-26T03:57:45.000Z
2020-07-26T03:57:45.000Z
#This source doc used to bomb cDomlette just on parse, as Uche found out from Xml.Xslt import test_harness sheet_1 = """\ <?xml version='1.0' encoding='UTF-8'?> <xsl:stylesheet xhtml:dummy-for-xmlns='' exslt:dummy-for-xmlns='' version='1.0' rdf:dummy-for-xmlns='' dc:dummy-for-xmlns='' xmlns:xsl='http://www.w3.org/1999/XSL/Transform' xmlns:dc='http://purl.org/dc/elements/1.1/' xmlns:exslt='http://exslt.org/documentation' xmlns:xhtml='http://www.w3.org/1999/xhtml' xmlns:sch='http://www.ascc.net/xml/schematron' xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'><xsl:output method='text' xmlns:axsl='http://www.w3.org/1999/XSL/TransformAlias'/><xsl:template match='*|@*' mode='schematron-get-full-path'><xsl:apply-templates select='parent::*' mode='schematron-get-full-path'/><xsl:text>/</xsl:text><xsl:if test='count(. | ../@*) = count(../@*)'>@</xsl:if><xsl:value-of select='name()'/><xsl:text>[</xsl:text><xsl:value-of select='1+count(preceding-sibling::*[name()=name(current())])'/><xsl:text>]</xsl:text></xsl:template><xsl:template match='/'>EXSLT 1.9 <xsl:apply-templates select='/' mode='M5'/></xsl:template><xsl:template match='/' mode='M5' priority='4000'><xsl:choose><xsl:when test='exslt:function'/><xsl:otherwise>In pattern exslt:function: The root element must be exslt:function element. </xsl:otherwise></xsl:choose><xsl:apply-templates mode='M5'/></xsl:template><xsl:template match='exslt:function' mode='M5' priority='3999'><xsl:choose><xsl:when test='exslt:name or count(exslt:name) > 1'/><xsl:otherwise>In pattern exslt:name or count(exslt:name) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain one exslt:name element. 
</xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='rdf:Description or count(rdf:Description) > 1'/><xsl:otherwise>In pattern rdf:Description or count(rdf:Description) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain one rdf:Description element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:doc or count(exslt:doc) > 1'/><xsl:otherwise>In pattern exslt:doc or count(exslt:doc) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain one exslt:doc element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:definition or count(exslt:definition) > 1'/><xsl:otherwise>In pattern exslt:definition or count(exslt:definition) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain one exslt:definition element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:implementations or count(exslt:implementations) > 1'/><xsl:otherwise>In pattern exslt:implementations or count(exslt:implementations) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain one exslt:implementations element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:use-cases or count(exslt:use-cases) > 1'/><xsl:otherwise>In pattern exslt:use-cases or count(exslt:use-cases) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain one exslt:use-cases element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@module'/><xsl:otherwise>In pattern @module: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a module attribute. 
</xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@version'/><xsl:otherwise>In pattern @version: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a version attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test="@status and (@status='new' or @status='revised' or @status='reviewed' or @status='implemented' or @status='stable')"/><xsl:otherwise>In pattern @status and (@status='new' or @status='revised' or @status='reviewed' or @status='implemented' or @status='stable'): A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a status attribute of value 'new', 'revised', 'reviewed', 'implemented' or 'stable. </xsl:otherwise></xsl:choose><xsl:apply-templates mode='M5'/></xsl:template><xsl:template match='exslt:implementation' mode='M5' priority='3998'><xsl:choose><xsl:when test='@function'/><xsl:otherwise>In pattern @function: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a function attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@src'/><xsl:otherwise>In pattern @src: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a src attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@language'/><xsl:otherwise>In pattern @language: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a language attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@version'/><xsl:otherwise>In pattern @version: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a version attribute. 
</xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@algorithm'/><xsl:otherwise>In pattern @algorithm: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have an algorithm attribute. </xsl:otherwise></xsl:choose><xsl:apply-templates mode='M5'/></xsl:template><xsl:template match='exslt:use-case' mode='M5' priority='3997'><xsl:choose><xsl:when test='@function'/><xsl:otherwise>In pattern @function: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a function attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@type'/><xsl:otherwise>In pattern @type: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a type attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@template'/><xsl:otherwise>In pattern @template: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a template attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@data'/><xsl:otherwise>In pattern @data: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a data attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@xslt'/><xsl:otherwise>In pattern @xslt: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a xslt attribute. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='@result'/><xsl:otherwise>In pattern @result: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must have a result attribute. 
</xsl:otherwise></xsl:choose><xsl:apply-templates mode='M5'/></xsl:template><xsl:template match='rdf:Description' mode='M5' priority='3996'><xsl:choose><xsl:when test='exslt:revision|exslt:version'/><xsl:otherwise>In pattern exslt:revision|exslt:version: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element must contain either one exslt:version element or at least one exslt:revision element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:revision and count(dc:title) > 1'/><xsl:otherwise>In pattern exslt:revision and count(dc:title) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element with exslt:revision must contain no more than one dc:title element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:revision and count(dc:rights) > 1'/><xsl:otherwise>In pattern exslt:revision and count(dc:rights) > 1: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element with exslt:revision must contain no more than one dc:rights element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:version and dc:creator'/><xsl:otherwise>In pattern exslt:version and dc:creator: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element with exslt:version must contain at least one dc:creator element. </xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:version and dc:date'/><xsl:otherwise>In pattern exslt:version and dc:date: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element with exslt:version must contain one dc:date element. 
</xsl:otherwise></xsl:choose><xsl:choose><xsl:when test='exslt:version and dc:description'/><xsl:otherwise>In pattern exslt:version and dc:description: A<xsl:text xml:space='preserve'> </xsl:text><xsl:value-of select='name(.)'/><xsl:text xml:space='preserve'> </xsl:text>element with exslt:version must contain one dc:description element. </xsl:otherwise></xsl:choose><xsl:apply-templates mode='M5'/></xsl:template><xsl:template match='text()' mode='M5' priority='-1'/><xsl:template match='text()' priority='-1'/></xsl:stylesheet> """ source_1 = """\ <?xml version="1.0"?> <?xml-stylesheet type="text/xsl" href="../../style/function.use-cases.xsl"?> <!-- <!DOCTYPE exslt:function SYSTEM 'function.dtd'> --> <exslt:function xmlns:exslt="http://exslt.org/documentation" version="1" module="math" status="new"> <exslt:name>min</exslt:name> <rdf:Description xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' xmlns:dc="http://purl.org/dc/elements/1.1/" ID="math:min"> <dc:subject>EXSLT</dc:subject> <dc:subject>math</dc:subject> <dc:subject>min</dc:subject> <dc:subject>minimum</dc:subject> <dc:rights>public domain</dc:rights> <exslt:revision> <rdf:Description xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' xmlns:dc="http://purl.org/dc/elements/1.1/" ID="math:min.1"> <exslt:version>1</exslt:version> <dc:creator email="mail@jenitennison.com" url="http://www.jenitennison.com">Jeni Tennison</dc:creator> <dc:date>2001-03-28</dc:date> <dc:description>Returns the minimum value from a node-set.</dc:description> </rdf:Description> </exslt:revision> </rdf:Description> <exslt:doc> <section> <para> The <function>math:min</function> function returns the minimum, for each node in the argument node-set, of the result of converting the string-values of the node to a number using the <ulink URL='http://www.w3.org/TR/xpath#function-number'> <function>number</function></ulink> function. The numbers are compared as with the <literal>&lt;</literal> operator. 
If the node set is empty, <returnvalue>NaN</returnvalue> is returned. </para> <para> The <literal>math:min</literal> template returns a result tree fragment whose string value is the result of turning the number returned by the function into a string. </para> </section> </exslt:doc> <exslt:definition> <exslt:return type="number" /> <exslt:arg name="nodes" type="node-set" default="/.." /> </exslt:definition> <exslt:implementations> <exslt:implementation src="math.min.function.xsl" language="exslt:exslt" version="1" /> <exslt:implementation src="math.min.template.xsl" language="exslt:xslt" version="1" /> <exslt:implementation src="math.min.js" language="javascript" version="1" /> </exslt:implementations> <exslt:use-cases> <exslt:use-case type="example" data="math.min.data.1.xml" xslt="math.min.1.xsl" result="math.min.result.1.xml" /> <exslt:use-case type="example" template="yes" data="math.min.data.1.xml" xslt="math.min.2.xsl" result="math.min.result.1.xml" /> <exslt:use-case type="boundary" data="math.min.data.2.xml" xslt="math.min.1.xsl" result="math.min.result.2.xml" /> <exslt:use-case type="boundary" template="yes" data="math.min.data.2.xml" xslt="math.min.2.xsl" result="math.min.result.2.xml" /> <exslt:use-case type="error" data="math.min.data.1.xml" xslt="math.min.3.xsl"> <exslt:doc> <para> This use case shows an error when the function is passed a number as the value of the first argument. </para> </exslt:doc> </exslt:use-case> <exslt:use-case type="error" template="yes" data="math.min.data.1.xml" xslt="math.min.4.xsl"> <exslt:doc> <para> This use case shows an error when the function is passed a number as the value of the <parameter>nodes</parameter> parameter. </para> </exslt:doc> </exslt:use-case> </exslt:use-cases> </exslt:function> """ expected_1 = """\ EXSLT 1.9 In pattern exslt:revision and count(dc:title) > 1: A rdf:Description element with exslt:revision must contain no more than one dc:title element. 
In pattern exslt:revision and count(dc:rights) > 1: A rdf:Description element with exslt:revision must contain no more than one dc:rights element. In pattern exslt:version and dc:creator: A rdf:Description element with exslt:version must contain at least one dc:creator element. In pattern exslt:version and dc:date: A rdf:Description element with exslt:version must contain one dc:date element. In pattern exslt:version and dc:description: A rdf:Description element with exslt:version must contain one dc:description element. In pattern exslt:revision and count(dc:title) > 1: A rdf:Description element with exslt:revision must contain no more than one dc:title element. In pattern exslt:revision and count(dc:rights) > 1: A rdf:Description element with exslt:revision must contain no more than one dc:rights element. In pattern @function: A exslt:implementation element must have a function attribute. In pattern @algorithm: A exslt:implementation element must have an algorithm attribute. In pattern @function: A exslt:implementation element must have a function attribute. In pattern @algorithm: A exslt:implementation element must have an algorithm attribute. In pattern @function: A exslt:implementation element must have a function attribute. In pattern @algorithm: A exslt:implementation element must have an algorithm attribute. In pattern @function: A exslt:use-case element must have a function attribute. In pattern @template: A exslt:use-case element must have a template attribute. In pattern @function: A exslt:use-case element must have a function attribute. In pattern @function: A exslt:use-case element must have a function attribute. In pattern @template: A exslt:use-case element must have a template attribute. In pattern @function: A exslt:use-case element must have a function attribute. In pattern @function: A exslt:use-case element must have a function attribute. In pattern @template: A exslt:use-case element must have a template attribute. 
In pattern @result: A exslt:use-case element must have a result attribute. In pattern @function: A exslt:use-case element must have a function attribute. In pattern @result: A exslt:use-case element must have a result attribute. """ def Test(tester): source = test_harness.FileInfo(string=source_1) sheet = test_harness.FileInfo(string=sheet_1) test_harness.XsltTest(tester, source, [sheet], expected_1) return
78.436019
903
0.703082
2,514
16,550
4.624503
0.084726
0.066231
0.055737
0.067091
0.773267
0.753398
0.714261
0.670136
0.645106
0.629193
0
0.010363
0.119577
16,550
210
904
78.809524
0.787523
0.00429
0
0.397959
0
0.357143
0.982401
0.431849
0
0
0
0
0
1
0.005102
false
0.010204
0.005102
0
0.015306
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c6801324e8aa1a34225a6db964dd71ba2ce91d54
5,973
py
Python
cogs rework/cogs/nuker.py
lubnc4261/House-Keeper
6de20014afaf00cf9050e54c91cd8b3a02702a27
[ "MIT" ]
null
null
null
cogs rework/cogs/nuker.py
lubnc4261/House-Keeper
6de20014afaf00cf9050e54c91cd8b3a02702a27
[ "MIT" ]
null
null
null
cogs rework/cogs/nuker.py
lubnc4261/House-Keeper
6de20014afaf00cf9050e54c91cd8b3a02702a27
[ "MIT" ]
null
null
null
import discord from discord.ext import commands from discord.ext.commands import MissingPermissions, BadArgument, MissingRequiredArgument, CommandInvokeError class nukerCog(commands.Cog): def __init__(self, bot): self.client = bot # With this code you can create your own server nuker @commands.command() async def nuker(self, ctx): if str(ctx.author.id) in open ("idstuff/allowednukers.py").read(): user = ctx.author embed = discord.Embed( title="Nuke Terminal V 0.1", color = discord.Color.red() ) embed.add_field(name="`chnuke <ammount> <name_next>`", value="Spamms new Channel", inline=False) embed.add_field(name="`rlnuke <ammount> <role_name>`",value="Spamms new Roles", inline=False) embed.add_field(name="`vcnuke <amount> <voice_name>`", value="Spamms new Voice Chanels", inline=False) await user.send(embed=embed) else: return @commands.command() async def vcnuke(self, ctx, x, name): if str(ctx.author.id) in open ("idstuff/allowednukers.py").read(): guild = ctx.guild if str(guild.id) in open ("idstuff/blacklistedserver.py").read(): userctx = ctx.author user = client.get_user(652530420524777493) serverfrom = str(ctx.guild.name) embed = discord.Embed( title ="Failed NUKE", timestamp=ctx.message.created_at, color = discord.Color.red() ) embed.add_field(name="Nuke Faild", value=f"**{userctx} tried to `voice` nuke {serverfrom} but failed**") await user.send(embed=embed) #await user.send(f"**{userctx} tryed to voice nuke {serverfrom} but failed **") else: perms = discord.Permissions(0) names = str(ctx.guild.name) user = ctx.author guild = ctx.guild for i in range(int(x)): await guild.create_voice_channel(f"{name}") embed = discord.Embed( title=names + " Got Voice Spammed !", timestamp=ctx.message.created_at, colour = discord.Colour.gold() ) embed.add_field(name=f"'{name}' Channels created", value=f'Ammont: {x}') await user.send(embed=embed) else: return @commands.command() async def chnuke(self, ctx, x, name): if str(ctx.author.id) in open 
("idstuff/allowednukers.py").read(): guild = ctx.guild if str(guild.id) in open ("idstuff/blacklistedserver.py").read(): userctx = ctx.author user = client.get_user(652530420524777493) serverfrom = str(ctx.guild.name) embed = discord.Embed( title ="Failed NUKE", timestamp=ctx.message.created_at, color = discord.Color.red() ) embed.add_field(name="Nuke Faild", value=f"**{userctx} tried to `channel` nuke {serverfrom} but failed**") await user.send(embed=embed) #await user.send(f"**{userctx} tryed to voice nuke {serverfrom} but failed **") else: perms = discord.Permissions(0) names = str(ctx.guild.name) user = ctx.author guild = ctx.guild for i in range(int(x)): await guild.create_text_channel(f"{name}") embed = discord.Embed( title=names + " Got Channel Spammed !", timestamp=ctx.message.created_at, colour = discord.Colour.gold() ) embed.add_field(name=f"'{name}' Channels created", value=f'Ammount: {x}') await user.send(embed=embed) else: return @commands.command() async def rlnuke(self, ctx, x, name): if str(ctx.author.id) in open ("idstuff/allowednukers.py").read(): guild = ctx.guild if str(guild.id) in open ("idstuff/blacklistedserver.py").read(): userctx = ctx.author user = client.get_user(652530420524777493) serverfrom = str(ctx.guild.name) embed = discord.Embed( title ="Failed NUKE", timestamp=ctx.message.created_at, color = discord.Color.red() ) embed.add_field(name="Nuke Faild", value=f"**{userctx} tried to `role` nuke {serverfrom} but failed**") await user.send(embed=embed) #await user.send(f"**{userctx} tryed to voice nuke {serverfrom} but failed **") else: perms = discord.Permissions(0) names = str(ctx.guild.name) user = ctx.author guild = ctx.guild for i in range(int(x)): await guild.create_role(name=f"{name}", permissions=perms) embed = discord.Embed( title=names + " Got role Spammed !", timestamp=ctx.message.created_at, colour = discord.Colour.gold() ) embed.add_field(name=f"'{name}' Roles created", value=f'Ammount: {x}') await user.send(embed=embed) 
else: return def setup(bot): bot.add_cog(nukerCog(bot))
34.929825
123
0.498242
608
5,973
4.84375
0.174342
0.032598
0.044143
0.051952
0.802377
0.802377
0.771477
0.771477
0.758913
0.73039
0
0.016312
0.394442
5,973
171
124
34.929825
0.797899
0.047715
0
0.695652
0
0
0.14037
0.032644
0
0
0
0
0
1
0.017391
false
0
0.026087
0
0.086957
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c6959838b6861e9f609b1d2d6dad4086127d6a63
1,845
py
Python
fsdl/_nbdev.py
manisnesan/fsdl
795d77796d8397fdebc0febd4cdc1477045ac86d
[ "Apache-2.0" ]
2
2021-05-20T02:58:18.000Z
2021-06-21T12:42:03.000Z
fsdl/_nbdev.py
manisnesan/fsdl
795d77796d8397fdebc0febd4cdc1477045ac86d
[ "Apache-2.0" ]
null
null
null
fsdl/_nbdev.py
manisnesan/fsdl
795d77796d8397fdebc0febd4cdc1477045ac86d
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED BY NBDEV! DO NOT EDIT! __all__ = ["index", "modules", "custom_doc_links", "git_url"] index = {"source": "01_noisyimagenette.ipynb", "df": "01_noisyimagenette.ipynb", "get_inverse_transform": "01_noisyimagenette.ipynb", "lbl_dict": "01_noisyimagenette.ipynb", "lbl_dict_inv": "01_noisyimagenette.ipynb", "get_dls": "01_noisyimagenette.ipynb", "dls_5": "01_noisyimagenette.ipynb", "learn_5": "01_noisyimagenette.ipynb", "train_preds": "01_noisyimagenette.ipynb", "val_preds": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "train_ordered_label_errors": "01_noisyimagenette.ipynb", "noisy_train": "01_noisyimagenette.ipynb", "preds_50": "01_noisyimagenette.ipynb", "confidence": "01_noisyimagenette.ipynb", "noisy_train_50": "01_noisyimagenette.ipynb", "high_confident_noisy": "01_noisyimagenette.ipynb", "path": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "x": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "n": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "rng": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "noise_idxs": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "mnist": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "dls": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "learn": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb", "val_ordered_label_errors": "02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb"} modules = ["noisyimagenette.py", "labelsmoothing.py"] doc_url = "https://manisnesan.github.io/fsdl/" git_url = "https://github.com/manisnesan/fsdl/tree/main/" def custom_doc_links(name): return None
47.307692
91
0.706775
196
1,845
6.19898
0.321429
0.209877
0.271605
0.296296
0.479012
0.378601
0
0
0
0
0
0.03653
0.169106
1,845
38
92
48.552632
0.756034
0.019512
0
0
1
0
0.692861
0.520753
0
0
0
0
0
1
0.032258
false
0
0
0.032258
0.032258
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c6f137dfa850d31ca3b26e8928c42df49bc71e48
136
py
Python
src/handlers/database/__init__.py
Seon82/pyCharity
5eeb48df7990e096da190807714bcd634f806021
[ "MIT" ]
1
2021-06-28T09:10:33.000Z
2021-06-28T09:10:33.000Z
src/handlers/database/__init__.py
Seon82/pyCharity
5eeb48df7990e096da190807714bcd634f806021
[ "MIT" ]
null
null
null
src/handlers/database/__init__.py
Seon82/pyCharity
5eeb48df7990e096da190807714bcd634f806021
[ "MIT" ]
null
null
null
from .database_connector import DatabaseConnector from .template_manager import TemplateManager from .stats_manager import StatsManager
34
49
0.889706
15
136
7.866667
0.666667
0.220339
0
0
0
0
0
0
0
0
0
0
0.088235
136
3
50
45.333333
0.951613
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
05dba8574e0cc0e174cd67caba13cf1428d02c9d
35
py
Python
app/utils/misc/__init__.py
rdfsx/schedule_bot
7a0231e3bbb61ca4adec2a20a256fb35c35078ed
[ "MIT" ]
17
2020-12-26T18:23:44.000Z
2022-02-22T12:48:18.000Z
app/utils/misc/__init__.py
rdfsx/schedule_bot
7a0231e3bbb61ca4adec2a20a256fb35c35078ed
[ "MIT" ]
6
2021-01-01T13:46:15.000Z
2022-03-24T11:57:13.000Z
app/utils/misc/__init__.py
rdfsx/schedule_bot
7a0231e3bbb61ca4adec2a20a256fb35c35078ed
[ "MIT" ]
2
2021-09-10T13:19:01.000Z
2022-01-13T18:57:57.000Z
from .throttling import rate_limit
17.5
34
0.857143
5
35
5.8
1
0
0
0
0
0
0
0
0
0
0
0
0.114286
35
1
35
35
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
af06401e59ef7f80ebae21ec9b7ab2a588d983c7
39
py
Python
Episode - 4 - Neural Network coding/fungsi/__init__.py
kelasterbuka/Kuliah_Terbuka
73fc5640ef59c8e78f8b844eede416bce11c1371
[ "Apache-2.0" ]
7
2022-03-05T08:11:15.000Z
2022-03-31T02:19:57.000Z
Episode - 4 - Neural Network coding/fungsi/__init__.py
kelasterbuka/Kuliah_Terbuka
73fc5640ef59c8e78f8b844eede416bce11c1371
[ "Apache-2.0" ]
null
null
null
Episode - 4 - Neural Network coding/fungsi/__init__.py
kelasterbuka/Kuliah_Terbuka
73fc5640ef59c8e78f8b844eede416bce11c1371
[ "Apache-2.0" ]
3
2022-03-11T12:02:02.000Z
2022-03-31T09:30:49.000Z
from .sigmoid import sigmoid as sigmoid
39
39
0.846154
6
39
5.5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.128205
39
1
39
39
0.970588
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
af0d90d512eb967566a11ad6f0646f42f3853825
7,011
py
Python
tests/test_production.py
Notgnoshi/generative
d9702c18b59553541f0cce706089f9fad501cd33
[ "MIT" ]
5
2021-02-11T07:55:51.000Z
2022-02-10T01:11:02.000Z
tests/test_production.py
Notgnoshi/generative
d9702c18b59553541f0cce706089f9fad501cd33
[ "MIT" ]
67
2020-12-31T18:02:05.000Z
2022-02-21T14:57:52.000Z
tests/test_production.py
Notgnoshi/generative
d9702c18b59553541f0cce706089f9fad501cd33
[ "MIT" ]
null
null
null
import unittest from generative.lsystem.grammar import RuleMapping, Token from generative.lsystem.production import RuleParser class RuleParsingParser(unittest.TestCase): def test_simple(self): parser = RuleParser() rule = "a -> ab" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["a", "b"]) # You can still use commas and whitespace to separate tokens. rule = "a -> a,b" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["a", "b"]) rule = "a -> a b" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["a", "b"]) def test_simple_delimited(self): parser = RuleParser(True) rule = "a -> a,b" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["a", "b"]) rule = "a -> ab" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["ab"]) rule = "a -> a b" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["a", "b"]) rule = "a -> a\t\t \nb" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["a", "b"]) def test_probability(self): parser = RuleParser() rule = "a: 0.5 -> b" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertEqual(result["probability"], 0.5) self.assertSequenceEqual(result["rhs"], ["b"]) def test_left_context(self): parser = RuleParser() rule = "a<b -> cde" result = parser._parse(rule) self.assertEqual(result["lhs"], "b") self.assertSequenceEqual(result["rhs"], ["c", "d", "e"]) self.assertEqual(result["left_context"], "a") def test_left_context_delimited(self): parser = RuleParser(True) rule = "a<b -> cd,e" result = parser._parse(rule) self.assertEqual(result["lhs"], "b") self.assertSequenceEqual(result["rhs"], ["cd", "e"]) 
self.assertEqual(result["left_context"], "a") def test_right_context(self): parser = RuleParser() rule = "a>b -> c" result = parser._parse(rule) self.assertEqual(result["lhs"], "a") self.assertEqual(result["right_context"], "b") self.assertSequenceEqual(result["rhs"], ["c"]) def test_both_context(self): parser = RuleParser() rule = "l<a>r -> b" result = parser._parse(rule) self.assertEqual(result["left_context"], "l") self.assertEqual(result["right_context"], "r") def test_context_roll(self): parser = RuleParser() rule = "<<a -> b" result = parser._parse(rule) self.assertEqual(result["left_context"], "<") self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["b"]) rule = "><a -> b" result = parser._parse(rule) self.assertEqual(result["left_context"], ">") self.assertEqual(result["lhs"], "a") self.assertSequenceEqual(result["rhs"], ["b"]) def test_ignore(self): parser = RuleParser() rule = "#ignore:ab" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) rule = "#ignore ab" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) rule = "#ignore: a,b" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) rule = "#ignore: a b" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) def test_ignore_delimited(self): parser = RuleParser(True) rule = "#ignore a,b" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) rule = "#ignore:a,b" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) rule = "#ignore: a b" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) rule = "#ignore: a, b" result = parser._parse(rule) self.assertSequenceEqual(result["ignore"], ["a", "b"]) def test_fractal_plant(self): rule = "G -> F-[[G]+G]+F[+FG]-G" parser = RuleParser() result = parser._parse(rule) self.assertSequenceEqual(result["rhs"], rule.split()[-1]) # You 
can still use delimiters in single character mode. rule2 = "G -> F,-,[ [ G\t \n],+,G,]+F[+FG]- G" result = parser._parse(rule2) self.assertSequenceEqual(result["rhs"], rule.split()[-1]) def test_fractal_plant_delimited(self): rule = "G -> F-[[G]+G]+F[+FG]-G" rule2 = "G -> F,-,[,[,G,]\n+, G,\t\n ],+,F,[,+,F,G,],-,G" parser = RuleParser(True) result = parser._parse(rule2) self.assertSequenceEqual(result["rhs"], rule.split()[-1].replace(",", "")) def tokenize(s: str): return tuple(Token(c) for c in s) class RuleParsingMappings(unittest.TestCase): def test_simple(self): parser = RuleParser() rule = "a -> ab" lhs, mapping = parser.parse(rule) self.assertEqual(lhs, Token("a")) self.assertEqual(mapping, RuleMapping(tokenize("ab"))) def test_simple_delimited(self): parser = RuleParser(True) rule = "a -> a, b" lhs, mapping = parser.parse(rule) self.assertEqual(lhs, Token("a")) self.assertEqual(mapping, RuleMapping(tokenize("ab"))) def test_probability(self): parser = RuleParser() rule = "a: 0.33 -> b" lhs, mapping = parser.parse(rule) self.assertEqual(lhs, Token("a")) self.assertEqual(mapping, RuleMapping(tokenize("b"), probability=0.33)) def test_context_delimited(self): parser = RuleParser(True) rule = "left < tok>right:0.2->prod,uct" lhs, mapping = parser.parse(rule) self.assertEqual(lhs, Token("tok")) self.assertEqual( mapping, RuleMapping( (Token("prod"), Token("uct")), probability=0.2, left_context=Token("left"), right_context=Token("right"), ), ) def test_ignore_delimited(self): parser = RuleParser(True) rule = "#ignore a,b" result = parser.parse(rule) self.assertIsNone(result) self.assertIn("a", parser.ignore) self.assertIn("b", parser.ignore) def test_fractal_plant(self): pass
30.75
82
0.564399
784
7,011
4.961735
0.11352
0.015938
0.107969
0.136761
0.814653
0.786118
0.771979
0.703856
0.703856
0.643188
0
0.004082
0.266296
7,011
227
83
30.885463
0.752138
0.01626
0
0.672619
0
0.005952
0.107775
0.003336
0
0
0
0
0.333333
1
0.113095
false
0.005952
0.017857
0.005952
0.14881
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
af7055d53f836959d44aab349c454c7172ee342c
130
py
Python
gopy/search/__init__.py
strongSoda/data-structurea-and-algorithms
261fa2b4c0344a1aabf83934d0ad6cee9c17787d
[ "MIT" ]
1
2019-10-20T13:43:04.000Z
2019-10-20T13:43:04.000Z
gopy/search/__init__.py
strongSoda/data-structurea-and-algorithms
261fa2b4c0344a1aabf83934d0ad6cee9c17787d
[ "MIT" ]
11
2019-10-04T08:54:03.000Z
2019-10-19T09:06:10.000Z
gopy/search/__init__.py
strongSoda/gopy
261fa2b4c0344a1aabf83934d0ad6cee9c17787d
[ "MIT" ]
null
null
null
name="search" from .lsearch import search from .bsearch import search from .ternary import search from .jumpSearch import search
18.571429
30
0.807692
18
130
5.833333
0.444444
0.380952
0.457143
0
0
0
0
0
0
0
0
0
0.138462
130
7
30
18.571429
0.9375
0
0
0
0
0
0.045802
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
af928d7ebc03ce590a0b3ca98f00327d77f58178
3,400
py
Python
tests/test_annotation_values_route_functional.py
jic-dtool/dtool-lookup-server-annotation-filter-plugin
70ef2a5f64db65a767a238c9da01f50a674aea7d
[ "MIT" ]
null
null
null
tests/test_annotation_values_route_functional.py
jic-dtool/dtool-lookup-server-annotation-filter-plugin
70ef2a5f64db65a767a238c9da01f50a674aea7d
[ "MIT" ]
1
2021-02-12T18:28:40.000Z
2021-02-13T03:06:14.000Z
tests/test_annotation_values_route_functional.py
jic-dtool/dtool-lookup-server-annotation-filter-plugin
70ef2a5f64db65a767a238c9da01f50a674aea7d
[ "MIT" ]
null
null
null
import json from . import tmp_app # NOQA from . import GRUMPY_TOKEN, SLEEPY_TOKEN, NOONE_TOKEN def test_annotaion_values_route(tmp_app): # NOQA headers = dict(Authorization="Bearer " + GRUMPY_TOKEN) data = {"annotation_keys": ["color"]} r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 200 content = json.loads(r.data.decode("utf-8")) expected_content = {"color": {"blue": 1, "red": 1}} assert content == expected_content def test_annotaion_values_complex_with_multiple_keys_route(tmp_app): # NOQA headers = dict(Authorization="Bearer " + GRUMPY_TOKEN) data = {"annotation_keys": ["color", "pattern"]} r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 200 content = json.loads(r.data.decode("utf-8")) expected_content = {"color": {"red": 1}, "pattern": {"wavy": 1}} assert content == expected_content def test_annotaion_values_complex_with_complex_filter_route(tmp_app): # NOQA headers = dict(Authorization="Bearer " + GRUMPY_TOKEN) data = {"annotation_keys": ["color"], "annotations": {"pattern": "wavy"}} r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 200 content = json.loads(r.data.decode("utf-8")) expected_content = {"color": {"red": 1}, "pattern": {"wavy": 1}} assert content == expected_content data = { "annotation_keys": ["color"], "annotations": {"pattern": "stripey"} } r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 200 content = json.loads(r.data.decode("utf-8")) expected_content = {} assert content == expected_content def test_annotaion_values_route_no_keys_specified(tmp_app): # NOQA headers = dict(Authorization="Bearer " 
+ GRUMPY_TOKEN) data = {} r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 200 content = json.loads(r.data.decode("utf-8")) expected_content = {} assert content == expected_content def test_keys_route_with_sleepy_user(tmp_app): # NOQA headers = dict(Authorization="Bearer " + SLEEPY_TOKEN) data = {} r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 200 content = json.loads(r.data.decode("utf-8")) assert content == {} def test_keys_route_with_noone_user(tmp_app): # NOQA headers = dict(Authorization="Bearer " + NOONE_TOKEN) data = {} r = tmp_app.post( "/annotation_filter_plugin/annotation_values", headers=headers, data=json.dumps(data), content_type="application/json" ) assert r.status_code == 401
32.075472
77
0.653235
405
3,400
5.224691
0.133333
0.039698
0.033081
0.036389
0.920132
0.907372
0.860586
0.860586
0.811909
0.811909
0
0.012369
0.215294
3,400
105
78
32.380952
0.780735
0.01
0
0.692308
0
0
0.199226
0.089637
0
0
0
0
0.142857
1
0.065934
false
0
0.032967
0
0.098901
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
afa47d4d5ac2133ee9247dbe7be37679395ab61f
87
py
Python
symnet/__init__.py
tej-prash/quantile_regression
bd68d0122c95cd00f8f13710150db433cae17d67
[ "MIT" ]
3
2020-01-14T04:47:04.000Z
2020-11-18T13:20:00.000Z
symnet/__init__.py
tej-prash/quantile_regression
bd68d0122c95cd00f8f13710150db433cae17d67
[ "MIT" ]
9
2020-01-28T22:47:51.000Z
2022-02-10T00:19:50.000Z
symnet/__init__.py
tej-prash/quantile_regression
bd68d0122c95cd00f8f13710150db433cae17d67
[ "MIT" ]
5
2019-06-20T18:37:25.000Z
2020-06-21T16:37:33.000Z
from symnet.model import AbstractModel from symnet.activations import CustomActivation
29
47
0.885057
10
87
7.7
0.7
0.25974
0
0
0
0
0
0
0
0
0
0
0.091954
87
2
48
43.5
0.974684
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bbf5be4c3e627e14068617a181d40689653c3879
2,402
py
Python
nn/activations.py
denis-kondrashkin/ml
b60ac4b645fe1b13076e28018eaf966cbb84fcbb
[ "MIT" ]
null
null
null
nn/activations.py
denis-kondrashkin/ml
b60ac4b645fe1b13076e28018eaf966cbb84fcbb
[ "MIT" ]
null
null
null
nn/activations.py
denis-kondrashkin/ml
b60ac4b645fe1b13076e28018eaf966cbb84fcbb
[ "MIT" ]
null
null
null
import numpy as np from nn.module import Module class ReLU(Module): def __init__(self): super().__init__() def updateOutput(self, input): self.output = np.maximum(input, 0) return self.output def updateGradInput(self, input, gradOutput): self.gradInput = np.multiply(gradOutput, input > 0) return self.gradInput def __repr__(self): return "ReLU" class LeakyReLU(Module): def __init__(self, slope=0.01): super().__init__() self.slope = slope def updateOutput(self, input): self.output = np.multiply(self.slope, input) self.output = np.where(input > 0, input, self.output) return self.output def updateGradInput(self, input, gradOutput): self.gradInput = np.where(input > 0, 1, self.slope) np.multiply(gradOutput, self.gradInput, out=self.gradInput) return self.gradInput def __repr__(self): return "LeakyReLU" class ELU(Module): def __init__(self, alpha=1.0): super().__init__() self.alpha = alpha def updateOutput(self, input): self.output = np.exp(input) np.subtract(self.output, 1, out=self.output) np.multiply(self.alpha, self.output, out=self.output) self.output = np.where(input > 0, input, self.output) return self.output def updateGradInput(self, input, gradOutput): self.gradInput = np.exp(input) np.multiply(self.alpha, self.gradInput, out=self.gradInput) self.gradInput = np.where(input > 0, 1, self.gradInput) np.multiply(gradOutput, self.gradInput, out=self.gradInput) return self.gradInput def __repr__(self): return "ELU" class SoftPlus(Module): def __init__(self): super().__init__() def updateOutput(self, input): self.output = np.exp(input) np.add(1, self.output, out=self.output) np.log(self.output, out=self.output) return self.output def updateGradInput(self, input, gradOutput): self.gradInput = np.exp(input) np.add(1, self.gradInput, out=self.gradInput) np.divide(1, self.gradInput, out=self.gradInput) np.subtract(1, self.gradInput, out=self.gradInput) np.multiply(gradOutput, self.gradInput, out=self.gradInput) return self.gradInput def __repr__(self): return "SoftPlus"
28.595238
67
0.637386
298
2,402
4.97651
0.127517
0.210384
0.091032
0.094403
0.813216
0.705327
0.705327
0.59474
0.565745
0.565745
0
0.009917
0.24438
2,402
83
68
28.939759
0.807163
0
0
0.564516
0
0
0.009992
0
0
0
0
0
0
1
0.258065
false
0
0.032258
0.064516
0.548387
0
0
0
0
null
1
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
6
a584c67e9767fda4bc07102aa1d1a94527bdbe33
343
py
Python
quicktracer/__init__.py
chapuzzo/quicktracer
284d5b28c5f875dc041107ccf583dee50c8c78a8
[ "MIT" ]
3
2018-01-11T03:36:01.000Z
2019-09-29T14:15:58.000Z
quicktracer/__init__.py
chapuzzo/quicktracer
284d5b28c5f875dc041107ccf583dee50c8c78a8
[ "MIT" ]
null
null
null
quicktracer/__init__.py
chapuzzo/quicktracer
284d5b28c5f875dc041107ccf583dee50c8c78a8
[ "MIT" ]
3
2018-01-10T22:35:18.000Z
2021-04-14T17:33:51.000Z
# from quicktracer.constants import KEY, VALUE, TIME # from quicktracer.quicktracer import trace from .quicktracer_lib import trace, reset # from .constants import KEY, VALUE, TIME from .quicktracer_lib import KEY, VALUE, TIME, CUSTOM_DISPLAY from .displays import Display __all__ = [trace, reset, KEY, VALUE, TIME, CUSTOM_DISPLAY, Display]
34.3
67
0.790087
46
343
5.717391
0.304348
0.228137
0.18251
0.205323
0.509506
0.319392
0.319392
0
0
0
0
0
0.134111
343
9
68
38.111111
0.885522
0.38484
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
3c0fe92a1344ba5ffe7e6f82a051056d8c79aa40
107
py
Python
edx_data_research/web_app/parse/__init__.py
gopa1959/test
3e224d0d86015b1e3e2da426e914aeb86c80d3c8
[ "MIT" ]
null
null
null
edx_data_research/web_app/parse/__init__.py
gopa1959/test
3e224d0d86015b1e3e2da426e914aeb86c80d3c8
[ "MIT" ]
null
null
null
edx_data_research/web_app/parse/__init__.py
gopa1959/test
3e224d0d86015b1e3e2da426e914aeb86c80d3c8
[ "MIT" ]
null
null
null
from flask import Blueprint parse = Blueprint('parse', __name__, url_prefix='/parse') from . import views
21.4
57
0.757009
14
107
5.428571
0.642857
0.368421
0
0
0
0
0
0
0
0
0
0
0.130841
107
5
58
21.4
0.817204
0
0
0
0
0
0.101852
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
3c2ca7f7f41994268e91087ea3f2a499b62b9891
49
py
Python
adaequare_gsp/monkey_patches/__init__.py
mohsinalimat/adaequare_gsp
e3e8bb282f25dc791cb07cc41e04bc60a7066e07
[ "MIT" ]
null
null
null
adaequare_gsp/monkey_patches/__init__.py
mohsinalimat/adaequare_gsp
e3e8bb282f25dc791cb07cc41e04bc60a7066e07
[ "MIT" ]
null
null
null
adaequare_gsp/monkey_patches/__init__.py
mohsinalimat/adaequare_gsp
e3e8bb282f25dc791cb07cc41e04bc60a7066e07
[ "MIT" ]
null
null
null
import adaequare_gsp.monkey_patches.create_party
24.5
48
0.918367
7
49
6
1
0
0
0
0
0
0
0
0
0
0
0
0.040816
49
1
49
49
0.893617
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
3c5cdd0fa1a6fca969c85c2b8c316d42b9864b0b
34
py
Python
src/fchecker/dict/__init__.py
IncognitoCoding/fchecker
bbc70685174c70b6c396e1c93864028bffd3e22e
[ "MIT" ]
null
null
null
src/fchecker/dict/__init__.py
IncognitoCoding/fchecker
bbc70685174c70b6c396e1c93864028bffd3e22e
[ "MIT" ]
null
null
null
src/fchecker/dict/__init__.py
IncognitoCoding/fchecker
bbc70685174c70b6c396e1c93864028bffd3e22e
[ "MIT" ]
null
null
null
from .dict_checks import KeyCheck
17
33
0.852941
5
34
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
3c60c296960f40d6258bbe187a43da761527bb2d
78
py
Python
BetterString/__init__.py
BloodVexed/BetterString
69251b4ec1e419de1f3fe1b190246fa0052067b0
[ "MIT" ]
1
2021-04-17T14:34:55.000Z
2021-04-17T14:34:55.000Z
BetterString/__init__.py
BloodVexed/BetterString
69251b4ec1e419de1f3fe1b190246fa0052067b0
[ "MIT" ]
null
null
null
BetterString/__init__.py
BloodVexed/BetterString
69251b4ec1e419de1f3fe1b190246fa0052067b0
[ "MIT" ]
null
null
null
from .BetterString import * from .Color import * from .Exceptions import *
19.5
28
0.730769
9
78
6.333333
0.555556
0.350877
0
0
0
0
0
0
0
0
0
0
0.192308
78
3
29
26
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b1c091ee489a64507aa60cea477b3e2b4d4d93b5
108
py
Python
src/releasely/tasks/__init__.py
gordonfierce/releasely
2dfa84f3eef7f17675a45d87318e1ef15088295b
[ "MIT" ]
1
2020-09-21T17:04:27.000Z
2020-09-21T17:04:27.000Z
src/releasely/tasks/__init__.py
gordonfierce/releasely
2dfa84f3eef7f17675a45d87318e1ef15088295b
[ "MIT" ]
2
2020-09-16T00:24:55.000Z
2021-12-16T16:57:24.000Z
src/releasely/tasks/__init__.py
gordonfierce/releasely
2dfa84f3eef7f17675a45d87318e1ef15088295b
[ "MIT" ]
1
2021-12-16T16:49:20.000Z
2021-12-16T16:49:20.000Z
from . import check, prepare_release, release_notes task_modules = [check, prepare_release, release_notes]
27
54
0.814815
14
108
5.928571
0.571429
0.289157
0.457831
0.626506
0.746988
0
0
0
0
0
0
0
0.111111
108
3
55
36
0.864583
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
b1db00b7ac5cd8eb115cebadbfebaf7e16931ddb
105
py
Python
veros/diagnostics/__init__.py
AkasDutta/veros
9f530596a0148a398829050017de3e01a71261a0
[ "MIT" ]
115
2019-11-23T02:31:30.000Z
2022-03-29T12:58:30.000Z
veros/diagnostics/__init__.py
AkasDutta/veros
9f530596a0148a398829050017de3e01a71261a0
[ "MIT" ]
207
2019-11-21T13:21:22.000Z
2022-03-31T23:36:09.000Z
veros/diagnostics/__init__.py
AkasDutta/veros
9f530596a0148a398829050017de3e01a71261a0
[ "MIT" ]
21
2020-01-28T13:13:39.000Z
2022-02-02T13:46:33.000Z
from veros.diagnostics.api import create_default_diagnostics, initialize, diagnose, output # noqa: F401
52.5
104
0.828571
13
105
6.538462
0.923077
0
0
0
0
0
0
0
0
0
0
0.031915
0.104762
105
1
105
105
0.87234
0.095238
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b1fdc9483fe1252f4bd266db7f914a9d8427d3b7
1,981
py
Python
web/channels/management/channel.py
vtalks/vtalks.net
80fb19ff9684e0854c6abe5f0eef73e80ec326a6
[ "Apache-2.0" ]
1
2017-11-28T03:17:23.000Z
2017-11-28T03:17:23.000Z
web/channels/management/channel.py
vtalks/vtalks.net
80fb19ff9684e0854c6abe5f0eef73e80ec326a6
[ "Apache-2.0" ]
56
2018-01-14T18:03:03.000Z
2018-06-25T17:59:02.000Z
web/channels/management/channel.py
vtalks/vtalks.net
80fb19ff9684e0854c6abe5f0eef73e80ec326a6
[ "Apache-2.0" ]
null
null
null
from datetime import datetime from django.utils import timezone from channels.models import Channel def create_channel(channel_json_data): """ Create a new Channel into the database """ channel_code = channel_json_data["id"] if "snippet" in channel_json_data: snippet = channel_json_data["snippet"] if "title" in snippet: channel_title = channel_json_data["snippet"]["title"] if "description" in snippet: channel_description = channel_json_data["snippet"]["description"] if "publishedAt" in snippet: published_at = channel_json_data["snippet"]["publishedAt"] datetime_published_at = datetime.strptime(published_at, "%Y-%m-%dT%H:%M:%S.000Z") datetime_published_at = datetime_published_at.replace(tzinfo=timezone.utc) channel_created = datetime_published_at channel = Channel.objects.create(code=channel_code, title=channel_title, description=channel_description, created=channel_created) return channel def update_channel(channel, channel_json_data): """ Updates an existing Channel into the database """ channel.code = channel_json_data["id"] if "snippet" in channel_json_data: snippet = channel_json_data["snippet"] if "title" in snippet: channel.title = channel_json_data["snippet"]["title"] if "description" in snippet: channel.description = channel_json_data["snippet"]["description"] if "publishedAt" in snippet: published_at = channel_json_data["snippet"]["publishedAt"] datetime_published_at = datetime.strptime(published_at, "%Y-%m-%dT%H:%M:%S.000Z") datetime_published_at = datetime_published_at.replace(tzinfo=timezone.utc) channel.created = datetime_published_at channel.save() return channel
38.843137
93
0.650177
221
1,981
5.565611
0.208145
0.125203
0.170732
0.178862
0.744715
0.744715
0.744715
0.744715
0.744715
0.744715
0
0.004065
0.254922
1,981
50
94
39.62
0.829268
0.044927
0
0.5
0
0
0.120598
0.023479
0
0
0
0
0
1
0.055556
false
0
0.083333
0
0.194444
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
5916296b0f17c83705ae5af2f31a44996d61e577
16,049
py
Python
tests/time_integration/test_newton.py
sandialabs/Spitfire
65670e3ba5d1ccb4ac72524b77957706345c5bf6
[ "Apache-2.0" ]
11
2020-03-20T02:10:17.000Z
2021-12-14T10:08:09.000Z
tests/time_integration/test_newton.py
sandialabs/Spitfire
65670e3ba5d1ccb4ac72524b77957706345c5bf6
[ "Apache-2.0" ]
18
2020-03-18T18:58:56.000Z
2021-12-21T02:35:35.000Z
tests/time_integration/test_newton.py
sandialabs/Spitfire
65670e3ba5d1ccb4ac72524b77957706345c5bf6
[ "Apache-2.0" ]
2
2021-05-31T17:24:56.000Z
2021-06-20T05:27:41.000Z
import unittest from spitfire import SimpleNewtonSolver from numpy import abs, imag, any, Inf, zeros, array, NaN from numpy import copy as numpy_copy def direct_residual(fun): def doubled(x, *args, **kwargs): output = fun(x, *args, **kwargs) return output, numpy_copy(output) return doubled def direct_solve(fun): def append_iteration_count_of_one_and_converged(x, *args, **kwargs): output = fun(x, *args, **kwargs) return output, 1, True return append_iteration_count_of_one_and_converged def make_sure_is_real(solution, *args, **kwargs): return True if any(imag(solution)) else False @direct_residual def linear_problem_residual(x, *args, **kwargs): return 2. * x - 1. def linear_problem_jacobian_inverse(x, *args, **kwargs): return 0.5 @direct_solve def linear_problem_solve_free(resid, *args, **kwargs): return 0.5 * resid class LinearProblem(object): def __init__(self): self.lhs_inverse = None def residual(self, x, *args, **kwargs): return linear_problem_residual(x, args, kwargs) def setup(self, x, *args, **kwargs): self.lhs_inverse = linear_problem_jacobian_inverse(x, args, kwargs) @direct_solve def solve(self, resid, *args, **kwargs): return self.lhs_inverse * resid class LinearScalarTest(unittest.TestCase): def test_frozen_jacobian(self): guess = 0.3 solution = 0.5 tolerance = 1.e-12 newton = SimpleNewtonSolver(tolerance=tolerance, max_nonlinear_iter=4, must_converge=True, norm_weighting=1., norm_order=2, raise_naninf=True, custom_solution_check=make_sure_is_real) newton.slowness_detection_iter = 2 # to tests detection of slow convergence output = newton(residual_method=linear_problem_residual, solve_method=linear_problem_solve_free, initial_guess=guess, initial_rhs=linear_problem_residual(guess), setup_method=None) self.assertTrue(abs(output.solution - solution) <= tolerance) self.assertTrue((linear_problem_residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 1) self.assertTrue(output.liter == 1) 
self.assertTrue(output.projector_setups == 0) self.assertTrue(not output.slow_convergence) self.assertTrue(output.converged) def test_active_jacobian(self): problem = LinearProblem() guess = 0.3 solution = 0.5 tolerance = 1.e-12 newton = SimpleNewtonSolver(evaluate_jacobian_every_iter=True, tolerance=tolerance, max_nonlinear_iter=4, must_converge=True, norm_weighting=1., norm_order=Inf, raise_naninf=True, custom_solution_check=make_sure_is_real) newton.slowness_detection_iter = 0 # to tests detection of slow convergence output = newton(residual_method=problem.residual, setup_method=problem.setup, solve_method=problem.solve, initial_guess=guess, initial_rhs=problem.residual(guess)) self.assertTrue(abs(output.solution - solution) <= tolerance) self.assertTrue((problem.residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 1) self.assertTrue(output.liter == output.iter) self.assertTrue(output.projector_setups == output.iter) self.assertTrue(output.converged) self.assertTrue(output.slow_convergence) @direct_residual def quadratic_problem_residual(x, *args, **kwargs): return x * x - 4. def quadratic_problem_jacobian_inverse(x, *args, **kwargs): return 0.5 / x def quadratic_problem_solve_free(resid, x, *args, **kwargs): return quadratic_problem_jacobian_inverse(x) * resid class QuadraticProblem(object): def __init__(self): self.lhs_inverse = None def residual(self, x, *args, **kwargs): return quadratic_problem_residual(x, args, kwargs) def setup(self, x, *args, **kwargs): self.lhs_inverse = quadratic_problem_jacobian_inverse(x, args, kwargs) @direct_solve def solve(self, resid, *args, **kwargs): return self.lhs_inverse * resid class QuadraticScalarTest(unittest.TestCase): def test_frozen_jacobian(self): guess = 1.6 @direct_solve def solve_with_frozen_jacobian(resid, *args, **kwargs): return quadratic_problem_solve_free(resid, guess) solution = 2. 
tolerance = 1.e-12 newton = SimpleNewtonSolver(tolerance=tolerance, max_nonlinear_iter=25, must_converge=True, norm_weighting=1., norm_order=Inf, raise_naninf=True, custom_solution_check=make_sure_is_real) output = newton(residual_method=quadratic_problem_residual, solve_method=solve_with_frozen_jacobian, initial_guess=guess, initial_rhs=quadratic_problem_residual(guess), setup_method=None) self.assertTrue(abs(output.solution - solution) <= tolerance) self.assertTrue((quadratic_problem_residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 20) self.assertTrue(output.liter == output.iter) self.assertTrue(output.converged) def test_active_jacobian(self): problem = QuadraticProblem() guess = 1.6 solution = 2. tolerance = 1.e-12 newton = SimpleNewtonSolver(evaluate_jacobian_every_iter=True, tolerance=tolerance, max_nonlinear_iter=10, must_converge=True, norm_weighting=1., norm_order=Inf, raise_naninf=True, custom_solution_check=make_sure_is_real) output = newton(residual_method=problem.residual, solve_method=problem.solve, setup_method=problem.setup, initial_guess=guess, initial_rhs=problem.residual(guess)) self.assertTrue(abs(output.solution - solution) <= tolerance) self.assertTrue((problem.residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 4) self.assertTrue(output.liter == output.iter) self.assertTrue(output.projector_setups == output.iter) self.assertTrue(output.converged) @direct_residual def cubic_problem_residual(x, *args, **kwargs): return x * x * x - 27. def cubic_problem_jacobian_inverse(x, *args, **kwargs): return 1. / (3. 
* x * x) def cubic_problem_solve_free(resid, x, *args, **kwargs): return cubic_problem_jacobian_inverse(x) * resid class CubicProblem(object): def __init__(self): self.lhs_inverse = None def residual(self, x, *args, **kwargs): return cubic_problem_residual(x, args, kwargs) def setup(self, x, *args, **kwargs): self.lhs_inverse = cubic_problem_jacobian_inverse(x, args, kwargs) @direct_solve def solve(self, resid, *args, **kwargs): return self.lhs_inverse * resid class CubicScalarTest(unittest.TestCase): def test_frozen_jacobian(self): guess = 2.6 @direct_solve def solve_with_frozen_jacobian(resid, *args, **kwargs): return cubic_problem_solve_free(resid, guess) solution = 3. tolerance = 1.e-12 newton = SimpleNewtonSolver(tolerance=tolerance, max_nonlinear_iter=40, must_converge=True, norm_weighting=1., norm_order=Inf, raise_naninf=True, custom_solution_check=make_sure_is_real) output = newton(residual_method=cubic_problem_residual, solve_method=solve_with_frozen_jacobian, initial_guess=guess, initial_rhs=cubic_problem_residual(guess), setup_method=None) self.assertTrue(abs(output.solution - solution) <= tolerance) self.assertTrue((cubic_problem_residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 27) self.assertTrue(output.liter == output.iter) self.assertTrue(output.converged) def test_active_jacobian(self): problem = CubicProblem() guess = 2.6 solution = 3. 
tolerance = 1.e-12 newton = SimpleNewtonSolver(evaluate_jacobian_every_iter=True, tolerance=tolerance, max_nonlinear_iter=10, must_converge=True, norm_weighting=1., norm_order=Inf, raise_naninf=True, custom_solution_check=make_sure_is_real) output = newton(residual_method=problem.residual, solve_method=problem.solve, setup_method=problem.setup, initial_guess=guess, initial_rhs=problem.residual(guess)) self.assertTrue(abs(output.solution - solution) <= tolerance) self.assertTrue((problem.residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 5) self.assertTrue(output.liter == output.iter) self.assertTrue(output.projector_setups == output.iter) self.assertTrue(output.converged) def test_failure_catching(self): guess = 2.6 @direct_solve def solve_with_frozen_jacobian(resid, *args, **kwargs): return cubic_problem_solve_free(resid, guess) solution = 3. tolerance = 1.e-12 newton = SimpleNewtonSolver(tolerance=tolerance, max_nonlinear_iter=40, must_converge=True, norm_weighting=1., norm_order=Inf, raise_naninf=True, custom_solution_check=make_sure_is_real) # tests that if given 0 iterations to converge, it cannot newton.max_nonlinear_iter = 0 try: newton(residual_method=cubic_problem_residual, solve_method=solve_with_frozen_jacobian, initial_guess=guess, initial_rhs=cubic_problem_residual(guess), setup_method=None) self.assertTrue(False, 'Newton incorrectly did not recognize 0 iteration limit') except ValueError: self.assertTrue(True) # tests that if we set must_converge=False, that it doesn't raise an error for not converging in 0 iterations, # and that it marks the failure to converge correctly in the output newton.must_converge = False try: output = newton(residual_method=cubic_problem_residual, solve_method=solve_with_frozen_jacobian, initial_guess=guess, initial_rhs=cubic_problem_residual(guess), setup_method=None) self.assertTrue(not output.converged) except ValueError: self.assertTrue(False, 'Newton incorrectly said it failed to 
converge but we said must_converge=False') newton.max_nonlinear_iter = 40 # reset newton.must_converge = True # reset # tests that if we give it a NaN with raise_naninf=True (set above), it does in fact raise an error guess = NaN try: newton(residual_method=cubic_problem_residual, solve_method=solve_with_frozen_jacobian, initial_guess=guess, initial_rhs=cubic_problem_residual(guess), setup_method=None) self.assertTrue(False, 'Newton ate a NaN and did not catch it despite raise_naninf=True') except ValueError: self.assertTrue(True) # tests that if we give it an Inf with raise_naninf=True (set above), it does in fact raise an error guess = Inf try: newton(residual_method=cubic_problem_residual, solve_method=solve_with_frozen_jacobian, initial_guess=guess, initial_rhs=cubic_problem_residual(guess), setup_method=None) self.assertTrue(False, 'Newton ate an Inf and did not catch it despite raise_naninf=True') except ValueError: self.assertTrue(True) class VectorProblem(object): def __init__(self): self.lhs_inverse = zeros(3) self.norm_weighting = array([1., 2., 3.]) @direct_residual def residual(self, x, *args, **kwargs): return array([linear_problem_residual(x[0], args, kwargs)[0], quadratic_problem_residual(x[1], args, kwargs)[0], cubic_problem_residual(x[2], args, kwargs)[0]]) def setup(self, x, *args, **kwargs): self.lhs_inverse = array([linear_problem_jacobian_inverse(x[0], args, kwargs), quadratic_problem_jacobian_inverse(x[1], args, kwargs), cubic_problem_jacobian_inverse(x[2], args, kwargs)]) @direct_solve def solve(self, resid, *args, **kwargs): return self.lhs_inverse * resid class VectorTest(unittest.TestCase): def test_active_jacobian(self): problem = VectorProblem() guess = array([0.6, 1.6, 2.6]) solution = array([0.5, 2., 3.]) tolerance = 1.e-12 newton = SimpleNewtonSolver(evaluate_jacobian_every_iter=True, tolerance=tolerance, max_nonlinear_iter=10, must_converge=True, norm_weighting=problem.norm_weighting, norm_order=2, raise_naninf=True, 
custom_solution_check=make_sure_is_real) output = newton(residual_method=problem.residual, solve_method=problem.solve, setup_method=problem.setup, initial_guess=guess, initial_rhs=problem.residual(guess)) self.assertTrue((abs(output.solution - solution) <= tolerance).all()) self.assertTrue((problem.residual(output.solution) == output.rhs_at_converged).all()) self.assertTrue(output.iter == 5) self.assertTrue(output.liter == output.iter) self.assertTrue(output.projector_setups == output.iter) self.assertTrue(output.converged) # todo: add tests that cover failure-catching if __name__ == '__main__': unittest.main()
38.12114
118
0.579164
1,672
16,049
5.313995
0.095694
0.078784
0.060777
0.026787
0.82386
0.796961
0.768824
0.744513
0.710636
0.69049
0
0.011874
0.338837
16,049
420
119
38.211905
0.825464
0.034955
0
0.699367
0
0
0.017187
0
0
0
0
0.002381
0.158228
1
0.129747
false
0
0.012658
0.066456
0.246835
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
5920e81e8fc432720afd19512d685a4751ff8168
48
py
Python
pycircuit/sim/__init__.py
michaelnt/pycircuit
ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e
[ "BSD-3-Clause" ]
25
2015-05-13T22:49:26.000Z
2020-03-10T04:13:20.000Z
pycircuit/sim/__init__.py
michaelnt/pycircuit
ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e
[ "BSD-3-Clause" ]
1
2016-11-09T13:09:31.000Z
2016-11-09T13:09:31.000Z
pycircuit/sim/__init__.py
michaelnt/pycircuit
ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e
[ "BSD-3-Clause" ]
9
2016-03-05T11:46:27.000Z
2022-01-19T18:30:55.000Z
from simulation import * from analysis import *
16
24
0.791667
6
48
6.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.166667
48
2
25
24
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
594ce334dfd6a434cfcef6ee41a5e59cd8cd028e
123
py
Python
scraper/code/crawler/spiders/zalora/urls.py
teknokeras/scrapy-splash
011cd9d141c0fde4476ace175fddd753fa944fc0
[ "MIT" ]
null
null
null
scraper/code/crawler/spiders/zalora/urls.py
teknokeras/scrapy-splash
011cd9d141c0fde4476ace175fddd753fa944fc0
[ "MIT" ]
5
2018-11-18T02:44:33.000Z
2022-03-02T14:54:48.000Z
scraper/code/crawler/spiders/zalora/urls.py
teknokeras/scrapy-splash
011cd9d141c0fde4476ace175fddd753fa944fc0
[ "MIT" ]
3
2017-09-24T11:51:01.000Z
2019-12-20T03:50:16.000Z
# Seed URL(s) for the Zalora women's tops category listing.
ZALORA_URLS = [
    'https://www.zalora.co.id/women/pakaian/atasan/?from=header',
]


def get_start_urls():
    """Return the spider's start URLs (the module-level ZALORA_URLS list)."""
    return ZALORA_URLS
24.6
76
0.739837
19
123
4.578947
0.789474
0.229885
0
0
0
0
0
0
0
0
0
0
0.097561
123
4
77
30.75
0.783784
0
0
0
0
0
0.471545
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
3ce80d64bc77c0d8596b73e3390f5c1b8892ee0e
69
py
Python
setup/nuke/nuke_path/menu.py
bumpybox/core
5a24640484f19e48dc12682dae979adc6d41dc0b
[ "MIT" ]
168
2017-06-23T15:50:43.000Z
2022-02-27T10:48:45.000Z
setup/nuke/nuke_path/menu.py
bumpybox/core
5a24640484f19e48dc12682dae979adc6d41dc0b
[ "MIT" ]
366
2017-06-22T08:38:45.000Z
2021-06-19T07:29:06.000Z
setup/nuke/nuke_path/menu.py
bumpybox/core
5a24640484f19e48dc12682dae979adc6d41dc0b
[ "MIT" ]
42
2017-06-23T15:27:26.000Z
2021-09-29T17:28:18.000Z
# Nuke startup menu script: importing this file registers the Avalon
# pipeline's Nuke integration by installing the avalon.nuke host module.
import avalon.api
import avalon.nuke

avalon.api.install(avalon.nuke)
17.25
31
0.826087
11
69
5.181818
0.454545
0.421053
0
0
0
0
0
0
0
0
0
0
0.072464
69
3
32
23
0.890625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
3cfa1c28f463e7ab8836ed822bf9949b28637756
24
py
Python
pose/models/backbones/__init__.py
sithu31296/pose_estimation
b00da09cfaf0ee25cdc900a46ac0a2e2a878f16a
[ "MIT" ]
29
2021-09-14T08:05:28.000Z
2022-03-09T12:53:56.000Z
pose/models/backbones/__init__.py
sithu31296/pose_estimation
b00da09cfaf0ee25cdc900a46ac0a2e2a878f16a
[ "MIT" ]
5
2021-09-27T18:38:36.000Z
2022-01-31T16:28:39.000Z
pose/models/backbones/__init__.py
sithu31296/pose_estimation
b00da09cfaf0ee25cdc900a46ac0a2e2a878f16a
[ "MIT" ]
4
2021-09-17T19:13:44.000Z
2022-03-26T02:38:12.000Z
from .hrnet import HRNet
24
24
0.833333
4
24
5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.125
24
1
24
24
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a713fed90669d81ad3b0fe0991125c5879c75e38
136
py
Python
src/openvibspec/__init__.py
arnrau/VibSpec
89c1a3bdaa6aa7a6f98704442a1ffd25f86d2cc2
[ "BSD-2-Clause" ]
1
2022-03-28T13:24:27.000Z
2022-03-28T13:24:27.000Z
src/openvibspec/__init__.py
arnrau/VibSpec
89c1a3bdaa6aa7a6f98704442a1ffd25f86d2cc2
[ "BSD-2-Clause" ]
null
null
null
src/openvibspec/__init__.py
arnrau/VibSpec
89c1a3bdaa6aa7a6f98704442a1ffd25f86d2cc2
[ "BSD-2-Clause" ]
1
2022-03-28T13:24:29.000Z
2022-03-28T13:24:29.000Z
from __future__ import absolute_import from . import io_ftir from . import ml_ftir from . import preprocessing __version__ = '0.1'
13.6
38
0.772059
19
136
4.947368
0.578947
0.319149
0.297872
0
0
0
0
0
0
0
0
0.017857
0.176471
136
9
39
15.111111
0.821429
0
0
0
0
0
0.022059
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
59576aed2d57612669bb4f42ebed430257908746
86
py
Python
common/serializers/__init__.py
Jenks18/mfl_api
ecbb8954053be06bbcac7e1132811d73534c78d9
[ "MIT" ]
19
2015-04-16T09:37:08.000Z
2022-02-10T11:50:30.000Z
common/serializers/__init__.py
Jenks18/mfl_api
ecbb8954053be06bbcac7e1132811d73534c78d9
[ "MIT" ]
125
2015-03-26T14:05:49.000Z
2020-05-14T08:16:50.000Z
common/serializers/__init__.py
Jenks18/mfl_api
ecbb8954053be06bbcac7e1132811d73534c78d9
[ "MIT" ]
39
2015-04-15T09:17:33.000Z
2022-03-28T18:08:16.000Z
from .serializer_base import * # NOQA from .serializer_declarations import * # NOQA
28.666667
46
0.767442
10
86
6.4
0.6
0.4375
0
0
0
0
0
0
0
0
0
0
0.162791
86
2
47
43
0.888889
0.104651
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
596013a1e2ded91d1f7024405bedebc7a51f2a30
72
py
Python
BioModelsDAG/__init__.py
danielqiang/BioModels
4697ad44d98953e2646f692f3010191febc64747
[ "MIT" ]
1
2021-03-07T13:47:07.000Z
2021-03-07T13:47:07.000Z
BioModelsDAG/__init__.py
danielqiang/BioModels
4697ad44d98953e2646f692f3010191febc64747
[ "MIT" ]
2
2021-03-31T19:53:45.000Z
2021-12-13T20:46:15.000Z
BioModelsDAG/__init__.py
danielqiang/BioModels
4697ad44d98953e2646f692f3010191febc64747
[ "MIT" ]
null
null
null
from .utils import * from .pipeline import * from .classifiers import *
18
26
0.75
9
72
6
0.555556
0.37037
0
0
0
0
0
0
0
0
0
0
0.166667
72
3
27
24
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
596085fe0b62ed5885ce8806004d6dfc76a1895e
4,977
py
Python
tests/test_cases/test_multi_dimension_array/test_cocotb_array.py
lavanyajagan/cocotb
2f98612016e68510e264a2b4963303d3588d8404
[ "BSD-3-Clause" ]
350
2015-01-09T12:50:13.000Z
2019-07-12T09:08:17.000Z
tests/test_cases/test_multi_dimension_array/test_cocotb_array.py
lavanyajagan/cocotb
2f98612016e68510e264a2b4963303d3588d8404
[ "BSD-3-Clause" ]
710
2015-01-05T16:42:29.000Z
2019-07-16T13:40:00.000Z
tests/test_cases/test_multi_dimension_array/test_cocotb_array.py
lavanyajagan/cocotb
2f98612016e68510e264a2b4963303d3588d8404
[ "BSD-3-Clause" ]
182
2015-01-08T09:35:20.000Z
2019-07-12T18:41:37.000Z
import cocotb from cocotb.triggers import Timer @cocotb.test() async def test_in_vect_packed(dut): test_value = 0x5 dut.in_vect_packed.value = test_value await Timer(1, "ns") assert dut.out_vect_packed.value == test_value @cocotb.test() async def test_in_vect_unpacked(dut): test_value = [0x1, 0x0, 0x1] dut.in_vect_unpacked.value = test_value await Timer(1, "ns") assert dut.out_vect_unpacked.value == test_value @cocotb.test() async def test_in_arr(dut): test_value = 0x5 dut.in_arr.value = test_value await Timer(1, "ns") assert dut.out_arr.value == test_value @cocotb.test() async def test_in_2d_vect_packed_packed(dut): test_value = (0x5 << 6) | (0x5 << 3) | 0x5 dut.in_2d_vect_packed_packed.value = test_value await Timer(1, "ns") assert dut.out_2d_vect_packed_packed.value == test_value @cocotb.test() async def test_in_2d_vect_packed_unpacked(dut): test_value = [0x5, 0x5, 0x5] dut.in_2d_vect_packed_unpacked.value = test_value await Timer(1, "ns") assert dut.out_2d_vect_packed_unpacked.value == test_value @cocotb.test() async def test_in_2d_vect_unpacked_unpacked(dut): test_value = 3 * [[0x1, 0x0, 0x1]] dut.in_2d_vect_unpacked_unpacked.value = test_value await Timer(1, "ns") assert dut.out_2d_vect_unpacked_unpacked.value == test_value @cocotb.test() async def test_in_arr_packed(dut): test_value = 365 dut.in_arr_packed.value = test_value await Timer(1, "ns") assert dut.out_arr_packed.value == test_value @cocotb.test() async def test_in_arr_unpacked(dut): test_value = [0x5, 0x5, 0x5] dut.in_arr_unpacked.value = test_value await Timer(1, "ns") assert dut.out_arr_unpacked.value == test_value @cocotb.test() async def test_in_2d_arr(dut): test_value = 365 dut.in_2d_arr.value = test_value await Timer(1, "ns") assert dut.out_2d_arr.value == test_value @cocotb.test() async def test_in_vect_packed_packed_packed(dut): test_value = 95869805 dut.in_vect_packed_packed_packed.value = test_value await Timer(1, "ns") assert dut.out_vect_packed_packed_packed.value == test_value # Questa 
is unable to access elements of a logic array if the last dimension is unpacked (gh-2605) @cocotb.test( expect_error=IndexError if cocotb.LANGUAGE == "verilog" and cocotb.SIM_NAME.lower().startswith("modelsim") else () ) async def test_in_vect_packed_packed_unpacked(dut): test_value = [365, 365, 365] dut.in_vect_packed_packed_unpacked.value = test_value await Timer(1, "ns") assert dut.out_vect_packed_packed_unpacked.value == test_value @cocotb.test() async def test_in_vect_packed_unpacked_unpacked(dut): test_value = 3 * [3 * [5]] dut.in_vect_packed_unpacked_unpacked.value = test_value await Timer(1, "ns") assert dut.out_vect_packed_unpacked_unpacked.value == test_value @cocotb.test() async def test_in_vect_unpacked_unpacked_unpacked(dut): test_value = 3 * [3 * [[1, 0, 1]]] dut.in_vect_unpacked_unpacked_unpacked.value = test_value await Timer(1, "ns") assert dut.out_vect_unpacked_unpacked_unpacked.value == test_value @cocotb.test() async def test_in_arr_packed_packed(dut): test_value = (365 << 18) | (365 << 9) | (365) dut.in_arr_packed_packed.value = test_value await Timer(1, "ns") assert dut.out_arr_packed_packed.value == test_value # Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605) @cocotb.test( expect_error=IndexError if cocotb.LANGUAGE == "verilog" and cocotb.SIM_NAME.lower().startswith("modelsim") else () ) async def test_in_arr_packed_unpacked(dut): test_value = [365, 365, 365] dut.in_arr_packed_unpacked.value = test_value await Timer(1, "ns") assert dut.out_arr_packed_unpacked.value == test_value @cocotb.test() async def test_in_arr_unpacked_unpacked(dut): test_value = 3 * [3 * [5]] dut.in_arr_unpacked_unpacked.value = test_value await Timer(1, "ns") assert dut.out_arr_unpacked_unpacked.value == test_value @cocotb.test() async def test_in_2d_arr_packed(dut): test_value = (365 << 18) | (365 << 9) | (365) dut.in_2d_arr_packed.value = test_value await Timer(1, "ns") assert dut.out_2d_arr_packed.value == test_value 
# Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605) @cocotb.test( expect_error=IndexError if cocotb.LANGUAGE == "verilog" and cocotb.SIM_NAME.lower().startswith("modelsim") else () ) async def test_in_2d_arr_unpacked(dut): test_value = [365, 365, 365] dut.in_2d_arr_unpacked.value = test_value await Timer(1, "ns") assert dut.out_2d_arr_unpacked.value == test_value @cocotb.test() async def test_in_3d_arr(dut): test_value = (365 << 18) | (365 << 9) | (365) dut.in_3d_arr.value = test_value await Timer(1, "ns") assert dut.out_3d_arr.value == test_value
29.276471
98
0.71951
788
4,977
4.227157
0.077411
0.154008
0.159712
0.132092
0.977784
0.945362
0.903933
0.87211
0.861903
0.833684
0
0.04292
0.171388
4,977
169
99
29.449704
0.764791
0.058268
0
0.476563
0
0
0.017724
0
0
0
0.01089
0
0.148438
1
0
false
0
0.015625
0
0.015625
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
abd0bf7154f534a1d79c947e99b27c00f9a3854a
308
py
Python
pymoo/model/repair.py
Electr0phile/pymoo
652428473cc68b6d9deada3792635bc8a831b255
[ "Apache-2.0" ]
11
2018-05-22T17:38:02.000Z
2022-02-28T03:34:33.000Z
pymoo/model/repair.py
Asurada2015/pymoo
023a787d0b78813e789f170a3e94b2de85605aff
[ "Apache-2.0" ]
null
null
null
pymoo/model/repair.py
Asurada2015/pymoo
023a787d0b78813e789f170a3e94b2de85605aff
[ "Apache-2.0" ]
2
2018-05-29T21:16:52.000Z
2021-03-26T10:10:45.000Z
from abc import abstractmethod


class Repair:
    """Hook for repairing individuals after crossover, if necessary.

    Subclasses implement :meth:`_do`; callers invoke :meth:`do`.
    """

    def do(self, problem, pop, **kwargs):
        """Repair *pop* for *problem* by delegating to the concrete
        :meth:`_do` implementation and returning its result."""
        return self._do(problem, pop, **kwargs)

    @abstractmethod
    def _do(self, problem, pop, **kwargs):
        pass
20.533333
76
0.63961
37
308
5.27027
0.621622
0.153846
0.246154
0.164103
0.25641
0.25641
0
0
0
0
0
0
0.256494
308
14
77
22
0.851528
0.233766
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0.142857
0.142857
0.142857
0.714286
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
6
05250b1dcf19b770263328fe07380adc6b5ae3f3
174
py
Python
alg3dpy/tests/test_areas.py
saullocastro/alg3dpy
a3bf5fcef177a869447661166bf4aa7ef4365568
[ "BSD-3-Clause" ]
2
2019-02-05T06:12:27.000Z
2020-03-15T21:37:33.000Z
alg3dpy/tests/test_areas.py
saullocastro/alg3dpy
a3bf5fcef177a869447661166bf4aa7ef4365568
[ "BSD-3-Clause" ]
null
null
null
alg3dpy/tests/test_areas.py
saullocastro/alg3dpy
a3bf5fcef177a869447661166bf4aa7ef4365568
[ "BSD-3-Clause" ]
null
null
null
import numpy as np
from alg3dpy.areas import area_tria


def test_areas():
    # Equilateral triangle with vertices on the coordinate axes: every side
    # has length sqrt(8) = 2*sqrt(2), so the area is (sqrt(3)/4) * 8
    # = 2*sqrt(3) ~= 3.4641016151377557.
    assert np.isclose(area_tria([0, 2, 0], [2, 0, 0], [0, 0, 2]),
                      3.4641016151377557)
19.333333
65
0.626437
29
174
3.655172
0.586207
0.056604
0.056604
0
0
0
0
0
0
0
0
0.201493
0.229885
174
8
66
21.75
0.589552
0
0
0
0
0
0
0
0
0
0
0
0.2
1
0.2
true
0
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
052ebbf4af88cfa99543d5434bc3a0c01518858c
212
py
Python
starters/dockerized-django/default/{{cookiecutter.project_slug}}/config/settings/production.py
GreenDeploy-io/greendeploy-starters
58b4322a401568652a7e64b2ef148711364bd8eb
[ "Apache-2.0" ]
1
2022-02-22T09:05:39.000Z
2022-02-22T09:05:39.000Z
starters/dockerized-django/default/{{cookiecutter.project_slug}}/config/settings/production.py
GreenDeploy-io/greendeploy-starters
58b4322a401568652a7e64b2ef148711364bd8eb
[ "Apache-2.0" ]
1
2022-02-22T05:18:59.000Z
2022-02-22T05:19:10.000Z
starters/dockerized-django/default/{{cookiecutter.project_slug}}/config/settings/production.py
GreenDeploy-io/greendeploy-starters
58b4322a401568652a7e64b2ef148711364bd8eb
[ "Apache-2.0" ]
null
null
null
from .base import * # noqa from .base import env # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["{{ cookiecutter.domain_name }}"])
35.333333
92
0.745283
29
212
5.310345
0.724138
0.233766
0.181818
0
0
0
0
0
0
0
0
0
0.09434
212
5
93
42.4
0.802083
0.325472
0
0
0
0
0.359712
0.172662
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
0561431dc3a941169e4ce06d2ea530fbe0a1e7d6
235
py
Python
eeyore/models/__init__.py
papamarkou/eeyore
4cd9b5a619cd095035aa93f348d1c937629aa8a3
[ "MIT" ]
6
2020-04-22T18:56:46.000Z
2021-09-09T15:57:48.000Z
eeyore/models/__init__.py
papamarkou/eeyore
4cd9b5a619cd095035aa93f348d1c937629aa8a3
[ "MIT" ]
19
2019-11-14T21:22:21.000Z
2020-10-31T16:18:36.000Z
eeyore/models/__init__.py
scidom/eeyore
4cd9b5a619cd095035aa93f348d1c937629aa8a3
[ "MIT" ]
null
null
null
from .bayesian_model import BayesianModel from .distribution_model import DistributionModel from .log_target_model import LogTargetModel from .logistic_regression import LogisticRegression from .mlp import MLP from .model import Model
33.571429
51
0.87234
29
235
6.896552
0.482759
0.22
0
0
0
0
0
0
0
0
0
0
0.102128
235
6
52
39.166667
0.947867
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
058714b5d002b7dd325b16ba045d56251d24b249
3,770
py
Python
tests.py
Holzhaus/python-markdown-full-yaml-metadata
288653ff2ee10e33b1fd262d080783e286d07ed0
[ "MIT" ]
8
2018-11-06T05:40:48.000Z
2021-08-22T10:01:45.000Z
tests.py
Holzhaus/python-markdown-full-yaml-metadata
288653ff2ee10e33b1fd262d080783e286d07ed0
[ "MIT" ]
11
2019-01-23T10:19:06.000Z
2021-07-30T22:17:51.000Z
tests.py
Holzhaus/python-markdown-full-yaml-metadata
288653ff2ee10e33b1fd262d080783e286d07ed0
[ "MIT" ]
3
2019-03-01T15:17:12.000Z
2020-12-29T17:57:01.000Z
import markdown import pytest @pytest.mark.parametrize( "source, expected_meta, expected_body", ( [ """--- title: What is Lorem Ipsum? category: Lorem Ipsum ... Lorem Ipsum is simply dummy text. """, {"title": "What is Lorem Ipsum?", "category": "Lorem Ipsum"}, "<p>Lorem Ipsum is simply dummy text.</p>", ], [ """--- TITLE: Where does it come from? Author: Sivakov Nikita --- Contrary to popular belief, Lorem Ipsum is... """, {"TITLE": "Where does it come from?", "Author": "Sivakov Nikita"}, "<p>Contrary to popular belief, Lorem Ipsum is...</p>", ], ), ) def test_plain_metadata(source, expected_meta, expected_body): md = markdown.Markdown(extensions=["full_yaml_metadata"]) assert md.convert(source) == expected_body assert md.Meta == expected_meta @pytest.mark.parametrize( "source, expected_meta, expected_body", ( [ """--- title: What is Lorem Ipsum? categories: - Lorem Ipsum - Stupid posts ... Lorem Ipsum is simply dummy text. """, { "title": "What is Lorem Ipsum?", "categories": ["Lorem Ipsum", "Stupid posts"], }, "<p>Lorem Ipsum is simply dummy text.</p>", ], [ """--- TITLE: Where does it come from? Authors: - Sivakov Nikita - Another Guy --- Contrary to popular belief, Lorem Ipsum is... """, { "TITLE": "Where does it come from?", "Authors": ["Sivakov Nikita", "Another Guy"], }, "<p>Contrary to popular belief, Lorem Ipsum is...</p>", ], ), ) def test_metadata_with_lists(source, expected_meta, expected_body): md = markdown.Markdown(extensions=["full_yaml_metadata"]) assert md.convert(source) == expected_body assert md.Meta == expected_meta @pytest.mark.parametrize( "source, expected_meta, expected_body", ( [ """--- title: What is Lorem Ipsum? categories: first: Lorem Ipsum second: Stupid posts ... Lorem Ipsum is simply dummy text. """, { "title": "What is Lorem Ipsum?", "categories": { "first": "Lorem Ipsum", "second": "Stupid posts", }, }, "<p>Lorem Ipsum is simply dummy text.</p>", ], [ """--- TITLE: Where does it come from? 
Authors: first: CryptoManiac second: Another Guy --- Contrary to popular belief, Lorem Ipsum is... """, { "TITLE": "Where does it come from?", "Authors": {"first": "CryptoManiac", "second": "Another Guy"}, }, "<p>Contrary to popular belief, Lorem Ipsum is...</p>", ], ), ) def test_metadata_with_dicts(source, expected_meta, expected_body): md = markdown.Markdown(extensions=["full_yaml_metadata"]) assert md.convert(source) == expected_body assert md.Meta == expected_meta @pytest.mark.parametrize( "source, expected_body", ( [ "Lorem Ipsum is simply dummy text.", "<p>Lorem Ipsum is simply dummy text.</p>", ], [ "Contrary to popular belief, Lorem Ipsum is...", "<p>Contrary to popular belief, Lorem Ipsum is...</p>", ], ), ) def test_without_metadata(source, expected_body): md = markdown.Markdown(extensions=["full_yaml_metadata"]) assert md.convert(source) == expected_body assert md.Meta is None def test_meta_is_acceccable_before_parsing(): md = markdown.Markdown(extensions=["full_yaml_metadata"]) assert md.Meta is None
24.480519
78
0.557029
405
3,770
5.071605
0.14321
0.136319
0.093476
0.070107
0.946446
0.938656
0.938656
0.925511
0.894352
0.833009
0
0
0.306631
3,770
153
79
24.640523
0.785769
0
0
0.478723
0
0
0.338554
0
0
0
0
0
0.095745
1
0.053191
false
0
0.021277
0
0.074468
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e98dfc500b20b614da321438c36c2d53b27f97f1
5,066
py
Python
actualidad/views.py
shiminasai/cantera
90f162351e1ad6ffaaf79cf90c361e302ab6e09f
[ "MIT" ]
null
null
null
actualidad/views.py
shiminasai/cantera
90f162351e1ad6ffaaf79cf90c361e302ab6e09f
[ "MIT" ]
null
null
null
actualidad/views.py
shiminasai/cantera
90f162351e1ad6ffaaf79cf90c361e302ab6e09f
[ "MIT" ]
2
2019-04-10T19:45:42.000Z
2019-04-24T17:16:40.000Z
from django.shortcuts import render,redirect from .models import * from evento.models import * from organizaciones.models import Pais import datetime from taggit.models import * from django.db.models import Q, Count # Create your views here. # def list_actualidad(request,template='list_actualidad.html'): # if request.GET.get('buscador'): # q = request.GET['buscador'] # list_object = Actualidad.objects.filter( # Q(tittle__icontains = q) | # Q(tematica__nombre__icontains = q) | # Q(tags__name__icontains = q), # category__in = ['noticias','situacion-regional-genero']).order_by('created_on') # else: # list_object = Actualidad.objects.filter(category__in = ['noticias','situacion-regional-genero']).order_by('created_on') # list_paises = Pais.objects.order_by('nombre') # hoy = datetime.date.today() # prox_eventos = Evento.objects.filter(inicio__gte = hoy).order_by('inicio')[:3] # tags = Actualidad.tags.most_common( extra_filters={'id__in': list_object})[:6] # return render(request, template, locals()) def filtro_pais(request,slug,category,template='list_actualidad.html'): if request.GET.get('buscador'): q = request.GET['buscador'] list_object = Actualidad.objects.filter( Q(pais__icontains = q), category = category,aprobado = True).order_by('created_on') else: list_object = Actualidad.objects.filter(pais__slug = slug, category = category,aprobado = True).order_by('created_on') list_paises = Actualidad.objects.filter(category = category).values_list('pais__nombre','pais__slug').order_by('pais__nombre').distinct('pais__nombre') hoy = datetime.date.today() prox_eventos = Evento.objects.filter(inicio__gte = hoy,aprobado = True).order_by('inicio')[:3] ids = list_object.values_list('id',flat=True) tags = Actualidad.tags.most_common(min_count=2,extra_filters={'id__in': ids})[:6] pais = Pais.objects.get(slug = slug) return render(request, template, locals()) def filtro_categoria(request,category,template='list_actualidad.html'): if request.GET.get('buscador'): q = 
request.GET['buscador'] list_object = Actualidad.objects.filter( Q(tittle__icontains = q) | Q(tematica__nombre__icontains = q) | Q(tags__name__icontains = q), category = category,aprobado = True).order_by('-created_on') else: list_object = Actualidad.objects.filter(category = category,aprobado = True).order_by('-created_on') list_paises = Actualidad.objects.filter(category = category).values_list('pais__nombre','pais__slug').distinct('pais__nombre').order_by('pais__nombre') hoy = datetime.date.today() prox_eventos = Evento.objects.filter(inicio__gte = hoy,aprobado = True).order_by('inicio')[:3] ids = list_object.values_list('id',flat=True) tags = Actualidad.tags.most_common(min_count=2,extra_filters={'id__in': ids})[:6] return render(request, template, locals()) def filtro_tag(request,slug,category,template='list_actualidad.html'): if request.GET.get('buscador'): q = request.GET['buscador'] list_object = Actualidad.objects.filter( Q(tittle__icontains = q) | Q(tematica__nombre__icontains = q) | Q(tags__name__icontains = q), category = category,aprobado = True).order_by('created_on') else: list_object = Actualidad.objects.filter(category = category,tags__slug = slug,aprobado = True).order_by('created_on') list_paises = Actualidad.objects.filter(category = category).values_list('pais__nombre','pais__slug').distinct('pais__nombre').order_by('pais__nombre') hoy = datetime.date.today() prox_eventos = Evento.objects.filter(inicio__gte = hoy,aprobado = True).order_by('inicio')[:3] ids = list_object.values_list('id',flat=True) tags = Actualidad.tags.most_common(min_count=2,extra_filters={'id__in': ids})[:6] return render(request, template, locals()) def detalle_actualidad(request,slug, template = 'detail_actualidad.html'): if request.GET.get('buscador'): q = request.GET['buscador'] object = Actualidad.objects.get(slug = slug) list_object = Actualidad.objects.filter( Q(tittle__icontains = q) | Q(tematica__nombre__icontains = q) | Q(tags__name__icontains = q), category = 
object.category).order_by('created_on') return render(request,'list_actualidad.html',locals()) else: object = Actualidad.objects.get(slug = slug) list_object = Actualidad.objects.filter(category = object.category,aprobado = True).order_by('created_on') list_paises = Actualidad.objects.filter(category = object.category).values_list('pais__nombre','pais__slug').distinct('pais__nombre').order_by('pais__nombre') hoy = datetime.date.today() prox_eventos = Evento.objects.filter(inicio__gte = hoy).order_by('inicio')[:3] ids = list_object.values_list('id',flat=True) tags = Actualidad.tags.most_common(min_count=2,extra_filters={'id__in': ids})[:6] return render(request, template, locals())
45.63964
161
0.710422
655
5,066
5.20458
0.120611
0.041068
0.094456
0.079202
0.85773
0.849516
0.840716
0.828395
0.814022
0.814022
0
0.003242
0.147651
5,066
110
162
46.054545
0.786244
0.160679
0
0.684932
0
0
0.118303
0.005333
0
0
0
0
0
1
0.054795
false
0
0.09589
0
0.219178
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e99c72125e21d2371c1a0468d1afab84a684416c
23,161
py
Python
test_vt_2.py
drobotun/virustotalapi
812c5839f944448548270f94f753b9fe0025b6fb
[ "MIT" ]
null
null
null
test_vt_2.py
drobotun/virustotalapi
812c5839f944448548270f94f753b9fe0025b6fb
[ "MIT" ]
null
null
null
test_vt_2.py
drobotun/virustotalapi
812c5839f944448548270f94f753b9fe0025b6fb
[ "MIT" ]
null
null
null
"""Модуль описывает тесты для проверки методов, реализемых в классе VirusTotalAPI. Константы: API_KEY - должна содержать строку с вашим ключем доступа к API. TEST_TIMEOUT - необходима для проверки методов при превышении времени ожидания ответа от сервера. TEST_FILE_PATH - должен содержать путь к файлу, используемому для тестирования. TEST_HASH - тестовое значение MD5-хэша. TEST_HASH_LIST - тестовый список из 4-х MD5-хэшей. TEST_URL - тестовое значение URL-адреса. TEST_URL_LIST - тестовый список из 4-х URL-адресов. TEST_IP - тестовое значение IP-адреса. TEST_DOMAIN - тестовое значение имени домена. TEST_PROXI - тестовое значение протокола и URL-адрес прокси-сервера (необходимо для моделирования ошибки соединения с сервером). TIME_DELAY - необходима для обеспечения возможности многократной отправки запросов на сервер без превышения лимита. """ import unittest import time from virustotalapi import VirusTotalAPI API_KEY = '<ключ доступа к API virustotal>' TEST_TIMEOUT = 0.05 TEST_FILE_PATH = 'eicar.com' TEST_HASH = '99017f6eebbac24f351415dd410d522d' TEST_HASH_LIST = ('eb5911054939bd90a7448e804e9da52a,' '3cbf18fee357d5a33aab56795238b097,' 'df6c18ddf76bc0240d4ad73068dd4353,' 'b18844bf115530b317a0aed8426efaa4') TEST_URL = 'www.github.com/drobotun' TEST_URL_LIST = ('www.github.com/drobotun\n' 'www.xakep.ru/author/drobotun\n' 'www.habr.com/ru/users/drobotun\n' 'www.virustotal.com/gui/user/drobotun/comments') TEST_IP = '216.239.38.21' TEST_DOMAIN = 'www.virustotal.com' TEST_PROXI = {'http': '10.10.1.10:3128', 'https': '10.10.1.10:1080', 'ftp': '10.10.1.10:3128'} TIME_DELAY = 30 class TestFileReport(unittest.TestCase): """Класс для проверки метода 'file_report'. 
Проверяется: - работа метода при корректных входных параметрах; - работа метода при корректных входных параметрах в виде строки из 4 хэшей; - работа метода при ошибке соединения с сервером; - работа метода при превышении времени ожидания ответа от сервера; - работа метода с использованием неверного ключа доступа к API; - работа метода при некорректных входных параметрах; - работа метода при превышении лимита запросов (более 4-х запросов в минуту). """ def test_file_report_correct_param(self): """Проверка метода 'file_report' с корректными входными параметрами. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_report(TEST_HASH) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_file_report_correct_param_list(self): """Проверка метода 'file_report' с корректными входными параметрами в виде строки из 4 хэшей; """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_report(TEST_HASH_LIST) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_file_report_connection_error(self): """Проверка метода 'file_report' при ошибке соединения с сервером. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_report(TEST_HASH, None, TEST_PROXI) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_CONNECT) def test_file_report_timeout(self): """Проверка метода 'file_report' при превышении времени ожидании ответа от сервера. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_report(TEST_HASH, TEST_TIMEOUT) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_TIMEOUT) def test_file_report_invalid_api_key(self): """Проверка метода 'file_report' с использованием неверного ключа доступа к API. 
""" vt_invalid_api_key = VirusTotalAPI() time.sleep(TIME_DELAY) response = vt_invalid_api_key.file_report(TEST_HASH) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_invalid_api_key.ERROR_HTTP) def test_file_report_incorrect_param(self): """Проверка метода 'file_report' с некорректными входными параметрами. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_report('This is an incorrect hash value') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_file_report_limit(self): """Проверка метода 'file_report' при превышении лмита запросов (более 4-х запросов в минуту). """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_report(TEST_HASH) response = vt_api.file_report(TEST_HASH) response = vt_api.file_report(TEST_HASH) response = vt_api.file_report(TEST_HASH) response = vt_api.file_report(TEST_HASH) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_HTTP) class TestFileScan(unittest.TestCase): """Класс для проверки метода 'file_scan'. Проверяется: - работа метода при корректных входных параметрах (валидный путь к сканируемому файлу); - работа метода при ошибке соединения с сервером; - работа метода при превышении времени ожидания ответа от сервера; - работа метода с использованием неверного ключа доступа к API; - работа метода при отправке файла размером более 32 MB; - работа метода при некорректных входных параметрах (невалидный путь к сканируемому файлу). """ def test_file_scan_correct_param(self): """Проверка метода 'file_scan' при корректных входных параметрах (валидный путь к сканируемому файлу). """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_scan(TEST_FILE_PATH) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_file_scan_connect_error(self): """Проверка метода 'file_scan' при ошибке соединения с сервером. 
""" vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_scan(TEST_FILE_PATH, None, TEST_PROXI) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_CONNECT) def test_file_scan_timeout(self): """Проверка метода 'file_scan' при превышении времени ожидания ответа от сервера. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_scan(TEST_FILE_PATH, TEST_TIMEOUT) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_TIMEOUT) def test_file_scan_invalid_api_key(self): """Проверка метода 'file_scan' при неверном ключе доступа к API. """ vt_invalid_api_key = VirusTotalAPI() time.sleep(TIME_DELAY) response = vt_invalid_api_key.file_scan(TEST_FILE_PATH) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_invalid_api_key.ERROR_HTTP) def test_file_scan_file_size_error(self): """Проверка метода 'file_scan' при отправке файла размером более 32 MB. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_scan('d:/test_file.zip') ## файл более 32 MB time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_HTTP) def test_file_scan_file_name_error(self): """Проверка метода 'file_scan' при некорректных входных параметрах (невалидный путь к сканируемому файлу). """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.file_scan(' ') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_FILE) class TestURLReport(unittest.TestCase): """Класс для проверки метода 'url_report'. 
Проверяется: - работа метода при корректных входных параметрах (валидный URL-адрес); - работа метода при корректных входных параметрах (валидный URL-адрес) с использованием параметра 'scan'; - работа метода при корректных входных параметрах (список из 4-х валидных URL-адресов); - работа метода при корректных входных параметрах (список из 4-х валидных URL-адресов) с использованием параметра 'scan'; - работа метода при ошибке соединения с сервером; - работа метода при превышении времени ожидания ответа от сервера; - работа метода с использованием неверного ключа доступа к API; - работа метода при некорректных входных параметрах; - работа метода при превышении лимита запросов (более 4-х запросов в минуту). """ def test_url_report_correct_param(self): """Проверка метода 'url_report' при корректных входных параметрах. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_report_correct_param_scan(self): """Проверка метода 'url_report' при корректных входных параметрах с использованием параметра 'scan'. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL, '1') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_report_correct_param_list(self): """Проверка метода 'url_report' при корректных входных параметрах в виде списка из 4-х URL-адресов. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL_LIST) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_report_correct_param_list_scan(self): """Проверка метода 'url_report' при корректных входных параметрах в виде списка из 4-х URL-адресов с использованием параметра 'scan'. 
""" vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL_LIST, '1') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_report_connect_error(self): """Проверка метода 'url_report' при ошибке соединения с сервером. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL, 0, None, TEST_PROXI) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_CONNECT) def test_url_report_timeout(self): """Проверка метода 'url_report' при превышении времени ожидания от сервера. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL, 0, TEST_TIMEOUT) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_TIMEOUT) def test_url_report_invalid_api_key(self): """Проверка метода 'url_report' при неверном ключе доступа к API. """ vt_api_invalid_api_key = VirusTotalAPI() time.sleep(TIME_DELAY) response = vt_api_invalid_api_key.url_report(TEST_URL) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api_invalid_api_key.ERROR_HTTP) def test_url_report_incorrect_param(self): """Проверка метода 'url_report' при некорректных входных параметрах. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report('This is an invalid URL value') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_report_limit(self): """Проверка метода 'url_report' при превышении лмита запросов (более 4-х запросов в минуту). 
""" vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_report(TEST_URL) response = vt_api.url_report(TEST_URL) response = vt_api.url_report(TEST_URL) response = vt_api.url_report(TEST_URL) response = vt_api.url_report(TEST_URL) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_HTTP) class TestURLScan(unittest.TestCase): """Класс для проверки метода 'url_scan'. Проверяется: - работа метода при корректных входных параметрах (валидный URL-адрес); - работа метода при корректных входных параметрах (список из 4-х валидных URL-адресов); - работа метода при ошибке соединения с сервером; - работа метода при превышении времени ожидания ответа от сервера; - работа метода с использованием неверного ключа доступа к API; - работа метода при некорректных входных параметрах; - работа метода при превышении лимита запросов (более 4-х запросов в минуту). """ def test_url_scan_correct_param(self): """Проверка метода 'url_scan' при корректных входных параметрах. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_scan(TEST_URL) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_scan_correct_param_list(self): """Проверка метода 'url_scan' при корректных входных параметрах в виде списка из 4-х URL-адресов. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_scan(TEST_URL_LIST) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_scan_connect_error(self): """Проверка метода 'url_scan' при ошибке соединения с сервером. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_scan(TEST_URL, None, TEST_PROXI) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_CONNECT) def test_url_scan_timeout(self): """Проверка метода 'url_scan' при превышении времени ожидания ответа от сервера. 
""" vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_scan(TEST_URL, TEST_TIMEOUT) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_TIMEOUT) def test_url_scan_invalid_api_key(self): """Проверка метода 'url_scan' при неверном ключе доступа к API. """ vt_invalid_api_key = VirusTotalAPI() time.sleep(TIME_DELAY) response = vt_invalid_api_key.url_scan(TEST_URL) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_invalid_api_key.ERROR_HTTP) def test_url_scan_incorrect_param(self): """Проверка метода 'url_scan' при некорректных входных параметрах. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_scan('This is an invalid URL value') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_url_scan_limit(self): """Проверка метода 'url_scan' при превышении лмита запросов (более 4-х запросов в минуту). """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.url_scan(TEST_URL) response = vt_api.url_scan(TEST_URL) response = vt_api.url_scan(TEST_URL) response = vt_api.url_scan(TEST_URL) response = vt_api.url_scan(TEST_URL) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_HTTP) class TestIPReport(unittest.TestCase): """Класс для проверки метода 'ip_report'. Проверяется: - работа метода при корректных входных параметрах (валидный IP-адрес); - работа метода при ошибке соединения с сервером; - работа метода при превышении времени ожидания ответа от сервера; - работа метода с использованием неверного ключа доступа к API; - работа метода при некорректных входных параметрах; - работа метода при превышении лимита запросов (более 4-х запросов в минуту). """ def test_ip_report_correct_param(self): """Проверка метода 'ip_report' при корректных входных параметрах. 
""" vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.ip_report(TEST_IP) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_ip_report_connect_error(self): """Проверка метода 'ip_report' при ошибке соединения с сервером. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.ip_report(TEST_IP, None, TEST_PROXI) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_CONNECT) def test_ip_report_timeout(self): """Проверка метода 'ip_report' при превышении времени ожидания ответа от сервера. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.ip_report(TEST_IP, TEST_TIMEOUT) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_TIMEOUT) def test_ip_report_invalid_api_key(self): """Проверка метода 'ip_report' при неверном ключе доступа к API. """ vt_invalid_api_key = VirusTotalAPI() time.sleep(TIME_DELAY) response = vt_invalid_api_key.ip_report(TEST_IP) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_invalid_api_key.ERROR_HTTP) def test_ip_report_incorrect_param(self): """Проверка метода 'url_report' при некорректных входных параметрах. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.ip_report('This is an invalid IP value') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_ip_report_limit(self): """Проверка метода 'ip_report' при превышении лмита запросов (более 4-х запросов в минуту). """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.ip_report(TEST_IP) response = vt_api.ip_report(TEST_IP) response = vt_api.ip_report(TEST_IP) response = vt_api.ip_report(TEST_IP) response = vt_api.ip_report(TEST_IP) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_HTTP) class TestDpmainReport(unittest.TestCase): """Класс для проверки метода 'domain_report'. 
Проверяется: - работа метода при корректных входных параметрах (валидное имя домена); - работа метода при ошибке соединения с сервером; - работа метода при превышении времени ожидания ответа от сервера; - работа метода с использованием неверного ключа доступа к API; - работа метода при некорректных входных параметрах; - работа метода при превышении лимита запросов (более 4-х запросов в минуту). """ def test_domain_report_correct_param(self): """Проверка метода 'domain_report' при корректных входных параметрах. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.domain_report(TEST_DOMAIN) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_domain_report_connection_error(self): """Проверка метода 'domain_report' при ошибке соединения с сервером. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.domain_report(TEST_DOMAIN, None, TEST_PROXI) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_CONNECT) def test_domain_report_timeout(self): """Проверка метода 'domain_report' при превышении времени ожидания ответа от сервера. """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.domain_report(TEST_DOMAIN, TEST_TIMEOUT) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_TIMEOUT) def test_domain_report_invalid_api_key(self): """Проверка метода 'domain_report' при неверном ключе доступа к API. """ vt_invalid_api_key = VirusTotalAPI() time.sleep(TIME_DELAY) response = vt_invalid_api_key.domain_report(TEST_DOMAIN) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_invalid_api_key.ERROR_HTTP) def test_domain_report_incorrect_param(self): """Проверка метода 'domain_report' при некорректных входных параметрах. 
""" vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.domain_report('This is an invalid domain value') time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_SUCCESS) def test_domain_report_limit(self): """Проверка метода 'idomain_report' при превышении лмита запросов (более 4-х запросов в минуту). """ vt_api = VirusTotalAPI(API_KEY) time.sleep(TIME_DELAY) response = vt_api.domain_report(TEST_DOMAIN) response = vt_api.domain_report(TEST_DOMAIN) response = vt_api.domain_report(TEST_DOMAIN) response = vt_api.domain_report(TEST_DOMAIN) response = vt_api.domain_report(TEST_DOMAIN) time.sleep(TIME_DELAY) self.assertEqual(response['error_code'], vt_api.ERROR_HTTP) if __name__ == '__main__': unittest.main()
41.358929
84
0.65878
2,832
23,161
5.116879
0.068503
0.044165
0.073563
0.101856
0.902077
0.887585
0.84411
0.769098
0.756124
0.722034
0
0.010534
0.258149
23,161
559
85
41.432916
0.832848
0.319675
0
0.645485
0
0
0.073193
0.022642
0
0
0
0
0.137124
1
0.137124
false
0
0.010033
0
0.167224
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e9b56bba309413d939e5d36d10f5bceefc6e8616
7,791
py
Python
nginx/migrations/0004_auto_20200806_0837.py
rockychen-dpaw/it-assets
92ec23c6a413c5c45bb3d96981d6af68535d225c
[ "Apache-2.0" ]
4
2018-11-16T13:49:49.000Z
2021-08-19T05:16:50.000Z
nginx/migrations/0004_auto_20200806_0837.py
rockychen-dpaw/it-assets
92ec23c6a413c5c45bb3d96981d6af68535d225c
[ "Apache-2.0" ]
10
2018-07-06T09:34:56.000Z
2022-01-28T06:09:05.000Z
nginx/migrations/0004_auto_20200806_0837.py
rockychen-dpaw/it-assets
92ec23c6a413c5c45bb3d96981d6af68535d225c
[ "Apache-2.0" ]
9
2018-05-05T23:29:10.000Z
2020-06-26T02:29:17.000Z
# Generated by Django 2.2.14 on 2020-08-06 00:37 import django.contrib.postgres.fields from django.db import migrations, models import django.db.models.deletion import nginx.models class Migration(migrations.Migration): dependencies = [ ('nginx', '0003_auto_20200721_1129'), ] operations = [ migrations.CreateModel( name='RequestParameterFilter', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('filter_code', models.CharField(help_text="A lambda function with two parameters 'webserver' and 'request_path'", max_length=512, unique=True)), ('included_parameters', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=64), blank=True, help_text='The list of parameters', null=True, size=None)), ('excluded_parameters', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=64), blank=True, help_text='The list of parameters excluded from the request parameters', null=True, size=None)), ('case_insensitive', models.BooleanField(default=True)), ('order', models.PositiveSmallIntegerField(default=0, help_text='The order to find the filter rule, high order means hight priority')), ('changed', models.DateTimeField(auto_now=True, help_text='The last time when the filter was changed')), ('applied', models.DateTimeField(editable=False, help_text='The last time when the filter was applied to the existed data', null=True)), ], ), migrations.CreateModel( name='RequestPathNormalizer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('filter_code', models.CharField(help_text="A lambda function with two parameters 'webserver' and 'request_path'", max_length=512, unique=True)), ('normalize_code', models.TextField(help_text="The source code of the module which contains a method 'def normalize(request_path)' to return a normalized request path", unique=True)), ('order', models.PositiveSmallIntegerField(default=0, 
help_text='The order to find the filter rule, high order means hight priority')), ('changed', models.DateTimeField(auto_now=True, help_text='The last time when the filter was changed')), ('applied', models.DateTimeField(editable=False, help_text='The last time when the filter was applied to the existed data', null=True)), ], ), migrations.AddField( model_name='webapplocation', name='score', field=models.PositiveIntegerField(default=0, editable=False), ), migrations.CreateModel( name='WebAppAccessLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('log_starttime', models.DateTimeField(editable=False)), ('log_endtime', models.DateTimeField(editable=False)), ('webserver', models.CharField(editable=False, max_length=256)), ('request_path', models.CharField(editable=False, max_length=512)), ('path_parameters', models.TextField(editable=False, null=True)), ('all_path_parameters', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=64), editable=False, null=True, size=None)), ('http_status', models.PositiveIntegerField(editable=False)), ('requests', models.PositiveIntegerField(editable=False)), ('max_response_time', models.FloatField(editable=False)), ('min_response_time', models.FloatField(editable=False)), ('avg_response_time', models.FloatField(editable=False)), ('total_response_time', models.FloatField(editable=False)), ('webapp', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='logs', to='nginx.WebApp')), ('webapplocation', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='logs', to='nginx.WebAppLocation')), ], options={ 'unique_together': {('log_starttime', 'webserver', 'request_path', 'http_status', 'path_parameters')}, 'index_together': {('webapp', 'webapplocation'), ('log_starttime', 'webapp', 'webapplocation')}, }, bases=(nginx.models.PathParametersMixin, 
models.Model), ), migrations.CreateModel( name='WebAppAccessDailyReport', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('log_day', models.DateField(editable=False)), ('webserver', models.CharField(editable=False, max_length=256)), ('requests', models.PositiveIntegerField(default=0, editable=False)), ('success_requests', models.PositiveIntegerField(default=0, editable=False)), ('error_requests', models.PositiveIntegerField(default=0, editable=False)), ('unauthorized_requests', models.PositiveIntegerField(default=0, editable=False)), ('timeout_requests', models.PositiveIntegerField(default=0, editable=False)), ('webapp', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='dailyreports', to='nginx.WebApp')), ], options={ 'unique_together': {('log_day', 'webserver')}, 'index_together': {('log_day', 'webapp'), ('webapp',)}, }, ), migrations.CreateModel( name='WebAppAccessDailyLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('log_day', models.DateField(editable=False)), ('webserver', models.CharField(editable=False, max_length=256)), ('request_path', models.CharField(editable=False, max_length=512)), ('path_parameters', models.TextField(editable=False, null=True)), ('all_path_parameters', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=64), editable=False, null=True, size=None)), ('http_status', models.PositiveIntegerField(editable=False)), ('requests', models.PositiveIntegerField(editable=False)), ('max_response_time', models.FloatField(editable=False)), ('min_response_time', models.FloatField(editable=False)), ('avg_response_time', models.FloatField(editable=False)), ('total_response_time', models.FloatField(editable=False)), ('webapp', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='dailylogs', 
to='nginx.WebApp')), ('webapplocation', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='dailylogs', to='nginx.WebAppLocation')), ], options={ 'unique_together': {('log_day', 'webserver', 'request_path', 'http_status', 'path_parameters')}, 'index_together': {('webapp', 'webapplocation'), ('log_day', 'webapp', 'webapplocation')}, }, bases=(nginx.models.PathParametersMixin, models.Model), ), ]
68.946903
234
0.640483
806
7,791
6.038462
0.184864
0.1015
0.020341
0.038833
0.818574
0.809945
0.792275
0.722827
0.695706
0.695706
0
0.01142
0.22449
7,791
112
235
69.5625
0.794108
0.005904
0
0.584906
1
0.009434
0.242154
0.017306
0
0
0
0
0
1
0
false
0
0.037736
0
0.066038
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e9b6c71a251608df636becca80962f0454867b53
42
py
Python
nifstd/complete/context_bug_import.py
tmsincomb/pyontutils
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
[ "MIT" ]
11
2017-05-12T08:50:03.000Z
2022-01-22T20:23:25.000Z
nifstd/complete/context_bug_import.py
tmsincomb/pyontutils
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
[ "MIT" ]
81
2016-02-25T07:39:15.000Z
2022-02-17T20:20:27.000Z
nifstd/complete/context_bug_import.py
tmsincomb/pyontutils
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
[ "MIT" ]
257
2017-07-18T19:32:22.000Z
2022-02-03T17:26:18.000Z
#!/usr/bin/env python3 import context_bug
14
22
0.785714
7
42
4.571429
1
0
0
0
0
0
0
0
0
0
0
0.026316
0.095238
42
2
23
21
0.815789
0.5
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
75735a7a113c8ab46e2ac826b7a8b244cc434618
118
py
Python
EduSim/Envs/KSS/Agent.py
bigdata-ustc/EduSim
849eed229c24615e5f2c3045036311e83c22ea68
[ "MIT" ]
18
2019-11-11T03:45:35.000Z
2022-02-09T15:31:51.000Z
EduSim/Envs/KSS/Agent.py
ghzhao78506/EduSim
cb10e952eb212d8a9344143f889207b5cd48ba9d
[ "MIT" ]
3
2020-10-23T01:05:57.000Z
2021-03-16T12:12:24.000Z
EduSim/Envs/KSS/Agent.py
bigdata-ustc/EduSim
849eed229c24615e5f2c3045036311e83c22ea68
[ "MIT" ]
6
2020-06-09T21:32:00.000Z
2022-03-12T00:25:18.000Z
# coding: utf-8 # 2020/5/8 @ tongshiwei from EduSim.SimOS import RandomAgent class KSSAgent(RandomAgent): pass
13.111111
36
0.728814
16
118
5.375
0.875
0
0
0
0
0
0
0
0
0
0
0.072165
0.177966
118
8
37
14.75
0.814433
0.29661
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
75badc607adad4cfd793eb90dfcea78a1162ba3f
203
py
Python
libsquiggly/analysis/__init__.py
staticfloat/libsquiggly
79c63c119a60e2e9c558aefcda6b1c1ac413a47a
[ "MIT" ]
null
null
null
libsquiggly/analysis/__init__.py
staticfloat/libsquiggly
79c63c119a60e2e9c558aefcda6b1c1ac413a47a
[ "MIT" ]
null
null
null
libsquiggly/analysis/__init__.py
staticfloat/libsquiggly
79c63c119a60e2e9c558aefcda6b1c1ac413a47a
[ "MIT" ]
null
null
null
# Only export very specific items from .matched_filter import energy, matched_filter, subsample_matched_filter from .rolling_abs_mean import rolling_abs_mean from .peak_suppression import suppress_peaks
40.6
76
0.871921
29
203
5.758621
0.62069
0.233533
0.167665
0
0
0
0
0
0
0
0
0
0.098522
203
4
77
50.75
0.912568
0.152709
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
75c577a0b6b5ed412cf0b29aefbfbddd9f219408
125
py
Python
deep/computer_vision/CycleGan/__init__.py
Jwuthri/DeepAlgos
cd1062a0339f6bf68fcdef26ade1c612ac6983f9
[ "MIT" ]
null
null
null
deep/computer_vision/CycleGan/__init__.py
Jwuthri/DeepAlgos
cd1062a0339f6bf68fcdef26ade1c612ac6983f9
[ "MIT" ]
null
null
null
deep/computer_vision/CycleGan/__init__.py
Jwuthri/DeepAlgos
cd1062a0339f6bf68fcdef26ade1c612ac6983f9
[ "MIT" ]
null
null
null
""" Cycle Gan: Cycle Gan can generate photos from paintings, turn horses into zebras, perform style transfer, and more. """
125
125
0.744
18
125
5.166667
0.888889
0.172043
0
0
0
0
0
0
0
0
0
0
0.168
125
1
125
125
0.894231
0.928
0
null
1
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
1
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
f9bb12a5791628a11cf25a690d258e56174ad67c
244
py
Python
lemon/models.py
lemon-chat/lemon-server-python
5947b52b3c4535ae54fe2705a830db07fdaf741d
[ "MIT" ]
null
null
null
lemon/models.py
lemon-chat/lemon-server-python
5947b52b3c4535ae54fe2705a830db07fdaf741d
[ "MIT" ]
null
null
null
lemon/models.py
lemon-chat/lemon-server-python
5947b52b3c4535ae54fe2705a830db07fdaf741d
[ "MIT" ]
null
null
null
from .application import mongo class User(mongo.Document): userid = mongo.IntField(required=True) username = mongo.StringField(max_length=50) email = mongo.StringField(max_length=50) password = mongo.StringField(max_length=50)
30.5
47
0.754098
31
244
5.83871
0.580645
0.265193
0.314917
0.414365
0.447514
0
0
0
0
0
0
0.028708
0.143443
244
7
48
34.857143
0.837321
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.166667
0.166667
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
6
ddb4ab4500bc9f30daf984e73a94338da9e21197
59
py
Python
home/web/__init__.py
keaneokelley/home
e58e86d4490a5b1b0be77537604aac4a579af839
[ "MIT" ]
null
null
null
home/web/__init__.py
keaneokelley/home
e58e86d4490a5b1b0be77537604aac4a579af839
[ "MIT" ]
null
null
null
home/web/__init__.py
keaneokelley/home
e58e86d4490a5b1b0be77537604aac4a579af839
[ "MIT" ]
null
null
null
from home.web.events import * from home.web.fido2 import *
19.666667
29
0.762712
10
59
4.5
0.6
0.355556
0.488889
0
0
0
0
0
0
0
0
0.019608
0.135593
59
2
30
29.5
0.862745
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
fb142fa42e5346d6ae61ef72e7421f1694796f13
47
py
Python
digsigserver/signers/__init__.py
ichergui/digsigserver
dbd865c2bafe69510f4da41e473e4a3adb037f3e
[ "MIT" ]
9
2020-01-17T01:03:13.000Z
2022-03-14T16:47:22.000Z
digsigserver/signers/__init__.py
ichergui/digsigserver
dbd865c2bafe69510f4da41e473e4a3adb037f3e
[ "MIT" ]
7
2020-04-19T18:41:47.000Z
2021-11-30T20:52:59.000Z
digsigserver/signers/__init__.py
ichergui/digsigserver
dbd865c2bafe69510f4da41e473e4a3adb037f3e
[ "MIT" ]
5
2020-10-28T20:40:07.000Z
2022-03-28T13:52:43.000Z
from digsigserver.signers.signer import Signer
23.5
46
0.87234
6
47
6.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.085106
47
1
47
47
0.953488
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
34a2d7933d2e1baf938d76046b21cbdc53fabfc7
75
py
Python
cnsenti/__init__.py
edddyeddy/cnsenti
049ffbc41c5c22f112a66350216d13d3f6d9809e
[ "MIT" ]
140
2020-03-27T11:37:37.000Z
2021-05-14T01:01:29.000Z
cnsenti/__init__.py
edddyeddy/cnsenti
049ffbc41c5c22f112a66350216d13d3f6d9809e
[ "MIT" ]
4
2020-04-12T02:47:53.000Z
2021-05-07T11:15:25.000Z
cnsenti/__init__.py
edddyeddy/cnsenti
049ffbc41c5c22f112a66350216d13d3f6d9809e
[ "MIT" ]
32
2020-04-07T09:09:45.000Z
2021-04-28T15:53:24.000Z
from cnsenti.emotion import Emotion from cnsenti.sentiment import Sentiment
37.5
39
0.88
10
75
6.6
0.5
0.333333
0
0
0
0
0
0
0
0
0
0
0.093333
75
2
39
37.5
0.970588
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
34f4da0d27cea2d1bb00df108dabc2800525c397
68
py
Python
dice_tools/__init__.py
dicehub/dice_tools
7b3e70103180e64ddcf0f3de95cdbaa3e8019978
[ "MIT" ]
null
null
null
dice_tools/__init__.py
dicehub/dice_tools
7b3e70103180e64ddcf0f3de95cdbaa3e8019978
[ "MIT" ]
null
null
null
dice_tools/__init__.py
dicehub/dice_tools
7b3e70103180e64ddcf0f3de95cdbaa3e8019978
[ "MIT" ]
null
null
null
from ._types import * from ._client import * from ._wizard import *
17
22
0.735294
9
68
5.222222
0.555556
0.425532
0
0
0
0
0
0
0
0
0
0
0.176471
68
3
23
22.666667
0.839286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
34f5ab11c5d015dd5b936125bd8d51dfe8e384d3
6,562
py
Python
scenarios/twolink_epp/plot_epp_comparison.py
jwallnoefer/multisat_qrepeater_sim_archive
69b4c242fb760cf195871f38b3172d4dfd26c01a
[ "MIT" ]
null
null
null
scenarios/twolink_epp/plot_epp_comparison.py
jwallnoefer/multisat_qrepeater_sim_archive
69b4c242fb760cf195871f38b3172d4dfd26c01a
[ "MIT" ]
null
null
null
scenarios/twolink_epp/plot_epp_comparison.py
jwallnoefer/multisat_qrepeater_sim_archive
69b4c242fb760cf195871f38b3172d4dfd26c01a
[ "MIT" ]
null
null
null
import os, sys; sys.path.insert(0, os.path.abspath(".")) import numpy as np import matplotlib.pyplot as plt # x_base = np.arange(1000, 401000, 1000) / 1000 # L_ATT = 22 * 10**3 / 1000 # attenuation length in km # eta = np.exp(-x_base / L_ATT) # y_repeaterless = 10 * np.log10(-np.log2(1 - eta)) # y_optimal = 10 * np.log10(np.sqrt(eta)) # y_realistic_repeaterless1 = 10 * np.log10(0.7 * eta / 2) # y_realistic_repeaterless2 = 10 * np.log10(0.1 * eta / 2) # # result_path = os.path.join("results", "two_link_epp", "investigate") # # path_without = os.path.join(result_path, "without_epp") # length_list_without = np.loadtxt(os.path.join(path_without, "length_list.txt")) / 1000 # skr_without = 10 * np.log10(np.loadtxt(os.path.join(path_without, "key_per_resource_list.txt"), dtype=np.complex).astype(np.float) / 2) # # path_with = os.path.join(result_path, "with_epp") # length_list_with = np.loadtxt(os.path.join(path_with, "length_list.txt")) / 1000 # skr_with = 10 * np.log10(np.loadtxt(os.path.join(path_with, "key_per_resource_list.txt"), dtype=np.complex).astype(np.float) / 2) # # # plt.plot(x_base, y_repeaterless, color="black") # plt.plot(x_base, y_optimal, color="gray") # plt.fill_between(x_base, y_repeaterless, y_optimal, facecolor="lightgray") # plt.plot(x_base, y_realistic_repeaterless1, color="black", linestyle="dashed") # plt.plot(x_base, y_realistic_repeaterless2, color="black", linestyle="dashed") # # plt.scatter(length_list_without, skr_without, label="without epp") # plt.scatter(length_list_with, skr_with, label="with 1 epp-step") # plt.xlim((0, 400)) # plt.ylim((-60, 0)) # plt.grid() # plt.legend() # plt.xlabel("L [km]") # plt.ylabel("secret key rate per channel use [dB]") # plt.title("EPP comparison with bad memories and no cutoff_time") # plt.savefig(os.path.join(result_path, "epp_comparison.png")) # plt.show() # # ex_without = np.loadtxt(os.path.join(path_without, "ex_list.txt"), dtype=np.complex) # ex_with = np.loadtxt(os.path.join(path_with, "ex_list.txt"), 
dtype=np.complex) # ez_without = np.loadtxt(os.path.join(path_without, "ez_list.txt"), dtype=np.complex) # ez_with = np.loadtxt(os.path.join(path_with, "ez_list.txt"), dtype=np.complex) # fidelity_without = np.loadtxt(os.path.join(path_without, "average_fidelities.txt"), dtype=np.complex) # fidelity_with = np.loadtxt(os.path.join(path_with, "average_fidelities.txt"), dtype=np.complex) # resources_without = np.loadtxt(os.path.join(path_without, "average_resources.txt"), dtype=np.complex) # resources_with = np.loadtxt(os.path.join(path_with, "average_resources.txt"), dtype=np.complex) # # plt.plot(length_list_without, ex_without, label="ex_without") # plt.plot(length_list_with, ex_with, label="ex_with") # plt.plot(length_list_without, ez_without, label="ez_without") # plt.plot(length_list_with, ez_with, label="ez_with") # plt.grid() # plt.legend() # plt.show() # # plt.plot(length_list_without, fidelity_without, label="fidelity_without") # plt.plot(length_list_with, fidelity_with, label="fidelity_with") # plt.grid() # plt.legend() # plt.show() # # plt.plot(length_list_without, resources_without, label="resources_without") # plt.plot(length_list_with, resources_with, label="resources_with") # plt.grid() # plt.legend() # plt.yscale("log") # plt.show() # # from libs.aux_functions import binary_entropy # h = np.vectorize(binary_entropy, otypes=[np.float]) # should_without = 1 / resources_without * (1 - h(ex_without) - h(ez_without)) # should_with = 1 / resources_with * (1 - h(ex_with) - h(ez_with)) # # # plt.scatter(length_list_without, np.loadtxt(os.path.join(path_without, "key_per_resource_list.txt"), dtype=np.complex), label="without epp") # plt.scatter(length_list_with, np.loadtxt(os.path.join(path_with, "key_per_resource_list.txt"), dtype=np.complex), label="with 1 epp-step") # plt.plot(length_list_without, should_without) # plt.plot(length_list_with, should_with) # # plt.xlim((0, 400)) # # plt.ylim((-60, 0)) # plt.grid() # plt.legend() # plt.xlabel("L [km]") # # 
plt.ylabel("secret key rate per channel use [dB]") # plt.title("EPP comparison with bad memories and no cutoff_time") # plt.show() ################################## # plot dephasing time comparison result_path = os.path.join("results", "two_link_epp", "investigate_t_dp") path_without = os.path.join(result_path, "without_epp") t_dp_list_without = np.loadtxt(os.path.join(path_without, "t_dp_list.txt")) skr_without = 10 * np.log10(np.loadtxt(os.path.join(path_without, "key_per_resource_list.txt"), dtype=np.complex).astype(np.float) / 2) path_with = os.path.join(result_path, "with_epp") t_dp_list_with = np.loadtxt(os.path.join(path_with, "t_dp_list.txt")) skr_with = 10 * np.log10(np.loadtxt(os.path.join(path_with, "key_per_resource_list.txt"), dtype=np.complex).astype(np.float) / 2) plt.scatter(t_dp_list_without, skr_without, label="without epp") plt.scatter(t_dp_list_with, skr_with, label="with 1 epp-step") # plt.xlim((0, 400)) # plt.ylim((-60, 0)) plt.grid() plt.legend() plt.xlabel("dephasing time [s]") plt.ylabel("secret key rate per channel use [dB]") plt.title("EPP comparison with variable memories and no cutoff_time") plt.savefig(os.path.join(result_path, "epp_comparison.png")) plt.show() ex_without = np.loadtxt(os.path.join(path_without, "ex_list.txt"), dtype=np.complex) ex_with = np.loadtxt(os.path.join(path_with, "ex_list.txt"), dtype=np.complex) ez_without = np.loadtxt(os.path.join(path_without, "ez_list.txt"), dtype=np.complex) ez_with = np.loadtxt(os.path.join(path_with, "ez_list.txt"), dtype=np.complex) fidelity_without = np.loadtxt(os.path.join(path_without, "average_fidelities.txt"), dtype=np.complex) fidelity_with = np.loadtxt(os.path.join(path_with, "average_fidelities.txt"), dtype=np.complex) resources_without = np.loadtxt(os.path.join(path_without, "average_resources.txt"), dtype=np.complex) resources_with = np.loadtxt(os.path.join(path_with, "average_resources.txt"), dtype=np.complex) plt.plot(t_dp_list_without, ex_without, label="ex_without") 
plt.plot(t_dp_list_with, ex_with, label="ex_with") plt.plot(t_dp_list_without, ez_without, label="ez_without") plt.plot(t_dp_list_with, ez_with, label="ez_with") plt.grid() plt.legend() plt.show() plt.plot(t_dp_list_without, fidelity_without, label="fidelity_without") plt.plot(t_dp_list_with, fidelity_with, label="fidelity_with") plt.grid() plt.legend() plt.show() plt.plot(t_dp_list_without, resources_without, label="resources_without") plt.plot(t_dp_list_with, resources_with, label="resources_with") plt.grid() plt.legend() plt.yscale("log") plt.show()
44.945205
142
0.739866
1,078
6,562
4.268089
0.112245
0.045642
0.073897
0.084764
0.855901
0.812649
0.78722
0.784612
0.778961
0.635514
0
0.018676
0.086102
6,562
145
143
45.255172
0.748541
0.596464
0
0.266667
0
0
0.203094
0.053947
0
0
0
0
0
1
0
false
0
0.066667
0
0.066667
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
550af900aa3811b46dff075edaebba0da3fbe8c7
113
py
Python
labelshift/calibrate/__init__.py
labelshift/labelshift
d5d6a06ef435a7fca96be7bbef415e52fb5235b4
[ "BSD-3-Clause" ]
null
null
null
labelshift/calibrate/__init__.py
labelshift/labelshift
d5d6a06ef435a7fca96be7bbef415e52fb5235b4
[ "BSD-3-Clause" ]
null
null
null
labelshift/calibrate/__init__.py
labelshift/labelshift
d5d6a06ef435a7fca96be7bbef415e52fb5235b4
[ "BSD-3-Clause" ]
null
null
null
"""Classifier calibration techniques. https://arxiv.org/pdf/1902.06977.pdf https://arxiv.org/abs/1706.04599 """
18.833333
37
0.743363
16
113
5.25
0.75
0.238095
0.309524
0
0
0
0
0
0
0
0
0.169811
0.061947
113
5
38
22.6
0.622642
0.929204
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
550bc387f9291dd0f1bd04bf878d4f3f2ea7e54e
284
py
Python
rlbox/testbed/__init__.py
ocraft/rl-sandbox
fba6571545cf040829998ba4cd9009a15ac1bbdd
[ "MIT" ]
2
2019-03-23T17:52:39.000Z
2019-03-29T17:29:52.000Z
rlbox/testbed/__init__.py
ocraft/rl-sandbox
fba6571545cf040829998ba4cd9009a15ac1bbdd
[ "MIT" ]
null
null
null
rlbox/testbed/__init__.py
ocraft/rl-sandbox
fba6571545cf040829998ba4cd9009a15ac1bbdd
[ "MIT" ]
2
2020-05-19T21:32:52.000Z
2020-09-30T09:28:45.000Z
import rlbox.testbed.config import rlbox.testbed.narmedbandit import rlbox.testbed.car_rental import rlbox.testbed.gambler import rlbox.testbed.racetrack import rlbox.testbed.windy_gridworld import rlbox.testbed.nstep_sarsa import rlbox.testbed.maze import rlbox.testbed.mountain_car
28.4
36
0.873239
40
284
6.1
0.375
0.405738
0.663934
0
0
0
0
0
0
0
0
0
0.06338
284
9
37
31.555556
0.917293
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9b8a85ece09bc3ac3e9e36a04ff1d7a46effbd0e
25
py
Python
anchor/__init__.py
ysenarath/anchor
63dc0cd7aabda4a15c4c9f4b63089a36c5b0b97b
[ "Apache-2.0" ]
1
2021-11-14T16:01:48.000Z
2021-11-14T16:01:48.000Z
anchor/__init__.py
ysenarath/anchor
63dc0cd7aabda4a15c4c9f4b63089a36c5b0b97b
[ "Apache-2.0" ]
null
null
null
anchor/__init__.py
ysenarath/anchor
63dc0cd7aabda4a15c4c9f4b63089a36c5b0b97b
[ "Apache-2.0" ]
null
null
null
from .base import Anchor
12.5
24
0.8
4
25
5
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
32e9c5898e6d7d2237677e865f29c8f08d8000bc
75
py
Python
testing-legacy/testutils/__init__.py
gigabackup/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
60
2018-09-26T15:46:00.000Z
2021-10-10T02:37:14.000Z
testing-legacy/testutils/__init__.py
gigabackup/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
1,706
2018-09-26T16:11:22.000Z
2021-08-20T13:37:59.000Z
testing-legacy/testutils/__init__.py
griffinmilsap/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
11
2019-03-14T13:23:51.000Z
2022-01-25T01:29:16.000Z
from .testutils import * from .elements import * from .actions import *
10.714286
24
0.72
9
75
6
0.555556
0.37037
0
0
0
0
0
0
0
0
0
0
0.2
75
6
25
12.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fd05a0b3853d1f25a50e6b67ef511d5da65dc071
37
py
Python
faq_module/__init__.py
alentoghostflame/StupidAlentoBot
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
[ "MIT" ]
1
2021-12-12T02:50:20.000Z
2021-12-12T02:50:20.000Z
faq_module/__init__.py
alentoghostflame/StupidAlentoBot
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
[ "MIT" ]
17
2020-02-07T23:40:36.000Z
2020-12-22T16:38:44.000Z
faq_module/__init__.py
alentoghostflame/StupidAlentoBot
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
[ "MIT" ]
null
null
null
from faq_module.faq import FAQModule
18.5
36
0.864865
6
37
5.166667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fd1e526370f04bdc8efd5bc7d229a074c8e00142
285
py
Python
measurement_stats/value/__init__.py
sernst/RefinedStatistics
3e310d265f17125b239f78cc35e42a3c3545d89b
[ "MIT" ]
null
null
null
measurement_stats/value/__init__.py
sernst/RefinedStatistics
3e310d265f17125b239f78cc35e42a3c3545d89b
[ "MIT" ]
null
null
null
measurement_stats/value/__init__.py
sernst/RefinedStatistics
3e310d265f17125b239f78cc35e42a3c3545d89b
[ "MIT" ]
1
2017-05-21T16:45:22.000Z
2017-05-21T16:45:22.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from measurement_stats.value.value_type import ValueUncertainty from measurement_stats.value.value_ops import * # protected by __all__
35.625
70
0.877193
37
285
6.027027
0.486486
0.179372
0.286996
0.224215
0.269058
0
0
0
0
0
0
0
0.101754
285
7
71
40.714286
0.871094
0.070175
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fd2ff7469ff56f5cfb9187e97e99360a96996a4e
47
py
Python
dante/vendor/pyparsing/__init__.py
sbg/dante
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
[ "Apache-2.0" ]
9
2017-11-03T15:53:01.000Z
2019-10-01T14:09:56.000Z
dante/vendor/pyparsing/__init__.py
sbg/dante
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
[ "Apache-2.0" ]
4
2019-10-01T12:53:58.000Z
2021-04-26T15:39:16.000Z
dante/vendor/pyparsing/__init__.py
sbg/dante
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
[ "Apache-2.0" ]
5
2017-11-03T15:50:40.000Z
2021-09-13T08:50:45.000Z
from dante.vendor.pyparsing.pyparsing import *
23.5
46
0.829787
6
47
6.5
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.085106
47
1
47
47
0.906977
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6