hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87f14b098265544dbf022e3f54455436deb0ad4b
| 24
|
py
|
Python
|
structures/tree/__init__.py
|
spencerpomme/pyalgolib
|
d055287caa4a779ea833c7efc305cd4f966bd841
|
[
"MIT"
] | null | null | null |
structures/tree/__init__.py
|
spencerpomme/pyalgolib
|
d055287caa4a779ea833c7efc305cd4f966bd841
|
[
"MIT"
] | null | null | null |
structures/tree/__init__.py
|
spencerpomme/pyalgolib
|
d055287caa4a779ea833c7efc305cd4f966bd841
|
[
"MIT"
] | null | null | null |
# data structure module
| 12
| 23
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e20433bc889dc0f32de713dc2c45f59d8175f0f2
| 481
|
py
|
Python
|
cachetclient/v1/__init__.py
|
amdemas/cachet-client
|
6a34ada87f99f8a3af593eefadc37a83f59827dd
|
[
"MIT"
] | null | null | null |
cachetclient/v1/__init__.py
|
amdemas/cachet-client
|
6a34ada87f99f8a3af593eefadc37a83f59827dd
|
[
"MIT"
] | null | null | null |
cachetclient/v1/__init__.py
|
amdemas/cachet-client
|
6a34ada87f99f8a3af593eefadc37a83f59827dd
|
[
"MIT"
] | null | null | null |
from cachetclient.v1.client import Client # noqa
from cachetclient.v1.subscribers import Subscriber # noqa
from cachetclient.v1.components import Component # noqa
from cachetclient.v1.component_groups import ComponentGroup # noqa
from cachetclient.v1.incident_updates import IndicentUpdate # noqa
from cachetclient.v1.metrics import Metric # noqa
from cachetclient.v1.metric_points import MetricPoint # noqa
from cachetclient.v1 import enums # noqa
__version__ = '1.1.0'
| 40.083333
| 67
| 0.814969
| 62
| 481
| 6.209677
| 0.370968
| 0.332468
| 0.374026
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02619
| 0.126819
| 481
| 11
| 68
| 43.727273
| 0.890476
| 0.081081
| 0
| 0
| 0
| 0
| 0.011547
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.888889
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e20447ecbe22286bf4163b908e5eba34ac71394f
| 141
|
py
|
Python
|
documentation/admin.py
|
establishment/django-establishment
|
ad1d04fe9efc748e2fba5b4bc67446d2a4cf12f6
|
[
"CC0-1.0"
] | 1
|
2017-04-27T19:35:42.000Z
|
2017-04-27T19:35:42.000Z
|
documentation/admin.py
|
establishment/django-establishment
|
ad1d04fe9efc748e2fba5b4bc67446d2a4cf12f6
|
[
"CC0-1.0"
] | null | null | null |
documentation/admin.py
|
establishment/django-establishment
|
ad1d04fe9efc748e2fba5b4bc67446d2a4cf12f6
|
[
"CC0-1.0"
] | null | null | null |
from django.contrib import admin
from establishment.documentation.models import DocumentationEntry
admin.site.register(DocumentationEntry)
| 23.5
| 65
| 0.87234
| 15
| 141
| 8.2
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078014
| 141
| 5
| 66
| 28.2
| 0.946154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e204c971bbb2c9c7ca0b1436805590f663181057
| 96
|
py
|
Python
|
libs/parsers/__init__.py
|
pullself/Compilers
|
590226d02e5291857cb3875bd1ed6315c37fc74e
|
[
"MIT"
] | null | null | null |
libs/parsers/__init__.py
|
pullself/Compilers
|
590226d02e5291857cb3875bd1ed6315c37fc74e
|
[
"MIT"
] | null | null | null |
libs/parsers/__init__.py
|
pullself/Compilers
|
590226d02e5291857cb3875bd1ed6315c37fc74e
|
[
"MIT"
] | null | null | null |
import libs.parsers.parser
import libs.parsers.constructor
__all__ = ['parser', 'constructor']
| 19.2
| 35
| 0.78125
| 11
| 96
| 6.454545
| 0.545455
| 0.28169
| 0.478873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 96
| 4
| 36
| 24
| 0.816092
| 0
| 0
| 0
| 0
| 0
| 0.177083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
354d0a0e8ce535bc378cbced25f44f2527b5fa3a
| 373
|
py
|
Python
|
bindings/python/capstone/__init__.py
|
zouguangxian/capstone
|
a1818520dfb37596cc5a3f19f3e04412c4c66dca
|
[
"BSD-3-Clause"
] | 1
|
2021-07-06T23:36:41.000Z
|
2021-07-06T23:36:41.000Z
|
bindings/python/capstone/__init__.py
|
zouguangxian/capstone
|
a1818520dfb37596cc5a3f19f3e04412c4c66dca
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/python/capstone/__init__.py
|
zouguangxian/capstone
|
a1818520dfb37596cc5a3f19f3e04412c4c66dca
|
[
"BSD-3-Clause"
] | null | null | null |
from capstone import Cs, CsError, cs_disasm_quick, cs_version, CS_API_MAJOR, CS_API_MINOR, CS_ARCH_ARM, CS_ARCH_ARM64, CS_ARCH_MIPS, CS_ARCH_X86, CS_MODE_LITTLE_ENDIAN, CS_MODE_ARM, CS_MODE_THUMB, CS_OPT_SYNTAX, CS_OPT_SYNTAX_INTEL, CS_OPT_SYNTAX_ATT, CS_OPT_DETAIL, CS_OPT_ON, CS_OPT_OFF, CS_MODE_16, CS_MODE_32, CS_MODE_64, CS_MODE_BIG_ENDIAN, CS_MODE_MICRO, CS_MODE_N64
| 186.5
| 372
| 0.86059
| 77
| 373
| 3.532468
| 0.428571
| 0.198529
| 0.121324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034783
| 0.075067
| 373
| 1
| 373
| 373
| 0.753623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
359377901334427aec4295ca0684ad7dffa3d7ff
| 182
|
py
|
Python
|
server/app/api/weather/resources.py
|
WagnerJM/home_pod
|
f6a51e4956d5956a85084f637e267406f21df6df
|
[
"MIT"
] | null | null | null |
server/app/api/weather/resources.py
|
WagnerJM/home_pod
|
f6a51e4956d5956a85084f637e267406f21df6df
|
[
"MIT"
] | null | null | null |
server/app/api/weather/resources.py
|
WagnerJM/home_pod
|
f6a51e4956d5956a85084f637e267406f21df6df
|
[
"MIT"
] | null | null | null |
from flask import request
from flask_restful import Resource
from flask_jwt_extended import get_jwt_claims, get_jwt_identity, jwt_required
from app.cache import redis_client
| 26
| 78
| 0.840659
| 28
| 182
| 5.142857
| 0.571429
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 182
| 6
| 79
| 30.333333
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ea20af329182c07294d2c1bbed18aee79997d85a
| 32
|
py
|
Python
|
uresnet/iotools/__init__.py
|
NuTufts/uresnet_pytorch
|
3a05f2349ae1e9601d05a80384920d8a22b4bc34
|
[
"MIT"
] | null | null | null |
uresnet/iotools/__init__.py
|
NuTufts/uresnet_pytorch
|
3a05f2349ae1e9601d05a80384920d8a22b4bc34
|
[
"MIT"
] | null | null | null |
uresnet/iotools/__init__.py
|
NuTufts/uresnet_pytorch
|
3a05f2349ae1e9601d05a80384920d8a22b4bc34
|
[
"MIT"
] | null | null | null |
from .iotools import io_factory
| 16
| 31
| 0.84375
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ea436f37e311aec310471106ec69e43fc23e41c4
| 1,152
|
py
|
Python
|
cannlytics/lims/qc.py
|
mindthegrow/cannlytics
|
c266bc1169bef75214985901cd3165f415ad9ba7
|
[
"MIT"
] | 7
|
2021-05-31T15:30:22.000Z
|
2022-02-05T14:12:31.000Z
|
cannlytics/lims/qc.py
|
mindthegrow/cannlytics
|
c266bc1169bef75214985901cd3165f415ad9ba7
|
[
"MIT"
] | 17
|
2021-06-09T01:04:27.000Z
|
2022-03-18T14:48:12.000Z
|
cannlytics/lims/qc.py
|
mindthegrow/cannlytics
|
c266bc1169bef75214985901cd3165f415ad9ba7
|
[
"MIT"
] | 5
|
2021-06-07T13:52:33.000Z
|
2021-08-04T00:09:39.000Z
|
"""
Quality Control Tools | Cannlytics
Author: Keegan Skeate <keegan@cannlytics.com>
Created: 2/6/2021
Updated: 6/23/2021
License: MIT License <https://opensource.org/licenses/MIT>
Perform various quality control checks and analyses to ensure
that your laboratory is operating as desired.
TODO:
- Trend analyte results.
- Create predictions of lab results given available inputs!
- Statistics for internal standards.
"""
def backup_data():
"""Backup data stored in Firestore."""
return NotImplementedError
def calculate_relative_percent_diff():
"""Calculate relative perecent difference between two samples."""
return NotImplementedError
def plot_area_response():
"""Plot area response over time for a group of samples."""
return NotImplementedError
def plot_deviations():
"""Plot deviations in results for a group of samples."""
return NotImplementedError
def track_deviations():
"""Track deviations in results for a group of samples."""
return NotImplementedError
def metrc_reconciliation():
"""Reconcile Metrc data with Firestore data."""
return NotImplementedError
| 24.510638
| 69
| 0.737847
| 137
| 1,152
| 6.138686
| 0.576642
| 0.178359
| 0.166468
| 0.166468
| 0.260404
| 0.209275
| 0.209275
| 0.209275
| 0.154578
| 0.154578
| 0
| 0.013757
| 0.179688
| 1,152
| 46
| 70
| 25.043478
| 0.87619
| 0.630208
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
ea43b6473a80169e05c17d102859f981a7b958d9
| 76
|
py
|
Python
|
containershare/validator/__init__.py
|
vsoch/containershare-python
|
9db2a4d7c7fcb0c21edd5d2e2b5396d7108fe392
|
[
"BSD-3-Clause"
] | null | null | null |
containershare/validator/__init__.py
|
vsoch/containershare-python
|
9db2a4d7c7fcb0c21edd5d2e2b5396d7108fe392
|
[
"BSD-3-Clause"
] | 1
|
2018-07-30T22:11:56.000Z
|
2018-07-30T22:11:56.000Z
|
containershare/validator/__init__.py
|
vsoch/containershare-python
|
9db2a4d7c7fcb0c21edd5d2e2b5396d7108fe392
|
[
"BSD-3-Clause"
] | null | null | null |
from .library import LibraryValidator
from .runtime import RuntimeValidator
| 25.333333
| 37
| 0.868421
| 8
| 76
| 8.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ea5034df827000a7c021ce8d10922be02ea67910
| 70
|
py
|
Python
|
chatrender/celery.py
|
The-Politico/django-politico-slackchat-renderer
|
adb3ed2ba5039a97ee7b021d39aa40cab11e5661
|
[
"MIT"
] | 2
|
2018-07-02T16:49:35.000Z
|
2018-07-09T03:52:28.000Z
|
chatrender/celery.py
|
The-Politico/django-politico-slackchat-renderer
|
adb3ed2ba5039a97ee7b021d39aa40cab11e5661
|
[
"MIT"
] | 42
|
2018-02-14T21:28:54.000Z
|
2022-02-10T18:30:58.000Z
|
chatrender/celery.py
|
The-Politico/django-politico-slackchat-renderer
|
adb3ed2ba5039a97ee7b021d39aa40cab11e5661
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from chatrender.tasks.publish import publish_slackchat
| 23.333333
| 54
| 0.842857
| 9
| 70
| 6.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.1
| 70
| 2
| 55
| 35
| 0.904762
| 0.171429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ea61b0a3ba0b11abd7ed94000c53187e3c4b4ffc
| 109
|
py
|
Python
|
viper_dev.py
|
safinsingh/viper
|
f7fa9182713c4f0fbb33c2e881f668b807fd3956
|
[
"MIT"
] | null | null | null |
viper_dev.py
|
safinsingh/viper
|
f7fa9182713c4f0fbb33c2e881f668b807fd3956
|
[
"MIT"
] | null | null | null |
viper_dev.py
|
safinsingh/viper
|
f7fa9182713c4f0fbb33c2e881f668b807fd3956
|
[
"MIT"
] | null | null | null |
from viper import *
import inspect
def GetSource(func):
lines = inspect.getsource(func)
print(lines)
| 18.166667
| 35
| 0.724771
| 14
| 109
| 5.642857
| 0.642857
| 0.329114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183486
| 109
| 6
| 36
| 18.166667
| 0.88764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
575b35db9c979401cf63c36c72e33a04e3269d4a
| 69
|
py
|
Python
|
win/devkit/other/pymel/extras/completion/py/maya/app/sceneAssembly/__init__.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 21
|
2015-04-27T05:01:36.000Z
|
2021-11-22T13:45:14.000Z
|
python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/sceneAssembly/__init__.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | null | null | null |
python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/sceneAssembly/__init__.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | 9
|
2018-06-02T09:18:49.000Z
|
2021-12-20T09:24:35.000Z
|
from . import adskPrepareRender
import maya.cmds as cmd
import maya
| 13.8
| 31
| 0.811594
| 10
| 69
| 5.6
| 0.7
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15942
| 69
| 4
| 32
| 17.25
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
576cb2eea467a88f13b66f007bd906188c23f5fc
| 4,239
|
py
|
Python
|
lib/systems/chlorophyll_c2.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/systems/chlorophyll_c2.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/systems/chlorophyll_c2.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
import pulsar as psr
def load_ref_system():
""" Returns chlorophyll_c2 as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -2.51105 2.48309 -0.00367
C -1.01315 4.06218 0.09798
C 0.16582 4.71018 0.03445
C 1.49771 4.10555 -0.16166
C 3.11156 2.64885 -0.24011
C 3.76129 1.46796 -0.14139
C 3.09159 0.16424 -0.04831
C 1.53984 -1.29933 -0.10261
C -0.91181 -1.59853 -0.35248
C -2.45607 -0.05240 -0.35298
C -3.10620 1.27291 -0.16460
N 1.75017 2.78429 -0.23961
N 1.76493 0.00621 -0.16383
N -1.13225 -0.25365 -0.35470
C -3.20202 3.66639 0.28881
C 2.68521 4.82968 -0.22081
C 3.68747 -1.09005 0.15938
C -2.24542 4.67945 0.36245
C -2.41991 6.13752 0.71407
C -3.56473 6.73948 1.06129
C 3.72098 3.91621 -0.29248
N -1.17823 2.72953 -0.07212
Mg 0.26793 1.27490 -0.35377
C 0.34760 -2.11926 -0.19964
C 2.64511 -1.99016 0.09433
C 2.31747 -3.38932 0.17127
O 3.13845 -4.27117 0.35919
C 0.80633 -3.58955 -0.01558
O -0.47000 -3.54277 1.99390
C 0.19951 -4.22772 1.23456
O 0.33390 -5.55184 1.51538
C -2.14828 -2.27838 -0.46597
C -2.34627 -3.76961 -0.59495
C -3.52338 -4.42976 -0.61516
C -3.58867 -5.90449 -0.76226
O -2.56916 -6.57072 -0.87406
O -4.79233 -6.51526 -0.77059
C -3.12715 -1.27243 -0.46107
C 0.83624 -6.50254 0.65250
C 5.12089 -1.42417 0.42504
C -4.62172 -1.44902 -0.55794
C -4.68427 3.81267 0.53984
C 2.86086 6.32939 -0.22433
C 6.20198 3.61878 -0.78465
C 5.15458 4.35926 -0.39637
H 0.16297 5.77206 0.17065
H 4.82596 1.44735 -0.07176
H -4.17158 1.24062 -0.09244
H -1.55035 6.77752 0.72933
H -4.51438 6.24702 1.11665
H -3.54680 7.79385 1.31901
H 0.60711 -4.17663 -0.93384
H -1.47776 -4.37959 -0.70310
H -4.46318 -3.92775 -0.52160
H -4.86588 -7.47619 -0.86197
H 0.78374 -7.49457 1.14518
H 1.89335 -6.30524 0.40486
H 0.22776 -6.53794 -0.27557
H 5.75783 -0.51879 0.44869
H 5.50014 -2.12026 -0.34842
H 5.18953 -1.91331 1.42119
H -5.18859 -0.50460 -0.59248
H -4.99451 -2.00986 0.32226
H -4.86783 -1.97442 -1.50531
H -5.27115 2.88458 0.45319
H -5.12053 4.53158 -0.18161
H -4.83661 4.15056 1.58664
H 1.91301 6.89359 -0.19061
H 3.38213 6.63177 -1.15809
H 3.47321 6.63697 0.64765
H 7.18160 4.08465 -0.84048
H 6.13795 2.59183 -1.09149
H 5.36680 5.40361 -0.19182
""")
| 52.333333
| 70
| 0.362114
| 541
| 4,239
| 2.829945
| 0.475046
| 0.01437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.694298
| 0.553197
| 4,239
| 80
| 71
| 52.9875
| 0.114044
| 0.026185
| 0
| 0
| 0
| 0
| 0.979552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| true
| 0
| 0.012987
| 0
| 0.038961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
17c5134f523338eb38c2be750ed00943cad1dc8d
| 34
|
py
|
Python
|
matfactor/__init__.py
|
Joshua-Chin/matfactor
|
6730ca7ddb7844d9d50f7e5725f5ccdaae31721b
|
[
"Apache-2.0"
] | 1
|
2018-02-13T02:55:16.000Z
|
2018-02-13T02:55:16.000Z
|
matfactor/__init__.py
|
Joshua-Chin/matfactor
|
6730ca7ddb7844d9d50f7e5725f5ccdaae31721b
|
[
"Apache-2.0"
] | null | null | null |
matfactor/__init__.py
|
Joshua-Chin/matfactor
|
6730ca7ddb7844d9d50f7e5725f5ccdaae31721b
|
[
"Apache-2.0"
] | null | null | null |
from ._factorize import factorize
| 17
| 33
| 0.852941
| 4
| 34
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
17f3d9a2300741cd7506a6c4460578d98121f0a5
| 154
|
py
|
Python
|
gather/handlers/__init__.py
|
openghg/gather
|
0096cfe66b0093cdd294fa2a67c060d7fc28d2fa
|
[
"Apache-2.0"
] | null | null | null |
gather/handlers/__init__.py
|
openghg/gather
|
0096cfe66b0093cdd294fa2a67c060d7fc28d2fa
|
[
"Apache-2.0"
] | null | null | null |
gather/handlers/__init__.py
|
openghg/gather
|
0096cfe66b0093cdd294fa2a67c060d7fc28d2fa
|
[
"Apache-2.0"
] | null | null | null |
from ._scrape import scrape_handler
from ._binary_data import data_handler
from ._crds import crds_handler
__all__ = ["scrape_handler", "data_handler"]
| 22
| 44
| 0.811688
| 21
| 154
| 5.333333
| 0.380952
| 0.232143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116883
| 154
| 6
| 45
| 25.666667
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0.168831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
aa25080d95db6003a47cb7e9144e0411c4a11460
| 88
|
py
|
Python
|
onnxmltools/convert/common/utils.py
|
xjarvik/onnxmltools
|
e4fbdc09814ceedc7655d85b6c4203ca21d8433a
|
[
"Apache-2.0"
] | 1
|
2022-01-28T04:59:37.000Z
|
2022-01-28T04:59:37.000Z
|
onnxmltools/convert/common/utils.py
|
xjarvik/onnxmltools
|
e4fbdc09814ceedc7655d85b6c4203ca21d8433a
|
[
"Apache-2.0"
] | null | null | null |
onnxmltools/convert/common/utils.py
|
xjarvik/onnxmltools
|
e4fbdc09814ceedc7655d85b6c4203ca21d8433a
|
[
"Apache-2.0"
] | 1
|
2021-07-05T23:51:56.000Z
|
2021-07-05T23:51:56.000Z
|
# SPDX-License-Identifier: Apache-2.0
from onnxconverter_common.utils import * # noqa
| 22
| 48
| 0.772727
| 12
| 88
| 5.583333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 0.125
| 88
| 3
| 49
| 29.333333
| 0.844156
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
aa35c651bcb63b7652b7a85574cf82938974798d
| 206
|
py
|
Python
|
src/page/page_parser.py
|
baallezx/collect
|
7156f239d133660e03bba334d716025b96d6b230
|
[
"MIT"
] | 1
|
2016-02-08T10:53:48.000Z
|
2016-02-08T10:53:48.000Z
|
src/page/page_parser.py
|
baallezx/collect
|
7156f239d133660e03bba334d716025b96d6b230
|
[
"MIT"
] | null | null | null |
src/page/page_parser.py
|
baallezx/collect
|
7156f239d133660e03bba334d716025b96d6b230
|
[
"MIT"
] | null | null | null |
# TODO: implement a page_parser that uses nlp and stats to get a good read of a file.
class page_parser(object):
"""
a multi purpose parser that can read these file types
"""
def __init__(self):
pass
| 25.75
| 85
| 0.728155
| 36
| 206
| 4
| 0.75
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.199029
| 206
| 7
| 86
| 29.428571
| 0.872727
| 0.669903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
aa35ead0147cdaa5e16792a1159c28c73e8158c5
| 110
|
py
|
Python
|
crawl_and_scrap/__main__.py
|
byung-u/GranXiSearch
|
80a4a2cd19e39424013b7838aafbbbffd2a3574b
|
[
"MIT"
] | 1
|
2017-06-21T10:44:27.000Z
|
2017-06-21T10:44:27.000Z
|
crawl_and_scrap/__main__.py
|
byung-u/GranXiSearch
|
80a4a2cd19e39424013b7838aafbbbffd2a3574b
|
[
"MIT"
] | 5
|
2017-02-05T15:20:32.000Z
|
2017-03-11T14:09:49.000Z
|
crawl_and_scrap/__main__.py
|
byung-u/FindTheTreasure
|
80a4a2cd19e39424013b7838aafbbbffd2a3574b
|
[
"MIT"
] | null | null | null |
"""crwal_and_scrap trying to gathering news with web scrawl"""
from crwal_and_scrap.main import main
main()
| 18.333333
| 62
| 0.781818
| 18
| 110
| 4.555556
| 0.722222
| 0.195122
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 110
| 5
| 63
| 22
| 0.863158
| 0.509091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a4c5246b1bb457ccf2adc163a51a115f0a845803
| 56
|
py
|
Python
|
utils/test_fm.py
|
dilum1995/DAugmentor
|
6cc86dccf826415a88b8226265e16ae96b5cc05b
|
[
"MIT"
] | 1
|
2020-08-02T13:06:03.000Z
|
2020-08-02T13:06:03.000Z
|
utils/test_fm.py
|
dilum1995/DAugmentor
|
6cc86dccf826415a88b8226265e16ae96b5cc05b
|
[
"MIT"
] | null | null | null |
utils/test_fm.py
|
dilum1995/DAugmentor
|
6cc86dccf826415a88b8226265e16ae96b5cc05b
|
[
"MIT"
] | null | null | null |
from utils import constants as const
print(const.PATH)
| 14
| 36
| 0.803571
| 9
| 56
| 5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 3
| 37
| 18.666667
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
a4e00d2cb49985d38fa9a42e89c6babd42a14602
| 190
|
py
|
Python
|
ldaptor/apps/webui/i18n.py
|
tv42/ldaptor
|
3f227602c8c021b9e943136a2dc8d7db44a11e50
|
[
"MIT"
] | 1
|
2015-11-25T04:01:26.000Z
|
2015-11-25T04:01:26.000Z
|
ldaptor/apps/webui/i18n.py
|
tv42/ldaptor
|
3f227602c8c021b9e943136a2dc8d7db44a11e50
|
[
"MIT"
] | null | null | null |
ldaptor/apps/webui/i18n.py
|
tv42/ldaptor
|
3f227602c8c021b9e943136a2dc8d7db44a11e50
|
[
"MIT"
] | 2
|
2019-11-06T02:14:10.000Z
|
2022-01-10T08:34:11.000Z
|
from nevow.inevow import ILanguages
from nevow.i18n import I18NConfig
from nevow import i18n
_ = i18n.Translator(domain='ldaptor-webui')
def render():
return i18n.render(translator=_)
| 21.111111
| 43
| 0.778947
| 25
| 190
| 5.84
| 0.56
| 0.184932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.131579
| 190
| 8
| 44
| 23.75
| 0.824242
| 0
| 0
| 0
| 0
| 0
| 0.068421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
a4e76062bd271f3f1cc37a95b088bfcb694c00bc
| 217
|
py
|
Python
|
hs_modflow_modelinstance/admin.py
|
ResearchSoftwareInstitute/MyHPOM
|
2d48fe5ac8d21173b1685eb33059bb391fe24414
|
[
"BSD-3-Clause"
] | 1
|
2018-09-17T13:07:29.000Z
|
2018-09-17T13:07:29.000Z
|
hs_modflow_modelinstance/admin.py
|
ResearchSoftwareInstitute/MyHPOM
|
2d48fe5ac8d21173b1685eb33059bb391fe24414
|
[
"BSD-3-Clause"
] | 100
|
2017-08-01T23:48:04.000Z
|
2018-04-03T13:17:27.000Z
|
hs_modflow_modelinstance/admin.py
|
ResearchSoftwareInstitute/MyHPOM
|
2d48fe5ac8d21173b1685eb33059bb391fe24414
|
[
"BSD-3-Clause"
] | 2
|
2017-07-27T20:41:33.000Z
|
2017-07-27T22:40:57.000Z
|
from mezzanine.pages.admin import PageAdmin
from django.contrib import admin
from hs_modflow_modelinstance.models import MODFLOWModelInstanceResource
admin.site.register(MODFLOWModelInstanceResource, PageAdmin)
| 36.166667
| 73
| 0.866359
| 23
| 217
| 8.086957
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092166
| 217
| 5
| 74
| 43.4
| 0.944162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
104fe95906a89882b90ee817c831630744acea53
| 355
|
py
|
Python
|
tests/test_deploy.py
|
NCAR/marbl-solutions
|
0840e2a594d49218b1510cd8cb95d9d058495a8a
|
[
"MIT"
] | null | null | null |
tests/test_deploy.py
|
NCAR/marbl-solutions
|
0840e2a594d49218b1510cd8cb95d9d058495a8a
|
[
"MIT"
] | 1
|
2022-02-11T22:53:37.000Z
|
2022-02-11T22:53:37.000Z
|
tests/test_deploy.py
|
NCAR/marbl-solutions
|
0840e2a594d49218b1510cd8cb95d9d058495a8a
|
[
"MIT"
] | null | null | null |
import solutions
def test_deploy_config():
    """Validate the deploy configuration exposed by the solutions package."""
    deploy_config = solutions.config.deploy_config
    assert deploy_config['reference_case'] == 'ref_case'
    # isinstance is the idiomatic type check (also accepts list subclasses).
    assert isinstance(deploy_config['reference_case_path'], list)
    assert deploy_config['reference_case_file_format'] == 'history'
    assert deploy_config['case_to_compare_file_format'] == 'timeseries'
| 35.5
| 71
| 0.766197
| 44
| 355
| 5.75
| 0.431818
| 0.332016
| 0.213439
| 0.296443
| 0.245059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 355
| 9
| 72
| 39.444444
| 0.816129
| 0
| 0
| 0
| 0
| 0
| 0.312676
| 0.149296
| 0
| 0
| 0
| 0
| 0.571429
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
10521ee81224fcf01be655be4e17446c05559c19
| 148
|
py
|
Python
|
backend/home/models.py
|
crowdbotics-apps/test-29106
|
34df3fa66e798f61d9189fa248f21cabb9bca0e1
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/models.py
|
crowdbotics-apps/test-29106
|
34df3fa66e798f61d9189fa248f21cabb9bca0e1
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/models.py
|
crowdbotics-apps/test-29106
|
34df3fa66e798f61d9189fa248f21cabb9bca0e1
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.conf import settings
from django.db import models
class Tasks(models.Model):
    "Generated Model"
    # Free-text task name; TextField, so length is unbounded at the DB level.
    task_name = models.TextField()
| 18.5
| 34
| 0.75
| 20
| 148
| 5.5
| 0.7
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168919
| 148
| 7
| 35
| 21.142857
| 0.894309
| 0.101351
| 0
| 0
| 1
| 0
| 0.101351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
106e4b0ccf7d2a2518a1959d9ed235098c74fcea
| 97
|
py
|
Python
|
getKey.py
|
cychiang/spotify-lyrics
|
78219ea2e9c8eacda7a8cb1cecbb7ecdd39d208e
|
[
"Apache-2.0"
] | null | null | null |
getKey.py
|
cychiang/spotify-lyrics
|
78219ea2e9c8eacda7a8cb1cecbb7ecdd39d208e
|
[
"Apache-2.0"
] | null | null | null |
getKey.py
|
cychiang/spotify-lyrics
|
78219ea2e9c8eacda7a8cb1cecbb7ecdd39d208e
|
[
"Apache-2.0"
] | null | null | null |
def musixmatch():
    """Return the first line of ``musixmatch.txt`` (the stored API key).

    NOTE(review): the returned line keeps its trailing newline, exactly as
    ``readline`` produced it — callers appear to rely on the raw value.
    """
    with open('musixmatch.txt', 'r') as key_file:
        first_line = key_file.readline()
    return first_line
| 24.25
| 44
| 0.587629
| 12
| 97
| 4.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.257732
| 97
| 4
| 45
| 24.25
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
1071d5d3c31d440ee16ee62b40826c1d771809f7
| 216
|
py
|
Python
|
regularize_charsets.py
|
sys-bio/temp-biomodels
|
596eebb590d72e74419773f4e9b829a62d7fff9a
|
[
"CC0-1.0"
] | null | null | null |
regularize_charsets.py
|
sys-bio/temp-biomodels
|
596eebb590d72e74419773f4e9b829a62d7fff9a
|
[
"CC0-1.0"
] | 5
|
2022-03-30T21:33:45.000Z
|
2022-03-31T20:08:15.000Z
|
regularize_charsets.py
|
sys-bio/temp-biomodels
|
596eebb590d72e74419773f4e9b829a62d7fff9a
|
[
"CC0-1.0"
] | null | null | null |
from charset_normalizer import from_path, normalize
# Hoist the duplicated path into one constant, written as a raw string.
# The original non-raw literal only worked because '\B' happens not to be a
# recognised escape sequence; a segment starting with e.g. 'b' or 'n' would
# silently corrupt the path.
MODEL_PATH = r'original\BIOMD0000000424\BIOMD0000000424_url.xml'
results = from_path(MODEL_PATH)
best = str(results.best())
normalize(MODEL_PATH)
| 27
| 71
| 0.828704
| 25
| 216
| 6.96
| 0.52
| 0.091954
| 0.436782
| 0.471264
| 0.505747
| 0
| 0
| 0
| 0
| 0
| 0
| 0.199005
| 0.069444
| 216
| 7
| 72
| 30.857143
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
10b7df52a7109b3cd059e5aa3d9c1aee9eb2218c
| 2,171
|
py
|
Python
|
tests/models/boundary/test_is_boundary_concave_to_y.py
|
EderVs/Voronoi-Diagrams
|
6e69f9b6eb516dee12d66f187cf267a7b527da5f
|
[
"MIT"
] | 3
|
2021-11-12T17:43:08.000Z
|
2022-01-03T02:47:34.000Z
|
tests/models/boundary/test_is_boundary_concave_to_y.py
|
EderVs/Voronoi-Diagrams
|
6e69f9b6eb516dee12d66f187cf267a7b527da5f
|
[
"MIT"
] | 3
|
2021-11-19T20:12:31.000Z
|
2021-11-19T20:14:39.000Z
|
tests/models/boundary/test_is_boundary_concave_to_y.py
|
EderVs/Voronoi-Diagrams
|
6e69f9b6eb516dee12d66f187cf267a7b527da5f
|
[
"MIT"
] | null | null | null |
"""Test is_boundary_not_x_monotone method in WeightedPointBoundary."""
# Standard
from typing import List, Any
from random import randint
# Models
from voronoi_diagrams.models import (
WeightedSite,
WeightedPointBisector,
WeightedPointBoundary,
)
# Math
from decimal import Decimal
class TestWeightedPointBoundaryIsBoundaryConcaveToY:
    """Test formula."""
    def _check_monotonicity(self, q, plus_not_monotone, minus_not_monotone):
        """Build both boundaries for sites p (fixed) and q, then assert the
        expected is_boundary_not_x_monotone() flag for each sign.

        Deduplicates the construction logic the three tests below shared.
        """
        p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
        bisector = WeightedPointBisector(sites=(p, q))
        boundary_plus = WeightedPointBoundary(bisector=bisector, sign=True)
        boundary_minus = WeightedPointBoundary(bisector=bisector, sign=False)
        assert boundary_plus.is_boundary_not_x_monotone() == plus_not_monotone
        assert boundary_minus.is_boundary_not_x_monotone() == minus_not_monotone
    def test_with_concave_to_y_boundary(self):
        """Test with a boundary that is concave to y."""
        # q is the one in the top.
        q = WeightedSite(Decimal(-5), Decimal(10), Decimal(7))
        self._check_monotonicity(q, False, True)
    def test_with_normal_boundary(self):
        """Test with a boundary that is not concave to y."""
        # q is the one in the top.
        q = WeightedSite(Decimal(-8), Decimal(18), Decimal(7))
        self._check_monotonicity(q, False, False)
    def test_with_stopped_boundary(self):
        """Test with a boundary that is not concave to y."""
        # q is the one in the top.
        q = WeightedSite(Decimal(-5), Decimal(15), Decimal(7))
        self._check_monotonicity(q, False, False)
| 41.75
| 77
| 0.706587
| 265
| 2,171
| 5.592453
| 0.218868
| 0.047233
| 0.061404
| 0.066127
| 0.787449
| 0.772605
| 0.772605
| 0.772605
| 0.748988
| 0.748988
| 0
| 0.015499
| 0.197605
| 2,171
| 51
| 78
| 42.568627
| 0.835247
| 0.143713
| 0
| 0.515152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.090909
| false
| 0
| 0.121212
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
10bd99ec8ccedb03c569281fb82814ef2e18a1af
| 44
|
py
|
Python
|
europython-2018/code/simple_bind/run.py
|
svenstaro/talks
|
0462268a8c684dde65aceb2fb98644cb655c5013
|
[
"MIT"
] | 5
|
2018-07-26T10:45:41.000Z
|
2020-08-16T17:45:51.000Z
|
europython-2018/code/simple_bind/run.py
|
svenstaro/talks
|
0462268a8c684dde65aceb2fb98644cb655c5013
|
[
"MIT"
] | null | null | null |
europython-2018/code/simple_bind/run.py
|
svenstaro/talks
|
0462268a8c684dde65aceb2fb98644cb655c5013
|
[
"MIT"
] | 1
|
2020-10-02T22:09:15.000Z
|
2020-10-02T22:09:15.000Z
|
from europython import hello
# Demo entry point: greet one attendee by name.
hello("Alisa")
| 14.666667
| 28
| 0.795455
| 6
| 44
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 2
| 29
| 22
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
52c38d1cc6e6ce5d847e7873d6a974fa56d65e99
| 163
|
py
|
Python
|
tests/__init__.py
|
RonenTRA/faster-than-requests
|
237a57cf2607e0694c87fea8e313461bf9a462e7
|
[
"MIT"
] | 857
|
2018-11-18T17:55:01.000Z
|
2022-03-31T23:39:10.000Z
|
tests/__init__.py
|
RonenTRA/faster-than-requests
|
237a57cf2607e0694c87fea8e313461bf9a462e7
|
[
"MIT"
] | 181
|
2018-12-08T18:31:05.000Z
|
2022-03-29T01:40:02.000Z
|
tests/__init__.py
|
RonenTRA/faster-than-requests
|
237a57cf2607e0694c87fea8e313461bf9a462e7
|
[
"MIT"
] | 92
|
2018-11-22T03:53:31.000Z
|
2022-03-21T10:54:24.000Z
|
# Allow tests/ directory to see faster_than_requests/ package on PYTHONPATH
import sys
from pathlib import Path
# Repository root = parent of the tests/ directory this file lives in.
_repo_root = Path(__file__).parent.parent
sys.path.append(str(_repo_root))
| 32.6
| 75
| 0.815951
| 25
| 163
| 5.08
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104294
| 163
| 4
| 76
| 40.75
| 0.869863
| 0.447853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
52d3d72868c077690bde8ce4b9a24f77c6b48f81
| 134
|
py
|
Python
|
app/blueprints/printing/__init__.py
|
OrigamiCranes/PrintingPortal
|
e25f9f683dca3a0dcf4c90ae50515d7693447cb8
|
[
"MIT",
"Unlicense"
] | null | null | null |
app/blueprints/printing/__init__.py
|
OrigamiCranes/PrintingPortal
|
e25f9f683dca3a0dcf4c90ae50515d7693447cb8
|
[
"MIT",
"Unlicense"
] | null | null | null |
app/blueprints/printing/__init__.py
|
OrigamiCranes/PrintingPortal
|
e25f9f683dca3a0dcf4c90ae50515d7693447cb8
|
[
"MIT",
"Unlicense"
] | null | null | null |
from flask import Blueprint, url_for
# 'printing' blueprint; templates are resolved under ./templates.
bp = Blueprint('printing', __name__,template_folder='templates')
# NOTE(review): imported after `bp` is defined — presumably so routes/forms
# can import `bp` without a circular import (common Flask layout); confirm.
from . import routes, forms
| 16.75
| 64
| 0.768657
| 17
| 134
| 5.705882
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134328
| 134
| 7
| 65
| 19.142857
| 0.836207
| 0
| 0
| 0
| 0
| 0
| 0.12782
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
5e05f2cccefdec04a8fd2cca1ee7503f900daacf
| 292
|
py
|
Python
|
app/main/views.py
|
chushijituan/job_analysis
|
a99d8f12b9dafa93de448a27d2f76ee6ddbde469
|
[
"MIT"
] | 45
|
2016-07-07T08:53:04.000Z
|
2022-01-10T11:00:40.000Z
|
app/main/views.py
|
chushijituan/job_analysis
|
a99d8f12b9dafa93de448a27d2f76ee6ddbde469
|
[
"MIT"
] | 1
|
2016-07-09T03:40:13.000Z
|
2017-02-02T06:58:27.000Z
|
app/main/views.py
|
chushijituan/job_analysis
|
a99d8f12b9dafa93de448a27d2f76ee6ddbde469
|
[
"MIT"
] | 20
|
2016-07-08T02:18:49.000Z
|
2019-06-09T14:21:26.000Z
|
# coding: utf-8
from . import main
from flask import render_template, jsonify, flash, request, current_app, url_for, Response, g, abort
@main.route('/')
def index():
    """Render the landing page (templates/index.html)."""
    return render_template('index.html')
@main.route('/about')
def about_page():
    """Render the about page (templates/about.html)."""
    return render_template('about.html')
| 20.857143
| 100
| 0.715753
| 41
| 292
| 4.95122
| 0.634146
| 0.206897
| 0.197044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003984
| 0.140411
| 292
| 13
| 101
| 22.461538
| 0.804781
| 0.044521
| 0
| 0
| 0
| 0
| 0.097473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
eacf62541cfea44c5aa6f4ef694688addf50cbbc
| 232
|
py
|
Python
|
catsndogs/training.py
|
simonpf/catsndogs
|
36732a7c2c767b2bb6efa87a849598170c8026e8
|
[
"MIT"
] | 1
|
2020-12-18T17:19:37.000Z
|
2020-12-18T17:19:37.000Z
|
catsndogs/training.py
|
simonpf/catsndogs
|
36732a7c2c767b2bb6efa87a849598170c8026e8
|
[
"MIT"
] | null | null | null |
catsndogs/training.py
|
simonpf/catsndogs
|
36732a7c2c767b2bb6efa87a849598170c8026e8
|
[
"MIT"
] | null | null | null |
import os
import glob
from catsndogs.data import get_training_data
# Resolve the training-data root ONCE and reuse it: the original called
# get_training_data() three times for the same value.
folder = get_training_data()
# All cat/dog training images, as lists of file paths.
cats = glob.glob(os.path.join(folder, "cat", "*.jpg"))
dogs = glob.glob(os.path.join(folder, "dog", "*.jpg"))
| 33.142857
| 67
| 0.737069
| 37
| 232
| 4.405405
| 0.432432
| 0.269939
| 0.368098
| 0.171779
| 0.404908
| 0.404908
| 0.404908
| 0.404908
| 0
| 0
| 0
| 0
| 0.090517
| 232
| 6
| 68
| 38.666667
| 0.772512
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d820b4f0770506dfe9510b1820590790869fb745
| 247
|
py
|
Python
|
apollo/embeds/__init__.py
|
rpetti/apollo
|
1304d8623e6dfe8c9b269b7e90611b3688c0c61e
|
[
"MIT"
] | null | null | null |
apollo/embeds/__init__.py
|
rpetti/apollo
|
1304d8623e6dfe8c9b269b7e90611b3688c0c61e
|
[
"MIT"
] | null | null | null |
apollo/embeds/__init__.py
|
rpetti/apollo
|
1304d8623e6dfe8c9b269b7e90611b3688c0c61e
|
[
"MIT"
] | null | null | null |
from .about_embed import AboutEmbed
from .event_embed import EventEmbed
from .help_embed import HelpEmbed
from .select_channel_embed import SelectChannelEmbed
from .start_time_embed import StartTimeEmbed
from .time_zone_embed import TimeZoneEmbed
| 35.285714
| 52
| 0.878543
| 33
| 247
| 6.30303
| 0.515152
| 0.317308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097166
| 247
| 6
| 53
| 41.166667
| 0.932735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dc3a778a081bc0e908fbf22ada6b3c5f69d5f4aa
| 16,560
|
py
|
Python
|
sdk/python/pulumi_kong/_inputs.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-02-23T10:05:20.000Z
|
2020-05-15T14:22:10.000Z
|
sdk/python/pulumi_kong/_inputs.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2020-04-21T22:04:23.000Z
|
2022-03-31T15:29:53.000Z
|
sdk/python/pulumi_kong/_inputs.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Public API of this generated module (names exported via `from ... import *`).
__all__ = [
    'RouteDestinationArgs',
    'RouteHeaderArgs',
    'RouteSourceArgs',
    'UpstreamHealthchecksArgs',
    'UpstreamHealthchecksActiveArgs',
    'UpstreamHealthchecksActiveHealthyArgs',
    'UpstreamHealthchecksActiveUnhealthyArgs',
    'UpstreamHealthchecksPassiveArgs',
    'UpstreamHealthchecksPassiveHealthyArgs',
    'UpstreamHealthchecksPassiveUnhealthyArgs',
]
@pulumi.input_type
class RouteDestinationArgs:
    """Route destination input: optional ``ip`` and ``port``.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 ip: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None):
        # Only store values that were actually supplied.
        if ip is not None:
            pulumi.set(__self__, "ip", ip)
        if port is not None:
            pulumi.set(__self__, "port", port)
    @property
    @pulumi.getter
    def ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ip")
    @ip.setter
    def ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
@pulumi.input_type
class RouteHeaderArgs:
    """Route header input: required ``name`` and ``values``.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[str] name: The name of the route
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the route
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class RouteSourceArgs:
    """Route source input: optional ``ip`` and ``port``.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 ip: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None):
        # Only store values that were actually supplied.
        if ip is not None:
            pulumi.set(__self__, "ip", ip)
        if port is not None:
            pulumi.set(__self__, "port", port)
    @property
    @pulumi.getter
    def ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ip")
    @ip.setter
    def ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
@pulumi.input_type
class UpstreamHealthchecksArgs:
    """Upstream health-check input: optional ``active`` and ``passive`` blocks.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 active: Optional[pulumi.Input['UpstreamHealthchecksActiveArgs']] = None,
                 passive: Optional[pulumi.Input['UpstreamHealthchecksPassiveArgs']] = None):
        # Only store values that were actually supplied.
        if active is not None:
            pulumi.set(__self__, "active", active)
        if passive is not None:
            pulumi.set(__self__, "passive", passive)
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input['UpstreamHealthchecksActiveArgs']]:
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input['UpstreamHealthchecksActiveArgs']]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter
    def passive(self) -> Optional[pulumi.Input['UpstreamHealthchecksPassiveArgs']]:
        return pulumi.get(self, "passive")
    @passive.setter
    def passive(self, value: Optional[pulumi.Input['UpstreamHealthchecksPassiveArgs']]):
        pulumi.set(self, "passive", value)
@pulumi.input_type
class UpstreamHealthchecksActiveArgs:
    """Active health-check input; all fields optional.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 concurrency: Optional[pulumi.Input[int]] = None,
                 healthy: Optional[pulumi.Input['UpstreamHealthchecksActiveHealthyArgs']] = None,
                 http_path: Optional[pulumi.Input[str]] = None,
                 https_sni: Optional[pulumi.Input[str]] = None,
                 https_verify_certificate: Optional[pulumi.Input[bool]] = None,
                 timeout: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 unhealthy: Optional[pulumi.Input['UpstreamHealthchecksActiveUnhealthyArgs']] = None):
        # Only store values that were actually supplied.
        if concurrency is not None:
            pulumi.set(__self__, "concurrency", concurrency)
        if healthy is not None:
            pulumi.set(__self__, "healthy", healthy)
        if http_path is not None:
            pulumi.set(__self__, "http_path", http_path)
        if https_sni is not None:
            pulumi.set(__self__, "https_sni", https_sni)
        if https_verify_certificate is not None:
            pulumi.set(__self__, "https_verify_certificate", https_verify_certificate)
        if timeout is not None:
            pulumi.set(__self__, "timeout", timeout)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if unhealthy is not None:
            pulumi.set(__self__, "unhealthy", unhealthy)
    @property
    @pulumi.getter
    def concurrency(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "concurrency")
    @concurrency.setter
    def concurrency(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "concurrency", value)
    @property
    @pulumi.getter
    def healthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksActiveHealthyArgs']]:
        return pulumi.get(self, "healthy")
    @healthy.setter
    def healthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksActiveHealthyArgs']]):
        pulumi.set(self, "healthy", value)
    @property
    @pulumi.getter(name="httpPath")
    def http_path(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "http_path")
    @http_path.setter
    def http_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_path", value)
    @property
    @pulumi.getter(name="httpsSni")
    def https_sni(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "https_sni")
    @https_sni.setter
    def https_sni(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "https_sni", value)
    @property
    @pulumi.getter(name="httpsVerifyCertificate")
    def https_verify_certificate(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "https_verify_certificate")
    @https_verify_certificate.setter
    def https_verify_certificate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "https_verify_certificate", value)
    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "timeout")
    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def unhealthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksActiveUnhealthyArgs']]:
        return pulumi.get(self, "unhealthy")
    @unhealthy.setter
    def unhealthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksActiveUnhealthyArgs']]):
        pulumi.set(self, "unhealthy", value)
@pulumi.input_type
class UpstreamHealthchecksActiveHealthyArgs:
    """Active/healthy thresholds: ``http_statuses``, ``interval``, ``successes``.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 interval: Optional[pulumi.Input[int]] = None,
                 successes: Optional[pulumi.Input[int]] = None):
        # Only store values that were actually supplied.
        if http_statuses is not None:
            pulumi.set(__self__, "http_statuses", http_statuses)
        if interval is not None:
            pulumi.set(__self__, "interval", interval)
        if successes is not None:
            pulumi.set(__self__, "successes", successes)
    @property
    @pulumi.getter(name="httpStatuses")
    def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        return pulumi.get(self, "http_statuses")
    @http_statuses.setter
    def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "http_statuses", value)
    @property
    @pulumi.getter
    def interval(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "interval")
    @interval.setter
    def interval(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "interval", value)
    @property
    @pulumi.getter
    def successes(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "successes")
    @successes.setter
    def successes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "successes", value)
@pulumi.input_type
class UpstreamHealthchecksActiveUnhealthyArgs:
    """Active/unhealthy thresholds; all fields optional.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 http_failures: Optional[pulumi.Input[int]] = None,
                 http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 interval: Optional[pulumi.Input[int]] = None,
                 tcp_failures: Optional[pulumi.Input[int]] = None,
                 timeouts: Optional[pulumi.Input[int]] = None):
        # Only store values that were actually supplied.
        if http_failures is not None:
            pulumi.set(__self__, "http_failures", http_failures)
        if http_statuses is not None:
            pulumi.set(__self__, "http_statuses", http_statuses)
        if interval is not None:
            pulumi.set(__self__, "interval", interval)
        if tcp_failures is not None:
            pulumi.set(__self__, "tcp_failures", tcp_failures)
        if timeouts is not None:
            pulumi.set(__self__, "timeouts", timeouts)
    @property
    @pulumi.getter(name="httpFailures")
    def http_failures(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "http_failures")
    @http_failures.setter
    def http_failures(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_failures", value)
    @property
    @pulumi.getter(name="httpStatuses")
    def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        return pulumi.get(self, "http_statuses")
    @http_statuses.setter
    def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "http_statuses", value)
    @property
    @pulumi.getter
    def interval(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "interval")
    @interval.setter
    def interval(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "interval", value)
    @property
    @pulumi.getter(name="tcpFailures")
    def tcp_failures(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "tcp_failures")
    @tcp_failures.setter
    def tcp_failures(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "tcp_failures", value)
    @property
    @pulumi.getter
    def timeouts(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "timeouts")
    @timeouts.setter
    def timeouts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeouts", value)
@pulumi.input_type
class UpstreamHealthchecksPassiveArgs:
    """Passive health-check input: optional ``healthy``, ``type``, ``unhealthy``.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 healthy: Optional[pulumi.Input['UpstreamHealthchecksPassiveHealthyArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 unhealthy: Optional[pulumi.Input['UpstreamHealthchecksPassiveUnhealthyArgs']] = None):
        # Only store values that were actually supplied.
        if healthy is not None:
            pulumi.set(__self__, "healthy", healthy)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if unhealthy is not None:
            pulumi.set(__self__, "unhealthy", unhealthy)
    @property
    @pulumi.getter
    def healthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksPassiveHealthyArgs']]:
        return pulumi.get(self, "healthy")
    @healthy.setter
    def healthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksPassiveHealthyArgs']]):
        pulumi.set(self, "healthy", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def unhealthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksPassiveUnhealthyArgs']]:
        return pulumi.get(self, "unhealthy")
    @unhealthy.setter
    def unhealthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksPassiveUnhealthyArgs']]):
        pulumi.set(self, "unhealthy", value)
@pulumi.input_type
class UpstreamHealthchecksPassiveHealthyArgs:
    """Passive/healthy thresholds: ``http_statuses`` and ``successes``.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 successes: Optional[pulumi.Input[int]] = None):
        # Only store values that were actually supplied.
        if http_statuses is not None:
            pulumi.set(__self__, "http_statuses", http_statuses)
        if successes is not None:
            pulumi.set(__self__, "successes", successes)
    @property
    @pulumi.getter(name="httpStatuses")
    def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        return pulumi.get(self, "http_statuses")
    @http_statuses.setter
    def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "http_statuses", value)
    @property
    @pulumi.getter
    def successes(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "successes")
    @successes.setter
    def successes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "successes", value)
@pulumi.input_type
class UpstreamHealthchecksPassiveUnhealthyArgs:
    """Passive/unhealthy thresholds; all fields optional.

    Generated by tfgen (see file header) — do not edit by hand.
    """
    def __init__(__self__, *,
                 http_failures: Optional[pulumi.Input[int]] = None,
                 http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 tcp_failures: Optional[pulumi.Input[int]] = None,
                 timeouts: Optional[pulumi.Input[int]] = None):
        # Only store values that were actually supplied.
        if http_failures is not None:
            pulumi.set(__self__, "http_failures", http_failures)
        if http_statuses is not None:
            pulumi.set(__self__, "http_statuses", http_statuses)
        if tcp_failures is not None:
            pulumi.set(__self__, "tcp_failures", tcp_failures)
        if timeouts is not None:
            pulumi.set(__self__, "timeouts", timeouts)
    @property
    @pulumi.getter(name="httpFailures")
    def http_failures(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "http_failures")
    @http_failures.setter
    def http_failures(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_failures", value)
    @property
    @pulumi.getter(name="httpStatuses")
    def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        return pulumi.get(self, "http_statuses")
    @http_statuses.setter
    def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "http_statuses", value)
    @property
    @pulumi.getter(name="tcpFailures")
    def tcp_failures(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "tcp_failures")
    @tcp_failures.setter
    def tcp_failures(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "tcp_failures", value)
    @property
    @pulumi.getter
    def timeouts(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "timeouts")
    @timeouts.setter
    def timeouts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeouts", value)
| 34.936709
| 103
| 0.650906
| 1,830
| 16,560
| 5.707104
| 0.054645
| 0.131655
| 0.169188
| 0.088472
| 0.754021
| 0.703562
| 0.67388
| 0.65473
| 0.648506
| 0.597472
| 0
| 0.000078
| 0.222766
| 16,560
| 473
| 104
| 35.010571
| 0.811359
| 0.015278
| 0
| 0.701333
| 1
| 0
| 0.118541
| 0.060162
| 0
| 0
| 0
| 0
| 0
| 1
| 0.202667
| false
| 0.053333
| 0.013333
| 0.085333
| 0.330667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
f4c5360b157971c47ab67890d1de372c50e60d6a
| 282
|
py
|
Python
|
MTS/__init__.py
|
ohhorob/pyMTS
|
e7553b96e72ac6d4f91657bdb7c632aeeaba3c9b
|
[
"Apache-2.0"
] | 1
|
2021-04-28T12:23:42.000Z
|
2021-04-28T12:23:42.000Z
|
MTS/__init__.py
|
ohhorob/pyMTS
|
e7553b96e72ac6d4f91657bdb7c632aeeaba3c9b
|
[
"Apache-2.0"
] | null | null | null |
MTS/__init__.py
|
ohhorob/pyMTS
|
e7553b96e72ac6d4f91657bdb7c632aeeaba3c9b
|
[
"Apache-2.0"
] | null | null | null |
# MTS Log protocol -- http://www.innovatemotorsports.com/support/downloads/Seriallog-2.pdf
# Serial: 8-N-1-19.2 kbit/sec
# Packet periodicity: 81.92 milliseconds (12.2 hertz) (8 MHz / 655360)
# Sample resolution: 10 bits (0..5V at 0.1% resolution)
import Header
from word import *
| 35.25
| 90
| 0.734043
| 46
| 282
| 4.5
| 0.847826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106122
| 0.131206
| 282
| 7
| 91
| 40.285714
| 0.738776
| 0.847518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f4e2c1950f15699ea2256ebd32a508dcb6887549
| 105
|
py
|
Python
|
test_initial.py
|
BickySamourai/djreact
|
cea500cb3dc841100cc058110d7e2c6d813ca8b8
|
[
"MIT"
] | 1
|
2018-12-05T11:21:50.000Z
|
2018-12-05T11:21:50.000Z
|
test_initial.py
|
floriansollami/djreact
|
cea500cb3dc841100cc058110d7e2c6d813ca8b8
|
[
"MIT"
] | 2
|
2020-02-11T23:28:33.000Z
|
2020-06-05T19:36:41.000Z
|
test_initial.py
|
BickySamourai/djreact
|
cea500cb3dc841100cc058110d7e2c6d813ca8b8
|
[
"MIT"
] | 1
|
2018-12-10T10:32:23.000Z
|
2018-12-10T10:32:23.000Z
|
def hello(name):
    """Return a greeting for *name*.

    Bug fix: the original concatenated the string literal 'name' instead of
    the parameter, so every caller received 'Hello name'.
    """
    return 'Hello ' + name
def test_hello():
    """hello() must echo the argument it was given."""
    assert hello('name') == 'Hello name'
| 21
| 40
| 0.609524
| 14
| 105
| 4.5
| 0.428571
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209524
| 105
| 5
| 40
| 21
| 0.759036
| 0
| 0
| 0
| 0
| 0
| 0.226415
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
f4fbc7791249dd3dc8759c139bed36e338524bfa
| 4,198
|
py
|
Python
|
games_logger/games/migrations/0001_initial.py
|
HaeckelK/games_logger_django
|
0a8a51e73f56e68d2dea6252a263c408ca86071e
|
[
"MIT"
] | null | null | null |
games_logger/games/migrations/0001_initial.py
|
HaeckelK/games_logger_django
|
0a8a51e73f56e68d2dea6252a263c408ca86071e
|
[
"MIT"
] | 3
|
2021-01-10T10:45:32.000Z
|
2021-01-10T13:31:05.000Z
|
games_logger/games/migrations/0001_initial.py
|
HaeckelK/games_logger_django
|
0a8a51e73f56e68d2dea6252a263c408ca86071e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-07 21:50
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('description_short', models.CharField(max_length=50)),
('description_long', models.CharField(max_length=250)),
('created_on', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('players_min', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('players_max', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('expands', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.game')),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='GenreCategory',
fields=[
('category_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='games.category')),
],
bases=('games.category',),
),
migrations.CreateModel(
name='PlatformCategory',
fields=[
('category_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='games.category')),
],
bases=('games.category',),
),
migrations.CreateModel(
name='TimeCategory',
fields=[
('category_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='games.category')),
],
bases=('games.category',),
),
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comments', models.CharField(max_length=250)),
('created_on', models.DateTimeField(auto_now_add=True)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.game')),
('players', models.ManyToManyField(related_name='players', to='games.Player')),
('winners', models.ManyToManyField(related_name='winners', to='games.Player')),
],
),
migrations.AddField(
model_name='game',
name='genre',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.genrecategory'),
),
migrations.AddField(
model_name='game',
name='platform',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.platformcategory'),
),
migrations.AddField(
model_name='game',
name='time',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.timecategory'),
),
]
| 45.139785
| 193
| 0.595045
| 414
| 4,198
| 5.891304
| 0.202899
| 0.0328
| 0.051661
| 0.081181
| 0.752768
| 0.752768
| 0.709717
| 0.709717
| 0.647396
| 0.647396
| 0
| 0.010065
| 0.266317
| 4,198
| 92
| 194
| 45.630435
| 0.781818
| 0.010719
| 0
| 0.635294
| 1
| 0
| 0.11708
| 0.0053
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035294
| 0
| 0.082353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
762189364ae8346baa62adb5a86bb79745cc8954
| 83
|
py
|
Python
|
contrib/frontends/py/nntpchan/__init__.py
|
majestrate/nntpchan
|
f92f68c3cdce4b7ce6d4121ca4356b36ebcd933f
|
[
"MIT"
] | 233
|
2015-08-06T02:51:52.000Z
|
2022-02-14T11:29:13.000Z
|
contrib/frontends/py/nntpchan/__init__.py
|
Revivify/nntpchan
|
0d555bb88a2298dae9aacf11348e34c52befa3d8
|
[
"MIT"
] | 98
|
2015-09-19T22:29:00.000Z
|
2021-06-12T09:43:13.000Z
|
contrib/frontends/py/nntpchan/__init__.py
|
Revivify/nntpchan
|
0d555bb88a2298dae9aacf11348e34c52befa3d8
|
[
"MIT"
] | 49
|
2015-08-06T02:51:55.000Z
|
2020-03-11T04:23:56.000Z
|
#
# entry for gunicorn
#
from nntpchan.app import app
from nntpchan import viewsp
| 11.857143
| 28
| 0.771084
| 12
| 83
| 5.333333
| 0.666667
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 83
| 6
| 29
| 13.833333
| 0.941176
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
52192abd9407b91e90fb61d5319cec65580111e5
| 34
|
py
|
Python
|
exercises/spiral-matrix/spiral_matrix.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,177
|
2017-06-21T20:24:06.000Z
|
2022-03-29T02:30:55.000Z
|
exercises/spiral-matrix/spiral_matrix.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,890
|
2017-06-18T20:06:10.000Z
|
2022-03-31T18:35:51.000Z
|
exercises/spiral-matrix/spiral_matrix.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,095
|
2017-06-26T23:06:19.000Z
|
2022-03-29T03:25:38.000Z
|
def spiral_matrix(size):
pass
| 11.333333
| 24
| 0.705882
| 5
| 34
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 2
| 25
| 17
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5231e6bd87f94e0063595e79c3086076e75fc714
| 31
|
py
|
Python
|
src/awss3/__init__.py
|
ZhiruiFeng/CarsMemory
|
658afb98b1b8a667ae45e599ceb56f51759fdfce
|
[
"MIT"
] | 9
|
2019-01-26T21:57:38.000Z
|
2021-08-13T11:55:56.000Z
|
src/awss3/__init__.py
|
ZhiruiFeng/CarsMemory
|
658afb98b1b8a667ae45e599ceb56f51759fdfce
|
[
"MIT"
] | 6
|
2019-02-03T05:42:50.000Z
|
2021-06-01T23:24:35.000Z
|
src/awss3/__init__.py
|
ZhiruiFeng/CarsMemory
|
658afb98b1b8a667ae45e599ceb56f51759fdfce
|
[
"MIT"
] | 5
|
2019-03-06T04:33:57.000Z
|
2021-05-31T17:43:57.000Z
|
#!/usr/bin/env python
# aws s3
| 10.333333
| 21
| 0.645161
| 6
| 31
| 3.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.16129
| 31
| 2
| 22
| 15.5
| 0.730769
| 0.870968
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
52813eb5a92147299029b6f56c0318355c220c8b
| 131
|
py
|
Python
|
backend/app/admin/components/__init__.py
|
griviala/garpix_page
|
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
|
[
"MIT"
] | null | null | null |
backend/app/admin/components/__init__.py
|
griviala/garpix_page
|
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
|
[
"MIT"
] | null | null | null |
backend/app/admin/components/__init__.py
|
griviala/garpix_page
|
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
|
[
"MIT"
] | null | null | null |
from .text_component import TextComponentAdmin # noqa
from .text_description_component import TextDescriptionComponentAdmin # noqa
| 43.666667
| 76
| 0.877863
| 13
| 131
| 8.615385
| 0.615385
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091603
| 131
| 2
| 77
| 65.5
| 0.941176
| 0.068702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5282a5299abff7b8701b16a10c2c45a9be1078cc
| 27
|
py
|
Python
|
portal/pulsar/__init__.py
|
bbhunter/pulsar
|
1f6384482eebc71137716e27ba7a010f3aea7241
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 12
|
2021-12-28T14:15:27.000Z
|
2022-03-29T00:45:00.000Z
|
portal/pulsar/__init__.py
|
bbhunter/pulsar
|
1f6384482eebc71137716e27ba7a010f3aea7241
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-02-09T12:47:14.000Z
|
2022-02-09T12:47:14.000Z
|
portal/pulsar/__init__.py
|
bbhunter/pulsar
|
1f6384482eebc71137716e27ba7a010f3aea7241
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-01-18T03:59:11.000Z
|
2022-01-18T03:59:11.000Z
|
from .celeryapp import *
| 6.75
| 24
| 0.703704
| 3
| 27
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 27
| 3
| 25
| 9
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
528a9e7052216cb329d6d29d4440112a9d78b9fe
| 146
|
py
|
Python
|
starter_code/api_keys.py
|
bjouellette/python-api-challenge
|
855c31769893596211ef072df8412cd47a557e19
|
[
"ADSL"
] | 1
|
2022-01-27T00:04:14.000Z
|
2022-01-27T00:04:14.000Z
|
starter_code/api_keys.py
|
bjouellette/python-api-challenge
|
855c31769893596211ef072df8412cd47a557e19
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
bjouellette/python-api-challenge
|
855c31769893596211ef072df8412cd47a557e19
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
weather_api_key = "e1067d92d6b631a16363bf4db3023b19"
# Google API Key
g_key = "AIzaSyA4RYdQ1nxoMTIW854C7wvVJMf0Qz5qjNk"
| 24.333333
| 52
| 0.842466
| 13
| 146
| 9.230769
| 0.615385
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229008
| 0.10274
| 146
| 5
| 53
| 29.2
| 0.687023
| 0.253425
| 0
| 0
| 0
| 0
| 0.669811
| 0.669811
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bfdb50593c6e1e9d0effbbd8845a4184d945a3b0
| 547
|
py
|
Python
|
plugins/minfraud/komand_minfraud/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/minfraud/komand_minfraud/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/minfraud/komand_minfraud/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .account_lookup.action import AccountLookup
from .all_lookup.action import AllLookup
from .billing_lookup.action import BillingLookup
from .card_lookup.action import CardLookup
from .cart_lookup.action import CartLookup
from .device_lookup.action import DeviceLookup
from .email_lookup.action import EmailLookup
from .event_lookup.action import EventLookup
from .order_lookup.action import OrderLookup
from .payment_lookup.action import PaymentLookup
from .shipping_lookup.action import ShippingLookup
| 42.076923
| 50
| 0.859232
| 73
| 547
| 6.287671
| 0.452055
| 0.287582
| 0.431373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096892
| 547
| 12
| 51
| 45.583333
| 0.92915
| 0.067642
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bfe27fe9229cd07b93680b62a614ccd6ac91ab8a
| 208
|
py
|
Python
|
wmt-etl/config.py
|
ministryofjustice/hmpps-wmt
|
66a85b029e2fc2b525b299f9e2ac1803b9cf8516
|
[
"MIT"
] | 3
|
2017-02-27T17:09:20.000Z
|
2017-03-27T08:23:50.000Z
|
wmt-etl/config.py
|
ministryofjustice/hmpps-wmt
|
66a85b029e2fc2b525b299f9e2ac1803b9cf8516
|
[
"MIT"
] | 3
|
2017-03-03T16:08:20.000Z
|
2017-03-16T17:19:34.000Z
|
wmt-etl/config.py
|
ministryofjustice/noms-wmt-alpha
|
66a85b029e2fc2b525b299f9e2ac1803b9cf8516
|
[
"MIT"
] | 1
|
2021-04-11T06:54:44.000Z
|
2021-04-11T06:54:44.000Z
|
import os
DB_SERVER = os.getenv('WMT_DB_SERVER', 'localhost')
DB_NAME = os.getenv('WMT_DB_NAME', 'wmt_db')
DB_USERNAME = os.getenv('WMT_DB_USERNAME', 'wmt')
DB_PASSWORD = os.getenv('WMT_DB_PASSWORD', 'wmt')
| 29.714286
| 51
| 0.735577
| 35
| 208
| 4
| 0.285714
| 0.214286
| 0.314286
| 0.371429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091346
| 208
| 6
| 52
| 34.666667
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0.360577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
870a1fd5ff0795c011afc5632b304b463b0623e3
| 131
|
py
|
Python
|
testapp2/admin.py
|
gabrielbiasi/django-improved-permissions
|
9cf6d0ddb8a4dcfa2e58d3adbf1357e56a64ce71
|
[
"MIT"
] | 12
|
2018-03-22T00:30:32.000Z
|
2021-04-24T16:26:08.000Z
|
testapp2/admin.py
|
s-sys/django-improved-permissions
|
9cf6d0ddb8a4dcfa2e58d3adbf1357e56a64ce71
|
[
"MIT"
] | 27
|
2018-03-18T00:43:37.000Z
|
2020-06-05T18:09:18.000Z
|
testapp2/admin.py
|
gabrielbiasi/django-improved-permissions
|
9cf6d0ddb8a4dcfa2e58d3adbf1357e56a64ce71
|
[
"MIT"
] | 2
|
2018-03-28T17:54:43.000Z
|
2021-01-11T21:17:08.000Z
|
""" testapp2 admin configs """
from django.contrib import admin
from testapp2.models import Library
admin.site.register(Library)
| 18.714286
| 35
| 0.78626
| 17
| 131
| 6.058824
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.122137
| 131
| 6
| 36
| 21.833333
| 0.878261
| 0.167939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
87234ddbc75c00fe76141a6b66832d15ac92c6f3
| 2,495
|
py
|
Python
|
slides/figs/draw.py
|
hiyouga/AMP-Poster-Slides-LaTeX
|
c1fd40aa5ef3216f17b4d27dc6e6092e3cc52e40
|
[
"MIT"
] | 8
|
2021-05-25T11:56:48.000Z
|
2021-12-20T07:12:01.000Z
|
slides/figs/draw.py
|
hiyouga/AMP-Poster-Slides-LaTeX
|
c1fd40aa5ef3216f17b4d27dc6e6092e3cc52e40
|
[
"MIT"
] | 1
|
2021-05-28T15:25:37.000Z
|
2021-05-30T05:01:24.000Z
|
slides/figs/draw.py
|
hiyouga/AMP-Poster-Slides-LaTeX
|
c1fd40aa5ef3216f17b4d27dc6e6092e3cc52e40
|
[
"MIT"
] | 2
|
2021-05-26T01:39:53.000Z
|
2021-12-20T06:36:04.000Z
|
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
default_params = {
'text.usetex': False,
'font.family': 'Times New Roman',
'font.serif': 'Times New Roman'
}
if __name__ == '__main__':
plt.rcParams.update(default_params)
myfont1 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=14)
myfont2 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=12)
plt.figure(figsize=(5, 3))
x = np.linspace(0.001, 5, 1000)
y1 = 0.001 * x ** 2 + 0.02 * 1 / x + 0.02
y2 = 0.12 * x ** 2 + 0.04 * 1 / x + 0.06
plt.plot(x, y1, color='b', linestyle='--', label='Training error')
plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
cx = 0.55
cy = 0.12 * cx ** 2 + 0.04 * 1 / cx + 0.06
plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
plt.arrow(1.6, 0.21, 0.0, 0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
plt.arrow(1.6, 0.21, 0.0, -0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
plt.text(1.65, 0.18, 'Generalization gap', fontproperties=myfont2)
plt.legend(loc='upper right', prop=myfont1)
plt.xticks([0])
plt.yticks([])
plt.xlabel('Capacity', fontproperties=myfont1)
plt.ylabel('Error', fontproperties=myfont1)
plt.xlim((-0.01, 2.5))
plt.ylim((-0.01, 1.2))
plt.savefig('gap1.pdf', format='pdf', dpi=900, bbox_inches='tight')
plt.figure(figsize=(5, 3))
x = np.linspace(0.001, 5, 1000)
y1 = 0.005 * x ** 2 + 0.03 * 1 / x + 0.03
y2 = 0.04 * x ** 2 + 0.05 * 1 / x + 0.03
plt.plot(x, y1, color='b', linestyle='--', label='Training error')
plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
cx = 0.855
cy = 0.04 * cx ** 2 + 0.05 * 1 / cx + 0.03
plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
plt.legend(loc='upper right', prop=myfont1)
plt.xticks([0])
plt.yticks([])
plt.xlabel('Capacity', fontproperties=myfont1)
plt.ylabel('Error', fontproperties=myfont1)
plt.xlim((-0.01, 2.5))
plt.ylim((-0.01, 1.2))
plt.savefig('gap2.pdf', format='pdf', dpi=900, bbox_inches='tight')
| 43.77193
| 122
| 0.600802
| 406
| 2,495
| 3.647783
| 0.258621
| 0.037812
| 0.008103
| 0.045915
| 0.784605
| 0.784605
| 0.784605
| 0.784605
| 0.740041
| 0.668467
| 0
| 0.103922
| 0.182365
| 2,495
| 56
| 123
| 44.553571
| 0.622059
| 0
| 0
| 0.528302
| 0
| 0
| 0.136273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.056604
| 0
| 0.056604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
872a7c2cdb92c261fe174b94da5759ed7dfbd97f
| 40
|
py
|
Python
|
expressive_regex/exceptions.py
|
fsadannn/expressive_regex
|
3bf113e8288a0f7d756f24cf882be8709630d4d3
|
[
"MIT"
] | 2
|
2020-07-31T13:49:17.000Z
|
2020-09-16T14:47:23.000Z
|
expressive_regex/exceptions.py
|
fsadannn/expressive_regex
|
3bf113e8288a0f7d756f24cf882be8709630d4d3
|
[
"MIT"
] | null | null | null |
expressive_regex/exceptions.py
|
fsadannn/expressive_regex
|
3bf113e8288a0f7d756f24cf882be8709630d4d3
|
[
"MIT"
] | null | null | null |
class BadStatement(Exception):
pass
| 13.333333
| 30
| 0.75
| 4
| 40
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 2
| 31
| 20
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
874c9017ad17aa813ad7a4b4bd54385bf7e3cba6
| 91
|
py
|
Python
|
build/lib/acousondePy/__init__.py
|
SvenGastauer/acousondePy
|
94a99dc9de35d644a35cbfa3078110a67a35212e
|
[
"MIT"
] | null | null | null |
build/lib/acousondePy/__init__.py
|
SvenGastauer/acousondePy
|
94a99dc9de35d644a35cbfa3078110a67a35212e
|
[
"MIT"
] | null | null | null |
build/lib/acousondePy/__init__.py
|
SvenGastauer/acousondePy
|
94a99dc9de35d644a35cbfa3078110a67a35212e
|
[
"MIT"
] | null | null | null |
from .MTRead import MTread,spec_plot,read_multiple_MT
from .main import MTreadgui,acousonde
| 45.5
| 53
| 0.868132
| 14
| 91
| 5.428571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 91
| 2
| 54
| 45.5
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
87567263e4472013f3e9c6f40f7e91f2cff4f5d5
| 40
|
py
|
Python
|
tipos de datos/integer1.py
|
gabys12/portafolio-fundamento-de-programacion
|
c9b47f32e885ed6ae80b14133a609798ea034e19
|
[
"CNRI-Python"
] | null | null | null |
tipos de datos/integer1.py
|
gabys12/portafolio-fundamento-de-programacion
|
c9b47f32e885ed6ae80b14133a609798ea034e19
|
[
"CNRI-Python"
] | null | null | null |
tipos de datos/integer1.py
|
gabys12/portafolio-fundamento-de-programacion
|
c9b47f32e885ed6ae80b14133a609798ea034e19
|
[
"CNRI-Python"
] | null | null | null |
x = 100
y = 50
print('x=', x, 'y=', y)
| 8
| 23
| 0.4
| 9
| 40
| 1.777778
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0.275
| 40
| 4
| 24
| 10
| 0.37931
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5e8a67301e87d6b49ee5b0aa718dbabf712a571d
| 245
|
py
|
Python
|
main.py
|
glhrmfrts/instr
|
ba08fae5149c193ada0003c0f4ca042dca84e457
|
[
"MIT"
] | null | null | null |
main.py
|
glhrmfrts/instr
|
ba08fae5149c193ada0003c0f4ca042dca84e457
|
[
"MIT"
] | null | null | null |
main.py
|
glhrmfrts/instr
|
ba08fae5149c193ada0003c0f4ca042dca84e457
|
[
"MIT"
] | null | null | null |
from instr.instruments import *
from instr.effects import *
s = Sqr().bind(tremolo(), echo(0.4, 0.8)).loop(2, [(244, 1), (289, 1), (365, 2)]).loop(4, [(244, 0.1), (289, 0.1), (365, 0.1), (1, 0.1), (237, 0.1), (1, 0.1)]).save('tests/instr.wav')
| 49
| 183
| 0.555102
| 49
| 245
| 2.77551
| 0.469388
| 0.088235
| 0.044118
| 0.058824
| 0.073529
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206573
| 0.130612
| 245
| 4
| 184
| 61.25
| 0.431925
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5e8def353cd8038d157012651972bf4783cd5467
| 77
|
py
|
Python
|
planar_ising/lipton_tarjan/__init__.py
|
ValeryTyumen/planar_ising
|
5a1803487e1dd59c5d5e790cc949b7234bf52ac8
|
[
"MIT"
] | 8
|
2019-05-02T20:27:21.000Z
|
2020-11-01T20:41:38.000Z
|
planar_ising/lipton_tarjan/__init__.py
|
ValeryTyumen/planar_ising
|
5a1803487e1dd59c5d5e790cc949b7234bf52ac8
|
[
"MIT"
] | 1
|
2019-09-03T18:15:53.000Z
|
2019-09-06T16:41:12.000Z
|
planar_ising/lipton_tarjan/__init__.py
|
ValeryTyumen/planar_ising
|
5a1803487e1dd59c5d5e790cc949b7234bf52ac8
|
[
"MIT"
] | 3
|
2019-08-11T23:08:58.000Z
|
2022-03-19T09:09:50.000Z
|
from .planar_separator import PlanarSeparator
from . import separation_class
| 25.666667
| 45
| 0.87013
| 9
| 77
| 7.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 2
| 46
| 38.5
| 0.942029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5e9c05834cf6ad1608c5d29f26bb72785dc3ceb3
| 45
|
py
|
Python
|
pyIOS/exceptions.py
|
jtdub/pyIOS
|
1842b92068e3b0a980d53e0719efd41dbbdaf082
|
[
"Apache-2.0"
] | 12
|
2016-01-09T17:47:05.000Z
|
2022-02-09T18:09:41.000Z
|
pyIOS/exceptions.py
|
jtdub/pyIOS
|
1842b92068e3b0a980d53e0719efd41dbbdaf082
|
[
"Apache-2.0"
] | 16
|
2016-01-05T15:49:31.000Z
|
2016-08-04T20:59:15.000Z
|
pyIOS/exceptions.py
|
jtdub/pyIOS
|
1842b92068e3b0a980d53e0719efd41dbbdaf082
|
[
"Apache-2.0"
] | 1
|
2016-04-06T16:00:32.000Z
|
2016-04-06T16:00:32.000Z
|
class InvalidInputError(Exception):
pass
| 15
| 35
| 0.777778
| 4
| 45
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 36
| 22.5
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5e9cfe480f48cebb3ee33cedf5c2da8409e69016
| 71
|
py
|
Python
|
purestorage/__init__.py
|
sile16/rest-client
|
01604e00e8a64157e056fca614d320c3afd0f2d1
|
[
"BSD-2-Clause"
] | 20
|
2018-10-26T01:33:15.000Z
|
2022-03-31T19:56:08.000Z
|
purestorage/__init__.py
|
sile16/rest-client
|
01604e00e8a64157e056fca614d320c3afd0f2d1
|
[
"BSD-2-Clause"
] | 15
|
2018-08-09T20:42:21.000Z
|
2022-01-14T15:59:58.000Z
|
purestorage/__init__.py
|
sile16/rest-client
|
01604e00e8a64157e056fca614d320c3afd0f2d1
|
[
"BSD-2-Clause"
] | 16
|
2018-10-22T18:31:42.000Z
|
2021-08-09T15:33:35.000Z
|
from .purestorage import FlashArray, PureError, PureHTTPError, VERSION
| 35.5
| 70
| 0.84507
| 7
| 71
| 8.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098592
| 71
| 1
| 71
| 71
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5eb6ed393de918f8c3120b183de9a52d1c9d90da
| 216
|
py
|
Python
|
master/scripts/paths.py
|
OPU-Surveillance-System/monitoring
|
2c2c657c74fce9a5938d986372f9077708617d9c
|
[
"MIT"
] | 4
|
2020-12-24T11:51:28.000Z
|
2022-02-08T09:02:38.000Z
|
master/scripts/paths.py
|
OPU-Surveillance-System/monitoring
|
2c2c657c74fce9a5938d986372f9077708617d9c
|
[
"MIT"
] | 1
|
2021-11-16T02:54:35.000Z
|
2021-11-16T02:54:35.000Z
|
master/scripts/paths.py
|
OPU-Surveillance-System/monitoring
|
2c2c657c74fce9a5938d986372f9077708617d9c
|
[
"MIT"
] | null | null | null |
"""
Define the environment paths
"""
#Path variables
TEMPLATE_PATH = "/home/scom/documents/opu_surveillance_system/monitoring/master/"
STATIC_PATH = "/home/scom/documents/opu_surveillance_system/monitoring/static/"
| 27
| 81
| 0.805556
| 26
| 216
| 6.461538
| 0.615385
| 0.095238
| 0.142857
| 0.25
| 0.619048
| 0.619048
| 0.619048
| 0.619048
| 0
| 0
| 0
| 0
| 0.069444
| 216
| 7
| 82
| 30.857143
| 0.835821
| 0.199074
| 0
| 0
| 0
| 0
| 0.763636
| 0.763636
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5eb703c2f5ca89811146d4e9b20de24a3405a5d5
| 3,181
|
py
|
Python
|
tests/strategies/test_local_strategy.py
|
gijswobben/customs
|
72c0d071fe35ed84eb6d6371eb651edcd13a1044
|
[
"MIT"
] | null | null | null |
tests/strategies/test_local_strategy.py
|
gijswobben/customs
|
72c0d071fe35ed84eb6d6371eb651edcd13a1044
|
[
"MIT"
] | null | null | null |
tests/strategies/test_local_strategy.py
|
gijswobben/customs
|
72c0d071fe35ed84eb6d6371eb651edcd13a1044
|
[
"MIT"
] | null | null | null |
from flask.globals import request
import pytest
from flask import Flask
from typing import Dict
from customs import Customs
from customs.exceptions import UnauthorizedException
from customs.strategies import LocalStrategy
def test_local_strategy_initialization_without_customs():
class Local(LocalStrategy):
def get_or_create_user(self, user: Dict) -> Dict:
return super().get_or_create_user(user)
def validate_credentials(self, username: str, password: str) -> Dict:
return super().validate_credentials(username, password)
with pytest.warns(UserWarning):
print(Customs.get_instance())
strategy = Local()
assert strategy.name == "local"
def test_local_strategy_initialization_with_customs():
class Local(LocalStrategy):
def get_or_create_user(self, user: Dict) -> Dict:
return super().get_or_create_user(user)
def validate_credentials(self, username: str, password: str) -> Dict:
return super().validate_credentials(username, password)
# Create customs
app = Flask("TESTS")
app.secret_key = "630738a8-3b13-4311-8018-87554d6f7e85"
Customs(app)
# Create the strategy
strategy = Local()
assert strategy.name == "local"
# Cleanup of the Customs object used for testing
Customs.remove_instance()
def test_local_strategy_extract_crendentials():
class Local(LocalStrategy):
def get_or_create_user(self, user: Dict) -> Dict:
return super().get_or_create_user(user)
def validate_credentials(self, username: str, password: str) -> Dict:
return super().validate_credentials(username, password)
# Create customs
app = Flask("TESTS")
app.secret_key = "630738a8-3b13-4311-8018-87554d6f7e85"
Customs(app)
# Create the strategy
strategy = Local()
with app.test_request_context("/?test=123", json={"bla": "bla"}):
credentials = strategy.extract_credentials(request)
assert credentials == {}
with app.test_request_context("/?username=test&password=test"):
credentials = strategy.extract_credentials(request)
assert "username" in credentials
assert "password" in credentials
# Cleanup of the Customs object used for testing
Customs.remove_instance()
def test_local_strategy_authenticate():
class Local(LocalStrategy):
def get_or_create_user(self, user: Dict) -> Dict:
return super().get_or_create_user(user)
def validate_credentials(self, username: str, password: str) -> Dict:
return {}
# Create customs
app = Flask("TESTS")
app.secret_key = "630738a8-3b13-4311-8018-87554d6f7e85"
Customs(app)
# Create the strategy
strategy = Local()
with app.test_request_context("/?test=123", json={"bla": "bla"}):
with pytest.raises(UnauthorizedException):
user = strategy.authenticate(request)
with app.test_request_context("/?username=test&password=test"):
user = strategy.authenticate(request)
assert user == {}
# Cleanup of the Customs object used for testing
Customs.remove_instance()
| 30.586538
| 77
| 0.688463
| 368
| 3,181
| 5.774457
| 0.17663
| 0.018824
| 0.041412
| 0.056471
| 0.787294
| 0.755294
| 0.680471
| 0.680471
| 0.680471
| 0.634353
| 0
| 0.034731
| 0.212512
| 3,181
| 103
| 78
| 30.883495
| 0.813573
| 0.07702
| 0
| 0.703125
| 0
| 0
| 0.081681
| 0.056733
| 0
| 0
| 0
| 0
| 0.09375
| 1
| 0.1875
| false
| 0.15625
| 0.109375
| 0.125
| 0.484375
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 5
|
5ed705bdaa66e16d951b96579ba77a4976ae6a2d
| 66
|
py
|
Python
|
pygame_ui/__init__.py
|
oof6969696969/pygame_ui
|
ca59652f30718dd8c578d994239d3a2d7aadae9c
|
[
"MIT"
] | null | null | null |
pygame_ui/__init__.py
|
oof6969696969/pygame_ui
|
ca59652f30718dd8c578d994239d3a2d7aadae9c
|
[
"MIT"
] | null | null | null |
pygame_ui/__init__.py
|
oof6969696969/pygame_ui
|
ca59652f30718dd8c578d994239d3a2d7aadae9c
|
[
"MIT"
] | null | null | null |
from lib.pygame_ui import UIManager, Widgets, Shapes, load_theme
| 33
| 65
| 0.818182
| 10
| 66
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 1
| 66
| 66
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0d6cfdc31d74171c37475fd7569d74d50b976420
| 24
|
py
|
Python
|
test.py
|
JohnnyBruh/Repository
|
f8bfb14737eee78fa8da400c7f6ddb21efda4baf
|
[
"CC0-1.0"
] | null | null | null |
test.py
|
JohnnyBruh/Repository
|
f8bfb14737eee78fa8da400c7f6ddb21efda4baf
|
[
"CC0-1.0"
] | null | null | null |
test.py
|
JohnnyBruh/Repository
|
f8bfb14737eee78fa8da400c7f6ddb21efda4baf
|
[
"CC0-1.0"
] | null | null | null |
print("yaaaay")
input()
| 12
| 16
| 0.666667
| 3
| 24
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 24
| 2
| 17
| 12
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0d736e7e49cff33ae106086347e250953188ade6
| 249
|
py
|
Python
|
Multiples of 3 and 5.py
|
ahmedharbi197/Project-Euler
|
596fa7622233868a08200f2d7fe3b7e83d0af41f
|
[
"MIT"
] | 1
|
2019-06-10T23:10:38.000Z
|
2019-06-10T23:10:38.000Z
|
Multiples of 3 and 5.py
|
ahmedharbi197/Project-Euler
|
596fa7622233868a08200f2d7fe3b7e83d0af41f
|
[
"MIT"
] | null | null | null |
Multiples of 3 and 5.py
|
ahmedharbi197/Project-Euler
|
596fa7622233868a08200f2d7fe3b7e83d0af41f
|
[
"MIT"
] | null | null | null |
import sys


def preSum(q):
    """Return the q-th triangular number: 1 + 2 + ... + q."""
    return (q * (1 + q) // 2)


def sum_multiples(n):
    """Return the sum of all multiples of 3 or 5 strictly below ``n``.

    Uses inclusion-exclusion on triangular numbers: sum of multiples of 3,
    plus multiples of 5, minus multiples of 15 (which were counted twice).
    Runs in O(1) instead of looping over the range.
    """
    m = n - 1  # largest candidate strictly below n
    return 3 * preSum(m // 3) + 5 * preSum(m // 5) - 15 * preSum(m // 15)


if __name__ == "__main__":
    # HackerRank-style input: first line is the test-case count, then one
    # n per line. The helper is now defined once, not re-defined inside
    # the loop on every iteration as in the original.
    t = int(input().strip())
    for _ in range(t):
        n = int(input().strip())
        print(sum_multiples(n))
| 24.9
| 90
| 0.526104
| 45
| 249
| 2.911111
| 0.488889
| 0.206107
| 0.229008
| 0.251908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070707
| 0.204819
| 249
| 9
| 91
| 27.666667
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0.125
| 0.375
| 0.125
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
0d92dd46b5c6cda3a158d46142ec944eda28a213
| 7,986
|
py
|
Python
|
fairmlhealth/__fairness_metrics.py
|
masino-lab/fairMLHealth
|
943ffed5f57997401823bd2afc257f34f76ea157
|
[
"MIT"
] | 19
|
2020-10-29T10:14:59.000Z
|
2022-03-20T06:27:35.000Z
|
fairmlhealth/__fairness_metrics.py
|
masino-lab/fairMLHealth
|
943ffed5f57997401823bd2afc257f34f76ea157
|
[
"MIT"
] | 52
|
2020-10-14T19:21:27.000Z
|
2021-09-15T19:01:52.000Z
|
fairmlhealth/__fairness_metrics.py
|
masino-lab/fairMLHealth
|
943ffed5f57997401823bd2afc257f34f76ea157
|
[
"MIT"
] | 9
|
2020-12-02T21:40:27.000Z
|
2021-11-01T18:09:10.000Z
|
""" Custom Fairness Metrics
Note that ratio and difference computation is handled by AIF360's
sklearn.metrics module. As of the V 0.4.0 release, these are calculated as
[unprivileged/privileged] and [unprivileged - privileged], respectively
"""
from typing import Callable
from aif360.sklearn.metrics import difference, ratio
import numpy as np
import pandas as pd
from warnings import catch_warnings, filterwarnings
from .performance_metrics import (
false_positive_rate,
true_positive_rate,
true_negative_rate,
false_negative_rate,
precision,
)
def __manage_undefined_ratios(func: Callable):
    """ Wraps ratio functions to return NaN values instead of 0.0 in cases
    where the ratio is undefined (i.e. the metric for the privileged group
    is 0, which makes AIF360 emit a warning and return 0.0).
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped metric's __name__/docstring
    def wrapper(*args, **kwargs):
        funcname = getattr(func, "__name__", "an unknown function")
        # This is the exact warning text AIF360 emits for an undefined
        # ratio; filterwarnings treats it as a regex, but the literal
        # text here matches itself.
        msg = (
            "The ratio is ill-defined and being set to 0.0 because"
            + f" '{funcname}' for privileged samples is 0."
        )
        with catch_warnings(record=True) as w:
            filterwarnings("ignore", message=msg)
            res = func(*args, **kwargs)
        # Any recorded warning means the ratio was undefined -> NaN.
        if len(w) > 0:
            return np.nan
        else:
            return res

    return wrapper
@__manage_undefined_ratios
def ppv_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of Positive Predictive Values,
    computed as [unprivileged/privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the PPV ratio, or NaN when the privileged PPV is 0
    """
    return ratio(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)
@__manage_undefined_ratios
def tpr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of True Positive Rates,
    computed as [unprivileged/privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the TPR ratio, or NaN when the privileged TPR is 0
    """
    return ratio(
        true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
@__manage_undefined_ratios
def fpr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of False Positive Rates,
    computed as [unprivileged/privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the FPR ratio, or NaN when the privileged FPR is 0
    """
    return ratio(
        false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
@__manage_undefined_ratios
def tnr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of True Negative Rates,
    computed as [unprivileged/privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the TNR ratio, or NaN when the privileged TNR is 0
    """
    return ratio(
        true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
@__manage_undefined_ratios
def fnr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of False Negative Rates,
    computed as [unprivileged/privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the FNR ratio, or NaN when the privileged FNR is 0
    """
    return ratio(
        false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def ppv_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of Positive Predictive Values,
    computed as [unprivileged - privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the PPV difference
    """
    return difference(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)
def tpr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of True Positive Rates,
    computed as [unprivileged - privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the TPR difference
    """
    return difference(
        true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def fpr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of False Positive Rates,
    computed as [unprivileged - privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the FPR difference
    """
    return difference(
        false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def tnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of True Negative Rates,
    computed as [unprivileged - privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the TNR difference
    """
    return difference(
        true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def fnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of False Negative Rates,
    computed as [unprivileged - privileged] (see module docstring).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the FNR difference
    """
    return difference(
        false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
""" Combined Metrics """
def eq_odds_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the equalized-odds difference: whichever of the
    between-group FPR difference and TPR difference has the larger
    absolute value.

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number
    """
    fpr_delta = fpr_diff(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
    tpr_delta = tpr_diff(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
    # Ties go to the TPR difference, matching the original branch order.
    return fpr_delta if abs(fpr_delta) > abs(tpr_delta) else tpr_delta
def eq_odds_ratio(
    y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1
):
    """ Returns the equalized-odds ratio: whichever of the between-group
    FPR ratio and TPR ratio is farther from parity (a ratio of 1).

    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.

    Returns:
        Number: the selected ratio, or NaN if either ratio is undefined
    """
    fpr_r = fpr_ratio(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
    tpr_r = tpr_ratio(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
    if np.isnan(fpr_r) or np.isnan(tpr_r):
        return np.nan
    # Distance from parity, rounded to 6 places to damp float noise;
    # ties go to the TPR ratio, matching the original branch order.
    return fpr_r if round(abs(fpr_r - 1), 6) > round(abs(tpr_r - 1), 6) else tpr_r
| 29.577778
| 88
| 0.660155
| 1,153
| 7,986
| 4.35386
| 0.117086
| 0.076494
| 0.033466
| 0.062151
| 0.780478
| 0.772112
| 0.772112
| 0.766335
| 0.766335
| 0.766335
| 0
| 0.007182
| 0.250313
| 7,986
| 269
| 89
| 29.687732
| 0.831301
| 0.464688
| 0
| 0.302326
| 0
| 0
| 0.034261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162791
| false
| 0
| 0.069767
| 0
| 0.44186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0dc892c0ef85acbf71fdb47d9850dae8da0e6d7a
| 10,355
|
py
|
Python
|
code/services/synergy_services.py
|
EdsonECM17/DS_Proyecto_02_Synergy_Logistics
|
a6c347f99e69b926d337db82653dd16850668f4b
|
[
"MIT"
] | null | null | null |
code/services/synergy_services.py
|
EdsonECM17/DS_Proyecto_02_Synergy_Logistics
|
a6c347f99e69b926d337db82653dd16850668f4b
|
[
"MIT"
] | null | null | null |
code/services/synergy_services.py
|
EdsonECM17/DS_Proyecto_02_Synergy_Logistics
|
a6c347f99e69b926d337db82653dd16850668f4b
|
[
"MIT"
] | null | null | null |
from typing import List
from processing.sl_filters import SynergyLogisticsFilters
class Service(SynergyLogisticsFilters):
    """
    Service layer with analysis helpers for the Synergy Logistics
    routes table. All filtering is delegated to the inherited
    ``filter_routes_df`` (from SynergyLogisticsFilters).

    NOTE(review): several method names keep the original misspelling
    "frecuency" (frequency) for backward compatibility with callers.
    """
    def get_routes_list(self, direction: "str | None" = None) -> List:
        """Build a list of every distinct route in the table.

        Args:
            direction (str or None, optional): transaction direction. Defaults to None.

        Returns:
            List: routes formatted as "origin-destination".
        """
        routes_list = []
        # Filter tables by direction
        filtered_table = self.filter_routes_df(direction=direction)
        # Check row by row table
        for index, row in filtered_table.iterrows():
            # route=origin-destination
            route = (row['origin']+ "-" + row['destination'])
            if not route in routes_list:
                routes_list.append(route)
        return routes_list

    def get_total_elements(self, direction: "str | None" = None, year: "int | None" = None, transport_mode: "str | None" = None) -> int:
        """
        Count the number of transactions in a filtered table.
        Results can be filtered by direction, year and/or transport mode.

        Args:
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.
            transport_mode (str or None, optional): transport mode. Defaults to None.

        Returns:
            int: total number of rows in the filtered table.
        """
        # Filtered table
        filtered_table = self.filter_routes_df(direction=direction, start_year=year,
                                               end_year=year, transport_mode=transport_mode)
        # Count rows in the table
        elements_count= len(filtered_table)
        return elements_count

    def get_route_frecuency(self, route: str, direction: "str | None" = None, year: "int | None" = None)-> int:
        """
        Count how many times a route appears in a filtered table.
        Results can be filtered by direction and/or year.

        Args:
            route (str): route formatted as "origin-destination".
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.

        Returns:
            int: number of occurrences of the route in the filtered table.
        """
        # Split origin and destination for the filters
        origin, destination = route.split("-")
        # Filtered table
        filtered_table = self.filter_routes_df(origin=origin, destination=destination, direction=direction,
                                               start_year=year, end_year=year)
        # Count rows in the table
        route_frecuency = len(filtered_table)
        return route_frecuency

    def get_total_value(self, direction: "str | None" = None, year: "int | None" = None, transport_mode: "str | None" = None) -> int:
        """
        Sum the total value over a filtered table.
        Results can be filtered by direction, year and/or transport mode.

        Args:
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.
            transport_mode (str or None, optional): transport mode. Defaults to None.

        Returns:
            int: sum of the "total_value" column of the filtered table.
        """
        filtered_table = self.filter_routes_df(direction=direction, start_year=year, end_year=year, transport_mode=transport_mode)
        total_value = filtered_table["total_value"].sum()
        return total_value

    def get_route_value(self, route: str, direction: "str | None" = None, year: "int | None" = None) -> int:
        """
        Sum the total value for one specific route within a filtered table.
        Results can be filtered by direction and/or year.

        Args:
            route (str): route formatted as "origin-destination".
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.

        Returns:
            int: sum of the "total_value" column of the filtered table.
        """
        origin, destination = route.split("-")
        filtered_table = self.filter_routes_df(origin=origin, destination=destination, direction=direction,
                                               start_year=year, end_year=year)
        route_value = filtered_table["total_value"].sum()
        return route_value

    def get_top_ten(self, all_cases: dict) -> dict:
        """Pick the 10 cases with the highest values from a dictionary.

        Args:
            all_cases (dict): dictionary with all cases.

        Returns:
            dict: the (up to) 10 best cases, keyed as in ``all_cases``.
        """
        # Sort keys by value, descending, and keep the first 10
        top_ten_cases = sorted(all_cases, key=all_cases.get, reverse=True)[:10]
        top_ten_dict = {}
        for case in top_ten_cases:
            top_ten_dict[case] = all_cases[case]
        return top_ten_dict

    def get_transport_frecuency(self, transport: str, direction: "str | None" = None, year: "int | None" = None)-> int:
        """
        Count how many times a transport mode appears in a filtered table.
        Results can be filtered by direction and/or year.

        Args:
            transport (str): transport mode.
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.

        Returns:
            int: number of occurrences of the transport mode in the filtered table.
        """
        # Filtered table
        filtered_table = self.filter_routes_df(transport_mode=transport, direction=direction,
                                               start_year=year, end_year=year)
        # Count rows in the table
        transport_frecuency = len(filtered_table)
        return transport_frecuency

    def get_transport_value(self, transport: str, direction: "str | None" = None, year: "int | None" = None) -> int:
        """
        Sum the total value for one specific transport mode within a filtered table.
        Results can be filtered by direction and/or year.

        Args:
            transport (str): transport mode.
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.

        Returns:
            int: sum of the "total_value" column of the filtered table.
        """
        filtered_table = self.filter_routes_df(transport_mode=transport, direction=direction,
                                               start_year=year, end_year=year)
        transport_value = filtered_table["total_value"].sum()
        return transport_value

    def get_country_frecuency(self, origin: "str | None" = None, destination: "str | None" = None, direction: "str | None" = None, year: "int | None" = None)-> int:
        """
        Count how many times a country appears in a filtered table.
        Results can be filtered by origin, destination, direction and/or year.

        Args:
            origin (str or None, optional): origin country. Defaults to None.
            destination (str or None, optional): destination country. Defaults to None.
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.

        Returns:
            int: number of matching rows in the filtered table.
        """
        # Filtered table
        filtered_table = self.filter_routes_df(origin=origin, destination=destination, direction=direction,
                                               start_year=year, end_year=year)
        # Count rows in the table
        transport_frecuency = len(filtered_table)
        return transport_frecuency

    def get_country_value(self, origin: "str | None" = None, destination: "str | None" = None, direction: "str | None" = None, year: "int | None" = None) -> int:
        """
        Sum the total value for one specific country within a filtered table.
        Results can be filtered by origin, destination, direction and/or year.

        Args:
            origin (str or None, optional): origin country. Defaults to None.
            destination (str or None, optional): destination country. Defaults to None.
            direction (str or None, optional): transaction direction. Defaults to None.
            year (int or None, optional): transaction year. Defaults to None.

        Returns:
            int: sum of the "total_value" column of the filtered table.
        """
        filtered_table = self.filter_routes_df(origin=origin, destination=destination, direction=direction,
                                               start_year=year, end_year=year)
        transport_value = filtered_table["total_value"].sum()
        return transport_value

    def reorder_dict_max(self, data_dict: dict) -> dict:
        """
        Sort a dictionary by value, from highest to lowest.
        Entries whose value is 0 are dropped.

        Args:
            data_dict (dict): unordered data dictionary.

        Returns:
            dict: ordered, filtered data dictionary.
        """
        # New dictionary to hold the ordered data
        ordered_data_dict = {}
        ordered_keys = sorted(data_dict, key=data_dict.get, reverse=True)
        for key in ordered_keys:
            # if value is 0, skip
            if data_dict[key] == 0:
                continue
            # if value > 0
            else:
                ordered_data_dict[key]=data_dict[key]
        return ordered_data_dict
| 45.416667
| 156
| 0.630324
| 1,282
| 10,355
| 4.982839
| 0.123245
| 0.044145
| 0.043676
| 0.05072
| 0.757514
| 0.738259
| 0.724796
| 0.713212
| 0.703037
| 0.703037
| 0
| 0.00138
| 0.300241
| 10,355
| 227
| 157
| 45.61674
| 0.88021
| 0.471077
| 0
| 0.328358
| 0
| 0
| 0.013856
| 0
| 0
| 0
| 0
| 0.004405
| 0
| 1
| 0.164179
| false
| 0
| 0.029851
| 0
| 0.373134
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
218cc1a2784c43a8ecfb6c736b8023171e1890c1
| 149
|
py
|
Python
|
vb2py/PythonCard/__init__.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
vb2py/PythonCard/__init__.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
vb2py/PythonCard/__init__.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Created: 2001/08/05
Purpose: Turn PythonCard into a package
__version__ = "$Revision: 1.1.1.1 $"
__date__ = "$Date: 2001/08/06 19:53:11 $"
"""
| 16.555556
| 41
| 0.651007
| 24
| 149
| 3.708333
| 0.75
| 0.067416
| 0.067416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 0.154362
| 149
| 8
| 42
| 18.625
| 0.5
| 0.932886
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
21af95c3e6f5614235525e918b9f73b1e391d922
| 42
|
py
|
Python
|
fzzzMaskBackend/users/serializers.py
|
FZZZMask/backend
|
4f987e96a5ff42d89cf536c099b944f5f7254764
|
[
"BSD-3-Clause"
] | null | null | null |
fzzzMaskBackend/users/serializers.py
|
FZZZMask/backend
|
4f987e96a5ff42d89cf536c099b944f5f7254764
|
[
"BSD-3-Clause"
] | 3
|
2020-02-11T23:24:39.000Z
|
2021-06-04T21:45:25.000Z
|
fzzzMaskBackend/users/serializers.py
|
FZZZMask/backend
|
4f987e96a5ff42d89cf536c099b944f5f7254764
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import serializers
| 8.4
| 38
| 0.833333
| 5
| 42
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 4
| 39
| 10.5
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
21b4b857672198b3794c4cd67434ee8e238bf40c
| 164
|
py
|
Python
|
util/prelude.py
|
sinsay/ds_define
|
0ee89edfc3ad1ed37c5b88e13936229baf50a966
|
[
"Apache-2.0"
] | null | null | null |
util/prelude.py
|
sinsay/ds_define
|
0ee89edfc3ad1ed37c5b88e13936229baf50a966
|
[
"Apache-2.0"
] | null | null | null |
util/prelude.py
|
sinsay/ds_define
|
0ee89edfc3ad1ed37c5b88e13936229baf50a966
|
[
"Apache-2.0"
] | null | null | null |
from .enum import EnumBase
def is_builtin_type(obj) -> bool:
    """
    Check whether *obj* is a primitive built-in value:
    int, str, float, bool, or None.
    """
    if obj is None:
        return True
    return isinstance(obj, (int, str, float, bool))
| 18.222222
| 66
| 0.628049
| 24
| 164
| 4.208333
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 164
| 8
| 67
| 20.5
| 0.821138
| 0.079268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
21b737190d56432c7d4ca921f5d6f60d7150164a
| 289
|
py
|
Python
|
batch/batch/public_gcr_images.py
|
MariusDanner/hail
|
5ca0305f8243b5888931b1afaa1fbfb617dee097
|
[
"MIT"
] | null | null | null |
batch/batch/public_gcr_images.py
|
MariusDanner/hail
|
5ca0305f8243b5888931b1afaa1fbfb617dee097
|
[
"MIT"
] | null | null | null |
batch/batch/public_gcr_images.py
|
MariusDanner/hail
|
5ca0305f8243b5888931b1afaa1fbfb617dee097
|
[
"MIT"
] | null | null | null |
from typing import List
def public_gcr_images(project: str) -> List[str]:
    """Return the fully-qualified GCR image paths published under *project*."""
    # the worker cannot import batch_configuration because it does not have all the environment
    # variables
    image_names = ('query', 'hail', 'python-dill', 'batch-worker')
    return ['gcr.io/' + project + '/' + image for image in image_names]
| 36.125
| 100
| 0.709343
| 42
| 289
| 4.809524
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16609
| 289
| 7
| 101
| 41.285714
| 0.838174
| 0.342561
| 0
| 0
| 0
| 0
| 0.294118
| 0.122995
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
21beae082b613ebc189de03f874795adfa3f6a13
| 68
|
py
|
Python
|
Other_AIMA_Scripts/planning.py
|
erensezener/aima-based-irl
|
fbbe28986cec0b5e58fef0f00338a180ed03759a
|
[
"MIT"
] | 12
|
2015-06-17T05:15:40.000Z
|
2021-05-18T15:39:33.000Z
|
Other_AIMA_Scripts/planning.py
|
erensezener/aima-based-irl
|
fbbe28986cec0b5e58fef0f00338a180ed03759a
|
[
"MIT"
] | 1
|
2020-03-14T08:45:49.000Z
|
2020-03-14T08:45:49.000Z
|
Other_AIMA_Scripts/planning.py
|
erensezener/aima-based-irl
|
fbbe28986cec0b5e58fef0f00338a180ed03759a
|
[
"MIT"
] | 5
|
2016-09-10T19:16:56.000Z
|
2018-10-10T05:09:03.000Z
|
"""Planning (Chapters 11-12)
"""
from __future__ import generators
| 13.6
| 33
| 0.735294
| 8
| 68
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 0.132353
| 68
| 4
| 34
| 17
| 0.711864
| 0.367647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
21ee59da3e9f824ace6a440137a55162daab5528
| 200
|
py
|
Python
|
Timers.py
|
elegenstein-tgm/astrosim
|
1b09a32f543f5cc810621f8beaff20d57d0add22
|
[
"MIT"
] | null | null | null |
Timers.py
|
elegenstein-tgm/astrosim
|
1b09a32f543f5cc810621f8beaff20d57d0add22
|
[
"MIT"
] | null | null | null |
Timers.py
|
elegenstein-tgm/astrosim
|
1b09a32f543f5cc810621f8beaff20d57d0add22
|
[
"MIT"
] | null | null | null |
class Timer:
    """Stub timer holding a duration and a tick count.

    ``start`` is a placeholder; the backing thread is not created yet.
    """

    def __init__(self, duration, ticks):
        # No worker thread exists until start() is implemented.
        self.thread = None
        self.duration = duration
        self.ticks = ticks

    def start(self):
        """Start the timer (not implemented yet)."""
        # start Thread here
        pass
| 20
| 40
| 0.585
| 23
| 200
| 4.913043
| 0.521739
| 0.212389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.335
| 200
| 9
| 41
| 22.222222
| 0.849624
| 0.085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1d09f4af7ac6dd139ab8ee8934a37f14f97144a4
| 17,847
|
py
|
Python
|
scripts/trajectories.py
|
Miedema/MCNetwork
|
daab1fe5880c47695c6e21124f99aa6b2589aba1
|
[
"Apache-2.0"
] | null | null | null |
scripts/trajectories.py
|
Miedema/MCNetwork
|
daab1fe5880c47695c6e21124f99aa6b2589aba1
|
[
"Apache-2.0"
] | null | null | null |
scripts/trajectories.py
|
Miedema/MCNetwork
|
daab1fe5880c47695c6e21124f99aa6b2589aba1
|
[
"Apache-2.0"
] | 1
|
2021-10-05T14:34:30.000Z
|
2021-10-05T14:34:30.000Z
|
#!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
from matplotlib.patches import Wedge
import numpy as np
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
parameters, electrodes = readParameters(pathToSimFolder)
electrodeNumber = len(electrodes)
acceptorPos = np.zeros((int(parameters["acceptorNumber"]), 2))
try:
donorPos = np.zeros((int(parameters["donorNumber"]), 2))
except KeyError:
donorPos = np.zeros(
(int(parameters["acceptorNumber"] * parameters["compensationFactor"]), 2)
)
with open(join(pathToSimFolder, "device.txt")) as deviceFile:
line = next(deviceFile)
line = next(deviceFile)
for i in range(acceptorPos.shape[0]):
acceptorPos[i] = next(deviceFile).split(" ")
line = next(deviceFile)
line = next(deviceFile)
for i in range(donorPos.shape[0]):
donorPos[i] = next(deviceFile).split(" ")
# print(acceptorPos)
# print(donorPos)
electrodePositions = np.empty((len(electrodes), 2))
for i in range(len(electrodes)):
if parameters["geometry"] == "rect":
if electrodes[i][1] == 0:
electrodePositions[i] = [0, electrodes[i][0] * parameters["lenY"]]
if electrodes[i][1] == 1:
electrodePositions[i] = [
parameters["lenX"],
electrodes[i][0] * parameters["lenY"],
]
if electrodes[i][1] == 2:
electrodePositions[i] = [electrodes[i][0] * parameters["lenX"], 0]
if electrodes[i][1] == 3:
electrodePositions[i] = [
electrodes[i][0] * parameters["lenX"],
parameters["lenY"],
]
elif parameters["geometry"] == "circle":
electrodePositions[i] = [
parameters["radius"] * np.cos(electrodes[i][0] / 360 * 2 * np.pi),
parameters["radius"] * np.sin(electrodes[i][0] / 360 * 2 * np.pi),
]
# print(electrodePositions)
def colorMaker(x):
    """Map a scalar x in [0, 1] to an RGBA array, linearly interpolating
    from darkred (x=0) to darkgreen (x=1); alpha is fixed at 1.
    """
    from matplotlib import colors
    from scipy.interpolate import interp1d
    # Anchor colours at x=0 and x=1
    cols = ["darkred", "darkgreen"]
    # One RGBA row per anchor colour
    rgbaData = np.array([colors.to_rgba(c) for c in cols])
    # Independent linear interpolators for the R, G and B channels
    rInterpolater = interp1d(np.linspace(0, 1, len(cols)), rgbaData[:, 0])
    gInterpolater = interp1d(np.linspace(0, 1, len(cols)), rgbaData[:, 1])
    bInterpolater = interp1d(np.linspace(0, 1, len(cols)), rgbaData[:, 2])
    return np.array([rInterpolater(x), gInterpolater(x), bInterpolater(x), 1])
# Labels for the four swapTrackFile inputs; used in the output file names
# (presumably the boolean input combinations 00/01/10/11 — matches file 1..4).
inp = ["0_0", "0_1", "1_0", "1_1"]


def _sitePosition(site):
    """Return the (x, y) coordinates of a hopping site.

    Sites below acceptorNumber are acceptors; sites at or above it are
    electrodes (index offset by acceptorNumber).
    """
    if site >= parameters["acceptorNumber"]:
        idx = site - int(parameters["acceptorNumber"])
        return electrodePositions[idx][0], electrodePositions[idx][1]
    return acceptorPos[site, 0], acceptorPos[site, 1]


def _electrodeColor(i, fileNumber):
    """Color for electrode i: output blue, active input red, inactive input rosybrown, control green."""
    if i == parameters["outputElectrode"]:
        return "blue"
    if i == parameters["inputElectrode1"]:
        # input 1 is logically "on" for files 3 and 4 (labels 1_0 and 1_1)
        return "red" if fileNumber in [3, 4] else "rosybrown"
    if i == parameters["inputElectrode2"]:
        # input 2 is logically "on" for files 2 and 4 (labels 0_1 and 1_1)
        return "red" if fileNumber in [2, 4] else "rosybrown"
    return "green"


def _buildSortedTrajectories(data, hops=20000):
    """Reconstruct charge trajectories from the first `hops` hop events.

    A trajectory follows one charge: when a hop starts from a site that is
    currently occupied by a tracked charge, the hop continues that charge's
    trajectory; otherwise a new trajectory is started. Only trajectories that
    both start and end on an electrode are kept.

    Returns a len(electrodes) x len(electrodes) nested list where entry
    [start][end] holds all trajectories from electrode `start` to electrode `end`.
    """
    trajectories = []
    IDs = {}  # currently occupied acceptor site -> trajectory ID
    hitID = 0
    for i in range(hops):
        hoppingSite1 = data[i, 0]
        hoppingSite2 = data[i, 1]
        if hoppingSite1 in IDs:
            ID = IDs.pop(hoppingSite1)
        else:
            ID = hitID
            hitID += 1
            trajectories.append([])
        # Only acceptor sites can stay occupied; hops onto electrodes end tracking.
        if hoppingSite2 < parameters["acceptorNumber"]:
            IDs[hoppingSite2] = ID
        trajectories[ID].append([hoppingSite1, hoppingSite2])
    sortedTraj = [
        [[] for j in range(len(electrodes))] for i in range(len(electrodes))
    ]
    for traj in trajectories:
        if (
            traj[0][0] >= parameters["acceptorNumber"]
            and traj[-1][1] >= parameters["acceptorNumber"]
        ):
            startEl = traj[0][0] - int(parameters["acceptorNumber"])
            endEl = traj[-1][1] - int(parameters["acceptorNumber"])
            sortedTraj[startEl][endEl].append(traj)
    return sortedTraj


def _drawElectrodes(ax, fileNumber, electodePlotWidth):
    """Draw every electrode marker, colored by its role for this input state."""
    for i in range(len(electrodes)):
        col = _electrodeColor(i, fileNumber)
        if parameters["geometry"] == "rect":
            if electrodes[i][1] == 0:  # x = 0 edge
                angle = 0
                xy = (
                    0 - electodePlotWidth / 2,
                    electrodes[i][0] * parameters["lenY"]
                    - parameters["electrodeWidth"] / 2,
                )
            elif electrodes[i][1] == 1:  # x = lenX edge
                angle = 0
                xy = (
                    parameters["lenX"] - electodePlotWidth / 2,
                    electrodes[i][0] * parameters["lenY"]
                    - parameters["electrodeWidth"] / 2,
                )
            elif electrodes[i][1] == 2:  # y = 0 edge
                angle = 90
                xy = (
                    electrodes[i][0] * parameters["lenX"]
                    + parameters["electrodeWidth"] / 2,
                    0 - electodePlotWidth / 2,
                )
            elif electrodes[i][1] == 3:  # y = lenY edge
                angle = 90
                xy = (
                    electrodes[i][0] * parameters["lenX"]
                    + parameters["electrodeWidth"] / 2,
                    parameters["lenY"] - electodePlotWidth / 2,
                )
            ax.add_artist(
                plt.Rectangle(
                    xy,
                    electodePlotWidth,
                    parameters["electrodeWidth"],
                    angle=angle,
                    fc=col,
                    ec=col,
                    zorder=-1,
                )
            )
        elif parameters["geometry"] == "circle":
            electrodeWidth = (
                parameters["electrodeWidth"]
                / (parameters["radius"] * 2 * np.pi)
                * 360
            )  # angular width in degrees
            ax.add_artist(
                Wedge(
                    (0, 0),
                    parameters["radius"] + electodePlotWidth / 2,
                    electrodes[i][0] - electrodeWidth / 2,
                    electrodes[i][0] + electrodeWidth / 2,
                    width=electodePlotWidth,
                    fc=col,
                    ec=col,
                    zorder=-1,
                )
            )


def _plotTrajectoryFigure(trajectoriesSortedByStartEnd, k, fileNumber, mode):
    """Render and save one trajectory figure for electrode k.

    mode "fromEl": trajectories starting at electrode k (bucket [k][l]);
    mode "toEl":   trajectories ending at electrode k (bucket [l][k]).
    Line color encodes the other electrode l via tools.color().
    """
    fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
    electodePlotWidth = 8
    _drawElectrodes(ax, fileNumber, electodePlotWidth)
    ax.scatter(acceptorPos[:, 0], acceptorPos[:, 1], c="k", marker=".", s=20)
    ax.scatter(donorPos[:, 0], donorPos[:, 1], c="k", marker="x", s=20)
    for l in range(len(electrodes)):
        if mode == "fromEl":
            trajectories = trajectoriesSortedByStartEnd[k][l]
        else:
            trajectories = trajectoriesSortedByStartEnd[l][k]
        for traj in trajectories:
            for hoppingSite1, hoppingSite2 in traj:
                x1, y1 = _sitePosition(hoppingSite1)
                x2, y2 = _sitePosition(hoppingSite2)
                # Low alpha so frequently-used paths appear darker.
                ax.plot(
                    [x1, x2],
                    [y1, y2],
                    "-",
                    alpha=0.05,
                    color=color(l, len(electrodes)),
                    linewidth=2,
                )
    ax.axis("off")
    # Device outline behind everything else.
    if parameters["geometry"] == "circle":
        ax.add_artist(
            plt.Circle((0, 0), parameters["radius"], fc="none", ec="k", zorder=-2)
        )
    elif parameters["geometry"] == "rect":
        ax.add_artist(
            plt.Rectangle(
                (0, 0),
                parameters["lenX"],
                parameters["lenY"],
                fc="none",
                ec="k",
                zorder=-2,
            )
        )
    if parameters["geometry"] == "rect":
        ax.set_xlim(
            -electodePlotWidth / 2, parameters["lenX"] + electodePlotWidth / 2
        )
        ax.set_ylim(
            -electodePlotWidth / 2, parameters["lenY"] + electodePlotWidth / 2
        )
    elif parameters["geometry"] == "circle":
        ax.set_xlim(
            -parameters["radius"] - electodePlotWidth,
            parameters["radius"] + electodePlotWidth,
        )
        ax.set_ylim(
            -parameters["radius"] - electodePlotWidth,
            parameters["radius"] + electodePlotWidth,
        )
    ax.set_aspect("equal")
    plt.savefig(
        join(pathToSimFolder, f"trajectory_{mode}_{k}_{inp[fileNumber-1]}.png"),
        bbox_inches="tight",
        dpi=300,
    )
    plt.close(fig)


# The two original plotting passes were near-identical copies; they are now
# expressed through _plotTrajectoryFigure and differ only in the bucket
# indexing direction and the file-name prefix.
for fileNumber in [1, 2, 3, 4]:
    print(inp[fileNumber - 1])
    data = np.genfromtxt(
        join(pathToSimFolder, f"swapTrackFile{fileNumber}.txt"),
        delimiter=";",
        dtype=int,
    )
    sortedTrajectories = _buildSortedTrajectories(data)
    # One figure per source electrode ...
    for k in range(len(electrodes)):
        _plotTrajectoryFigure(sortedTrajectories, k, fileNumber, "fromEl")
    # ... and one per destination electrode.
    for k in range(len(electrodes)):
        _plotTrajectoryFigure(sortedTrajectories, k, fileNumber, "toEl")
| 36.646817
| 197
| 0.428083
| 1,441
| 17,847
| 5.278973
| 0.126301
| 0.043381
| 0.028395
| 0.034705
| 0.772709
| 0.750624
| 0.742737
| 0.722492
| 0.708689
| 0.693046
| 0
| 0.043305
| 0.451392
| 17,847
| 486
| 198
| 36.722222
| 0.733633
| 0.046955
| 0
| 0.690821
| 0
| 0
| 0.071988
| 0.006887
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002415
| false
| 0
| 0.021739
| 0
| 0.02657
| 0.002415
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
df102fd4bc161dbff752d14a5d6d5415a2686808
| 78
|
py
|
Python
|
test/test.py
|
hcamacho4200/dev_opts_training
|
6ce91cbeb30af7eae29c084f6180d53f64f5e9b0
|
[
"Apache-2.0"
] | 1
|
2021-10-03T22:23:06.000Z
|
2021-10-03T22:23:06.000Z
|
test/test.py
|
hcamacho4200/dev_opts_training
|
6ce91cbeb30af7eae29c084f6180d53f64f5e9b0
|
[
"Apache-2.0"
] | null | null | null |
test/test.py
|
hcamacho4200/dev_opts_training
|
6ce91cbeb30af7eae29c084f6180d53f64f5e9b0
|
[
"Apache-2.0"
] | 1
|
2021-12-11T19:24:59.000Z
|
2021-12-11T19:24:59.000Z
|
def test_test():
    """A generic placeholder test that always passes.

    :return: None
    """
    assert True
| 9.75
| 21
| 0.512821
| 9
| 78
| 4.333333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 78
| 7
| 22
| 11.142857
| 0.764706
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
10d394809031c831a797106d7da931ca1931a5d8
| 89
|
py
|
Python
|
Contest/ABC017/b/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC017/b/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC017/b/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
"""ABC017 B: the input is valid iff it can be decomposed into ch / o / k / u."""
import re

# Delete every occurrence of the allowed tokens; any leftover character means
# the string cannot be decomposed, so the answer is "NO".
# (Replaces the fragile legacy `cond and a or b` idiom — which breaks when the
# truthy branch is itself falsy — with a conditional expression.)
print("NO" if re.sub("ch|o|k|u", "", input()) else "YES")
| 29.666667
| 56
| 0.606742
| 18
| 89
| 3
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.11236
| 89
| 3
| 56
| 29.666667
| 0.670886
| 0.235955
| 0
| 0
| 0
| 0
| 0.191176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
10dcd63f6971079940c1323786fecd18f6fad2b3
| 162
|
py
|
Python
|
pythonperlin/__init__.py
|
timpyrkov/pyperlin
|
c79080657aa79df1abc83e481d2b09cac5edbff7
|
[
"MIT"
] | null | null | null |
pythonperlin/__init__.py
|
timpyrkov/pyperlin
|
c79080657aa79df1abc83e481d2b09cac5edbff7
|
[
"MIT"
] | null | null | null |
pythonperlin/__init__.py
|
timpyrkov/pyperlin
|
c79080657aa79df1abc83e481d2b09cac5edbff7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""pythonperlin package: re-exports perlin and exposes the installed version."""
from importlib.metadata import version

from pythonperlin.perlin import perlin

# pkg_resources.get_distribution is deprecated (and slow to import);
# importlib.metadata.version is the stdlib replacement since Python 3.8.
__version__ = version('pythonperlin')
| 27
| 54
| 0.796296
| 19
| 162
| 6.421053
| 0.631579
| 0.245902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006897
| 0.104938
| 162
| 6
| 54
| 27
| 0.834483
| 0.12963
| 0
| 0
| 0
| 0
| 0.085714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
10e230dff4f183cac0ecd093228e7522ab70f334
| 26
|
py
|
Python
|
25_Assigment_Operator/main.py
|
jmmedel/Python-Tutorials-
|
243ae9a6b51a4fce03dd90c02da13b859cbfbe5f
|
[
"MIT"
] | null | null | null |
25_Assigment_Operator/main.py
|
jmmedel/Python-Tutorials-
|
243ae9a6b51a4fce03dd90c02da13b859cbfbe5f
|
[
"MIT"
] | null | null | null |
25_Assigment_Operator/main.py
|
jmmedel/Python-Tutorials-
|
243ae9a6b51a4fce03dd90c02da13b859cbfbe5f
|
[
"MIT"
] | null | null | null |
# Tutorial: the in-place bitwise OR assignment operator.
x = 5
x |= 3  # same as x = x | 3 -> 0b101 | 0b011 = 0b111 = 7
print(x)
| 3.25
| 8
| 0.384615
| 6
| 26
| 1.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.423077
| 26
| 7
| 9
| 3.714286
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
10eff1d39f6acd5ae6fc306444aa467930b6a9d1
| 1,624
|
py
|
Python
|
ozpcenter/models/import_task_result.py
|
emosher/ozp-backend
|
d31d00bb8a28a8d0c999813f616b398f41516244
|
[
"Apache-2.0"
] | 1
|
2018-10-05T17:03:01.000Z
|
2018-10-05T17:03:01.000Z
|
ozpcenter/models/import_task_result.py
|
emosher/ozp-backend
|
d31d00bb8a28a8d0c999813f616b398f41516244
|
[
"Apache-2.0"
] | 1
|
2017-01-06T19:20:32.000Z
|
2017-01-06T19:20:32.000Z
|
ozpcenter/models/import_task_result.py
|
emosher/ozp-backend
|
d31d00bb8a28a8d0c999813f616b398f41516244
|
[
"Apache-2.0"
] | 7
|
2016-12-16T15:42:05.000Z
|
2020-09-05T01:11:27.000Z
|
from django.db import models
from ozpcenter.utils import get_now_utc
from .import_task import ImportTask
class ImportTaskResultManager(models.Manager):
    """Query helpers for ImportTaskResult rows."""

    def get_queryset(self):
        # No custom filtering yet; the override is kept (rather than removed)
        # so subclasses/callers relying on its presence are unaffected.
        return super().get_queryset()

    def find_all(self):
        """Return all import task results."""
        return self.all()

    def find_by_id(self, id):
        """Return the result with the given primary key (Manager.get raises if absent)."""
        return self.get(id=id)

    def find_all_by_import_task(self, import_task_pk):
        """Return all results belonging to the given import task."""
        return self.filter(import_task=import_task_pk)

    def create_result(self, import_task_id, result, message):
        """Create a result row and record it as the task's last run result.

        Fix: the created instance no longer shadows the `result` parameter
        (the Pass/Fail string), which made the original body confusing.
        """
        task_result = self.create(import_task_id=import_task_id, result=result, message=message)
        ImportTask.objects.filter(id=import_task_id).update(last_run_result=task_result.id)
        return task_result
class ImportTaskResult(models.Model):
    """
    Import Task Result

    Represents the results of an import task that has been run previously
    """
    class Meta:
        db_table = 'import_task_result'
    objects = ImportTaskResultManager()

    # Closed set of allowed values for `result`.
    RESULT_PASS = 'Pass'
    RESULT_FAIL = 'Fail'
    RESULT_CHOICES = (
        (RESULT_PASS, 'Pass'),
        (RESULT_FAIL, 'Fail'),
    )

    import_task = models.ForeignKey(ImportTask, related_name="results")
    run_date = models.DateTimeField(default=get_now_utc)
    result = models.CharField(max_length=4, choices=RESULT_CHOICES)
    message = models.CharField(max_length=4000, null=False)

    def __repr__(self):
        return '{0!s} | Date: {1!s} | Result: {2!s}'.format(self.import_task, self.run_date, self.result)

    def __str__(self):
        # Fix: __str__ previously duplicated the format string verbatim;
        # delegating keeps the two renderings in sync by construction.
        return self.__repr__()
| 28.491228
| 105
| 0.690887
| 219
| 1,624
| 4.86758
| 0.315068
| 0.140713
| 0.052533
| 0.033771
| 0.178236
| 0.178236
| 0.125704
| 0.125704
| 0.125704
| 0.125704
| 0
| 0.008455
| 0.198892
| 1,624
| 56
| 106
| 29
| 0.810915
| 0.054803
| 0
| 0.058824
| 0
| 0.058824
| 0.073267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205882
| false
| 0.058824
| 0.441176
| 0.176471
| 1.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
|
0
| 5
|
80178d726d35bfda33f77aca84b7fdccd2b6d2ea
| 253
|
py
|
Python
|
src/fl_simulation/server/aggregation/__init__.py
|
microsoft/fl-simulation
|
d177d329c82559c7efe82deae8dea8f9baa49495
|
[
"MIT"
] | 5
|
2021-12-14T02:21:53.000Z
|
2021-12-26T07:45:13.000Z
|
src/fl_simulation/server/aggregation/__init__.py
|
microsoft/fl-simulation
|
d177d329c82559c7efe82deae8dea8f9baa49495
|
[
"MIT"
] | 1
|
2022-01-04T04:51:20.000Z
|
2022-01-04T04:51:20.000Z
|
src/fl_simulation/server/aggregation/__init__.py
|
microsoft/fl-simulation
|
d177d329c82559c7efe82deae8dea8f9baa49495
|
[
"MIT"
] | null | null | null |
"""Utilities and implementation for model aggregation on the central server."""
from .aggregator import *
from .fedavg import *
from .fedprox import *
from .scaffold import *
from .aggregator_with_dropouts import *
from .multi_model_aggregator import *
| 31.625
| 79
| 0.790514
| 32
| 253
| 6.125
| 0.59375
| 0.255102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134387
| 253
| 8
| 80
| 31.625
| 0.894977
| 0.288538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
339ebff6a47b6fc0d76354525972cdafcdf197e6
| 216
|
py
|
Python
|
osvolbackup/verbose.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | 1
|
2019-02-27T12:59:49.000Z
|
2019-02-27T12:59:49.000Z
|
osvolbackup/verbose.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | 4
|
2019-03-07T09:31:51.000Z
|
2019-03-12T15:19:40.000Z
|
osvolbackup/verbose.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from os import getenv
from datetime import datetime
def vprint(*args, **kwargs):
    """Timestamp-prefixed print that is silent unless the VERBOSE env var is set."""
    if getenv('VERBOSE'):
        print(datetime.now(), ' ', end='')
        print(*args, **kwargs)
| 19.636364
| 38
| 0.643519
| 29
| 216
| 4.62069
| 0.62069
| 0.029851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217593
| 216
| 10
| 39
| 21.6
| 0.792899
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0.375
| 0
| 0.625
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
33e70135430c756b9a90a2e67be6abde70c17fb4
| 100
|
py
|
Python
|
paraVerComoFuncionaAlgumasCoisas/sqlite3/fazendoTeste/teste.py
|
jonasht/pythonEstudos
|
5e7d28e7bd82b9d1b08e795867fdbaa743f4b747
|
[
"MIT"
] | null | null | null |
paraVerComoFuncionaAlgumasCoisas/sqlite3/fazendoTeste/teste.py
|
jonasht/pythonEstudos
|
5e7d28e7bd82b9d1b08e795867fdbaa743f4b747
|
[
"MIT"
] | null | null | null |
paraVerComoFuncionaAlgumasCoisas/sqlite3/fazendoTeste/teste.py
|
jonasht/pythonEstudos
|
5e7d28e7bd82b9d1b08e795867fdbaa743f4b747
|
[
"MIT"
] | null | null | null |
import PegandoVariavel as v
# Print the whole result of get_Pessoas(), a blank line, then each entry on
# its own line. NOTE(review): get_Pessoas() is called twice — if it queries a
# database each time, consider storing the result once; confirm in the module.
print(v.get_Pessoas())
print()
for d in v.get_Pessoas():
    print(d)
| 12.5
| 27
| 0.7
| 17
| 100
| 4
| 0.588235
| 0.117647
| 0.323529
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17
| 100
| 8
| 28
| 12.5
| 0.819277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.6
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
33ed53521c15ad28a778f2b1528538b64f181026
| 32
|
py
|
Python
|
week01/test_f.py
|
wasit7/cn350
|
a84a6ed04ada532e0a12c69d705cf3c15d7e0240
|
[
"MIT"
] | null | null | null |
week01/test_f.py
|
wasit7/cn350
|
a84a6ed04ada532e0a12c69d705cf3c15d7e0240
|
[
"MIT"
] | null | null | null |
week01/test_f.py
|
wasit7/cn350
|
a84a6ed04ada532e0a12c69d705cf3c15d7e0240
|
[
"MIT"
] | null | null | null |
# Teaching example: `n` is not assigned inside f, so the function reads the
# module-level (global) binding at call time.
n=1
def f(x):
    # `x` is unused; only the global `n` is printed.
    print(n)
f(0)
| 6.4
| 12
| 0.46875
| 9
| 32
| 1.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.28125
| 32
| 5
| 13
| 6.4
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.25
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1d0fdb41642d7e059e2af9967d0a5707e8be001c
| 1,829
|
py
|
Python
|
StationeryBG.py
|
CharlesW1970/Handright
|
cda9400232e1815f7137ab3bd86ded8e307f35c7
|
[
"BSD-3-Clause"
] | 1
|
2020-10-14T06:05:35.000Z
|
2020-10-14T06:05:35.000Z
|
StationeryBG.py
|
CharlesW1970/Handright
|
cda9400232e1815f7137ab3bd86ded8e307f35c7
|
[
"BSD-3-Clause"
] | null | null | null |
StationeryBG.py
|
CharlesW1970/Handright
|
cda9400232e1815f7137ab3bd86ded8e307f35c7
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
from PIL import Image, ImageFont
from handright import Template, handwrite
text = """
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
"""
# Load the stationery background and upscale 2x for a sharper render.
imagex = Image.open("./pic/stationeryBackground.jpg")
width, height = imagex.size
imagex = imagex.resize((width * 2, height * 2), resample=Image.LANCZOS)
# Handwriting layout template; the *_sigma values add random jitter so the
# output looks hand-written rather than typeset.
template = Template(background=imagex,
                    font_size=140,
                    font=ImageFont.truetype("./fonts/whx_2nd.ttf"),
                    line_spacing=220,
                    fill=0,  # ink "color" (0 = black)
                    left_margin=380,
                    top_margin=370,
                    right_margin=340,
                    bottom_margin=340,
                    word_spacing=12,
                    line_spacing_sigma=7,  # random jitter of the line spacing
                    font_size_sigma=3,  # random jitter of the glyph size
                    word_spacing_sigma=6,  # random jitter of the word spacing
                    end_chars=", 。",  # keep these punctuation marks off line starts when auto-wrapping
                    perturb_x_sigma=2,  # random horizontal stroke offset
                    perturb_y_sigma=2,  # random vertical stroke offset
                    perturb_theta_sigma=0.05,  # random stroke rotation offset
                    )
images = handwrite(text, template)
# Render each page, preview it, and save it to ./output/<page>.png.
for i, im in enumerate(images):
    assert isinstance(im, Image.Image)
    im.show()
    im.save("./output/{}.png".format(i))
| 43.547619
| 172
| 0.783488
| 198
| 1,829
| 7.136364
| 0.414141
| 1.07431
| 1.58811
| 2.086341
| 0.54494
| 0.54494
| 0.54494
| 0.54494
| 0.54494
| 0.54494
| 0
| 0.020233
| 0.108256
| 1,829
| 42
| 173
| 43.547619
| 0.845494
| 0.055221
| 0
| 0.142857
| 0
| 0.142857
| 0.543655
| 0.506403
| 0
| 0
| 0
| 0
| 0.028571
| 0
| null | null | 0
| 0.057143
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1d5874fe18b2e75012e32310519149b4c42547fe
| 81
|
py
|
Python
|
app_folder/schemas/api.py
|
Nuznhy/day-f-hack
|
9f3dbcd73e73ea4e7807e5197bf0b0ded76bc9f3
|
[
"MIT"
] | 2
|
2021-10-02T12:12:57.000Z
|
2021-11-16T11:36:15.000Z
|
app_folder/schemas/api.py
|
Nuznhy/day-f-hack
|
9f3dbcd73e73ea4e7807e5197bf0b0ded76bc9f3
|
[
"MIT"
] | null | null | null |
app_folder/schemas/api.py
|
Nuznhy/day-f-hack
|
9f3dbcd73e73ea4e7807e5197bf0b0ded76bc9f3
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class ReadyResponse(BaseModel):
    """Response schema, presumably for a readiness/health endpoint -- confirm at usage site."""
    status: str  # status text; exact values are set by the endpoint, not visible here
| 13.5
| 31
| 0.777778
| 9
| 81
| 7
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17284
| 81
| 5
| 32
| 16.2
| 0.940299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1d72d39d1134cd19db279903a576512ac8b9b659
| 139
|
py
|
Python
|
vedasal/criteria/losses/builder.py
|
Kuro96/vedasal
|
3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2
|
[
"Apache-2.0"
] | 2
|
2020-11-06T06:39:04.000Z
|
2020-11-11T03:39:22.000Z
|
vedasal/criteria/losses/builder.py
|
Kuro96/vedasal
|
3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2
|
[
"Apache-2.0"
] | null | null | null |
vedasal/criteria/losses/builder.py
|
Kuro96/vedasal
|
3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2
|
[
"Apache-2.0"
] | null | null | null |
from vedacore.misc import registry, build_from_cfg
def build_loss(cfg):
    """Instantiate a loss object from config *cfg* via the vedacore registry ('loss' category)."""
    return build_from_cfg(cfg, registry, 'loss')
| 19.857143
| 50
| 0.741007
| 21
| 139
| 4.666667
| 0.47619
| 0.183673
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172662
| 139
| 6
| 51
| 23.166667
| 0.852174
| 0
| 0
| 0
| 0
| 0
| 0.028777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d535465b4cadf1f4ee90d4f52f137ea35dc5bc11
| 56
|
py
|
Python
|
dominion/cards/__init__.py
|
billletson/dominion
|
ad430e20aa1615758091df1ca39a5fc7313e921e
|
[
"MIT"
] | null | null | null |
dominion/cards/__init__.py
|
billletson/dominion
|
ad430e20aa1615758091df1ca39a5fc7313e921e
|
[
"MIT"
] | null | null | null |
dominion/cards/__init__.py
|
billletson/dominion
|
ad430e20aa1615758091df1ca39a5fc7313e921e
|
[
"MIT"
] | null | null | null |
from .constants import *
from .actions import ACTIONS
| 18.666667
| 29
| 0.767857
| 7
| 56
| 6.142857
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 56
| 2
| 30
| 28
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d53f7a92ad96592864829c170139b3f620bcb9e7
| 109
|
py
|
Python
|
aiohttp_devtools/start/__init__.py
|
antonmyronyuk/aiohttp-devtools
|
be06d295a8911a43f7ad582a88a3d64d6482b6e8
|
[
"MIT"
] | 2
|
2018-11-13T06:34:17.000Z
|
2019-01-08T14:33:09.000Z
|
aiohttp_devtools/start/__init__.py
|
theruziev/aiohttp-devtools
|
8ab8a621964c8af0021c62e7971eea8c04f534e8
|
[
"MIT"
] | 1
|
2021-02-27T14:13:58.000Z
|
2021-02-27T14:13:58.000Z
|
aiohttp_devtools/start/__init__.py
|
theruziev/aiohttp-devtools
|
8ab8a621964c8af0021c62e7971eea8c04f534e8
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from .main import DatabaseChoice, ExampleChoice, SessionChoices, StartProject, TemplateChoice
| 36.333333
| 93
| 0.834862
| 10
| 109
| 9.1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010204
| 0.100917
| 109
| 2
| 94
| 54.5
| 0.918367
| 0.110092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d54e3560503b5dd94b9ef2b1f63b8e1ccc96eeee
| 164
|
py
|
Python
|
allauth/socialaccount/providers/stripe/urls.py
|
mina-gaid/scp
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 1
|
2018-04-06T21:36:59.000Z
|
2018-04-06T21:36:59.000Z
|
allauth/socialaccount/providers/stripe/urls.py
|
mina-gaid/scp
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 6
|
2020-06-05T18:44:19.000Z
|
2022-01-13T00:48:56.000Z
|
allauth/socialaccount/providers/stripe/urls.py
|
mina-gaid/scp
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 1
|
2022-02-01T17:19:28.000Z
|
2022-02-01T17:19:28.000Z
|
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import StripeProvider
# Standard allauth OAuth2 login/callback URL patterns, bound to the Stripe provider.
urlpatterns = default_urlpatterns(StripeProvider)
| 32.8
| 75
| 0.878049
| 17
| 164
| 8.352941
| 0.647059
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 0.073171
| 164
| 4
| 76
| 41
| 0.927632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d56af4bb09c9819923a740a48a9336b6c5056a35
| 42
|
py
|
Python
|
python_module/sirius/utils/exceptions.py
|
mtaillefumier/SIRIUS
|
50ec1c202c019113c5660f1966b170dec9dfd4d4
|
[
"BSD-2-Clause"
] | 77
|
2016-03-18T08:38:30.000Z
|
2022-03-11T14:06:25.000Z
|
python_module/sirius/utils/exceptions.py
|
simonpintarelli/SIRIUS
|
f4b5c4810af2a3ea1e67992d65750535227da84b
|
[
"BSD-2-Clause"
] | 240
|
2016-04-12T16:39:11.000Z
|
2022-03-31T08:46:12.000Z
|
python_module/sirius/utils/exceptions.py
|
simonpintarelli/SIRIUS
|
f4b5c4810af2a3ea1e67992d65750535227da84b
|
[
"BSD-2-Clause"
] | 43
|
2016-03-18T17:45:07.000Z
|
2022-02-28T05:27:59.000Z
|
class NotEnoughBands(Exception):
    """Raised when fewer bands are available than a computation requires (name-based; confirm at raise sites)."""
| 14
| 32
| 0.761905
| 4
| 42
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 2
| 33
| 21
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
63740319cbdd9993b7493ee891f9371a9c6e02c1
| 256
|
py
|
Python
|
components/collector/src/source_collectors/sonarqube/duplicated_lines.py
|
kargaranamir/quality-time
|
1c427c61bee9d31c3526f0a01be2218a7e167c23
|
[
"Apache-2.0"
] | 33
|
2016-01-20T07:35:48.000Z
|
2022-03-14T09:20:51.000Z
|
components/collector/src/source_collectors/sonarqube/duplicated_lines.py
|
kargaranamir/quality-time
|
1c427c61bee9d31c3526f0a01be2218a7e167c23
|
[
"Apache-2.0"
] | 2,410
|
2016-01-22T18:13:01.000Z
|
2022-03-31T16:57:34.000Z
|
components/collector/src/source_collectors/sonarqube/duplicated_lines.py
|
kargaranamir/quality-time
|
1c427c61bee9d31c3526f0a01be2218a7e167c23
|
[
"Apache-2.0"
] | 21
|
2016-01-16T11:49:23.000Z
|
2022-01-14T21:53:22.000Z
|
"""SonarQube duplicated lines collector."""
from .base import SonarQubeMetricsBaseClass
class SonarQubeDuplicatedLines(SonarQubeBaseClass := SonarQubeMetricsBaseClass) if False else SonarQubeDuplicatedLines(SonarQubeMetricsBaseClass):
    """SonarQube duplicated lines collector."""
    valueKey = "duplicated_lines"
    totalKey = "lines"
| 23.272727
| 58
| 0.765625
| 20
| 256
| 9.75
| 0.6
| 0.230769
| 0.246154
| 0.338462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 256
| 10
| 59
| 25.6
| 0.886364
| 0.292969
| 0
| 0
| 0
| 0
| 0.123529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6386ae51574c0702fa4ae47e2c3e29449d380984
| 71
|
py
|
Python
|
utils_demo/percentage_format.py
|
IBM/nesa-demo
|
4e87217f44ff66414f78df6962ee8633d89f0cf5
|
[
"MIT"
] | 2
|
2021-12-16T13:16:56.000Z
|
2022-01-19T14:23:18.000Z
|
utils_demo/percentage_format.py
|
SocioProphet/nesa-demo
|
4e87217f44ff66414f78df6962ee8633d89f0cf5
|
[
"MIT"
] | null | null | null |
utils_demo/percentage_format.py
|
SocioProphet/nesa-demo
|
4e87217f44ff66414f78df6962ee8633d89f0cf5
|
[
"MIT"
] | 1
|
2022-03-07T19:57:59.000Z
|
2022-03-07T19:57:59.000Z
|
def percentage_format(x: float) -> str:
    """Format the fraction *x* as a percentage string with one decimal place.

    E.g. 0.125 -> '12.5%'.
    """
    scaled = x * 100
    return "{:.1f}%".format(scaled)
| 23.666667
| 39
| 0.591549
| 11
| 71
| 3.727273
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.183099
| 71
| 2
| 40
| 35.5
| 0.637931
| 0
| 0
| 0
| 0
| 0
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
639892302eb62bb4521dec46165a447fd1bb4884
| 370
|
py
|
Python
|
bitmovin_api_sdk/account/organizations/groups/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/account/organizations/groups/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/account/organizations/groups/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.account.organizations.groups.groups_api import GroupsApi
from bitmovin_api_sdk.account.organizations.groups.tenants.tenants_api import TenantsApi
from bitmovin_api_sdk.account.organizations.groups.invitations.invitations_api import InvitationsApi
from bitmovin_api_sdk.account.organizations.groups.permissions.permissions_api import PermissionsApi
| 74
| 100
| 0.905405
| 47
| 370
| 6.87234
| 0.319149
| 0.148607
| 0.185759
| 0.22291
| 0.544892
| 0.544892
| 0.544892
| 0
| 0
| 0
| 0
| 0
| 0.043243
| 370
| 4
| 101
| 92.5
| 0.912429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
63c88a948245c1382a743f6e1329878390cf91ac
| 51,310
|
py
|
Python
|
gazoo_device/tests/unit_tests/utility_tests/adb_utils_test.py
|
dedsec-9/gazoo-device
|
5ed2867c258da80e53b6aae07ec7a65efe473a28
|
[
"Apache-2.0"
] | null | null | null |
gazoo_device/tests/unit_tests/utility_tests/adb_utils_test.py
|
dedsec-9/gazoo-device
|
5ed2867c258da80e53b6aae07ec7a65efe473a28
|
[
"Apache-2.0"
] | null | null | null |
gazoo_device/tests/unit_tests/utility_tests/adb_utils_test.py
|
dedsec-9/gazoo-device
|
5ed2867c258da80e53b6aae07ec7a65efe473a28
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test script performs unit tests on functions in the adb_utils module."""
import grp
import json
import os
import subprocess
from unittest import mock
from gazoo_device import config
from gazoo_device import errors
from gazoo_device.tests.unit_tests.utils import unit_test_case
from gazoo_device.utility import adb_utils
from gazoo_device.utility import host_utils
# Path to the adb binary used throughout the tests as a known-good value.
ADB_CMD_PATH = "/usr/bin/adb"
# Canned `adb devices` output covering all three states the parser must
# handle (device / sideload / offline) for both USB serials and TCP endpoints.
FAKE_ADB_DEVICES_OUTPUT = ("List of devices attached\n"
"04576e89\tdevice\n"
"04576ee5\tsideload\n"
"04576eaz\toffline\n"
"123.45.67.89:5555\tdevice\n"
"123.45.67.90:5555\tsideload\n"
"123.45.67.91:5555\toffline\n\n")
# Serials expected to be extracted from FAKE_ADB_DEVICES_OUTPUT as "device".
ADB_DEVICES = ["04576e89", "123.45.67.89"]
# Serials expected to be extracted from FAKE_ADB_DEVICES_OUTPUT as "sideload".
SIDELOAD_DEVICES = ["04576ee5", "123.45.67.90:5555"]
# `adb reboot` and `adb root` produce no output on success.
FAKE_ADB_REBOOT = ""
FAKE_ADB_ROOT = ""
# Generic multi-line shell output used by the `adb shell` tests.
FAKE_SHELL = "abc\n123\n"
# Path to the fastboot binary and its basename (as passed to `which`).
FASTBOOT_CMD_PATH = "/usr/bin/fastboot"
FASTBOOT_CMD = os.path.basename(FASTBOOT_CMD_PATH)
# Serials expected to be parsed out of FAKE_FASTBOOT below.
FASTBOOT_DEVICES = ["04576e89", "06011HFDD0165R", "04576ee5"]
# Canned `fastboot devices` output, including the "Android Fastboot" variant.
FAKE_FASTBOOT = ("04576e89 fastboot\n"
"06011HFDD0165R Android Fastboot\n"
"04576ee5 fastboot\n\n")
# Canned `fastboot reboot` output for the exit_fastboot tests.
FAKE_FASTBOOT_REBOOT = ("Rebooting...\n\n"
"Finished. Total time: 0.157s\n")
# Identifiers for the fake device under test; adb and fastboot serials are
# intentionally identical here.
DEVICE_NAME = "somedevice"
DEVICE_ADB_SERIAL = "aabbccdd"
DEVICE_FASTBOOT_SERIAL = "aabbccdd"
# Fake grp.getgrnam() entry: ("plugdev", passwd, gid, members).
TEST_GROUP_ENTRY = ("plugdev", None, 46, None)
# Fake os.getgroups() result that includes the plugdev gid (46) above.
TEST_GOOD_GROUP_LIST = [42, 46]
# Fake uid / login name for the verify_user_has_fastboot tests.
TEST_USER_UID = 1000
TEST_USER_NAME = "test_user"
class AdbUtilsTests(unit_test_case.UnitTestCase):
"""ADB utility tests."""
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_010_adb_utils_get_fastboot_path_raises_error(self,
mock_get_command_path):
"""Verify get_fastboot_path raises error if get_command_path fails."""
with self.assertRaises(RuntimeError):
adb_utils.get_fastboot_path()
mock_get_command_path.assert_called()
@mock.patch.object(
host_utils, "get_command_path", return_value=FASTBOOT_CMD_PATH)
def test_011_adb_utils_get_fastboot_path_calls_get_command_path(
self, mock_get_command_path):
"""Verify get_fastboot_path calls get_command_path."""
self.assertEqual(FASTBOOT_CMD_PATH, adb_utils.get_fastboot_path())
mock_get_command_path.assert_called()
@mock.patch.object(
subprocess,
"check_output",
return_value=FAKE_FASTBOOT.encode("utf-8", errors="replace"))
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_020_adb_utils_get_fastboot_devices_calls_get_fastboot_path(
self, mock_get_fastboot_path, mock_subprocess):
"""Verify get_fastboot_devices calls get_fastboot_path."""
self.assertEqual(FASTBOOT_DEVICES, adb_utils.get_fastboot_devices())
mock_get_fastboot_path.assert_called()
mock_subprocess.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_021_adb_utils_get_fastboot_devices_bad_fastboot_path(
self, mock_has_command):
"""Verify get_fastboot_devices skips get_fastboot_path."""
devices = adb_utils.get_fastboot_devices(fastboot_path="bogus/path")
self.assertEqual(devices, [])
mock_has_command.assert_called()
@mock.patch.object(
subprocess,
"check_output",
side_effect=subprocess.CalledProcessError(-1, ["fastboot", "devices"]))
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_022_adb_utils_get_fastboot_devices_subprocess_errors(
self, mock_get_fastboot_path, mock_subprocess):
"""Verify get_fastboot_devices handles subprocess errors internally."""
self.assertEqual([], adb_utils.get_fastboot_devices())
mock_get_fastboot_path.assert_called()
mock_subprocess.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
def test_023_adb_utils_get_fastboot_path_uses_correct_path(self, mock_exists):
"""Verify get_fastboot_devices skips get_fastboot_path."""
path = adb_utils.get_fastboot_path(fastboot_path="genuine/path")
self.assertEqual(path, "genuine/path")
@mock.patch.object(
adb_utils, "get_fastboot_devices", return_value=FASTBOOT_DEVICES)
def test_030_adb_utils_is_fastboot_mode_true(self, mock_get_fastboot_devices):
"""Verify is_fastboot_mode returns True."""
adb_serial = "04576e89"
self.assertTrue(adb_utils.is_fastboot_mode(adb_serial))
mock_get_fastboot_devices.assert_called()
@mock.patch.object(
adb_utils, "get_fastboot_devices", return_value=FASTBOOT_DEVICES)
def test_031_adb_utils_is_fastboot_mode_false(self,
mock_get_fastboot_devices):
"""Verify is_fastboot_mode returns False."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_fastboot_mode(adb_serial))
mock_get_fastboot_devices.assert_called()
@mock.patch.object(
adb_utils, "get_sideload_devices", return_value=SIDELOAD_DEVICES)
def test_032_adb_utils_is_sideload_mode_true(self, mock_get_sideload_devices):
"""Verify is_sideload_mode on True."""
adb_serial = SIDELOAD_DEVICES[0]
self.assertTrue(adb_utils.is_sideload_mode(adb_serial))
mock_get_sideload_devices.assert_called_once()
@mock.patch.object(
adb_utils, "get_sideload_devices", return_value=SIDELOAD_DEVICES)
def test_033_adb_utils_is_sideload_mode_false(self,
mock_get_sideload_devices):
"""Verify is_sideload_mode on False."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_sideload_mode(adb_serial))
mock_get_sideload_devices.assert_called_once()
@mock.patch.object(
subprocess,
"check_output",
return_value=FASTBOOT_CMD_PATH.encode("utf-8", errors="replace"))
@mock.patch.object(grp, "getgrnam", return_value=TEST_GROUP_ENTRY)
@mock.patch.object(os, "getgroups", return_value=TEST_GOOD_GROUP_LIST)
@mock.patch.object(os, "getuid", return_value=TEST_USER_UID)
@mock.patch.object(os, "getlogin", return_value=TEST_USER_NAME)
def test_040_adb_utils_verify_user_has_fastboot(self, mock_getlogin,
mock_getuid, mock_getgroups,
mock_getgrnam,
mock_check_output):
"""Verify that verify_usr_has_fastboot works correctly."""
try:
adb_utils.verify_user_has_fastboot(DEVICE_NAME)
mock_check_output.assert_called()
except subprocess.CalledProcessError as err:
self.fail("verify_user_has_fastboot() raised error: {!r}".format(err))
@mock.patch.object(
subprocess,
"check_output",
side_effect=subprocess.CalledProcessError(1, ["which", FASTBOOT_CMD]))
def test_041_adb_utils_verify_user_has_fastboot_no_fastboot(
self, mock_check_output):
"""Verify that verify_user_has_fastboot raises if fastboot not present."""
with self.assertRaises(errors.DeviceError):
adb_utils.verify_user_has_fastboot(DEVICE_NAME)
mock_check_output.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
def test_050_adb_utils_get_adb_path_no_config_file(self,
mock_get_command_path):
"""Verify get_adb_path handles open errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
@mock.patch.object(json, "load", side_effect=ValueError)
def test_051_adb_utils_get_adb_path_bad_config_data(self, mock_json_load,
mock_get_command_path):
"""Verify get_adb_path handles json.load errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_json_load.assert_called()
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
def test_052_adb_utils_get_adb_path_no_adb_path_in_config(
self, mock_get_command_path):
"""Verify get_adb_path handles missing adb_path key errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_053_adb_utils_get_adb_path_bad_adb_path_raises_error(
self, mock_has_command):
"""Verify get_adb_path bad adb_path raises error."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{\"")
gdm_config.write(config.ADB_BIN_PATH_CONFIG)
gdm_config.write("\":")
gdm_config.write("\"/some/bad/path\"}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
with self.assertRaises(RuntimeError):
adb_utils.get_adb_path()
@mock.patch.object(os.path, "exists", return_value=True)
def test_054_adb_utils_get_fadb_path_uses_correct_path(self, mock_exists):
"""Verify get_adb_path defaults to path passed in."""
path = adb_utils.get_adb_path(adb_path="genuine/path")
self.assertEqual(path, "genuine/path")
@mock.patch.object(
adb_utils, "_adb_command", return_value=FAKE_ADB_DEVICES_OUTPUT)
def test_060_adb_utils_get_adb_devices_calls_get_adb_path(
self, mock_adb_command):
"""Verify get_adb_devices calls _adb_command."""
self.assertEqual(ADB_DEVICES, adb_utils.get_adb_devices())
mock_adb_command.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
@mock.patch.object(os.path, "exists", return_value=False)
def test_061_adb_utils_get_adb_devices_returns_list_when_no_adb(
self, mock_exists, mock_has_command):
"""Verify get_adb_devices calls _adb_command."""
self.assertEqual([], adb_utils.get_adb_devices())
@mock.patch.object(
adb_utils, "_adb_command", return_value=FAKE_ADB_DEVICES_OUTPUT)
def test_062_adb_utils_get_sideload_devices_on_success(
self, mock_adb_command):
"""Verify get_sideload_devices returns devices on success."""
self.assertEqual(SIDELOAD_DEVICES, adb_utils.get_sideload_devices())
mock_adb_command.assert_called_once_with("devices", adb_path=None)
@mock.patch.object(adb_utils, "_adb_command", side_effect=RuntimeError())
def test_063_adb_utils_get_sideload_devices_on_failure(
self, mock_adb_command):
"""Verify get_sideload_devices returns empty list on failure."""
self.assertEqual([], adb_utils.get_sideload_devices())
mock_adb_command.assert_called_once_with("devices", adb_path=None)
@mock.patch.object(adb_utils, "get_adb_devices", return_value=ADB_DEVICES)
def test_070_adb_utils_is_adb_mode_returns_true(self, mock_get_adb_devices):
"""Verify is_adb_mode calls get_adb_devices."""
adb_serial = "04576e89"
self.assertTrue(adb_utils.is_adb_mode(adb_serial))
mock_get_adb_devices.assert_called()
@mock.patch.object(adb_utils, "get_adb_devices", return_value=ADB_DEVICES)
def test_071_adb_utils_is_adb_mode_returns_false(self, mock_get_adb_devices):
"""Verify is_adb_mode calls get_adb_devices."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_adb_mode(adb_serial))
mock_get_adb_devices.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=False)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=True)
def test_080_adb_utils_is_device_online_yes_no(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and not is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_not_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=True)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=False)
def test_081_adb_utils_is_device_online_no_yes(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=False)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=False)
def test_082_adb_utils_is_device_online_no_no(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and is_fastboot_mode."""
self.assertFalse(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=True)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=True)
def test_083_adb_utils_is_device_online_yes_yes(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and not is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_not_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_100_adb_utils_adb_command_without_adb_serial(self,
mock_get_adb_path):
"""Verify _adb_command without adb_serial."""
command = "fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_101_adb_utils_adb_command_with_string_command(
self, mock_get_adb_path):
"""Verify _adb_command with string command."""
command = "fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_102_adb_utils_adb_command_with_string_command(
self, mock_get_adb_path):
"""Verify _adb_command with unicode command."""
command = u"fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_103_adb_utils_adb_command_with_list_command(self, mock_get_adb_path):
"""Verify _adb_command with command list."""
command = ["fake_command", "arg1"]
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_104_adb_utils_adb_command_with_tuple_command(self,
mock_get_adb_path):
"""Verify _adb_command with tuple list."""
command = ("fake_command", "arg1")
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(os.path, "exists", return_value=False)
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_105_adb_utils_adb_command_bad_adb_path(self, mock_has_command,
mock_os_path_exists):
"""Verify _adb_command skips get_adb_path raises error on bad path."""
with self.assertRaises(RuntimeError):
adb_utils._adb_command(
"fake_command", DEVICE_ADB_SERIAL, adb_path="bogus/path")
mock_os_path_exists.assert_called()
mock_has_command.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_106_adb_utils_adb_command_include_return_code(
self, mock_get_adb_path):
"""Verify _adb_command include_return_code returns tuple."""
command = "fake_command"
command_output = "fake output\n"
command_return_code = 1
mock_popen = mock.MagicMock(
spec=subprocess.Popen, returncode=command_return_code)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output, return_code = adb_utils._adb_command(
command, DEVICE_ADB_SERIAL, include_return_code=True)
self.assertEqual(command_output, output)
self.assertEqual(command_return_code, return_code)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_107_adb_utils_adb_command_with_offline(self, mock_get_adb_path):
"""Verify _adb_command succeeds if output includes "offline"."""
command = "fake_command"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (
FAKE_ADB_DEVICES_OUTPUT.encode("utf-8"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command)
self.assertEqual(FAKE_ADB_DEVICES_OUTPUT, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
@mock.patch.object(os.path, "exists", return_value=True)
def test_119_adb_utils_install_package_on_device_success(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device on success."""
fake_package_path = "/tmp/xxx.apk"
adb_utils.install_package_on_device(
fake_package_path, adb_serial=DEVICE_ADB_SERIAL, adb_path=ADB_CMD_PATH)
mock_path_exists.assert_called_once_with(fake_package_path)
mock_adb_command.assert_called_once_with(("install", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
@mock.patch.object(os.path, "exists", return_value=True)
def test_120_adb_utils_install_package_on_device_with_flags_success(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device with flags on success."""
fake_package_path = "/tmp/xxx.apk"
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
allow_downgrade=True,
allow_test_apk=True,
reinstall=True,
all_permissions=True)
mock_path_exists.assert_called_once_with(fake_package_path)
mock_adb_command.assert_called_once_with(
("install", "-d", "-g", "-r", "-t", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command")
@mock.patch.object(os.path, "exists")
def test_121_adb_utils_install_package_on_device_exception(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device raise exception."""
# Note:
# install_package_on_device() raises exception when:
# 1) package_path is not a file.
# 2) 'Success\n' is not found in command response.
fake_package_path = "/tmp/xxx.apk"
# 1) package path not a file
mock_path_exists.return_value = False
with self.assertRaises(ValueError):
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_path_exists.assert_called_with(fake_package_path)
# 2) 'Success\n' is not in command response
mock_path_exists.return_value = True
mock_adb_command.return_value = ""
with self.assertRaises(errors.DeviceError):
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_with(("install", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
def test_122_adb_utils_uninstall_package_on_device_success(
self, mock_adb_command):
"""Verify uninstall_package_on_device on success."""
fake_package_name = "com.google.fakepackage"
adb_utils.uninstall_package_on_device(
fake_package_name, adb_serial=DEVICE_ADB_SERIAL, adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(("uninstall", fake_package_name),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="")
def test_123_adb_utils_uninstall_package_on_device_exception(
self, mock_adb_command):
"""Verify uninstall_package_on_device raise exception."""
fake_package_name = "com.google.fakepackage"
with self.assertRaises(errors.DeviceError):
adb_utils.uninstall_package_on_device(
fake_package_name,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(("uninstall", fake_package_name),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_SHELL)
@mock.patch.object(os.path, "isfile", return_value=True)
def test_124_adb_utils_sideload_package_on_success(self, mock_os_path_isfile,
mock_adb_command):
"""Verify sideload_pacakge calls _adb_command."""
package_path = "/tmp/abc"
self.assertEqual(
adb_utils.sideload_package(package_path, DEVICE_ADB_SERIAL), FAKE_SHELL)
mock_os_path_isfile.assert_called_once_with(package_path)
mock_adb_command.assert_called_once_with(("sideload", package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=None)
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_SHELL)
@mock.patch.object(os.path, "isfile", return_value=False)
def test_125_adb_utils_sideload_package_on_exception(self,
mock_os_path_isfile,
mock_adb_command):
"""Verify sideload_pacakge raises exception when package_path invalid."""
package_path = "/tmp/abc"
with self.assertRaises(RuntimeError):
adb_utils.sideload_package(package_path, DEVICE_ADB_SERIAL)
mock_os_path_isfile.assert_called_once_with(package_path)
mock_adb_command.assert_not_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_REBOOT)
def test_140_adb_utils_enter_fastboot_calls_get_adb_path(
self, mock_adb_command):
"""Verify enter_fastboot calls get_adb_path."""
self.assertEqual(FAKE_ADB_REBOOT,
adb_utils.enter_fastboot(DEVICE_ADB_SERIAL))
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_REBOOT)
def test_141_adb_utils_enter_sideload(self, mock_adb_command):
"""Verify enter_sideload calls _adb_command."""
# Note:
# Verify both 1) sideload auto reboot and 2) no auto reboot.
# With auto_reboot: False
self.assertEqual(
FAKE_ADB_REBOOT,
adb_utils.enter_sideload(DEVICE_ADB_SERIAL, auto_reboot=False))
mock_adb_command.assert_called_with(("reboot", "sideload"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=None)
# With auto_reboot: True
self.assertEqual(
FAKE_ADB_REBOOT,
adb_utils.enter_sideload(DEVICE_ADB_SERIAL, auto_reboot=True))
mock_adb_command.assert_called_with(("reboot", "sideload-auto-reboot"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=None)
@mock.patch.object(
subprocess,
"check_output",
return_value=FAKE_FASTBOOT_REBOOT.encode("utf-8", errors="replace"))
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_150_adb_utils_exit_fastboot_calls_get_fastboot_path(
self, mock_get_fastboot_path, mock_os_path_exists, mock_subprocess):
"""Verify exit_fastboot calls get_fastboot_path."""
self.assertEqual(FAKE_FASTBOOT_REBOOT,
adb_utils.exit_fastboot(DEVICE_ADB_SERIAL))
mock_get_fastboot_path.assert_called()
mock_os_path_exists.assert_called()
mock_subprocess.assert_called()
@mock.patch.object(os.path, "exists", return_value=False)
@mock.patch.object(adb_utils, "get_fastboot_path")
def test_151_adb_utils_exit_fastboot_bad_fastboot_path(
self, mock_get_fastboot_path, mock_os_path_exists):
"""Verify exit_fastboot skips get_fastboot_path."""
with self.assertRaises(RuntimeError):
adb_utils.exit_fastboot(DEVICE_ADB_SERIAL, fastboot_path="bogus/path")
mock_get_fastboot_path.assert_not_called()
mock_os_path_exists.assert_called()
@mock.patch.object(
subprocess,
"check_output",
side_effect=subprocess.CalledProcessError(
-1, ["timeout", "10.0", "fastboot", "reboot"]))
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
@mock.patch.object(os.path, "exists", return_value=True)
def test_152_adb_utils_exit_fastboot_bad_request(self, mock_get_fastboot_path,
mock_os_path_exists,
mock_check_output):
"""Verify exit_fastboot returns None."""
result = adb_utils.exit_fastboot(DEVICE_ADB_SERIAL)
self.assertIsNone(result)
mock_get_fastboot_path.assert_called()
mock_os_path_exists.assert_called()
mock_check_output.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_REBOOT)
def test_160_adb_utils_reboot_device_calls_get_adb_path(
self, mock_adb_command):
"""Verify reboot_device calls get_adb_path."""
self.assertEqual(FAKE_ADB_REBOOT,
adb_utils.reboot_device(DEVICE_ADB_SERIAL))
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_ROOT)
def test_170_adb_utils_root_device_calls_get_adb_path(self, mock_adb_command):
"""Verify root_device calls get_adb_path."""
self.assertEqual(FAKE_ADB_ROOT, adb_utils.root_device(DEVICE_ADB_SERIAL))
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("pull output\n", 0))
def test_180_adb_utils_pull_from_device_with_single_file(
self, mock_adb_command):
"""Verify pull_file for a single source file."""
sources = "/some/device/path/to/file"
adb_utils.pull_from_device(DEVICE_ADB_SERIAL, sources)
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("pull output\n", 0))
def test_181_adb_utils_pull_from_device_with_multiple_files(
self, mock_adb_command):
"""Verify pull_from_device calls get_adb_path."""
sources = ["/some/device/path/to/file", "/some/device/path/to/other_file"]
adb_utils.pull_from_device(DEVICE_ADB_SERIAL, sources)
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("pull output\n", 1))
def test_182_adb_utils_pull_from_device_bad_returncode(
self, mock_adb_command):
"""Verify pull_from_device raises if ADB command fails."""
sources = "/some/device/path/to/file"
with self.assertRaises(RuntimeError):
adb_utils.pull_from_device(DEVICE_ADB_SERIAL, sources)
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command")
@mock.patch.object(os.path, "exists", return_value=False)
def test_183_adb_utils_pull_from_device_bad_destination_path(
self, mock_os_path_exists, mock_adb_command):
"""Verify pull_from_device provided bad destination path."""
sources = "/some/device/path/to/file"
destination_path = "/bogus/path"
with self.assertRaises(ValueError):
adb_utils.pull_from_device(
DEVICE_ADB_SERIAL, sources, destination_path=destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_not_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("push output\n", 0))
@mock.patch.object(os.path, "exists", return_value=True)
def test_190_adb_utils_push_to_device_with_single_file(
self, mock_os_path_exists, mock_adb_command):
"""Verify push_to_device sends a single file."""
sources = "/fake/local/path"
destination_path = "/fake/device/path"
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("push output\n", 0))
@mock.patch.object(os.path, "exists", return_value=True)
def test_191_adb_utils_push_to_device_with_multiple_files(
self, mock_os_path_exists, mock_adb_command):
"""Verify push_to_device sends multiple files."""
sources = ["/fake/local/path/to/file1", "/fake/local/path/to/file2"]
destination_path = "/fake/device/path"
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_called()
@mock.patch.object(os.path, "exists", return_value=False)
def test_192_adb_utils_push_to_device_fails_single_file(
self, mock_os_path_exists):
"""Verify push_to_device fails single file path check."""
sources = "/bogus/local/file"
destination_path = "/fake/device/path"
with self.assertRaises(ValueError):
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
@mock.patch.object(os.path, "exists", side_effect=[True, False])
def test_193_adb_utils_push_to_device_fails_multiple_files(
self, mock_os_path_exists):
"""Verify push_to_device fails multiple files path check."""
sources = ["/fake/local/path/to/file1", "/fake/local/path/to/file2"]
destination_path = "/fake/device/path"
with self.assertRaises(ValueError):
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("push output\n", 1))
@mock.patch.object(os.path, "exists", return_value=True)
def test_194_adb_utils_push_to_device_bad_returncode(self,
mock_os_path_exists,
mock_adb_command):
"""Verify push_file subprocess.communicate returns non-zero returncode."""
sources = "/fake/local/path"
destination_path = "/fake/device/path"
with self.assertRaises(RuntimeError):
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value="fake\n")
def test_200_adb_shell(self, mock_adb_command):
"""Verifies shell works as expected."""
self.assertEqual("fake\n", adb_utils.shell("12345", 'echo "fake"'))
mock_adb_command.assert_called_once_with(
["shell", 'echo "fake"'], "12345",
adb_path=None, retries=mock.ANY, timeout=None,
include_return_code=False)
@mock.patch.object(adb_utils, "_adb_command", return_value=("fake\n", 0))
def test_201_adb_shell_include_return_code(self, mock_adb_command):
"""Verifies shell include return code will return output and code tuple."""
output, return_code = adb_utils.shell(
"12345", 'echo "fake"', include_return_code=True)
self.assertEqual("fake\n", output)
self.assertEqual(0, return_code)
mock_adb_command.assert_called_once_with(
["shell", 'echo "fake"'], "12345",
adb_path=None, retries=mock.ANY, timeout=None, include_return_code=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value="/fake/path/to/fastboot")
@mock.patch.object(os.path, "exists", return_value=False)
def test_300_adb_utils_fastboot_command_without_fastboot_path(
self, mock_exists, mock_get_fastboot_path):
"""Verify get_fastboot_path called when fastboot_path is not given."""
with self.assertRaises(RuntimeError):
adb_utils._fastboot_command("fake command")
mock_get_fastboot_path.assert_called_once()
mock_exists.assert_called()
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value="/fake/path/to/fastboot")
@mock.patch.object(os.path, "exists", return_value=False)
def test_301_adb_utils_fastboot_command_with_bad_fastboot_path(
self, mock_exists, mock_get_fastboot_path):
"""Verify _fastboot_command raise error when given a bad fastboot_path."""
with self.assertRaises(RuntimeError):
adb_utils._fastboot_command(
"fake_command", fastboot_path="/fake/path/to/fastboot")
mock_get_fastboot_path.assert_not_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
def test_302_adb_utils_fastboot_command_without_fastboot_serial(
self, mock_exists):
"""Verify _fastboot_command without fastboot_serial."""
fastboot_executable = "fastboot"
command = "fake_command"
command_output = "fake_command_output"
mock_proc = mock.MagicMock(spec=subprocess.Popen)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(
command, fastboot_path=fastboot_executable)
self.assertEqual(output, command_output)
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_303_adb_utils_fastboot_command_with_string_command(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with string command."""
command = "fake_command"
command_output = "fake command output"
mock_proc = mock.MagicMock(spec=subprocess.Popen)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_304_adb_utils_fastboot_command_with_string_command_unicode(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with unicode string command."""
command = u"fake_command"
command_output = "fake command output"
mock_proc = mock.MagicMock(spec=subprocess.Popen)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_305_adb_utils_fastboot_command_with_list_command(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with command list."""
command = ["fake_command", "arg1"]
command_output = "fake output"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_306_adb_utils_fastboot_command_with_tuple_command(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with command tuple."""
command = ("fake_command", "arg1")
command_output = "fake output"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_307_adb_utils_fastboot_command_include_return_code(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command include_return_code works."""
command = "fake_command"
command_output = "fake output"
command_return_code = 1
mock_proc = mock.MagicMock(
spec=subprocess.Popen, returncode=command_return_code)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output, return_code = adb_utils._fastboot_command(
command, DEVICE_FASTBOOT_SERIAL, include_return_code=True)
self.assertEqual(command_output, output)
self.assertEqual(command_return_code, return_code)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(adb_utils, "_fastboot_command")
def test_308_adb_utils_fastboot_unlock_device(self, mock_fastboot_command):
"""Verify fastbook_unlock_device calls _fastboot_command correctly."""
fastboot_serial = "fake_fastboot_serial"
fastboot_path = FASTBOOT_CMD_PATH
fastboot_timeout = 30.0
adb_utils.fastboot_unlock_device(
fastboot_serial, fastboot_path=fastboot_path, timeout=fastboot_timeout)
mock_fastboot_command.assert_called()
mock_fastboot_command.assert_called_with(("flashing", "unlock"),
fastboot_serial=fastboot_serial,
fastboot_path=fastboot_path,
timeout=fastboot_timeout)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_309_adb_utils_fastboot_lock_device(self, mock_fastboot_command):
"""Verify fastbook_lock_device calls _fastboot_command correctly."""
fastboot_serial = "fake_fastboot_serial"
fastboot_path = FASTBOOT_CMD_PATH
fastboot_timeout = 30.0
adb_utils.fastboot_lock_device(
fastboot_serial, fastboot_path=fastboot_path, timeout=fastboot_timeout)
mock_fastboot_command.assert_called()
mock_fastboot_command.assert_called_with(("flashing", "lock"),
fastboot_serial=fastboot_serial,
fastboot_path=fastboot_path,
timeout=fastboot_timeout)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_310_adb_utils_fastboot_wipe_userdata(self, mock_fastboot_command):
"""Verify fastboot_wipe_userdata calls _fastboot_command correctly."""
fastboot_serial = "fake_fastboot_serial"
fastboot_path = FASTBOOT_CMD_PATH
fastboot_timeout = 30.0
adb_utils.fastboot_wipe_userdata(
fastboot_serial, fastboot_path=fastboot_path, timeout=fastboot_timeout)
mock_fastboot_command.assert_called()
mock_fastboot_command.assert_called_with(
"-w",
fastboot_serial=fastboot_serial,
fastboot_path=fastboot_path,
timeout=fastboot_timeout)
@mock.patch.object(
adb_utils,
"_adb_command",
return_value="connected to aabbccdd")
def test_311_adb_connect(self, mock_adb_command):
"""Verify adb connect method."""
adb_utils.connect(DEVICE_ADB_SERIAL)
@mock.patch.object(
adb_utils,
"_adb_command",
return_value="unable to connect")
def test_312_adb_connect_failure_to_connect(self, mock_adb_command):
"""Verify adb connect method."""
with self.assertRaises(errors.DeviceError):
adb_utils.connect(DEVICE_ADB_SERIAL)
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_313_adb_command_terminate(self, mock_get_adb_path):
"""Verify adb connect method."""
command = "fake_command"
command_output = "fake output\n"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.side_effect = subprocess.TimeoutExpired(
cmd=command, timeout=1)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
with mock.patch.object(mock_proc, "terminate") as mock_terminate:
with self.assertRaises(subprocess.TimeoutExpired):
adb_utils.shell(DEVICE_ADB_SERIAL, command, timeout=1)
mock_terminate.assert_called_once()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_314_adb_shell_retry_failed(self, mock_adb_command):
"""Verify shell works as expected."""
command_output = "error: closed"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
with self.assertRaises(errors.DeviceError):
adb_utils.shell('echo "fake"', "12345")
@mock.patch.object(adb_utils, "_adb_command", return_value=("Output", 0))
def test_320_adb_utils_add_port_forwarding_success(self, mock_adb_command):
"""Verifies add_port_forwarding on success."""
output = adb_utils.add_port_forwarding(host_port=123,
device_port=456,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "tcp:123", "tcp:456"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
self.assertEqual(output, "Output")
@mock.patch.object(adb_utils, "_adb_command", return_value=("Error", 1))
def test_321_adb_utils_add_port_forwarding_exception(self, mock_adb_command):
"""Verifies add_port_forwarding raises exception."""
with self.assertRaises(RuntimeError):
adb_utils.add_port_forwarding(host_port=123,
device_port=456,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "tcp:123", "tcp:456"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
@mock.patch.object(adb_utils, "_adb_command", return_value=("Output", 0))
def test_325_adb_utils_remove_port_forwarding_success(self, mock_adb_command):
"""Verifies remove_port_forwarding on success."""
output = adb_utils.remove_port_forwarding(host_port=123,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "--remove", "tcp:123"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
self.assertEqual(output, "Output")
@mock.patch.object(adb_utils, "_adb_command", return_value=("Error", 1))
def test_326_adb_utils_remove_port_forwarding_exception(self,
mock_adb_command):
"""Verifies remove_port_forwarding on raise exception."""
with self.assertRaises(RuntimeError):
adb_utils.remove_port_forwarding(host_port=123,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "--remove", "tcp:123"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_330_adb_utils_fastboot_check_is_unlocked(self,
mock_fastboot_command):
"""Verifies fastboot_check_is_unlocked function return correct result."""
fastboot_serial = "fake_fastboot_serial"
unlocked_output = "unlocked: yes"
locked_output = "unlocked: no"
mock_fastboot_command.return_value = unlocked_output
unlocked_expected = adb_utils.fastboot_check_is_unlocked(
fastboot_serial=fastboot_serial)
mock_fastboot_command.return_value = locked_output
locked_expected = adb_utils.fastboot_check_is_unlocked(
fastboot_serial=fastboot_serial)
self.assertTrue(unlocked_expected)
self.assertFalse(locked_expected)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_331_adb_utils_fastboot_check_is_unlocked_exception(
self, mock_fastboot_command):
"""Verifies fastboot_check_is_unlocked function raises with bad output."""
fastboot_serial = "fake_fastboot_serial"
unknown_output = "something went wrong"
mock_fastboot_command.return_value = unknown_output
with self.assertRaises(RuntimeError):
adb_utils.fastboot_check_is_unlocked(fastboot_serial=fastboot_serial)
@mock.patch.object(
adb_utils, "_adb_command", return_value=("bugreport output\n", 0))
def test_340_adb_utils_bugreport(self, mock_adb_command):
"""Verifies bugreport."""
adb_utils.bugreport(DEVICE_ADB_SERIAL)
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("bugreport output\n", 1))
def test_341_adb_utils_bugreport_bad_returncode(
self, mock_adb_command):
"""Verifies bugreport raises if ADB command fails."""
with self.assertRaises(RuntimeError):
adb_utils.bugreport(DEVICE_ADB_SERIAL)
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command")
@mock.patch.object(os.path, "exists", return_value=False)
def test_342_adb_utils_pull_from_device_bad_destination_path(
self, mock_os_path_exists, mock_adb_command):
"""Verifies bugreport provided bad destination path."""
destination_path = "/bogus/path"
with self.assertRaises(ValueError):
adb_utils.bugreport(DEVICE_ADB_SERIAL, destination_path=destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_not_called()
if __name__ == "__main__":
  # Delegate to the project's unittest runner when executed directly.
  unit_test_case.main()
| 47.159926
| 80
| 0.717657
| 6,765
| 51,310
| 5.004139
| 0.061641
| 0.056479
| 0.059817
| 0.038283
| 0.853277
| 0.806516
| 0.754113
| 0.716953
| 0.677015
| 0.640475
| 0
| 0.01348
| 0.184564
| 51,310
| 1,087
| 81
| 47.203312
| 0.795626
| 0.100838
| 0
| 0.670857
| 0
| 0
| 0.090209
| 0.01193
| 0
| 0
| 0
| 0
| 0.212571
| 1
| 0.096
| false
| 0
| 0.011429
| 0
| 0.108571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
63cb3e72171de82701ab61371e19e92285bb291d
| 8,866
|
py
|
Python
|
gbdtmo/gbdtmo.py
|
samanemami/GBDTMO
|
33ee163d5db4dd71dae620c8e1f8295ed33c0a24
|
[
"MIT"
] | 2
|
2021-09-15T16:18:15.000Z
|
2022-01-12T10:35:18.000Z
|
gbdtmo/gbdtmo.py
|
samanemami/GBDTMO
|
33ee163d5db4dd71dae620c8e1f8295ed33c0a24
|
[
"MIT"
] | null | null | null |
gbdtmo/gbdtmo.py
|
samanemami/GBDTMO
|
33ee163d5db4dd71dae620c8e1f8295ed33c0a24
|
[
"MIT"
] | null | null | null |
import numpy as np
import numpy.ctypeslib as npct
import ctypes
from .histogram import get_bins_maps
from .lib_utils import *
class BoostUtils:
    """Thin wrapper around a native boosting handle exposed via ctypes.

    Subclasses create the opaque ``self._boostnode`` handle; every method
    here simply forwards to the corresponding C entry point on ``self.lib``.
    """

    def __init__(self, lib):
        self.lib = lib  # loaded ctypes library exposing the native API
        self._boostnode = None  # opaque native handle, set by a subclass

    def _set_gh(self, g, h):
        """Pass gradient (g) and hessian (h) arrays to the native booster."""
        self.lib.SetGH(self._boostnode, g, h)

    def _set_bin(self, bins):
        """Register per-feature bin edges with the native booster.

        Args:
            bins: sequence of 1-D arrays, one array of bin edges per feature.
        """
        # The old code initialized ``value`` as a list and then discarded it;
        # build both arrays directly instead.
        num = np.array([len(edges) for edges in bins], np.uint16)
        value = np.concatenate(bins, axis=0)
        self.lib.SetBin(self._boostnode, num, value)

    def _set_label(self, x: np.ndarray, is_train: bool):
        """Hand labels to the native side.

        Only float64 or int32 dtype and 1-D or 2-D shape are supported; the
        ctypes argtypes are chosen to match the array rank.

        Raises:
            ValueError: if ``x`` is not 1-D or 2-D.
            TypeError: if ``x`` has an unsupported dtype.
        """
        # ``raise`` instead of ``assert`` so validation survives ``python -O``.
        if x.dtype == np.float64:
            if x.ndim == 1:
                self.lib.SetLabelDouble.argtypes = [ctypes.c_void_p, array_1d_double, ctypes.c_bool]
            elif x.ndim == 2:
                self.lib.SetLabelDouble.argtypes = [ctypes.c_void_p, array_2d_double, ctypes.c_bool]
            else:
                raise ValueError("label must be 1D or 2D array")
            self.lib.SetLabelDouble(self._boostnode, x, is_train)
        elif x.dtype == np.int32:
            if x.ndim == 1:
                self.lib.SetLabelInt.argtypes = [ctypes.c_void_p, array_1d_int, ctypes.c_bool]
            elif x.ndim == 2:
                self.lib.SetLabelInt.argtypes = [ctypes.c_void_p, array_2d_int, ctypes.c_bool]
            else:
                raise ValueError("label must be 1D or 2D array")
            self.lib.SetLabelInt(self._boostnode, x, is_train)
        else:
            raise TypeError("dtype of label must be float64 or int32")

    def boost(self):
        """Run one boosting round."""
        self.lib.Boost(self._boostnode)

    def dump(self, path):
        """Serialize the native model to ``path``."""
        self.lib.Dump(self._boostnode, path)

    def load(self, path):
        """Load a native model from ``path``."""
        self.lib.Load(self._boostnode, path)

    def train(self, num):
        """Train for ``num`` rounds."""
        self.lib.Train(self._boostnode, num)
class GBDTSingle(BoostUtils):
    """GBDT booster with a single (or flat multi-class) output.

    Args:
        lib: loaded ctypes library exposing the native API.
        out_dim: output dimension; must exceed 1 only for ``train_multi``.
        params: hyper-parameter overrides merged into ``default_params()``.
    """

    def __init__(self, lib, out_dim=1, params=None):
        # ``params=None`` replaces the mutable-default ``{}``; the previous
        # ``super(BoostUtils, self).__init__()`` call only reached
        # ``object.__init__`` and has been dropped.
        super().__init__(lib)
        self.out_dim = out_dim
        self.params = default_params()
        self.params.update(params or {})
        # Expose each hyper-parameter as an attribute (e.g. self.max_bins).
        self.__dict__.update(self.params)

    def set_booster(self, inp_dim):
        """Create the native booster handle for ``inp_dim`` input features."""
        self._boostnode = self.lib.SingleNew(
            inp_dim,
            self.params['loss'],
            self.params['max_depth'],
            self.params['max_leaves'],
            self.params['seed'],
            self.params['min_samples'],
            self.params['num_threads'],
            self.params['lr'],
            self.params['reg_l1'],
            self.params['reg_l2'],
            self.params['gamma'],
            self.params['base_score'],
            self.params['early_stop'],
            self.params['verbose'],
            self.params['hist_cache'])

    def set_data(self, train_set: tuple = None, eval_set: tuple = None):
        """Bind (data, label) tuples for training and/or evaluation.

        The training branch also creates the booster and computes the
        histogram bins/maps; predictions buffers are flat 1-D arrays of
        length ``len(data) * out_dim``.
        """
        if train_set is not None:
            self.data, self.label = train_set
            self.set_booster(self.data.shape[-1])
            self.bins, self.maps = get_bins_maps(self.data, self.max_bins, self.num_threads)
            self._set_bin(self.bins)
            self.maps = np.ascontiguousarray(self.maps.transpose())
            self.preds_train = np.full(len(self.data) * self.out_dim, self.base_score, dtype='float64')
            self.lib.SetData.argtypes = [ctypes.c_void_p, array_2d_uint16, array_2d_double,
                                         array_1d_double, ctypes.c_int, ctypes.c_bool]
            self.lib.SetData(self._boostnode, self.maps, self.data,
                             self.preds_train, len(self.data), True)
            if self.label is not None:
                self._set_label(self.label, True)
        if eval_set is not None:
            self.data_eval, self.label_eval = eval_set
            self.preds_eval = np.full(len(self.data_eval) * self.out_dim, self.base_score, dtype='float64')
            # Eval data needs no histogram maps; pass a dummy array.
            maps = np.zeros((1, 1), 'uint16')
            self.lib.SetData(self._boostnode, maps, self.data_eval,
                             self.preds_eval, len(self.data_eval), False)
            if self.label_eval is not None:
                self._set_label(self.label_eval, False)

    def train_multi(self, num):
        """Train ``num`` rounds in multi-class mode (requires out_dim > 1).

        Raises:
            ValueError: if ``out_dim`` is not greater than 1 (previously an
                ``assert``, which vanishes under ``python -O``).
        """
        if self.out_dim <= 1:
            raise ValueError("out_dim must bigger than 1")
        self.lib.TrainMulti(self._boostnode, num, self.out_dim)

    def predict(self, x, num_trees=0):
        """Predict for ``x``; returns a 1-D array, or (n, out_dim) if multi."""
        preds = np.full(len(x) * self.out_dim, self.base_score, dtype='float64')
        if self.out_dim == 1:
            self.lib.Predict.argtypes = [ctypes.c_void_p, array_2d_double, array_1d_double,
                                         ctypes.c_int, ctypes.c_int]
            self.lib.Predict(self._boostnode, x, preds, len(x), num_trees)
            return preds
        else:
            self.lib.PredictMulti(self._boostnode, x, preds, len(x), self.out_dim, num_trees)
            # Native output is laid out (out_dim, n); return it as (n, out_dim).
            preds = np.reshape(preds, (self.out_dim, len(x)))
            return np.transpose(preds)

    def reset(self):
        """Reset the native booster state."""
        self.lib.Reset(self._boostnode)
class GBDTMulti(BoostUtils):
    """GBDT booster with vector-valued (multi-output) leaves.

    Args:
        lib: loaded ctypes library exposing the native API.
        out_dim: number of outputs per sample.
        params: hyper-parameter overrides merged into ``default_params()``.
    """

    def __init__(self, lib, out_dim=1, params=None):
        # ``params=None`` replaces the mutable-default ``{}``; the previous
        # ``super(BoostUtils, self).__init__()`` call only reached
        # ``object.__init__`` and has been dropped.
        super().__init__(lib)
        self.out_dim = out_dim
        self.params = default_params()
        self.params.update(params or {})
        # Expose each hyper-parameter as an attribute (e.g. self.max_bins).
        self.__dict__.update(self.params)

    def set_booster(self, inp_dim, out_dim):
        """Create the native booster handle for the given input/output dims."""
        self._boostnode = self.lib.MultiNew(
            inp_dim,
            self.out_dim,
            self.params['topk'],
            self.params['loss'],
            self.params['max_depth'],
            self.params['max_leaves'],
            self.params['seed'],
            self.params['min_samples'],
            self.params['num_threads'],
            self.params['lr'],
            self.params['reg_l1'],
            self.params['reg_l2'],
            self.params['gamma'],
            self.params['base_score'],
            self.params['early_stop'],
            self.params['one_side'],
            self.params['verbose'],
            self.params['hist_cache'])

    def set_data(self, train_set: tuple = None, eval_set: tuple = None):
        """Bind (data, label) tuples for training and/or evaluation.

        Prediction buffers here are 2-D ``(n, out_dim)`` arrays, unlike the
        flat buffers used by GBDTSingle.
        """
        if train_set is not None:
            self.data, self.label = train_set
            self.set_booster(self.data.shape[-1], self.out_dim)
            self.bins, self.maps = get_bins_maps(self.data, self.max_bins, self.num_threads)
            self._set_bin(self.bins)
            self.maps = np.ascontiguousarray(self.maps.transpose())
            self.preds_train = np.full((len(self.data), self.out_dim), self.base_score, dtype='float64')
            self.lib.SetData.argtypes = [ctypes.c_void_p, array_2d_uint16, array_2d_double,
                                         array_2d_double, ctypes.c_int, ctypes.c_bool]
            self.lib.SetData(self._boostnode, self.maps, self.data,
                             self.preds_train, len(self.data), True)
            if self.label is not None:
                self._set_label(self.label, True)
        if eval_set is not None:
            self.data_eval, self.label_eval = eval_set
            self.preds_eval = np.full((len(self.data_eval), self.out_dim), self.base_score, dtype='float64')
            # Eval data needs no histogram maps; pass a dummy array.
            maps = np.zeros((1, 1), 'uint16')
            self.lib.SetData(self._boostnode, maps, self.data_eval,
                             self.preds_eval, len(self.data_eval), False)
            if self.label_eval is not None:
                self._set_label(self.label_eval, False)

    def predict(self, x, num_trees=0):
        """Predict for ``x``; returns an ``(n, out_dim)`` float64 array."""
        preds = np.full((len(x), self.out_dim), self.base_score, dtype='float64')
        self.lib.Predict.argtypes = [ctypes.c_void_p, array_2d_double, array_2d_double,
                                     ctypes.c_int, ctypes.c_int]
        self.lib.Predict(self._boostnode, x, preds, len(x), num_trees)
        return preds
| 46.663158
| 108
| 0.518498
| 1,037
| 8,866
| 4.197686
| 0.131148
| 0.082702
| 0.034459
| 0.034918
| 0.77349
| 0.744774
| 0.735814
| 0.734436
| 0.732598
| 0.682058
| 0
| 0.012631
| 0.374915
| 8,866
| 189
| 109
| 46.910053
| 0.772826
| 0.003835
| 0
| 0.592593
| 0
| 0
| 0.045073
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 1
| 0.111111
| false
| 0
| 0.030864
| 0
| 0.179012
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
63d0e3d64e13f20d2c0c373757cdad7580a1300b
| 4,300
|
py
|
Python
|
src/utils/plots.py
|
Light4Code/tensorflow-research
|
392c2d7bc376f491fec68d479b130f883d6d028d
|
[
"MIT"
] | 5
|
2020-02-29T16:28:55.000Z
|
2021-11-24T07:47:36.000Z
|
src/utils/plots.py
|
Light4Code/tensorflow-research
|
392c2d7bc376f491fec68d479b130f883d6d028d
|
[
"MIT"
] | 3
|
2020-11-13T18:41:57.000Z
|
2022-02-10T01:37:51.000Z
|
src/utils/plots.py
|
Light4Code/tensorflow-research
|
392c2d7bc376f491fec68d479b130f883d6d028d
|
[
"MIT"
] | 4
|
2020-03-24T10:50:17.000Z
|
2020-06-02T13:07:28.000Z
|
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import utils.image_util as iu
from utils.custom_types import Vector
def plot_history(loss, acc, val_loss, val_acc):
    """Plot training/validation loss and accuracy curves, one per subplot."""
    plt.figure(figsize=(20, 10))
    panels = [("Loss", loss, val_loss), ("Accuracy", acc, val_acc)]
    for row, (label, train_series, val_series) in enumerate(panels, start=1):
        plt.subplot(2, 1, row)
        plt.title(label)
        plt.grid()
        plt.plot(train_series)
        plt.plot(val_series)
        plt.xlabel("Epoch")
        plt.ylabel(label)
        plt.legend(["Train", "Test"], loc="upper left")
    plt.show()
def plot_difference(
    predictions, test_images, input_shape: Vector, threshold: float = 0.0
):
    """Render original/prediction/diff/overlay panels for each test image."""
    plt.figure(figsize=(20, 10))
    rows = len(predictions)
    shape = (input_shape[0], input_shape[1])
    cmap = "gray"
    if input_shape[2] > 1:
        shape = (input_shape[0], input_shape[1], input_shape[2])
    panel = 1
    for pos, test_image in enumerate(test_images):
        original = test_image.reshape(shape)
        predicted = predictions[pos].reshape(shape)
        diff, se = iu.create_diff(original, predicted, threshold)
        # Mask out everything that is not flagged in the diff.
        overlay_mask = ma.masked_where(diff == False, diff)  # noqa: E712
        plt.subplot(rows, 4, panel)
        plt.title("Original")
        plt.imshow(original, interpolation="none", cmap=cmap)
        panel += 1
        plt.subplot(rows, 4, panel)
        plt.title("Prediction")
        plt.imshow(predicted, interpolation="none", cmap=cmap)
        panel += 1
        plt.subplot(rows, 4, panel)
        plt.title("Diff (SE: {0})".format(round(se, 2)))
        plt.imshow(diff, interpolation="none", cmap=cmap)
        panel += 1
        plt.subplot(rows, 4, panel)
        plt.title("Overlay")
        plt.imshow(original, interpolation="none", cmap=cmap)
        plt.imshow(overlay_mask, cmap="jet", interpolation="none", alpha=0.7)
        panel += 1
    plt.show()
def plot_prediction(
    predictions, test_images, input_shape: Vector, threshold: float = 0.4
):
    """Render original/prediction/overlay panels for each test image."""
    plt.figure(figsize=(20, 10))
    rows = len(predictions)
    shape = (input_shape[0], input_shape[1])
    cmap = "gray"
    if input_shape[2] > 1:
        shape = (input_shape[0], input_shape[1], input_shape[2])
    panel = 1
    for pos, test_image in enumerate(test_images):
        original = test_image.reshape(shape)
        predicted = predictions[pos].reshape(shape)
        # Only show prediction pixels at or above the threshold.
        overlay_mask = ma.masked_where(predicted < threshold, predicted)
        plt.subplot(rows, 3, panel)
        plt.title("Original")
        plt.imshow(original, interpolation="none", cmap=cmap)
        panel += 1
        plt.subplot(rows, 3, panel)
        plt.title("Prediction")
        plt.imshow(predicted, interpolation="none", cmap=cmap)
        panel += 1
        plt.subplot(rows, 3, panel)
        plt.title("Overlay")
        plt.imshow(original, interpolation="none", cmap=cmap)
        plt.imshow(overlay_mask, cmap="jet", interpolation="none", alpha=0.7)
        panel += 1
    plt.show()
def plot_classification(predictions, test_images, input_shape: Vector,
                        classes: list, threshold: float = 0.4):
    """Show each test image titled with its predicted class label.

    Args:
        predictions: per-image class-score vectors.
        test_images: flat images, reshaped to ``input_shape`` for display.
        input_shape: (height, width, channels) of the images.
        classes: class names, indexed by the argmax of a prediction.
        threshold: minimum score for accepting the argmax class; below it
            the image is titled "Unknown".
    """
    # The annotation was ``classes: []`` — a list *instance*, which is
    # meaningless as a type; ``list`` is what was intended.
    plt.figure(figsize=(20, 10))
    pred_count = len(predictions)
    plt_shape = (input_shape[0], input_shape[1])
    plt_cmap = "gray"
    if input_shape[2] > 1:
        plt_shape = (input_shape[0], input_shape[1], input_shape[2])
    index = 1
    plt_index = 0
    for test_image in test_images:
        original_image = test_image.reshape(plt_shape)
        pred = predictions[plt_index]
        c_idx = np.argmax(pred)
        plt.subplot(pred_count, 1, index)
        value = pred[c_idx]
        if value >= threshold:
            plt.title("{0} ({1})".format(classes[c_idx], value))
        else:
            plt.title("{0} ({1})".format("Unknown", value))
        plt.imshow(original_image, interpolation="none", cmap=plt_cmap)
        index += 1
        plt_index += 1
    plt.show()
| 31.851852
| 108
| 0.608837
| 576
| 4,300
| 4.359375
| 0.152778
| 0.033453
| 0.050179
| 0.060534
| 0.75906
| 0.720032
| 0.705297
| 0.700119
| 0.657905
| 0.622461
| 0
| 0.026341
| 0.258372
| 4,300
| 134
| 109
| 32.089552
| 0.761054
| 0
| 0
| 0.701613
| 0
| 0
| 0.05093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.040323
| 0
| 0.072581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
63d33b671fe35061f0300b274a32b66e28d0a7ac
| 80
|
py
|
Python
|
pypi_starter/__init__.py
|
wuhaifengdhu/pypi-starter
|
3ccb80dd9490f9d65b986350d82f9a20743af17f
|
[
"Apache-2.0"
] | null | null | null |
pypi_starter/__init__.py
|
wuhaifengdhu/pypi-starter
|
3ccb80dd9490f9d65b986350d82f9a20743af17f
|
[
"Apache-2.0"
] | null | null | null |
pypi_starter/__init__.py
|
wuhaifengdhu/pypi-starter
|
3ccb80dd9490f9d65b986350d82f9a20743af17f
|
[
"Apache-2.0"
] | null | null | null |
"""
Your application
"""
from submodule.main import *
from main import *
| 11.428571
| 29
| 0.65
| 9
| 80
| 5.777778
| 0.666667
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2375
| 80
| 6
| 30
| 13.333333
| 0.852459
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8921c8342da583a53fe7e6baf0b2a3160c459d31
| 52
|
py
|
Python
|
examples/fore.py
|
LyQuid12/colorgb
|
78addcf85f0e750ca45a7955e5008f7a8a946281
|
[
"MIT"
] | 1
|
2022-01-26T10:26:24.000Z
|
2022-01-26T10:26:24.000Z
|
examples/fore.py
|
LyQuid12/colorgb
|
78addcf85f0e750ca45a7955e5008f7a8a946281
|
[
"MIT"
] | null | null | null |
examples/fore.py
|
LyQuid12/colorgb
|
78addcf85f0e750ca45a7955e5008f7a8a946281
|
[
"MIT"
] | null | null | null |
import colorgb
# Example: print "Hi!" with a green foreground via colorgb.fore().
print(colorgb.fore("Hi!", "green"))
| 13
| 35
| 0.692308
| 7
| 52
| 5.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 3
| 36
| 17.333333
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
892b0a91946a1df7a1b4680a474c70df84c8c932
| 857
|
py
|
Python
|
tests/v2/test_team_response_attributes.py
|
anbnyc/datadog-api-client-python
|
162bd0c6f2523a809aec08a3197e85dc74b78c21
|
[
"Apache-2.0"
] | null | null | null |
tests/v2/test_team_response_attributes.py
|
anbnyc/datadog-api-client-python
|
162bd0c6f2523a809aec08a3197e85dc74b78c21
|
[
"Apache-2.0"
] | null | null | null |
tests/v2/test_team_response_attributes.py
|
anbnyc/datadog-api-client-python
|
162bd0c6f2523a809aec08a3197e85dc74b78c21
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import sys
import unittest
import datadog_api_client.v2
from datadog_api_client.v2.model.team_response_attributes import TeamResponseAttributes
class TestTeamResponseAttributes(unittest.TestCase):
    """Auto-generated unit test stubs for the TeamResponseAttributes model."""
    def setUp(self):
        # No per-test fixtures are required for these stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testTeamResponseAttributes(self):
        """Test TeamResponseAttributes"""
        # FIXME: construct object with mandatory attributes with example values
        # model = TeamResponseAttributes()  # noqa: E501
        pass
if __name__ == '__main__':
    # Run the stubs directly with the stdlib unittest runner.
    unittest.main()
| 25.969697
| 108
| 0.733956
| 96
| 857
| 6.40625
| 0.71875
| 0.03252
| 0.052033
| 0.058537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017266
| 0.189032
| 857
| 32
| 109
| 26.78125
| 0.867626
| 0.493582
| 0
| 0.230769
| 0
| 0
| 0.019185
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 1
| 0.230769
| false
| 0.230769
| 0.307692
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
895c1ad530c40f1c5d1fb2b8ed674dd74150a4b7
| 49
|
py
|
Python
|
urduhack/utils/tests/test_text.py
|
fahdrazavi/urduhack
|
a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8
|
[
"MIT"
] | null | null | null |
urduhack/utils/tests/test_text.py
|
fahdrazavi/urduhack
|
a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8
|
[
"MIT"
] | null | null | null |
urduhack/utils/tests/test_text.py
|
fahdrazavi/urduhack
|
a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8
|
[
"MIT"
] | null | null | null |
# coding: utf8
"""Test cases for text.py file"""
| 16.333333
| 33
| 0.653061
| 8
| 49
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.163265
| 49
| 2
| 34
| 24.5
| 0.756098
| 0.836735
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
895ceb1226f1223073e975c0eb48dfda8072b8fb
| 2,136
|
py
|
Python
|
performance_test/testcase.py
|
DeqingQu/CacheControlHelper
|
d9425ff08253f10ef1028b724418c705f8d58bf5
|
[
"MIT"
] | null | null | null |
performance_test/testcase.py
|
DeqingQu/CacheControlHelper
|
d9425ff08253f10ef1028b724418c705f8d58bf5
|
[
"MIT"
] | null | null | null |
performance_test/testcase.py
|
DeqingQu/CacheControlHelper
|
d9425ff08253f10ef1028b724418c705f8d58bf5
|
[
"MIT"
] | null | null | null |
from cache_control_helper import CacheControlHelper
import time
import sys
import requests
import requests_cache
requests_cache.install_cache('performance_test')
def get_request(url):
    """GET ``url`` via CacheControlHelper and return the decoded JSON body.

    Returns None on timeout, on any non-200 status, or on any other request
    failure; failures are reported on stderr.  Exits the process cleanly on
    KeyboardInterrupt.

    :param url: absolute URL to fetch.
    :return: parsed JSON payload, or None on failure.
    """
    # BUG FIX: the original rebound the name `requests` to the helper
    # instance, shadowing the imported `requests` module inside this
    # function, so `requests.exceptions.Timeout` below resolved through the
    # helper object instead of the module.  Use a distinct local name.
    session = CacheControlHelper()
    try:
        res = session.get(url, timeout=120)
    except requests.exceptions.Timeout:
        print(url, file=sys.stderr)
        print('Timeout for URL: ' + url, file=sys.stderr)
        return None
    except KeyboardInterrupt:
        sys.exit(0)
    except BaseException as e:
        # Deliberate catch-all: this is a best-effort benchmark fetcher.
        print(url, file=sys.stderr)
        print('%s received for URL: %s' % (e, url), file=sys.stderr)
        return None
    status_code = res.status_code
    if status_code != 200:
        print(url, file=sys.stderr)
        print('Status code ' + str(status_code) + ' for url: ' + url, file=sys.stderr)
        return None
    return res.json()
def get_request_cache(url):
    """GET ``url`` with plain requests (transparently cached by the
    requests-cache layer installed at module import) and return the parsed
    JSON body, or None on any failure.  Failures are logged to stderr."""
    try:
        response = requests.get(url, timeout=120)
    except requests.exceptions.Timeout:
        print(url, file=sys.stderr)
        print('Timeout for URL: ' + url, file=sys.stderr)
        return None
    except KeyboardInterrupt:
        sys.exit(0)
    except BaseException as err:
        print(url, file=sys.stderr)
        print('%s received for URL: %s' % (err, url), file=sys.stderr)
        return None
    # Success path first; anything other than 200 is reported and dropped.
    if response.status_code == 200:
        return response.json()
    print(url, file=sys.stderr)
    print('Status code ' + str(response.status_code) + ' for url: ' + url, file=sys.stderr)
    return None
if __name__ == '__main__':
    # Benchmark the requests-cache path: hammer the local test endpoint and
    # print the total elapsed time at the end.  (A CacheControl variant of
    # this loop previously lived here commented out; use get_request() with
    # a larger range to benchmark that path instead.)
    base_url = 'http://localhost:3000/test/'
    started = time.time()
    for idx in range(10000):
        result = get_request_cache(base_url + str(idx))
        if idx % 1000 == 0:
            # Periodic progress marker: echo one payload per thousand.
            print(result)
    print(time.time() - started)
| 28.48
| 86
| 0.598315
| 279
| 2,136
| 4.46595
| 0.207885
| 0.067416
| 0.096308
| 0.154093
| 0.772071
| 0.772071
| 0.772071
| 0.772071
| 0.772071
| 0.772071
| 0
| 0.027886
| 0.27809
| 2,136
| 74
| 87
| 28.864865
| 0.780156
| 0.13015
| 0
| 0.679245
| 0
| 0
| 0.094954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.09434
| 0
| 0.283019
| 0.264151
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
89b8b74f1ee518f5d1b44c2b1c318c3869dd4dbd
| 192
|
py
|
Python
|
tracedump/pwn_wrapper.py
|
Mic92/tracedumpd
|
a84eac58106f1f1d7a82f5dee2a327861e763e4e
|
[
"MIT"
] | 1
|
2021-03-22T18:04:53.000Z
|
2021-03-22T18:04:53.000Z
|
tracedump/pwn_wrapper.py
|
Mic92/tracedump
|
a84eac58106f1f1d7a82f5dee2a327861e763e4e
|
[
"MIT"
] | null | null | null |
tracedump/pwn_wrapper.py
|
Mic92/tracedump
|
a84eac58106f1f1d7a82f5dee2a327861e763e4e
|
[
"MIT"
] | null | null | null |
import os

# stop pwnlib from doing fancy things
# NOTE: PWNLIB_NOTERM is set *before* the pwnlib imports below on purpose —
# it appears to be read at import time to disable terminal takeover; keep
# this ordering (the `noqa: E402` markers silence the resulting
# "import not at top of file" lint warning).
os.environ["PWNLIB_NOTERM"] = "1"
from pwnlib.elf.corefile import Coredump, Mapping  # noqa: E402
from pwnlib.elf.elf import ELF  # noqa: E402
| 27.428571
| 63
| 0.75
| 30
| 192
| 4.766667
| 0.566667
| 0.13986
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04321
| 0.15625
| 192
| 6
| 64
| 32
| 0.839506
| 0.296875
| 0
| 0
| 0
| 0
| 0.10687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
982a5a6c85f4198d136ac06c6691b36716f7b587
| 940
|
py
|
Python
|
sdks/python/test/test_ReleaseUpdateError.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/test/test_ReleaseUpdateError.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6
|
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/test/test_ReleaseUpdateError.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2
|
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from ReleaseUpdateError.clsReleaseUpdateError import ReleaseUpdateError # noqa: E501
from appcenter_sdk.rest import ApiException
class TestReleaseUpdateError(unittest.TestCase):
    """Unit-test stubs for the ReleaseUpdateError model."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testReleaseUpdateError(self):
        """Stub: exercise ReleaseUpdateError construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = appcenter_sdk.models.clsReleaseUpdateError.ReleaseUpdateError()  # noqa: E501
        pass
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 23.5
| 95
| 0.723404
| 98
| 940
| 6.77551
| 0.632653
| 0.036145
| 0.078313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014628
| 0.2
| 940
| 39
| 96
| 24.102564
| 0.868351
| 0.469149
| 0
| 0.214286
| 0
| 0
| 0.017505
| 0
| 0
| 0
| 0
| 0.025641
| 0
| 1
| 0.214286
| false
| 0.214286
| 0.357143
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
983a2aeb9ad32099cd6f39c374a89a7d58015b41
| 1,535
|
py
|
Python
|
campbellsoup/utilities_test.py
|
NBOCampbellToets/CampbellSoup
|
45478c3e5e0362d01af8898078c6621f7b11c191
|
[
"PostgreSQL"
] | null | null | null |
campbellsoup/utilities_test.py
|
NBOCampbellToets/CampbellSoup
|
45478c3e5e0362d01af8898078c6621f7b11c191
|
[
"PostgreSQL"
] | 45
|
2016-11-21T16:01:44.000Z
|
2018-05-25T13:35:01.000Z
|
campbellsoup/utilities_test.py
|
NBOCampbellToets/CampbellSoup
|
45478c3e5e0362d01af8898078c6621f7b11c191
|
[
"PostgreSQL"
] | 1
|
2019-02-27T08:04:55.000Z
|
2019-02-27T08:04:55.000Z
|
# (c) 2016 Julian Gonggrijp
from .utilities import *
def test_un_camelcase():
    """un_camelcase should lower-case and underscore-separate camel-case names."""
    cases = (
        ('CampbellSoupX', 'campbell_soup_x'),
        ('NBOCampbellToets', 'n_b_o_campbell_toets'),
    )
    for camel, expected in cases:
        assert un_camelcase(camel) == expected
def test_append_to():
    """append_to should register only decorated names, in decoration order."""
    exported = []

    # Undecorated: must stay out of the registry.
    class Example(object):
        pass

    @append_to(exported)
    class Illustration(object):
        pass

    @append_to(exported)
    def foo():
        pass

    # Also undecorated.
    def bar():
        pass

    assert exported == ['Illustration', 'foo']
def test_maybe():
    """Exercise maybe() over nested dict/list access with missing paths.

    maybe(obj, *path, fallback=None) is expected to walk ``path`` through
    the structure and yield the fallback when any step is absent.
    """
    tester = {
        'banana': [
            0,
            'x',
            [1, 2, 3],
            {
                'deep_banana': {'value': 'deeper_banana'},
            }
        ],
        'orange': [],
    }
    assert len(maybe(tester, 'banana')) == 4
    assert maybe(tester, 'banana', 0) == 0
    assert maybe(tester, 'banana', 1) == 'x'
    assert maybe(tester, 'banana', 1, 0) == 'x'
    # FIX (PEP 8, E711): compare to None with `is`, not `==`.
    assert maybe(tester, 'banana', 1, 1) is None
    assert maybe(tester, 'banana', 2) == [1, 2, 3]
    assert maybe(tester, 'banana', 2, 2) == 3
    assert maybe(tester, 'banana', 2, 3) is None
    assert maybe(tester, 'banana', 3, 'deep_banana', 'value') == 'deeper_banana'
    assert maybe(tester, 'banana', 3, 'deep_banana', 'other') is None
    assert maybe(tester, 'banana', 4) is None
    assert maybe(tester, 'orange') == []
    assert maybe(tester, 'orange', 3) is None
    assert maybe(tester, 'orange', 3, fallback='') == ''
    assert maybe(tester, 'kiwi') is None
    assert maybe(tester, 'kiwi', fallback=10) == 10
| 28.425926
| 80
| 0.558306
| 179
| 1,535
| 4.586592
| 0.273743
| 0.227771
| 0.310597
| 0.280146
| 0.511571
| 0.261876
| 0.146163
| 0
| 0
| 0
| 0
| 0.030466
| 0.272964
| 1,535
| 53
| 81
| 28.962264
| 0.705197
| 0.016287
| 0
| 0.133333
| 0
| 0
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0.422222
| 1
| 0.111111
| false
| 0.088889
| 0.022222
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
986a40624bae1a159be9cd68b43440b563e81ee0
| 208
|
py
|
Python
|
test.py
|
Timokasse/rediscache
|
e5bef0da973bdf53efaaea99b0ed9b41bb331ade
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Timokasse/rediscache
|
e5bef0da973bdf53efaaea99b0ed9b41bb331ade
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Timokasse/rediscache
|
e5bef0da973bdf53efaaea99b0ed9b41bb331ade
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from time import sleep
from rediscache import rediscache
import time, redis
@rediscache(1, 2)
def getTestValue():
    """Return a fixed (int, str) pair.

    Decorated with @rediscache(1, 2) — presumably the result is cached in
    Redis; confirm the argument semantics against the rediscache docs.
    """
    return 5, 'toto'
if __name__ == '__main__':
    # BUG FIX: the original called myfunction(), which is not defined
    # anywhere in this file and raised NameError at runtime; the function
    # this script defines (and decorates) is getTestValue.
    getTestValue()
| 13.866667
| 33
| 0.697115
| 27
| 208
| 5.074074
| 0.777778
| 0.233577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017647
| 0.182692
| 208
| 14
| 34
| 14.857143
| 0.788235
| 0.096154
| 0
| 0
| 0
| 0
| 0.064171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0.375
| 0.125
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
98b33ea8451d3967d4ce2088f2eba80859167c6d
| 44,549
|
py
|
Python
|
tests/test_dataset_tensor_backend.py
|
evendrow/deepsnap
|
8d5762bf4a2ef6910ad602895685cac892207ba8
|
[
"MIT"
] | null | null | null |
tests/test_dataset_tensor_backend.py
|
evendrow/deepsnap
|
8d5762bf4a2ef6910ad602895685cac892207ba8
|
[
"MIT"
] | null | null | null |
tests/test_dataset_tensor_backend.py
|
evendrow/deepsnap
|
8d5762bf4a2ef6910ad602895685cac892207ba8
|
[
"MIT"
] | null | null | null |
import copy
import random
import torch
import unittest
from torch_geometric.datasets import TUDataset, Planetoid
from copy import deepcopy
from deepsnap.graph import Graph
from deepsnap.hetero_graph import HeteroGraph
from deepsnap.dataset import GraphDataset, Generator, EnsembleGenerator
from tests.utils import (
pyg_to_dicts,
simple_networkx_graph,
simple_networkx_small_graph,
simple_networkx_graph_alphabet,
simple_networkx_multigraph,
generate_dense_hete_dataset,
generate_simple_small_hete_graph,
gen_graph
)
class TestDatasetTensorBackend(unittest.TestCase):
def test_dataset_basic(self):
_, x, y, edge_x, edge_y, edge_index, graph_x, graph_y = (
simple_networkx_graph()
)
G = Graph(
node_feature=x, node_label=y, edge_index=edge_index,
edge_feature=edge_x, edge_label=edge_y,
graph_feature=graph_x, graph_label=graph_y, directed=True
)
H = deepcopy(G)
dataset = GraphDataset([G, H])
self.assertEqual(len(dataset), 2)
def test_dataset_property(self):
_, x, y, edge_x, edge_y, edge_index, graph_x, graph_y = (
simple_networkx_graph()
)
G = Graph(
node_feature=x, node_label=y, edge_index=edge_index,
edge_feature=edge_x, edge_label=edge_y,
graph_feature=graph_x, graph_label=graph_y, directed=True
)
H = deepcopy(G)
H.graph_label = torch.tensor([1])
graphs = [G, H]
dataset = GraphDataset(graphs)
self.assertEqual(dataset.num_node_labels, 5)
self.assertEqual(dataset.num_node_features, 2)
self.assertEqual(dataset.num_edge_labels, 4)
self.assertEqual(dataset.num_edge_features, 2)
self.assertEqual(dataset.num_graph_labels, 1)
self.assertEqual(dataset.num_graph_features, 2)
self.assertEqual(dataset.num_labels, 5) # node task
dataset = GraphDataset(graphs, task="edge")
self.assertEqual(dataset.num_labels, 4)
dataset = GraphDataset(graphs, task="link_pred")
self.assertEqual(dataset.num_labels, 5)
dataset = GraphDataset(graphs, task="graph")
self.assertEqual(dataset.num_labels, 1)
    def test_dataset_hetero_graph_split(self):
        """Splitting a heterogeneous-graph dataset for node / edge / link_pred
        tasks, with and without restricting the split to specific node/edge
        types, and under disjoint / approximate edge-split modes.

        Throughout: default split ratios are 0.8/0.1/0.1; `split_res[i][0]`
        is the single graph of the train/val/test split respectively.  For
        link_pred the expected counts carry a factor 2 — presumably positive
        plus sampled negative edges (confirm against GraphDataset docs).
        """
        G = generate_dense_hete_dataset()
        hete = HeteroGraph(G)
        # Rebuild from raw tensors to exercise the tensor-backend constructor.
        hete = HeteroGraph(
            node_feature=hete.node_feature,
            node_label=hete.node_label,
            edge_feature=hete.edge_feature,
            edge_label=hete.edge_label,
            edge_index=hete.edge_index,
            directed=True
        )
        # node
        dataset = GraphDataset([hete], task="node")
        split_res = dataset.split()
        for node_type in hete.node_label_index:
            num_nodes = int(len(hete.node_label_index[node_type]))
            node_0 = int(num_nodes * 0.8)
            node_1 = int(num_nodes * 0.1)
            node_2 = num_nodes - node_0 - node_1
            self.assertEqual(
                len(split_res[0][0].node_label_index[node_type]),
                node_0,
            )
            self.assertEqual(
                len(split_res[1][0].node_label_index[node_type]),
                node_1,
            )
            self.assertEqual(
                len(split_res[2][0].node_label_index[node_type]),
                node_2,
            )
        # node with specified split type
        dataset = GraphDataset([hete], task="node")
        node_split_types = ["n1"]
        split_res = dataset.split(split_types=node_split_types)
        for node_type in hete.node_label_index:
            if node_type in node_split_types:
                num_nodes = int(len(hete.node_label_index[node_type]))
                node_0 = int(num_nodes * 0.8)
                node_1 = int(num_nodes * 0.1)
                node_2 = num_nodes - node_0 - node_1
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    node_0,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    node_1,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    node_2,
                )
            else:
                # Non-split types keep all their nodes in every split.
                num_nodes = int(len(hete.node_label_index[node_type]))
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    num_nodes,
                )
        # node with specified split type (string mode)
        dataset = GraphDataset([hete], task="node")
        node_split_types = "n1"
        split_res = dataset.split(split_types=node_split_types)
        for node_type in hete.node_label_index:
            if node_type in node_split_types:
                num_nodes = int(len(hete.node_label_index[node_type]))
                node_0 = int(num_nodes * 0.8)
                node_1 = int(num_nodes * 0.1)
                node_2 = num_nodes - node_0 - node_1
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    node_0,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    node_1,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    node_2,
                )
            else:
                num_nodes = int(len(hete.node_label_index[node_type]))
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    num_nodes,
                )
        # edge
        dataset = GraphDataset([hete], task="edge")
        split_res = dataset.split()
        for edge_type in hete.edge_label_index:
            num_edges = hete.edge_label_index[edge_type].shape[1]
            edge_0 = int(num_edges * 0.8)
            edge_1 = int(num_edges * 0.1)
            edge_2 = num_edges - edge_0 - edge_1
            self.assertEqual(
                split_res[0][0].edge_label_index[edge_type].shape[1],
                edge_0,
            )
            self.assertEqual(
                split_res[1][0].edge_label_index[edge_type].shape[1],
                edge_1,
            )
            self.assertEqual(
                split_res[2][0].edge_label_index[edge_type].shape[1],
                edge_2,
            )
        # edge with specified split type
        dataset = GraphDataset([hete], task="edge")
        edge_split_types = [("n1", "e1", "n1"), ("n1", "e2", "n2")]
        split_res = dataset.split(split_types=edge_split_types)
        for edge_type in hete.edge_label_index:
            if edge_type in edge_split_types:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                edge_0 = int(num_edges * 0.8)
                edge_1 = int(num_edges * 0.1)
                edge_2 = num_edges - edge_0 - edge_1
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    edge_0,
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    edge_1,
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    edge_2,
                )
            else:
                # Non-split edge types are untouched by the split.
                num_edges = hete.edge_label_index[edge_type].shape[1]
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    num_edges,
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    num_edges,
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    num_edges,
                )
        # link_pred
        dataset = GraphDataset([hete], task="link_pred")
        split_res = dataset.split(transductive=True)
        for edge_type in hete.edge_label_index:
            num_edges = hete.edge_label_index[edge_type].shape[1]
            edge_0 = 2 * int(0.8 * num_edges)
            edge_1 = 2 * int(0.1 * num_edges)
            edge_2 = 2 * (
                num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
            )
            self.assertEqual(
                split_res[0][0].edge_label_index[edge_type].shape[1],
                edge_0
            )
            self.assertEqual(
                split_res[1][0].edge_label_index[edge_type].shape[1],
                edge_1
            )
            self.assertEqual(
                split_res[2][0].edge_label_index[edge_type].shape[1],
                edge_2
            )
        # link_pred with specified split type
        dataset = GraphDataset([hete], task="link_pred")
        link_split_types = [("n1", "e1", "n1"), ("n1", "e2", "n2")]
        split_res = dataset.split(
            transductive=True,
            split_types=link_split_types
        )
        for edge_type in hete.edge_label_index:
            if edge_type in link_split_types:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                edge_0 = 2 * int(0.8 * num_edges)
                edge_1 = 2 * int(0.1 * num_edges)
                edge_2 = 2 * (
                    num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
                )
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    edge_0
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    edge_1
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    edge_2
                )
            else:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    num_edges
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    num_edges
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    num_edges
                )
        # link_pred + disjoint
        dataset = GraphDataset(
            [hete],
            task="link_pred",
            edge_train_mode="disjoint",
            edge_message_ratio=0.5,
        )
        split_res = dataset.split(
            transductive=True,
            split_ratio=[0.6, 0.2, 0.2],
        )
        for edge_type in hete.edge_label_index:
            num_edges = hete.edge_label_index[edge_type].shape[1]
            # Train supervision edges: the 0.6 share minus the 0.5
            # message-passing share, doubled for negatives.
            edge_0 = int(0.6 * num_edges)
            edge_0 = 2 * (edge_0 - int(0.5 * edge_0))
            edge_1 = 2 * int(0.2 * num_edges)
            edge_2 = 2 * (
                num_edges - int(0.6 * num_edges) - int(0.2 * num_edges)
            )
            self.assertEqual(
                split_res[0][0].edge_label_index[edge_type].shape[1],
                edge_0,
            )
            self.assertEqual(
                split_res[1][0].edge_label_index[edge_type].shape[1],
                edge_1,
            )
            self.assertEqual(
                split_res[2][0].edge_label_index[edge_type].shape[1],
                edge_2,
            )
        # link pred with edge_split_mode set to "exact"
        dataset = GraphDataset(
            [hete],
            task="link_pred",
            edge_split_mode="approximate"
        )
        split_res = dataset.split(transductive=True)
        hete_link_train_edge_num = 0
        hete_link_test_edge_num = 0
        hete_link_val_edge_num = 0
        num_edges = 0
        # In approximate mode the ratios hold over the total edge count,
        # not per edge type, so accumulate across all types.
        for edge_type in hete.edge_label_index:
            num_edges += hete.edge_label_index[edge_type].shape[1]
            if edge_type in split_res[0][0].edge_label_index:
                hete_link_train_edge_num += (
                    split_res[0][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[1][0].edge_label_index:
                hete_link_test_edge_num += (
                    split_res[1][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[2][0].edge_label_index:
                hete_link_val_edge_num += (
                    split_res[2][0].edge_label_index[edge_type].shape[1]
                )
        # num_edges_reduced = num_edges - 3
        edge_0 = 2 * int(0.8 * num_edges)
        edge_1 = 2 * int(0.1 * num_edges)
        edge_2 = 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        )
        self.assertEqual(
            hete_link_train_edge_num,
            edge_0
        )
        self.assertEqual(
            hete_link_test_edge_num,
            edge_1
        )
        self.assertEqual(
            hete_link_val_edge_num,
            edge_2
        )
        # link pred with specified types and edge_split_mode set to "exact"
        dataset = GraphDataset(
            [hete],
            task="link_pred",
            edge_split_mode="approximate",
        )
        link_split_types = [("n1", "e1", "n1"), ("n1", "e2", "n2")]
        split_res = dataset.split(
            transductive=True,
            split_types=link_split_types,
        )
        hete_link_train_edge_num = 0
        hete_link_test_edge_num = 0
        hete_link_val_edge_num = 0
        num_split_type_edges = 0
        num_non_split_type_edges = 0
        for edge_type in hete.edge_label_index:
            if edge_type in link_split_types:
                num_split_type_edges += (
                    hete.edge_label_index[edge_type].shape[1]
                )
            else:
                num_non_split_type_edges += (
                    hete.edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[0][0].edge_label_index:
                hete_link_train_edge_num += (
                    split_res[0][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[1][0].edge_label_index:
                hete_link_test_edge_num += (
                    split_res[1][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[2][0].edge_label_index:
                hete_link_val_edge_num += (
                    split_res[2][0].edge_label_index[edge_type].shape[1]
                )
        # num_edges_reduced = num_split_type_edges - 3
        # Only split-type edges follow the ratios; the rest appear whole in
        # every split, hence the additive num_non_split_type_edges term.
        num_edges = num_split_type_edges
        edge_0 = 2 * int(0.8 * num_edges) + num_non_split_type_edges
        edge_1 = 2 * int(0.1 * num_edges) + num_non_split_type_edges
        edge_2 = 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        ) + num_non_split_type_edges
        self.assertEqual(hete_link_train_edge_num, edge_0)
        self.assertEqual(hete_link_test_edge_num, edge_1)
        self.assertEqual(hete_link_val_edge_num, edge_2)
    def test_dataset_split(self):
        """Default split() behaviour (ratios 0.8/0.1/0.1) on homogeneous
        datasets, inductive and transductive, across tasks and
        edge_train_mode / negative-sampling settings.
        """
        # inductively split with graph task
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(graphs, task="graph")
        split_res = dataset.split(transductive=False)
        num_graphs = len(dataset)
        num_train = int(0.8 * num_graphs)
        num_val = int(0.1 * num_graphs)
        num_test = num_graphs - num_train - num_val
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
        # inductively split with link_pred task
        # and default (`all`) edge_train_mode
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(graphs, task="link_pred")
        split_res = dataset.split(transductive=False)
        num_graphs = len(dataset)
        num_train = int(0.8 * num_graphs)
        num_val = int(0.1 * num_graphs)
        num_test = num_graphs - num_train - num_val
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
        # inductively split with link_pred task and `disjoint` edge_train_mode
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(
            graphs,
            task="link_pred",
            edge_train_mode="disjoint",
        )
        split_res = dataset.split(transductive=False)
        num_graphs = len(dataset)
        num_train = int(0.8 * num_graphs)
        num_val = int(0.1 * num_graphs)
        num_test = num_graphs - num_train - num_val
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
        # transductively split with node task
        pyg_dataset = Planetoid("./cora", "Cora")
        ds = pyg_to_dicts(pyg_dataset, task="cora")
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(graphs, task="node")
        num_nodes = dataset.num_nodes[0]
        num_edges = dataset.num_edges[0]
        node_0 = int(0.8 * num_nodes)
        node_1 = int(0.1 * num_nodes)
        node_2 = num_nodes - node_0 - node_1
        split_res = dataset.split()
        self.assertEqual(
            len(split_res[0][0].node_label_index),
            node_0
        )
        self.assertEqual(
            len(split_res[1][0].node_label_index),
            node_1
        )
        self.assertEqual(
            len(split_res[2][0].node_label_index),
            node_2
        )
        # transductively split with link_pred task
        # and default (`all`) edge_train_mode
        dataset = GraphDataset(graphs, task="link_pred")
        # factor 2*2: negatives plus both edge directions — presumably;
        # confirm against GraphDataset.split documentation.
        edge_0 = 2 * 2 * int(0.8 * num_edges)
        edge_1 = 2 * 2 * int(0.1 * num_edges)
        edge_2 = 2 * 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        )
        split_res = dataset.split()
        self.assertEqual(
            split_res[0][0].edge_label_index.shape[1],
            edge_0
        )
        self.assertEqual(
            split_res[1][0].edge_label_index.shape[1],
            edge_1
        )
        self.assertEqual(
            split_res[2][0].edge_label_index.shape[1],
            edge_2
        )
        # transductively split with link_pred task, `split` edge_train_mode
        # and 0.5 edge_message_ratio
        dataset = GraphDataset(
            graphs,
            task="link_pred",
            edge_train_mode="disjoint",
            edge_message_ratio=0.5,
        )
        split_res = dataset.split()
        # Train supervision edges exclude the 0.5 message-passing share.
        edge_0 = 2 * int(0.8 * num_edges)
        edge_0 = 2 * (edge_0 - int(0.5 * edge_0))
        edge_1 = 2 * 2 * int(0.1 * num_edges)
        edge_2 = 2 * 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        )
        self.assertEqual(
            split_res[0][0].edge_label_index.shape[1],
            edge_0,
        )
        self.assertEqual(split_res[1][0].edge_label_index.shape[1], edge_1)
        self.assertEqual(split_res[2][0].edge_label_index.shape[1], edge_2)
        # transductively split with link_pred task
        # and specified edge_negative_sampling_ratio
        dataset = GraphDataset(
            graphs,
            task="link_pred",
            edge_negative_sampling_ratio=2
        )
        split_res = dataset.split()
        # (2 + 1): two negatives per positive, plus the positive itself.
        edge_0 = (2 + 1) * (2 * int(0.8 * num_edges))
        edge_1 = (2 + 1) * (2 * int(0.1 * num_edges))
        edge_2 = (2 + 1) * (
            2 * (num_edges - int(0.8 * num_edges) - int(0.1 * num_edges))
        )
        self.assertEqual(split_res[0][0].edge_label_index.shape[1], edge_0)
        self.assertEqual(split_res[1][0].edge_label_index.shape[1], edge_1)
        self.assertEqual(split_res[2][0].edge_label_index.shape[1], edge_2)
    def test_dataset_split_custom(self):
        """User-supplied (custom) splits for node, link_pred, edge and graph
        tasks, on both hand-built graphs and PyG datasets.

        Each section builds explicit train/val(/test) index tensors or graph
        lists, feeds them through Graph/GraphDataset, and checks the dataset
        exposes exactly the supplied splits.
        """
        # transductive split with node task (self defined dataset)
        G, x, y, edge_x, edge_y, edge_index, graph_x, graph_y = (
            simple_networkx_graph()
        )
        Graph.add_edge_attr(G, "edge_feature", edge_x)
        Graph.add_edge_attr(G, "edge_label", edge_y)
        Graph.add_node_attr(G, "node_feature", x)
        Graph.add_node_attr(G, "node_label", y)
        Graph.add_graph_attr(G, "graph_feature", graph_x)
        Graph.add_graph_attr(G, "graph_label", graph_y)
        num_nodes = len(list(G.nodes))
        # Hand-rolled 30/30/40 node split by index order.
        nodes_train = torch.tensor(list(G.nodes)[: int(0.3 * num_nodes)])
        nodes_val = torch.tensor(
            list(G.nodes)[int(0.3 * num_nodes): int(0.6 * num_nodes)]
        )
        nodes_test = torch.tensor(list(G.nodes)[int(0.6 * num_nodes):])
        graph_train = Graph(
            node_feature=x, node_label=y, edge_index=edge_index,
            node_label_index=nodes_train, directed=True
        )
        graph_val = Graph(
            node_feature=x, node_label=y, edge_index=edge_index,
            node_label_index=nodes_val, directed=True
        )
        graph_test = Graph(
            node_feature=x, node_label=y, edge_index=edge_index,
            node_label_index=nodes_test, directed=True
        )
        graphs_train = [graph_train]
        graphs_val = [graph_val]
        graphs_test = [graph_test]
        dataset_train, dataset_val, dataset_test = (
            GraphDataset(graphs_train, task='node'),
            GraphDataset(graphs_val, task='node'),
            GraphDataset(graphs_test, task='node')
        )
        self.assertEqual(
            dataset_train[0].node_label_index.tolist(),
            list(range(int(0.3 * num_nodes)))
        )
        self.assertEqual(
            dataset_val[0].node_label_index.tolist(),
            list(range(int(0.3 * num_nodes), int(0.6 * num_nodes)))
        )
        self.assertEqual(
            dataset_test[0].node_label_index.tolist(),
            list(range(int(0.6 * num_nodes), num_nodes))
        )
        # transductive split with link_pred task (train/val split)
        edges = list(G.edges)
        num_edges = len(edges)
        edges_train = edges[: int(0.7 * num_edges)]
        edges_val = edges[int(0.7 * num_edges):]
        link_size_list = [len(edges_train), len(edges_val)]
        # generate pseudo pos and neg edges, they may overlap here
        train_pos = torch.LongTensor(edges_train).permute(1, 0)
        val_pos = torch.LongTensor(edges_val).permute(1, 0)
        val_neg = torch.randint(high=10, size=val_pos.shape, dtype=torch.int64)
        val_neg_double = torch.cat((val_neg, val_neg), dim=1)
        num_train = len(edges_train)
        num_val = len(edges_val)
        graph_train = Graph(
            node_feature=x, edge_index=edge_index,
            edge_feature=edge_x, directed=True,
            edge_label_index=train_pos
        )
        graph_val = Graph(
            node_feature=x, edge_index=edge_index,
            edge_feature=edge_x, directed=True,
            edge_label_index=val_pos,
            negative_edge=val_neg_double
        )
        graphs_train = [graph_train]
        graphs_val = [graph_val]
        dataset_train, dataset_val = (
            GraphDataset(
                graphs_train, task='link_pred', resample_negatives=True
            ),
            GraphDataset(
                graphs_val, task='link_pred', edge_negative_sampling_ratio=2
            )
        )
        self.assertEqual(
            dataset_train[0].edge_label_index.shape[1],
            2 * link_size_list[0]
        )
        self.assertEqual(
            dataset_train[0].edge_label.shape[0],
            2 * link_size_list[0]
        )
        self.assertEqual(
            dataset_val[0].edge_label_index.shape[1],
            val_pos.shape[1] + val_neg_double.shape[1]
        )
        self.assertEqual(
            dataset_val[0].edge_label.shape[0],
            val_pos.shape[1] + val_neg_double.shape[1]
        )
        # Positives must appear first and unchanged in edge_label_index.
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label_index[:, :num_train],
                train_pos
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_val[0].edge_label_index[:, :num_val],
                val_pos
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_val[0].edge_label_index[:, num_val:],
                val_neg_double
            )
        )
        dataset_train.resample_negatives = False
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label_index,
                dataset_train[0].edge_label_index
            )
        )
        # transductive split with link_pred task with edge label
        edge_label_train = torch.LongTensor([1, 2, 3, 2, 1, 1, 2, 3, 2, 0, 0])
        edge_label_val = torch.LongTensor([1, 2, 3, 2, 1, 0])
        graph_train = Graph(
            node_feature=x,
            edge_index=edge_index,
            directed=True,
            edge_label_index=train_pos,
            edge_label=edge_label_train
        )
        graph_val = Graph(
            node_feature=x,
            edge_index=edge_index,
            directed=True,
            edge_label_index=val_pos,
            negative_edge=val_neg,
            edge_label=edge_label_val
        )
        graphs_train = [graph_train]
        graphs_val = [graph_val]
        dataset_train, dataset_val = (
            GraphDataset(graphs_train, task='link_pred'),
            GraphDataset(graphs_val, task='link_pred')
        )
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label_index,
                dataset_train[0].edge_label_index
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label[:num_train],
                edge_label_train
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_val[0].edge_label[:num_val],
                edge_label_val
            )
        )
        # Multiple graph tensor backend link prediction (inductive)
        pyg_dataset = Planetoid('./cora', 'Cora')
        x = pyg_dataset[0].x
        y = pyg_dataset[0].y
        edge_index = pyg_dataset[0].edge_index
        # Deduplicate to one direction, then append the flipped copies so
        # the edge_index holds both directions in a known order.
        row, col = edge_index
        mask = row < col
        row, col = row[mask], col[mask]
        edge_index = torch.stack([row, col], dim=0)
        edge_index = torch.cat(
            [edge_index, torch.flip(edge_index, [0])], dim=1
        )
        graphs = [
            Graph(
                node_feature=x, node_label=y,
                edge_index=edge_index, directed=False
            )
        ]
        graphs = [copy.deepcopy(graphs[0]) for _ in range(10)]
        edge_label_index = graphs[0].edge_label_index
        dataset = GraphDataset(
            graphs,
            task='link_pred',
            edge_message_ratio=0.6,
            edge_train_mode="all"
        )
        datasets = {}
        datasets['train'], datasets['val'], datasets['test'] = dataset.split(
            transductive=False, split_ratio=[0.85, 0.05, 0.1]
        )
        # The leading positives of the train split must equal the original
        # edge_label_index.
        edge_label_index_split = (
            datasets['train'][0].edge_label_index[
                :, 0:edge_label_index.shape[1]
            ]
        )
        self.assertTrue(
            torch.equal(
                edge_label_index,
                edge_label_index_split
            )
        )
        # transductive split with node task (pytorch geometric dataset)
        pyg_dataset = Planetoid("./cora", "Cora")
        ds = pyg_to_dicts(pyg_dataset, task="cora")
        graphs = [Graph(**item) for item in ds]
        split_ratio = [0.3, 0.3, 0.4]
        node_size_list = [0 for i in range(len(split_ratio))]
        for graph in graphs:
            custom_splits = [[] for i in range(len(split_ratio))]
            split_offset = 0
            num_nodes = graph.num_nodes
            shuffled_node_indices = torch.randperm(graph.num_nodes)
            # Slice the shuffled permutation according to split_ratio; the
            # last slice takes the remainder.
            for i, split_ratio_i in enumerate(split_ratio):
                if i != len(split_ratio) - 1:
                    num_split_i = int(split_ratio_i * num_nodes)
                    nodes_split_i = (
                        shuffled_node_indices[
                            split_offset: split_offset + num_split_i
                        ]
                    )
                    split_offset += num_split_i
                else:
                    nodes_split_i = shuffled_node_indices[split_offset:]
                custom_splits[i] = nodes_split_i
                node_size_list[i] += len(nodes_split_i)
            graph.custom = {
                "general_splits": custom_splits
            }
        node_feature = graphs[0].node_feature
        edge_index = graphs[0].edge_index
        directed = graphs[0].directed
        graph_train = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            node_label_index=graphs[0].custom["general_splits"][0]
        )
        graph_val = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            node_label_index=graphs[0].custom["general_splits"][1]
        )
        graph_test = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            node_label_index=graphs[0].custom["general_splits"][2]
        )
        train_dataset = GraphDataset([graph_train], task="node")
        val_dataset = GraphDataset([graph_val], task="node")
        test_dataset = GraphDataset([graph_test], task="node")
        self.assertEqual(
            len(train_dataset[0].node_label_index),
            node_size_list[0]
        )
        self.assertEqual(
            len(val_dataset[0].node_label_index),
            node_size_list[1]
        )
        self.assertEqual(
            len(test_dataset[0].node_label_index),
            node_size_list[2]
        )
        # transductive split with edge task
        pyg_dataset = Planetoid("./cora", "Cora")
        graphs_g = GraphDataset.pyg_to_graphs(pyg_dataset)
        ds = pyg_to_dicts(pyg_dataset, task="cora")
        graphs = [Graph(**item) for item in ds]
        split_ratio = [0.3, 0.3, 0.4]
        edge_size_list = [0 for i in range(len(split_ratio))]
        for i, graph in enumerate(graphs):
            custom_splits = [[] for i in range(len(split_ratio))]
            split_offset = 0
            edges = list(graphs_g[i].G.edges)
            num_edges = graph.num_edges
            random.shuffle(edges)
            # NOTE: the inner loop reuses the name `i`, shadowing the outer
            # graph index for the rest of this iteration.
            for i, split_ratio_i in enumerate(split_ratio):
                if i != len(split_ratio) - 1:
                    num_split_i = int(split_ratio_i * num_edges)
                    edges_split_i = (
                        edges[split_offset: split_offset + num_split_i]
                    )
                    split_offset += num_split_i
                else:
                    edges_split_i = edges[split_offset:]
                custom_splits[i] = edges_split_i
                edge_size_list[i] += len(edges_split_i)
            graph.custom = {
                "general_splits": custom_splits
            }
        node_feature = graphs[0].node_feature
        edge_index = graphs[0].edge_index
        directed = graphs[0].directed
        # Duplicate each split's edges (both copies concatenated) — the
        # datasets below are expected to expose 2x the split size.
        train_index = torch.tensor(
            graphs[0].custom["general_splits"][0]
        ).permute(1, 0)
        train_index = torch.cat((train_index, train_index), dim=1)
        val_index = torch.tensor(
            graphs[0].custom["general_splits"][1]
        ).permute(1, 0)
        val_index = torch.cat((val_index, val_index), dim=1)
        test_index = torch.tensor(
            graphs[0].custom["general_splits"][2]
        ).permute(1, 0)
        test_index = torch.cat((test_index, test_index), dim=1)
        graph_train = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            edge_label_index=train_index
        )
        graph_val = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            edge_label_index=val_index
        )
        graph_test = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            edge_label_index=test_index
        )
        train_dataset = GraphDataset([graph_train], task="edge")
        val_dataset = GraphDataset([graph_val], task="edge")
        test_dataset = GraphDataset([graph_test], task="edge")
        self.assertEqual(
            train_dataset[0].edge_label_index.shape[1],
            2 * edge_size_list[0]
        )
        self.assertEqual(
            val_dataset[0].edge_label_index.shape[1],
            2 * edge_size_list[1]
        )
        self.assertEqual(
            test_dataset[0].edge_label_index.shape[1],
            2 * edge_size_list[2]
        )
        # inductive split with graph task
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        num_graphs = len(graphs)
        split_ratio = [0.3, 0.3, 0.4]
        graph_size_list = []
        split_offset = 0
        custom_split_graphs = []
        for i, split_ratio_i in enumerate(split_ratio):
            if i != len(split_ratio) - 1:
                num_split_i = int(split_ratio_i * num_graphs)
                custom_split_graphs.append(
                    graphs[split_offset: split_offset + num_split_i]
                )
                split_offset += num_split_i
                graph_size_list.append(num_split_i)
            else:
                custom_split_graphs.append(graphs[split_offset:])
                graph_size_list.append(len(graphs[split_offset:]))
        dataset = GraphDataset(
            graphs, task="graph",
            custom_split_graphs=custom_split_graphs
        )
        split_res = dataset.split(transductive=False)
        self.assertEqual(graph_size_list[0], len(split_res[0]))
        self.assertEqual(graph_size_list[1], len(split_res[1]))
        self.assertEqual(graph_size_list[2], len(split_res[2]))
def test_filter(self):
    """GraphDataset.filter removes exactly the graphs failing the predicate."""
    pyg_dataset = TUDataset("./enzymes", "ENZYMES")
    ds = pyg_to_dicts(pyg_dataset)
    graphs = [Graph(**item) for item in ds]
    dataset = GraphDataset(graphs, task="graph")
    thresh = 90
    orig_dataset_size = len(dataset)
    # Number of graphs the filter below should drop.
    num_graphs_large = sum(
        1 for graph in dataset if graph.num_nodes >= thresh
    )
    dataset = dataset.filter(
        lambda graph: graph.num_nodes < thresh, deep_copy=False
    )
    self.assertEqual(
        orig_dataset_size - len(dataset),
        num_graphs_large,
    )
def test_resample_disjoint_heterogeneous(self):
    """Disjoint resampling keeps hetero label shapes stable across accesses."""
    G = generate_dense_hete_dataset()
    hete = HeteroGraph(G)
    hete = HeteroGraph(
        node_feature=hete.node_feature,
        node_label=hete.node_label,
        edge_feature=hete.edge_feature,
        edge_label=hete.edge_label,
        edge_index=hete.edge_index,
        directed=True,
    )
    dataset = GraphDataset(
        [hete],
        task="link_pred",
        edge_train_mode="disjoint",
        edge_message_ratio=0.8,
        resample_disjoint=True,
        resample_disjoint_period=1,
    )
    dataset_train, _, _ = dataset.split(split_ratio=[0.5, 0.2, 0.3])
    # Indexing twice triggers a resample (period=1); shapes must still agree.
    first = dataset_train[0]
    second = dataset_train[0]
    for message_type in first.edge_index:
        self.assertEqual(
            first.edge_label_index[message_type].shape[1],
            second.edge_label_index[message_type].shape[1],
        )
        self.assertEqual(
            first.edge_label[message_type].shape,
            second.edge_label[message_type].shape,
        )
def test_resample_disjoint(self):
    """Disjoint resampling on a homogeneous graph yields consistent labels."""
    pyg_dataset = Planetoid("./cora", "Cora")
    graph = GraphDataset.pyg_to_graphs(pyg_dataset)[0]
    graph = Graph(
        node_label=graph.node_label,
        node_feature=graph.node_feature,
        edge_index=graph.edge_index,
        edge_feature=graph.edge_feature,
        directed=False,
    )
    dataset = GraphDataset(
        [graph],
        task="link_pred",
        edge_train_mode="disjoint",
        edge_message_ratio=0.8,
        resample_disjoint=True,
        resample_disjoint_period=1,
    )
    dataset_train, _, _ = dataset.split(split_ratio=[0.5, 0.2, 0.3])
    # Two consecutive accesses each resample; results must match in shape
    # and, for the labels, in value.
    first, second = dataset_train[0], dataset_train[0]
    self.assertEqual(
        first.edge_label_index.shape[1],
        second.edge_label_index.shape[1],
    )
    self.assertTrue(torch.equal(first.edge_label, second.edge_label))
def test_secure_split_heterogeneous(self):
    """Secure split guarantees at least one example per split per hetero type."""
    G = generate_simple_small_hete_graph()
    graph = HeteroGraph(G)
    graph = HeteroGraph(
        node_label=graph.node_label,
        edge_index=graph.edge_index,
        edge_label=graph.edge_label,
        directed=True,
    )
    graphs = [graph]

    def expected_sizes(total):
        # Secure-split arithmetic for ratios [0.8, 0.1, 0.1]: reserve 3
        # items, then give each of the first two splits 1 + its share.
        reduced = total - 3
        first = 1 + int(reduced * 0.8)
        second = 1 + int(reduced * 0.1)
        return [first, second, total - first - second]

    # node task
    split_res = GraphDataset(graphs, task="node").split()
    for node_type in graph.node_label_index:
        node_size = expected_sizes(
            graph.node_label_index[node_type].shape[0]
        )
        for i in range(3):
            self.assertEqual(
                split_res[i][0].node_label_index[node_type].shape[0],
                node_size[i],
            )
            self.assertEqual(
                split_res[i][0].node_label[node_type].shape[0],
                node_size[i],
            )
    # edge task
    split_res = GraphDataset(graphs, task="edge").split()
    for message_type in graph.edge_label_index:
        edge_size = expected_sizes(
            graph.edge_label_index[message_type].shape[1]
        )
        for i in range(3):
            self.assertEqual(
                split_res[i][0].edge_label_index[message_type].shape[1],
                edge_size[i],
            )
            self.assertEqual(
                split_res[i][0].edge_label[message_type].shape[0],
                edge_size[i],
            )
    # link_pred task: each split doubles (positive + negative edges)
    split_res = GraphDataset(graphs, task="link_pred").split()
    for message_type in graph.edge_label_index:
        edge_size = [
            2 * size
            for size in expected_sizes(
                graph.edge_label_index[message_type].shape[1]
            )
        ]
        for i in range(3):
            self.assertEqual(
                split_res[i][0].edge_label_index[message_type].shape[1],
                edge_size[i],
            )
            self.assertEqual(
                split_res[i][0].edge_label[message_type].shape[0],
                edge_size[i],
            )
def test_secure_split(self):
    """Secure split guarantees at least one example per split (homogeneous)."""
    G = simple_networkx_small_graph()
    graph = Graph(G)
    graph = Graph(
        node_label=graph.node_label,
        edge_index=graph.edge_index,
        edge_label=graph.edge_label,
        directed=True,
    )
    graphs = [graph]

    def expected_sizes(total):
        # Secure-split arithmetic for ratios [0.8, 0.1, 0.1]: reserve 3
        # items, then give each of the first two splits 1 + its share.
        reduced = total - 3
        first = 1 + int(0.8 * reduced)
        second = 1 + int(0.1 * reduced)
        return [first, second, total - first - second]

    # node task
    dataset = GraphDataset(graphs, task="node")
    node_size = expected_sizes(dataset.num_nodes[0])
    split_res = dataset.split()
    for i in range(3):
        self.assertEqual(
            split_res[i][0].node_label_index.shape[0],
            node_size[i],
        )
        self.assertEqual(
            split_res[i][0].node_label.shape[0],
            node_size[i],
        )
    # edge task
    dataset = GraphDataset(graphs, task="edge")
    edge_size = expected_sizes(dataset.num_edges[0])
    split_res = dataset.split()
    for i in range(3):
        self.assertEqual(
            split_res[i][0].edge_label_index.shape[1],
            edge_size[i],
        )
        self.assertEqual(
            split_res[i][0].edge_label.shape[0],
            edge_size[i],
        )
    # link_pred task: each split doubles (positive + negative edges)
    dataset = GraphDataset(graphs, task="link_pred")
    edge_size = [2 * size for size in expected_sizes(dataset.num_edges[0])]
    split_res = dataset.split()
    for i in range(3):
        self.assertEqual(
            split_res[i][0].edge_label_index.shape[1],
            edge_size[i],
        )
        self.assertEqual(
            split_res[i][0].edge_label.shape[0],
            edge_size[i],
        )
    # graph task (inductive split over copies of the same graph)
    graphs = [deepcopy(graph) for _ in range(5)]
    dataset = GraphDataset(graphs, task="link_pred")
    num_train, num_val, num_test = expected_sizes(len(dataset))
    split_res = dataset.split(transductive=False)
    self.assertEqual(num_train, len(split_res[0]))
    self.assertEqual(num_val, len(split_res[1]))
    self.assertEqual(num_test, len(split_res[2]))
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
| 35.667734
| 79
| 0.548946
| 5,428
| 44,549
| 4.175387
| 0.033161
| 0.061331
| 0.059919
| 0.040372
| 0.83979
| 0.791828
| 0.732616
| 0.704774
| 0.658269
| 0.637796
| 0
| 0.028584
| 0.352915
| 44,549
| 1,248
| 80
| 35.696314
| 0.757623
| 0.030169
| 0
| 0.571036
| 0
| 0
| 0.017167
| 0
| 0
| 0
| 0
| 0
| 0.108158
| 1
| 0.009166
| false
| 0
| 0.009166
| 0
| 0.019248
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7f49d895aed1ee10667f68cdf1cd1adf069c4fea
| 142
|
py
|
Python
|
code/textProcessing.py
|
corollari/BaaCL
|
3ebe9ba7c3859243351fe1b12d4eb114bb51b441
|
[
"Unlicense"
] | 1
|
2019-03-06T19:43:46.000Z
|
2019-03-06T19:43:46.000Z
|
code/textProcessing.py
|
corollari/BaaL
|
3ebe9ba7c3859243351fe1b12d4eb114bb51b441
|
[
"Unlicense"
] | null | null | null |
code/textProcessing.py
|
corollari/BaaL
|
3ebe9ba7c3859243351fe1b12d4eb114bb51b441
|
[
"Unlicense"
] | null | null | null |
def preprocess(text):
    """Return *text* with a carriage return inserted after every newline.

    NOTE(review): the replacement is ``'\\n'`` -> ``'\\n\\r'`` (LF then CR),
    not the conventional CRLF ``'\\r\\n'`` — confirm the downstream consumer
    really expects this ordering.
    """
    return text.replace('\n', '\n\r')
def getLetter():
    """Read and return the full contents of ``./input/letter.txt``.

    Fixed: the original opened the file without ever closing it; the
    context manager now closes the handle deterministically.
    """
    # NOTE(review): keeps the platform-default text encoding, as before —
    # consider pinning encoding="utf-8" if the file is known to be UTF-8.
    with open("./input/letter.txt", "r") as f:
        return f.read()
| 20.285714
| 49
| 0.605634
| 20
| 142
| 4.3
| 0.65
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 142
| 6
| 50
| 23.666667
| 0.728814
| 0
| 0
| 0
| 0
| 0
| 0.176056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.