hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff7b58b6e4827e815ace89a2af8cf4fc2ffcfe0e
| 113
|
py
|
Python
|
pyslide/contour/__init__.py
|
sarthakpati/pyslide
|
57a28ae3d236a30405814bfed6a89cac82a814d8
|
[
"MIT"
] | 47
|
2017-11-03T03:10:06.000Z
|
2022-02-13T14:42:57.000Z
|
pyslide/contour/__init__.py
|
sarthakpati/pyslide
|
57a28ae3d236a30405814bfed6a89cac82a814d8
|
[
"MIT"
] | 5
|
2017-11-03T03:40:29.000Z
|
2021-04-13T20:31:56.000Z
|
pyslide/contour/__init__.py
|
sarthakpati/pyslide
|
57a28ae3d236a30405814bfed6a89cac82a814d8
|
[
"MIT"
] | 11
|
2017-11-03T03:10:10.000Z
|
2021-04-13T13:55:29.000Z
|
# -*- coding: utf-8 -*-
from ._adjust import *
from ._check import *
from ._rela import *
from ._split import *
| 16.142857
| 23
| 0.654867
| 15
| 113
| 4.666667
| 0.6
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.19469
| 113
| 6
| 24
| 18.833333
| 0.758242
| 0.185841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ff7cbed85adf70a5a3eaa842a99f8ca081372761
| 40
|
py
|
Python
|
haferml/__init__.py
|
emptymalei/haferml
|
ba193ce1c022c89fb4e88924b7bb7a05b676929a
|
[
"MIT"
] | 11
|
2021-04-17T18:51:45.000Z
|
2021-06-25T19:42:25.000Z
|
haferml/__init__.py
|
emptymalei/haferml
|
ba193ce1c022c89fb4e88924b7bb7a05b676929a
|
[
"MIT"
] | 3
|
2021-04-29T19:24:15.000Z
|
2021-05-21T04:30:54.000Z
|
haferml/__init__.py
|
emptymalei/haferml
|
ba193ce1c022c89fb4e88924b7bb7a05b676929a
|
[
"MIT"
] | 2
|
2021-06-10T00:55:43.000Z
|
2021-12-30T07:37:07.000Z
|
from haferml.version import __version__
| 20
| 39
| 0.875
| 5
| 40
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ff9b2aae9f7014ffd50f7364000e88daf77b6432
| 123
|
py
|
Python
|
tests/cases/resources/tests/async/__init__.py
|
cchmc-bmi-os/serrano
|
ffecaa8e866423386e8a8c2432f99dd02ae7b4c1
|
[
"BSD-2-Clause"
] | 6
|
2015-01-16T14:27:54.000Z
|
2020-08-27T16:32:52.000Z
|
tests/cases/resources/tests/async/__init__.py
|
cchmc-bmi-os/serrano
|
ffecaa8e866423386e8a8c2432f99dd02ae7b4c1
|
[
"BSD-2-Clause"
] | 52
|
2015-01-05T19:11:18.000Z
|
2017-02-16T14:28:38.000Z
|
tests/cases/resources/tests/async/__init__.py
|
cchmc-bmi-os/serrano
|
ffecaa8e866423386e8a8c2432f99dd02ae7b4c1
|
[
"BSD-2-Clause"
] | 6
|
2015-07-29T18:52:04.000Z
|
2020-01-02T16:04:01.000Z
|
from .base import * # noqa
from .exporter import * # noqa
from .preview import * # noqa
from .query import * # noqa
| 24.6
| 31
| 0.650407
| 16
| 123
| 5
| 0.4375
| 0.5
| 0.525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.252033
| 123
| 4
| 32
| 30.75
| 0.869565
| 0.154472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
440dde93a55ff4a7f3a8942d8ea45bb7ec048a03
| 314
|
py
|
Python
|
integration/tests_ok/predicates_string.py
|
jleverenz/hurl
|
b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3
|
[
"Apache-2.0"
] | null | null | null |
integration/tests_ok/predicates_string.py
|
jleverenz/hurl
|
b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3
|
[
"Apache-2.0"
] | null | null | null |
integration/tests_ok/predicates_string.py
|
jleverenz/hurl
|
b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3
|
[
"Apache-2.0"
] | null | null | null |
from flask import request
from app import app
@app.route("/predicates-string")
def predicates_string():
return "Hello World!"
@app.route("/predicates-string-empty")
def predicates_string_empty():
return ""
@app.route("/predicates-string-unicode")
def predicates_string_unicode():
return "\u2708"
| 17.444444
| 40
| 0.729299
| 39
| 314
| 5.74359
| 0.384615
| 0.428571
| 0.241071
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.133758
| 314
| 17
| 41
| 18.470588
| 0.808824
| 0
| 0
| 0
| 0
| 0
| 0.273885
| 0.159236
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| true
| 0
| 0.181818
| 0.272727
| 0.727273
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
441084d18db6660b6c3ce4efb0a657dd1f3a4613
| 36
|
py
|
Python
|
scidb/plugins/sandbox/__init__.py
|
oxdc/sci.db
|
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
|
[
"MIT"
] | null | null | null |
scidb/plugins/sandbox/__init__.py
|
oxdc/sci.db
|
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
|
[
"MIT"
] | null | null | null |
scidb/plugins/sandbox/__init__.py
|
oxdc/sci.db
|
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
|
[
"MIT"
] | null | null | null |
from .sandbox import SandboxManager
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
443cc1c6e2d990e86edc9ab3e69d68e2403f006a
| 33
|
py
|
Python
|
devin/__init__.py
|
peterskipper/devin
|
9822637ebcee64ae22f5180030715e9583e97243
|
[
"MIT"
] | null | null | null |
devin/__init__.py
|
peterskipper/devin
|
9822637ebcee64ae22f5180030715e9583e97243
|
[
"MIT"
] | null | null | null |
devin/__init__.py
|
peterskipper/devin
|
9822637ebcee64ae22f5180030715e9583e97243
|
[
"MIT"
] | null | null | null |
from devin.cli import devin_main
| 16.5
| 32
| 0.848485
| 6
| 33
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4477f699cabfdb87f1d308d97c9bb6d1ee56a275
| 4,612
|
py
|
Python
|
pokemons/tests/mocks.py
|
vanessa/pokebattle
|
3017ad226bc05cc06cb5eb34455c13d959be10ab
|
[
"MIT"
] | 2
|
2018-03-16T15:33:31.000Z
|
2018-05-08T17:53:22.000Z
|
pokemons/tests/mocks.py
|
vanessa/pokebattle
|
3017ad226bc05cc06cb5eb34455c13d959be10ab
|
[
"MIT"
] | 3
|
2018-05-02T18:38:51.000Z
|
2018-05-08T20:03:43.000Z
|
pokemons/tests/mocks.py
|
vanessa/pokebattle
|
3017ad226bc05cc06cb5eb34455c13d959be10ab
|
[
"MIT"
] | 2
|
2018-03-16T02:32:20.000Z
|
2018-03-16T14:17:15.000Z
|
# pylint: skip-file
POKEAPI_POKEMON_LIST_EXAMPLE = {
"count": 949,
"previous": None,
"results": [
{
"url": "https://pokeapi.co/api/v2/pokemon/21/",
"name": "spearow"
},
{
"url": "https://pokeapi.co/api/v2/pokemon/22/",
"name": "fearow"
}
]
}
POKEAPI_POKEMON_DATA_EXAMPLE_FIRST = {
"forms": [
{
"url": "https://pokeapi.co/api/v2/pokemon-form/21/",
"name": "spearow"
}
],
"stats": [
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/6/",
"name": "speed"
},
"effort": 1,
"base_stat": 70
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/5/",
"name": "special-defense"
},
"effort": 0,
"base_stat": 31
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/4/",
"name": "special-attack"
},
"effort": 0,
"base_stat": 31
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/3/",
"name": "defense"
},
"effort": 0,
"base_stat": 30
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/2/",
"name": "attack"
},
"effort": 0,
"base_stat": 60
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/1/",
"name": "hp"
},
"effort": 0,
"base_stat": 40
}
],
"name": "spearow",
"weight": 20,
"sprites": {
"back_female": None,
"back_shiny_female": None,
"back_default": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/back/21.png",
"front_female": None,
"front_shiny_female": None,
"back_shiny": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/back/shiny/21.png",
"front_default": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/21.png",
"front_shiny": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/shiny/21.png"
},
"id": 21,
"order": 30,
"base_experience": 52
}
POKEAPI_POKEMON_DATA_EXAMPLE_SECOND = {
"forms": [
{
"url": "https://pokeapi.co/api/v2/pokemon-form/22/",
"name": "fearow"
}
],
"stats": [
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/6/",
"name": "speed"
},
"effort": 2,
"base_stat": 100
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/5/",
"name": "special-defense"
},
"effort": 0,
"base_stat": 31
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/4/",
"name": "special-attack"
},
"effort": 0,
"base_stat": 31
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/3/",
"name": "defense"
},
"effort": 0,
"base_stat": 65
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/2/",
"name": "attack"
},
"effort": 0,
"base_stat": 90
},
{
"stat": {
"url": "https://pokeapi.co/api/v2/stat/1/",
"name": "hp"
},
"effort": 0,
"base_stat": 40
}
],
"name": "fearow",
"weight": 100,
"sprites": {
"back_female": None,
"back_shiny_female": None,
"back_default": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/back/22.png",
"front_female": None,
"front_shiny_female": None,
"back_shiny": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/back/shiny/22.png",
"front_default": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/22.png",
"front_shiny": "https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/shiny/22.png"
},
"id": 22,
"order": 30,
"base_experience": 52
}
| 27.951515
| 115
| 0.429965
| 428
| 4,612
| 4.528037
| 0.158879
| 0.066047
| 0.123839
| 0.140351
| 0.861713
| 0.837977
| 0.837977
| 0.80805
| 0.80805
| 0.768834
| 0
| 0.039026
| 0.394406
| 4,612
| 164
| 116
| 28.121951
| 0.654851
| 0.003686
| 0
| 0.4875
| 0
| 0.05
| 0.44263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
44810194b180375091ee63be340de197449a8b8e
| 6,553
|
py
|
Python
|
nyuseu/tests/test_views.py
|
foxmask/nyuseu
|
e563bec3be46be0f0430987560b1126872f97a5f
|
[
"BSD-3-Clause"
] | 1
|
2020-09-04T20:41:33.000Z
|
2020-09-04T20:41:33.000Z
|
nyuseu/tests/test_views.py
|
foxmask/nyuseu
|
e563bec3be46be0f0430987560b1126872f97a5f
|
[
"BSD-3-Clause"
] | 4
|
2020-09-06T20:30:20.000Z
|
2020-09-09T12:08:35.000Z
|
nyuseu/tests/test_views.py
|
foxmask/nyuseu
|
e563bec3be46be0f0430987560b1126872f97a5f
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
from django.test import RequestFactory, TestCase
from django.contrib.messages.storage.fallback import FallbackStorage
from nyuseu.models import Feeds, Folders, Articles
from nyuseu.views import ArticlesListView, ArticlesDetailView,\
marked_as_read, marked_as_unread, read_later, unread_later
class ArticlesListViewTestCase(TestCase):
def create_stuff(self):
folder = Folders.objects.create(title="FolderD")
title = 'Le Free de la passion'
url = 'https://foxmask.github.io/feeds/all.atom.xml'
status = True
feeds = Feeds.objects.create(folder=folder, title=title, url=url, status=status)
title = 'TEST TITLE'
image = ''
text = 'TEST'
read = False
Articles.objects.create(feeds=feeds, title=title, image=image, text=text, read=read)
return feeds
def setUp(self):
super(ArticlesListViewTestCase, self).setUp()
# Every test needs access to the request factory.
self.factory = RequestFactory()
def test_all_articles_list(self):
template = "nyuseu/articles_list.html"
# Setup request and view.
request = RequestFactory().get('/')
view = ArticlesListView.as_view(template_name=template)
# Run.
response = view(request)
# Check.
self.assertEqual(response.status_code, 200)
self.assertEqual(response.template_name[0], "nyuseu/articles_list.html")
def test_articles_list_from_feeds(self):
feeds = self.create_stuff()
template = "nyuseu/articles_list.html"
# Setup request and view.
request = RequestFactory().get('feeds/{}/'.format(feeds.id))
kwargs = {'feeds': feeds.id}
view = ArticlesListView.as_view(template_name=template)(request, **kwargs)
# Run.
# response = view(request)
# Check.
self.assertEqual(view.status_code, 200)
self.assertEqual(view.template_name[0], "nyuseu/articles_list.html")
def test_articles_list_no_page_size(self):
feeds = self.create_stuff()
template = "nyuseu/articles_list.html"
# Setup request and view.
request = RequestFactory().get('feeds/{}/'.format(feeds.id))
kwargs = {'feeds': feeds.id}
view = ArticlesListView.as_view(template_name=template)(request, **kwargs)
# Run.
# response = view(request)
# Check.
self.assertEqual(view.status_code, 200)
self.assertEqual(view.template_name[0], "nyuseu/articles_list.html")
def test_articles_page_one(self):
template = "nyuseu/articles_list.html"
# Setup request and view.
request = RequestFactory().get('/?page=1')
view = ArticlesListView.as_view(template_name=template)
# Run.
response = view(request)
# Check.
self.assertEqual(response.status_code, 200)
self.assertEqual(response.template_name[0], "nyuseu/articles_list.html")
class ArticlesDetailViewTestCase(TestCase):
def create_articles(self):
folder = Folders.objects.create(title="FolderC")
title = 'Le Free de la passion'
url = 'https://foxmask.github.io/feeds/all.atom.xml'
status = True
feeds = Feeds.objects.create(folder=folder, title=title, url=url, status=status)
title = 'TEST TITLE'
image = ''
text = 'TEST'
read = False
article = Articles.objects.create(feeds=feeds, title=title, image=image, text=text, read=read)
return article
def setUp(self):
super(ArticlesDetailViewTestCase, self).setUp()
# Every test needs access to the request factory.
self.factory = RequestFactory()
def test_articles_detail(self):
article = self.create_articles()
template = "nyuseu/articles_detail.html"
# Setup request and view.
request = RequestFactory().get('articles/{}/'.format(article.id))
view = ArticlesDetailView.as_view(template_name=template)
# Run.
response = view(request, pk=article.id)
# Check.
self.assertEqual(response.status_code, 200)
self.assertEqual(response.template_name[0], "nyuseu/articles_detail.html")
class ViewFunction(TestCase):
def create_articles(self):
folder = Folders.objects.create(title="FolderC")
title = 'Le Free de la passion'
url = 'https://foxmask.github.io/feeds/all.atom.xml'
status = True
feeds = Feeds.objects.create(folder=folder, title=title, url=url, status=status)
title = 'TEST TITLE'
image = ''
text = 'TEST'
read = False
article = Articles.objects.create(feeds=feeds, title=title, image=image, text=text, read=read)
return article
def setUp(self):
super(ViewFunction, self).setUp()
self.request = RequestFactory().get('/')
def test_marked_as_read(self):
article = self.create_articles()
# Setup request and view.
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
response = marked_as_read(request=self.request, article_id=article.id)
# Check.
self.assertEqual(response.status_code, 302)
def test_marked_as_unread(self):
article = self.create_articles()
# Setup request and view.
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
response = marked_as_unread(request=self.request, article_id=article.id)
# Check.
self.assertEqual(response.status_code, 302)
def test_read_later(self):
article = self.create_articles()
# Setup request and view.
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
response = read_later(request=self.request, article_id=article.id)
# Check.
self.assertEqual(response.status_code, 302)
def test_unread_later(self):
article = self.create_articles()
# Setup request and view.
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
response = unread_later(request=self.request, article_id=article.id)
# Check.
self.assertEqual(response.status_code, 302)
| 37.022599
| 102
| 0.650542
| 737
| 6,553
| 5.660787
| 0.130258
| 0.044823
| 0.055129
| 0.040988
| 0.834372
| 0.827421
| 0.819032
| 0.819032
| 0.805609
| 0.794104
| 0
| 0.006796
| 0.236533
| 6,553
| 176
| 103
| 37.232955
| 0.827104
| 0.070655
| 0
| 0.697479
| 0
| 0
| 0.107903
| 0.041907
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.12605
| false
| 0.02521
| 0.033613
| 0
| 0.210084
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
448b301ed671bfb542b24261be4858e0ba91ffaa
| 4,939
|
py
|
Python
|
tests/test_cog.py
|
vlro/terracotta
|
26ef2f61bd8306fd8fecd27288df6426a6751534
|
[
"MIT"
] | 448
|
2018-03-06T09:38:42.000Z
|
2022-03-31T12:58:02.000Z
|
tests/test_cog.py
|
vlro/terracotta
|
26ef2f61bd8306fd8fecd27288df6426a6751534
|
[
"MIT"
] | 154
|
2018-11-28T08:13:18.000Z
|
2022-03-31T09:01:26.000Z
|
tests/test_cog.py
|
vlro/terracotta
|
26ef2f61bd8306fd8fecd27288df6426a6751534
|
[
"MIT"
] | 59
|
2018-12-14T15:41:17.000Z
|
2022-02-17T23:55:52.000Z
|
import pytest
import affine
import rasterio
from rasterio.io import MemoryFile
from rasterio.shutil import copy
from rasterio.enums import Resampling
import numpy as np
BASE_PROFILE = {
'driver': 'GTiff',
'dtype': 'uint16',
'nodata': 0,
'count': 1,
'crs': {'init': 'epsg:32637'},
'transform': affine.Affine(
2.0, 0.0, 694920.0,
0.0, -2.0, 2055666.0
)
}
def test_validate_optimized(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1],
tiled=True,
blockxsize=256,
blockysize=256
)
with MemoryFile() as mf, mf.open(**profile) as dst:
dst.write(raster_data, 1)
overviews = [2 ** j for j in range(1, 4)]
dst.build_overviews(overviews, Resampling.nearest)
copy(dst, outfile, copy_src_overviews=True, **profile)
assert cog.validate(outfile)
def test_validate_optimized_small(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(128, 128).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1]
)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
assert cog.validate(outfile)
def test_validate_unoptimized(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1]
)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
assert not cog.validate(outfile)
def test_validate_no_overviews(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1],
tiled=True,
blockxsize=256,
blockysize=256
)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
assert not cog.validate(outfile)
def test_validate_not_tiled(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1]
)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
overviews = [2 ** j for j in range(1, 4)]
dst.build_overviews(overviews, Resampling.nearest)
assert not cog.validate(outfile)
def test_validate_wrong_offset(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1],
tiled=True,
blockxsize=256,
blockysize=256
)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
overviews = [2 ** j for j in range(1, 4)]
dst.build_overviews(overviews, Resampling.nearest)
assert not cog.validate(outfile)
def test_validate_external_overview(tmpdir):
import os
from terracotta import cog
outfile = str(tmpdir / 'raster.tif')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1],
tiled=True,
blockxsize=256,
blockysize=256
)
with rasterio.Env(TIFF_USE_OVR=True):
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
overviews = [2 ** j for j in range(1, 4)]
dst.build_overviews(overviews, Resampling.nearest)
assert os.path.isfile(f'{outfile}.ovr')
assert not cog.validate(outfile)
@pytest.mark.filterwarnings("ignore:Dataset has no geotransform set")
def test_validate_not_gtiff(tmpdir):
from terracotta import cog
outfile = str(tmpdir / 'raster.png')
raster_data = 1000 * np.random.rand(512, 512).astype(np.uint16)
profile = BASE_PROFILE.copy()
profile.update(
height=raster_data.shape[0],
width=raster_data.shape[1],
driver='PNG'
)
with rasterio.open(outfile, 'w', **profile) as dst:
dst.write(raster_data, 1)
assert not cog.validate(outfile)
| 24.944444
| 69
| 0.644665
| 650
| 4,939
| 4.789231
| 0.152308
| 0.102795
| 0.077096
| 0.059107
| 0.820752
| 0.812078
| 0.812078
| 0.78895
| 0.78895
| 0.772567
| 0
| 0.049722
| 0.23446
| 4,939
| 197
| 70
| 25.071066
| 0.773605
| 0
| 0
| 0.692857
| 0
| 0
| 0.040494
| 0
| 0
| 0
| 0
| 0
| 0.064286
| 1
| 0.057143
| false
| 0
| 0.114286
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
448b897dfde902dacd8aedc31d7581f16773d4cf
| 266
|
py
|
Python
|
tests/test_customrest.py
|
hexatester/dapodik
|
d89c0fb899c89e866527f6b7b57f741abd6444ea
|
[
"MIT"
] | 4
|
2021-02-01T15:19:35.000Z
|
2022-01-26T02:47:21.000Z
|
tests/test_customrest.py
|
hexatester/dapodik
|
d89c0fb899c89e866527f6b7b57f741abd6444ea
|
[
"MIT"
] | 3
|
2020-01-08T17:07:15.000Z
|
2020-01-08T18:05:12.000Z
|
tests/test_customrest.py
|
hexatester/dapodik
|
d89c0fb899c89e866527f6b7b57f741abd6444ea
|
[
"MIT"
] | 2
|
2021-08-04T13:48:08.000Z
|
2021-12-25T02:36:49.000Z
|
import attr
from dapodik.base import BaseDapodik
from dapodik.customrest import BaseCustomrest
from dapodik.customrest import Wilayah
def test_base_customresr():
assert issubclass(BaseCustomrest, BaseDapodik)
def test_member():
assert attr.has(Wilayah)
| 19
| 50
| 0.808271
| 32
| 266
| 6.625
| 0.5
| 0.15566
| 0.198113
| 0.254717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135338
| 266
| 13
| 51
| 20.461538
| 0.921739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9232eff87a2e080d05efe86d1cb469205597f43c
| 13,755
|
py
|
Python
|
pyne/tests/test_fluka.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
|
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
|
[
"MIT"
] | 1
|
2022-03-26T20:01:13.000Z
|
2022-03-26T20:01:13.000Z
|
pyne/tests/test_fluka.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
|
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
|
[
"MIT"
] | null | null | null |
pyne/tests/test_fluka.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
|
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
|
[
"MIT"
] | 1
|
2022-03-26T19:59:13.000Z
|
2022-03-26T19:59:13.000Z
|
#!/usr/bin/python
import os
import nose.tools
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal
from pyne import fluka
# Mesh specific imports
from pyne.mesh import Mesh, StatMesh, MeshError, HAVE_PYMOAB
# Test Usrbin and UsrbinTally classes
def test_single_usrbin():
"""Test a usrbin file containing a single tally."""
if not HAVE_PYMOAB:
raise SkipTest
thisdir = os.path.dirname(__file__)
usrbin_file = os.path.join(thisdir, "fluka_usrbin_single.lis")
usrbin_object = fluka.Usrbin(usrbin_file)
# Test UsrbinTally attributes
expected_xbounds = [-3.0, 0.0, 3.0, 6.0]
expected_ybounds = [-3.0, -1.0, 1.0, 3.0]
expected_zbounds = [-3.0, -2.0, -1.0, 0.0]
assert_equal(usrbin_object.tally["single_n"].coord_sys, "Cartesian")
assert_equal(usrbin_object.tally["single_n"].name, "single_n")
assert_equal(usrbin_object.tally["single_n"].particle, "8")
assert_equal(usrbin_object.tally["single_n"].x_bounds, expected_xbounds)
assert_equal(usrbin_object.tally["single_n"].y_bounds, expected_ybounds)
assert_equal(usrbin_object.tally["single_n"].z_bounds, expected_zbounds)
# Test error and part data values match
expected_part_data = [
1.0984e-02,
4.1051e-03,
1.0636e-03,
2.1837e-02,
5.5610e-03,
1.9119e-03,
1.0971e-02,
3.3943e-03,
1.2456e-03,
1.6615e-02,
2.9501e-03,
7.4597e-04,
1.0395e-01,
6.1186e-03,
1.4997e-03,
1.7421e-02,
3.0824e-03,
7.3878e-04,
1.8097e-02,
5.2532e-03,
2.1572e-03,
1.0465e-01,
6.2611e-03,
1.8829e-03,
1.7323e-02,
5.5092e-03,
2.1418e-03,
]
expected_error_data = [
5.0179e00,
1.6521e01,
1.3973e01,
4.2025e00,
8.1766e00,
1.1465e01,
7.2005e00,
1.0479e01,
1.5640e01,
5.5994e00,
1.3275e01,
2.7617e01,
7.3788e-01,
6.7200e00,
1.9092e01,
7.3670e00,
1.3018e01,
2.8866e01,
5.7221e00,
1.5916e01,
2.6001e01,
8.3490e-01,
1.6715e01,
1.2759e01,
5.0763e00,
1.1420e01,
1.0040e01,
]
for i, v_e in enumerate(
usrbin_object.tally["single_n"].structured_iterate_hex("zyx")
):
read = usrbin_object.tally["single_n"].part_data_tag[v_e]
assert_equal(usrbin_object.tally["single_n"].part_data_tag.name, "part_data_8")
expected = expected_part_data[i]
assert_equal(read, expected)
for i, v_e in enumerate(
usrbin_object.tally["single_n"].structured_iterate_hex("zyx")
):
read = usrbin_object.tally["single_n"].error_data_tag[v_e]
assert_equal(
usrbin_object.tally["single_n"].error_data_tag.name, "error_data_8"
)
expected = expected_error_data[i]
assert_equal(read, expected)
def test_multiple_usrbin():
    """Test a usrbin file containing multiple (two) tallies.

    Each tally is checked for its attributes (coordinate system, name,
    particle id, mesh bounds) and for its per-voxel part/error data.
    """
    if not HAVE_PYMOAB:
        raise SkipTest
    thisdir = os.path.dirname(__file__)
    usrbin_file = os.path.join(thisdir, "fluka_usrbin_multiple.lis")
    usrbin_object = fluka.Usrbin(usrbin_file)

    def check_tally(name, particle, expected_part_data, expected_error_data):
        """Verify attributes and per-voxel data of one tally in this file."""
        tally = usrbin_object.tally[name]
        assert_equal(tally.coord_sys, "Cartesian")
        assert_equal(tally.name, name)
        assert_equal(tally.particle, particle)
        # Both tallies in this file share the same mesh bounds.
        assert_equal(tally.x_bounds, [-3.0, 0.0, 3.0, 6.0])
        assert_equal(tally.y_bounds, [-3.0, -1.0, 1.0, 3.0])
        assert_equal(tally.z_bounds, [-3.0, -2.0, -1.0, 0.0])
        # Tag names are per-tally constants; assert them once rather than
        # on every loop iteration as before.
        assert_equal(tally.part_data_tag.name, "part_data_" + particle)
        assert_equal(tally.error_data_tag.name, "error_data_" + particle)
        for i, v_e in enumerate(tally.structured_iterate_hex("zyx")):
            assert_equal(tally.part_data_tag[v_e], expected_part_data[i])
        for i, v_e in enumerate(tally.structured_iterate_hex("zyx")):
            assert_equal(tally.error_data_tag[v_e], expected_error_data[i])

    # Tally #1: particle id "7".
    check_tally(
        "multi_p",
        "7",
        [
            7.5083e-04, 1.7570e-04, 3.3361e-05,
            1.1232e-03, 3.4735e-04, 1.5816e-04,
            6.2264e-04, 2.3071e-04, 8.3469e-05,
            1.6700e-03, 4.1785e-04, 7.6990e-05,
            3.3842e-03, 9.2931e-04, 2.4958e-04,
            1.0121e-03, 2.7993e-04, 6.1043e-05,
            7.7401e-04, 3.2480e-04, 9.3145e-06,
            1.4245e-03, 4.3352e-04, 1.7392e-04,
            7.3166e-04, 2.4210e-04, 1.4804e-04,
        ],
        [
            2.2149e01, 7.4509e01, 1.0000e02,
            2.4621e01, 4.6383e01, 3.3621e01,
            2.1616e01, 7.5885e01, 1.0000e02,
            2.0067e01, 3.3654e01, 6.1265e01,
            1.8407e01, 1.6239e01, 5.2119e01,
            1.5791e01, 3.8452e01, 1.0000e02,
            7.6577e00, 3.5290e01, 1.0000e02,
            8.3702e00, 5.3283e01, 6.2602e01,
            1.1655e01, 6.2289e01, 6.7541e01,
        ],
    )
    # Tally #2: particle id "8".
    check_tally(
        "multi_n",
        "8",
        [
            1.0984e-02, 4.1051e-03, 1.0636e-03,
            2.1837e-02, 5.5610e-03, 1.9119e-03,
            1.0971e-02, 3.3943e-03, 1.2456e-03,
            1.6615e-02, 2.9501e-03, 7.4597e-04,
            1.0395e-01, 6.1186e-03, 1.4997e-03,
            1.7421e-02, 3.0824e-03, 7.3878e-04,
            1.8097e-02, 5.2532e-03, 2.1572e-03,
            1.0465e-01, 6.2611e-03, 1.8829e-03,
            1.7323e-02, 5.5092e-03, 2.1418e-03,
        ],
        [
            5.0179e00, 1.6521e01, 1.3973e01,
            4.2025e00, 8.1766e00, 1.1465e01,
            7.2005e00, 1.0479e01, 1.5640e01,
            5.5994e00, 1.3275e01, 2.7617e01,
            7.3788e-01, 6.7200e00, 1.9092e01,
            7.3670e00, 1.3018e01, 2.8866e01,
            5.7221e00, 1.5916e01, 2.6001e01,
            8.3490e-01, 1.6715e01, 1.2759e01,
            5.0763e00, 1.1420e01, 1.0040e01,
        ],
    )
def test_degenerate_usrbin():
    """Test usrbin file containing tallies with different number of bins in
    each direction.
    """
    if not HAVE_PYMOAB:
        raise SkipTest
    thisdir = os.path.dirname(__file__)
    usrbin_file = os.path.join(thisdir, "fluka_usrbin_degenerate.lis")
    usrbin_object = fluka.Usrbin(usrbin_file)

    def check_tally(name, x_bounds, y_bounds, z_bounds, part_data, error_data):
        """Verify attributes and per-voxel data of one degenerate tally."""
        tally = usrbin_object.tally[name]
        assert_equal(tally.coord_sys, "Cartesian")
        assert_equal(tally.name, name)
        # All three tallies in this file score particle id "8".
        assert_equal(tally.particle, "8")
        assert_equal(tally.x_bounds, x_bounds)
        assert_equal(tally.y_bounds, y_bounds)
        assert_equal(tally.z_bounds, z_bounds)
        # Tag names are per-tally constants; assert them once rather than
        # on every loop iteration as before.
        assert_equal(tally.part_data_tag.name, "part_data_8")
        assert_equal(tally.error_data_tag.name, "error_data_8")
        for i, v_e in enumerate(tally.structured_iterate_hex("zyx")):
            assert_equal(tally.part_data_tag[v_e], part_data[i])
        for i, v_e in enumerate(tally.structured_iterate_hex("zyx")):
            assert_equal(tally.error_data_tag[v_e], error_data[i])

    # Tally #1: a single bin along z.
    check_tally(
        "degen1",
        [-3.0, 0.0, 3.0, 6.0],
        [-3.0, 0.0, 3.0],
        [-3.0, 0.0],
        [3.5279e-02, 4.7334e-03, 1.4458e-03, 3.6242e-02, 4.6521e-03, 1.5292e-03],
        [1.2016e00, 6.4313e00, 7.7312e00, 2.0235e00, 9.4199e00, 8.0514e00],
    )
    # Tally #2: a single bin along y.
    check_tally(
        "degen2",
        [-3.0, 1.5, 6.0],
        [-3.0, 3.0],
        [-3.0, -2.0, -1.0, 0.0],
        [1.1543e-02, 2.0295e-03, 3.2603e-02, 1.4229e-03, 3.3492e-02, 2.7923e-03],
        [2.7321e00, 5.2342e00, 7.4679e-01, 4.2862e00, 1.3090e00, 1.4151e01],
    )
    # Tally #3: a single bin along x.
    check_tally(
        "degen3",
        [-3.0, 6.0],
        [-3.0, -1.0, 1.0, 3.0],
        [-3.0, -1.5, 0.0],
        [5.8037e-03, 1.3260e-02, 5.6046e-03, 7.9677e-03, 4.3111e-02, 8.1349e-03],
        [6.1913e00, 2.3684e00, 4.6124e00, 3.2523e00, 1.3714e00, 4.3161e00],
    )
# test file writing to catch upstream changes in mesh
def test_mesh_write():
    """Smoke-test hdf5 writing of a parsed usrbin tally.

    Exists to catch upstream changes in the mesh write path.
    """
    if not HAVE_PYMOAB:
        raise SkipTest
    thisdir = os.path.dirname(__file__)
    usrbin_file = os.path.join(thisdir, "fluka_usrbin_single.lis")
    usrbin_object = fluka.Usrbin(usrbin_file)
    data = usrbin_object.tally["single_n"]
    outfile = "test_fluka_data.h5m"
    try:
        data.write_hdf5(outfile)
    finally:
        # Don't leave the output artifact behind after the test run.
        if os.path.exists(outfile):
            os.remove(outfile)
| 29.265957
| 88
| 0.611487
| 1,897
| 13,755
| 4.198735
| 0.134423
| 0.116008
| 0.155807
| 0.115505
| 0.833522
| 0.830508
| 0.792844
| 0.745637
| 0.712367
| 0.623101
| 0
| 0.155271
| 0.262087
| 13,755
| 469
| 89
| 29.328358
| 0.629458
| 0.054671
| 0
| 0.543147
| 0
| 0
| 0.067861
| 0.007566
| 0
| 0
| 0
| 0
| 0.154822
| 1
| 0.010152
| false
| 0
| 0.015228
| 0
| 0.025381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
925055150527191de22b372bad8f3d205df1a42c
| 4,742
|
py
|
Python
|
tt/test/test_datatable.py
|
anthonyoteri/timetrack2
|
3d246f640647ff9e231b37eefb5d75ca4186efcd
|
[
"BSD-3-Clause"
] | null | null | null |
tt/test/test_datatable.py
|
anthonyoteri/timetrack2
|
3d246f640647ff9e231b37eefb5d75ca4186efcd
|
[
"BSD-3-Clause"
] | 68
|
2018-01-30T13:57:25.000Z
|
2018-05-02T16:08:08.000Z
|
tt/test/test_datatable.py
|
anthonyoteri/timetrack2
|
3d246f640647ff9e231b37eefb5d75ca4186efcd
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2018, Anthony Oteri
# All rights reserved
from tt.datatable import Datatable
from unittest import mock
def test_basic_table():
    """Headers are title-cased column keys; rows become value lists."""
    sample = [
        dict(col_1="1", col_2="2", col_3="3"),
        dict(col_1="4", col_2="5", col_3="6"),
    ]
    dt = Datatable(table=sample)
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["1", "2", "3"], ["4", "5", "6"]]
def test_basic_table_append():
    """Appending rows one at a time matches building the table up front."""
    dt = Datatable()
    dt.append(dict(col_1="1", col_2="2", col_3="3"))
    dt.append(dict(col_1="4", col_2="5", col_3="6"))
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["1", "2", "3"], ["4", "5", "6"]]
def test_basic_table_labels():
    """Row labels are title-cased and prepended to each row."""
    sample = [
        dict(col_1="1", col_2="2", col_3="3"),
        dict(col_1="4", col_2="5", col_3="6"),
    ]
    dt = Datatable(table=sample, labels=["row_0", "row_1"])
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["Row_0", "1", "2", "3"], ["Row_1", "4", "5", "6"]]
def test_basic_table_append_with_labels():
    """Labels supplied via append() behave like constructor labels."""
    dt = Datatable()
    for idx, row in enumerate(
        [
            dict(col_1="1", col_2="2", col_3="3"),
            dict(col_1="4", col_2="5", col_3="6"),
        ]
    ):
        dt.append(row, label="row_%d" % idx)
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["Row_0", "1", "2", "3"], ["Row_1", "4", "5", "6"]]
def test_basic_table_not_enough_labels():
    """Rows beyond the label list get a None label placeholder."""
    sample = [
        dict(col_1="1", col_2="2", col_3="3"),
        dict(col_1="4", col_2="5", col_3="6"),
    ]
    dt = Datatable(table=sample, labels=["row_0"])
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["Row_0", "1", "2", "3"], [None, "4", "5", "6"]]
def test_basic_table_summary():
    """Summaries append a trailing column; the summary header is not cased."""
    sample = [
        dict(col_1="1", col_2="2", col_3="3"),
        dict(col_1="4", col_2="5", col_3="6"),
    ]
    dt = Datatable(table=sample, summaries=["row_0", "row_1"], summary_header="summary")
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3", "summary"]
    assert body == [["1", "2", "3", "row_0"], ["4", "5", "6", "row_1"]]
def test_basic_table_append_with_summary():
    """Per-row summaries from append() land in a trailing cell."""
    dt = Datatable()
    for idx, row in enumerate(
        [
            dict(col_1="1", col_2="2", col_3="3"),
            dict(col_1="4", col_2="5", col_3="6"),
        ]
    ):
        dt.append(row, summary="row_%d" % idx)
    hdrs, body = dt._make()
    # No summary_header was set, so the header row gains no extra column.
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["1", "2", "3", "row_0"], ["4", "5", "6", "row_1"]]
def test_basic_table_not_enough_summaries():
    """Rows beyond the summary list get a None summary cell."""
    sample = [
        dict(col_1="1", col_2="2", col_3="3"),
        dict(col_1="4", col_2="5", col_3="6"),
    ]
    dt = Datatable(table=sample, summaries=["row_0"])
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["1", "2", "3", "row_0"], ["4", "5", "6", None]]
def test_basic_table_missing_values():
    """Keys absent from a row are rendered as None cells."""
    dt = Datatable(table=[dict(col_1="1", col_3="3"), dict(col_2="5", col_3="6")])
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3"]
    assert body == [["1", None, "3"], [None, "5", "6"]]
def test_basic_table_extra_column():
    """Explicit headers may name columns no row provides; cells are None."""
    sample = [
        dict(col_1="1", col_2="2", col_3="3"),
        dict(col_1="4", col_2="5", col_3="6"),
    ]
    dt = Datatable(table=sample, headers=["col_1", "col_2", "col_3", "col_4"])
    hdrs, body = dt._make()
    assert hdrs == ["Col_1", "Col_2", "Col_3", "Col_4"]
    assert body == [["1", "2", "3", None], ["4", "5", "6", None]]
@mock.patch("tabulate.tabulate")
def test_table_string_conversion(tabulate, mocker):
    """str() renders through tabulate with the configured table format."""
    fmt = mocker.MagicMock()
    dt = Datatable(
        table=[
            dict(col_1="1", col_2="2", col_3="3"),
            dict(col_1="4", col_2="5", col_3="6"),
        ],
        table_fmt=fmt,
    )
    tabulate.return_value = ""
    str(dt)
    expected_headers = ["Col_1", "Col_2", "Col_3"]
    expected_table = [["1", "2", "3"], ["4", "5", "6"]]
    tabulate.assert_called_once_with(expected_table, expected_headers, tablefmt=fmt)
@mock.patch("tabulate.tabulate")
def test_table_string_conversion_with_caption(tabulate, mocker):
    """A caption is prepended to the tabulate output, newline-separated."""
    dt = Datatable(
        table=[
            dict(col_1="1", col_2="2", col_3="3"),
            dict(col_1="4", col_2="5", col_3="6"),
        ],
        table_fmt=mocker.MagicMock(),
    )
    dt.caption = "Foo"
    tabulate.return_value = "Bar"
    assert "Foo\nBar" == str(dt)
| 25.771739
| 85
| 0.507592
| 709
| 4,742
| 3.118477
| 0.09732
| 0.065129
| 0.052013
| 0.048847
| 0.826775
| 0.816373
| 0.793306
| 0.76436
| 0.758933
| 0.701945
| 0
| 0.072336
| 0.236187
| 4,742
| 183
| 86
| 25.912568
| 0.538101
| 0.011177
| 0
| 0.588235
| 0
| 0
| 0.176697
| 0
| 0
| 0
| 0
| 0
| 0.184874
| 1
| 0.10084
| false
| 0
| 0.016807
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2b9c782a0a2b6ec6959bf4d956e206bf03ae2d04
| 61
|
py
|
Python
|
tests/test_code/py/subset_find_exception/zero.py
|
FreddyZeng/code2flow
|
37e45ca4340289f8ceec79b3fe5131c401387c58
|
[
"MIT"
] | 1
|
2022-03-16T13:44:35.000Z
|
2022-03-16T13:44:35.000Z
|
tests/test_code/py/subset_find_exception/zero.py
|
FreddyZeng/code2flow
|
37e45ca4340289f8ceec79b3fe5131c401387c58
|
[
"MIT"
] | null | null | null |
tests/test_code/py/subset_find_exception/zero.py
|
FreddyZeng/code2flow
|
37e45ca4340289f8ceec79b3fe5131c401387c58
|
[
"MIT"
] | null | null | null |
def private():
    # NOTE(review): looks like a parser-test fixture (path suggests
    # code2flow test input) — the body is intentionally empty; only the
    # call edge into this function matters. Confirm before "fixing".
    pass
class Abra:
    # Method deliberately has no ``self`` parameter; it exists to create a
    # call edge to the module-level ``private`` function for graph analysis.
    def other():
        private()
| 7.625
| 14
| 0.622951
| 8
| 61
| 4.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229508
| 61
| 7
| 15
| 8.714286
| 0.808511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0.2
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
2beebd4d3a25c80fdd642652ef4fdba1a26d0c17
| 41
|
py
|
Python
|
patheng/__init__.py
|
kamakazikamikaze/patheng
|
956237bb50259d2536d772302dda6ecf8901b4a0
|
[
"Unlicense"
] | null | null | null |
patheng/__init__.py
|
kamakazikamikaze/patheng
|
956237bb50259d2536d772302dda6ecf8901b4a0
|
[
"Unlicense"
] | null | null | null |
patheng/__init__.py
|
kamakazikamikaze/patheng
|
956237bb50259d2536d772302dda6ecf8901b4a0
|
[
"Unlicense"
] | null | null | null |
from . import elk, utils, crypto, oauth2
| 20.5
| 40
| 0.731707
| 6
| 41
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.170732
| 41
| 1
| 41
| 41
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a600e4270c26cf31d45d138f16a50b5679bb7805
| 24
|
py
|
Python
|
tools/processor/engines/lib/__init__.py
|
Skarlett/tumblr_boobs
|
bdbb7ebabfb6d6290ce830f6c4b32711f2911d6a
|
[
"MIT"
] | null | null | null |
tools/processor/engines/lib/__init__.py
|
Skarlett/tumblr_boobs
|
bdbb7ebabfb6d6290ce830f6c4b32711f2911d6a
|
[
"MIT"
] | null | null | null |
tools/processor/engines/lib/__init__.py
|
Skarlett/tumblr_boobs
|
bdbb7ebabfb6d6290ce830f6c4b32711f2911d6a
|
[
"MIT"
] | null | null | null |
from . import imagehash
| 12
| 23
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a62a916cbf34239ac1ef60a49e9b83e93b4cc309
| 1,217
|
py
|
Python
|
agent_admin_sdk/client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
agent_admin_sdk/client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
agent_admin_sdk/client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import agent_admin_sdk.api.admin_task.admin_task_client
import agent_admin_sdk.api.agent.agent_client
import agent_admin_sdk.api.org.org_client
import agent_admin_sdk.api.org_init.org_init_client
import agent_admin_sdk.api.plugin.plugin_client
import agent_admin_sdk.api.plugin_version.plugin_version_client
class Client(object):
    """Aggregates every agent_admin service sub-client under one object."""

    def __init__(self, server_ip="", server_port=0, service_name=""):
        # All sub-clients are constructed with the same connection triple.
        api = agent_admin_sdk.api
        conn = (server_ip, server_port, service_name)
        self.admin_task = api.admin_task.admin_task_client.AdminTaskClient(*conn)
        self.agent = api.agent.agent_client.AgentClient(*conn)
        self.org = api.org.org_client.OrgClient(*conn)
        self.org_init = api.org_init.org_init_client.OrgInitClient(*conn)
        self.plugin = api.plugin.plugin_client.PluginClient(*conn)
        self.plugin_version = api.plugin_version.plugin_version_client.PluginVersionClient(*conn)
| 38.03125
| 144
| 0.769104
| 175
| 1,217
| 4.914286
| 0.171429
| 0.139535
| 0.181395
| 0.223256
| 0.795349
| 0.788372
| 0.747674
| 0.462791
| 0.288372
| 0
| 0
| 0.001925
| 0.146261
| 1,217
| 31
| 145
| 39.258065
| 0.825794
| 0.017256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.428571
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a65943876f77c68f170860bc377af66b6737a2b7
| 43
|
py
|
Python
|
sewer/http_providers/__init__.py
|
aphexer/sewer
|
1012d76349c6a369132c6b0201d5fc3322072f4c
|
[
"MIT"
] | null | null | null |
sewer/http_providers/__init__.py
|
aphexer/sewer
|
1012d76349c6a369132c6b0201d5fc3322072f4c
|
[
"MIT"
] | null | null | null |
sewer/http_providers/__init__.py
|
aphexer/sewer
|
1012d76349c6a369132c6b0201d5fc3322072f4c
|
[
"MIT"
] | null | null | null |
from .common import BaseHttp # noqa: F401
| 21.5
| 42
| 0.744186
| 6
| 43
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.186047
| 43
| 1
| 43
| 43
| 0.828571
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a6cb637a32c30bdb8286f5f2422fc36dfaa23f49
| 184
|
py
|
Python
|
tests/experimental-features/environment.py
|
stevegibson/behave-webdriver
|
c0130bb59217d5a32a44efe9d9c588851a7b9f58
|
[
"MIT"
] | null | null | null |
tests/experimental-features/environment.py
|
stevegibson/behave-webdriver
|
c0130bb59217d5a32a44efe9d9c588851a7b9f58
|
[
"MIT"
] | null | null | null |
tests/experimental-features/environment.py
|
stevegibson/behave-webdriver
|
c0130bb59217d5a32a44efe9d9c588851a7b9f58
|
[
"MIT"
] | null | null | null |
from behave_webdriver import BehaveDriver
def before_all(context):
    """Create a headless-Chrome BehaveDriver shared by the whole run."""
    context.behave_driver = BehaveDriver.headless_chrome()
def after_all(context):
    """Quit the shared driver once all features have finished."""
    context.behave_driver.quit()
| 26.285714
| 58
| 0.804348
| 23
| 184
| 6.173913
| 0.608696
| 0.140845
| 0.239437
| 0.323944
| 0.408451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11413
| 184
| 6
| 59
| 30.666667
| 0.871166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
4700533085bb891e8070034702468cd15220387e
| 249
|
py
|
Python
|
examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/schedule_from_partitions.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/schedule_from_partitions.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/schedule_from_partitions.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
"""isort:skip_file"""
from .partitioned_job import do_stuff_partitioned

# NOTE: the start/end marker comments delimit the region embedded in the
# docs; keep them and the code between them intact.
# start_marker
from dagster import build_schedule_from_partitioned_job

do_stuff_partitioned_schedule = build_schedule_from_partitioned_job(do_stuff_partitioned)
# end_marker
| 24.9
| 89
| 0.871486
| 34
| 249
| 5.823529
| 0.441176
| 0.227273
| 0.272727
| 0.282828
| 0.494949
| 0.494949
| 0.494949
| 0.494949
| 0
| 0
| 0
| 0
| 0.076305
| 249
| 9
| 90
| 27.666667
| 0.86087
| 0.160643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4714102eff9848074a5d3fea3ea31ecabbd7c603
| 177
|
py
|
Python
|
tests/components/default_config/conftest.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/default_config/conftest.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/default_config/conftest.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""default_config session fixtures."""
import pytest
@pytest.fixture(autouse=True)
def default_config_mock_async_zeroconf(mock_async_zeroconf):
    """Auto mock zeroconf."""
    # Body intentionally empty: merely requesting ``mock_async_zeroconf``
    # activates that fixture, and autouse=True applies it to every test
    # in this directory.
| 19.666667
| 60
| 0.774011
| 22
| 177
| 5.909091
| 0.636364
| 0.2
| 0.261538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 177
| 8
| 61
| 22.125
| 0.81761
| 0.293785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b320a772745ff439dd87cb2e156a326154b897a
| 269
|
py
|
Python
|
snmpagent_unity/unity_impl/PoolRaidLevels.py
|
factioninc/snmp-unity-agent
|
3525dc0fac60d1c784dcdd7c41693544bcbef843
|
[
"Apache-2.0"
] | 2
|
2019-03-01T11:14:59.000Z
|
2019-10-02T17:47:59.000Z
|
snmpagent_unity/unity_impl/PoolRaidLevels.py
|
factioninc/snmp-unity-agent
|
3525dc0fac60d1c784dcdd7c41693544bcbef843
|
[
"Apache-2.0"
] | 2
|
2019-03-01T11:26:29.000Z
|
2019-10-11T18:56:54.000Z
|
snmpagent_unity/unity_impl/PoolRaidLevels.py
|
factioninc/snmp-unity-agent
|
3525dc0fac60d1c784dcdd7c41693544bcbef843
|
[
"Apache-2.0"
] | 1
|
2019-10-03T21:09:17.000Z
|
2019-10-03T21:09:17.000Z
|
class PoolRaidLevels(object):
    """Scalar handler: raid levels of the Unity pool named by the index."""

    def read_get(self, name, idx_name, unity_client):
        # Delegate the lookup to the unity client for pool ``idx_name``.
        return unity_client.get_pool_raid_levels(idx_name)
class PoolRaidLevelsColumn(object):
    """Column companion: enumerates the pool indexes for the table."""

    def get_idx(self, name, idx, unity_client):
        # The index set is the list of pools known to the unity client.
        return unity_client.get_pools()
| 29.888889
| 58
| 0.747212
| 37
| 269
| 5.108108
| 0.459459
| 0.232804
| 0.116402
| 0.232804
| 0.328042
| 0.328042
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163569
| 269
| 8
| 59
| 33.625
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5b50bd49d0f319efdbee540aa7b5b1a6d4e7be51
| 67
|
py
|
Python
|
qmotor/message/__init__.py
|
yulinfeng000/qmotor
|
ad3e9eea291f5b87e09fcdd5e42f1eb13d752565
|
[
"MIT"
] | null | null | null |
qmotor/message/__init__.py
|
yulinfeng000/qmotor
|
ad3e9eea291f5b87e09fcdd5e42f1eb13d752565
|
[
"MIT"
] | null | null | null |
qmotor/message/__init__.py
|
yulinfeng000/qmotor
|
ad3e9eea291f5b87e09fcdd5e42f1eb13d752565
|
[
"MIT"
] | null | null | null |
from .common import *
from .matcher import *
from .builder import *
| 22.333333
| 22
| 0.746269
| 9
| 67
| 5.555556
| 0.555556
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 67
| 3
| 23
| 22.333333
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b6b00405a78b2f835ea67732a929c0b3ad3172b
| 42
|
py
|
Python
|
thespian/__init__.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | 210
|
2015-08-31T19:39:34.000Z
|
2020-01-10T08:07:48.000Z
|
thespian/__init__.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | 85
|
2017-04-08T19:28:42.000Z
|
2022-03-23T15:25:49.000Z
|
thespian/__init__.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | 47
|
2015-09-01T19:24:20.000Z
|
2020-01-02T20:03:05.000Z
|
"Actor System for concurrent execution."
| 14
| 40
| 0.785714
| 5
| 42
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 42
| 2
| 41
| 21
| 0.916667
| 0.904762
| 0
| 0
| 0
| 0
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5b7b5fbf0c4582cdbe06b98044a020bcfc799c08
| 154
|
py
|
Python
|
daiquiri/exceptions.py
|
aipescience/daiquiri-admin
|
63d93bbbee0617178deb2e329e294c447d44137c
|
[
"Apache-2.0"
] | null | null | null |
daiquiri/exceptions.py
|
aipescience/daiquiri-admin
|
63d93bbbee0617178deb2e329e294c447d44137c
|
[
"Apache-2.0"
] | null | null | null |
daiquiri/exceptions.py
|
aipescience/daiquiri-admin
|
63d93bbbee0617178deb2e329e294c447d44137c
|
[
"Apache-2.0"
] | null | null | null |
class DaiquiriException(Exception):
    """Exception that carries a collection of error details.

    The errors object is stored verbatim on the instance and rendered
    via its repr() when the exception is converted to a string.
    """

    def __init__(self, errors):
        self.errors = errors

    def __str__(self):
        return "%r" % (self.errors,)
| 22
| 35
| 0.662338
| 17
| 154
| 5.529412
| 0.588235
| 0.319149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24026
| 154
| 6
| 36
| 25.666667
| 0.803419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5b9949671e74aec3cfcb695b8712b55493527378
| 13,248
|
py
|
Python
|
update_evolution_markets.py
|
youth4ever/bittrex
|
9d6b70f81b74df7377d39414408eb0d8a4ed8b15
|
[
"MIT"
] | null | null | null |
update_evolution_markets.py
|
youth4ever/bittrex
|
9d6b70f81b74df7377d39414408eb0d8a4ed8b15
|
[
"MIT"
] | null | null | null |
update_evolution_markets.py
|
youth4ever/bittrex
|
9d6b70f81b74df7377d39414408eb0d8a4ed8b15
|
[
"MIT"
] | null | null | null |
# Created by Bogdan Trif on 25-01-2018 , 7:10 PM.
# Script prologue: connect to the MySQL database and log how long the
# start-up work took before the per-market _evolution updates below.
import time
import pymysql
from conf.db_conn import *
from includes.DB_functions import *
from includes.app_functions import *
t1 = time.time()
debug_level = 0
date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime() )
# HOST/USER/PASSWD/DB1 come from conf.db_conn via the star import above.
connection = pymysql.connect( host= HOST, port=3306, user=USER, passwd=PASSWD, db=DB1 )
cur = connection.cursor()
update_evolution_table_log = 'tmp/update_evolution_table.log'
valid_MARKETS = 'tmp/valid_Markets.txt'
t2 = time.time()
# Append a timestamped timing entry (in milliseconds) to the run log.
file_append_with_text(update_evolution_table_log, str(date)+ ' update _evolution MARKETS read_file_line_by_line took ' + str(round((t2-t1)*1000,2)) + ' ms' )
def update_evolution_market(market):
    """Refresh the `_evolution` summary row for one per-market table.

    Reads the most recent row of the table named *market*, then either
    inserts an initial `_evolution` row for that market (if none exists)
    or updates its price-change / buy-vs-sell-change columns for every
    period in ``Times``/``Periods`` that fits inside the market's history.

    Relies on module-level globals supplied by the star imports above:
    ``cur`` (DB cursor), ``QUERIES``, ``Times`` and ``Periods``.

    Fixes over the original:
      * the bare ``except:`` (which silently swallowed every error,
        including KeyboardInterrupt) now catches ``Exception`` and
        reports which market failed and why;
      * the result of the final UPDATE no longer shadows this function's
        own name;
      * the insert parameter is passed as a real one-tuple.
    """
    prep_MarketQuery = 'SELECT id, last_price, buy_vs_sell, volume FROM `market_name` ORDER BY ID DESC LIMIT 1;'
    prep_MarketResult = cur.execute(prep_MarketQuery.replace('market_name', market))
    if prep_MarketResult == 1 :
        row = cur.fetchone()
        last_id = row[0]
        last_price = float(row[1])
        last_BuyVsSell = float( row[2] )
        last_Vol = float(row[3])

        #########   Updating / CREATING the rows MARKETS to the _evolution table   #################
        prep_evolutionSelect = "select market, pr_ch_1m from _evolution where market = 'market_name' ;"
        try :
            prep_evolutionResult = cur.execute(prep_evolutionSelect.replace('market_name', market))
            ##### If there is no row in the evolution table, insert one.
            if prep_evolutionResult == 0 :
                createRowQuery = QUERIES['evolution_initial_insert']
                createRowResult = cur.execute( createRowQuery , (market, ) )
            ##### If the row exists, UPDATE the existing values for this market.
            elif prep_evolutionResult == 1 :
                first_market_query = cur.execute(QUERIES['first_market_row'].replace('market_name' , market) )
                market_first_id = cur.fetchone()[0]
                last_market_query = cur.execute(QUERIES['last_market_row'].replace('market_name' , market) )
                market_last_id = cur.fetchone()[0]
                # Row ids appear to be used as a time axis: the id span doubles
                # as the market's available history length -- TODO confirm.
                max_time_diff = market_last_id - market_first_id

                ##### Compute the change for every period that fits in the history.
                # NOTE(review): assumes Times contains a sentinel larger than any
                # possible history; otherwise i can run past the end of Times.
                # Such an IndexError is now reported instead of swallowed.
                i = 0
                PR_CH, BvS_CH = [], []
                upd_ch_Str = ''
                while Times[i] <= max_time_diff :
                    value_behind = market_last_id - Times[i]
                    price_and_buy_vs_sell_select = cur.execute( QUERIES['custom_price_and_buy_vs_sell_market'].replace('market_name', market), value_behind )
                    rowa = cur.fetchone()
                    previous_price, previous_buy_vs_sell = float(rowa[0]), float(rowa[1])
                    # Percentage change vs. the value Times[i] rows ago.
                    priceChange = round( (( float(last_price) - previous_price ) / previous_price) * 100 , 2)
                    buy_vs_sellChange = round( (( float(last_BuyVsSell) - previous_buy_vs_sell ) / previous_buy_vs_sell) * 100 , 2)
                    PR_CH.append(priceChange)
                    BvS_CH.append(buy_vs_sellChange)
                    upd_ch_Str += 'pr_ch_'+str(Periods[i])+' = %s,'
                    i += 1

                ##### Build ONE UPDATE covering all computed periods, e.g.:
                ## UPDATE `_evolution` SET pr_ch_2m=1.23, pr_ch_5m=-0.78 WHERE market='BTC-1ST';
                upd_BvS_Str = upd_ch_Str.replace('pr_ch_', 'BvsS_ch_')
                upd_BvS_Str = upd_BvS_Str.rstrip(',')
                update_Str = upd_ch_Str + upd_BvS_Str
                update_evolution_Query = "UPDATE `_evolution` SET " + update_Str + " WHERE market='market_name';"
                VALUES_tuple = tuple(PR_CH) + tuple(BvS_CH)
                update_result = cur.execute(update_evolution_Query.replace('market_name', market) , VALUES_tuple )
        except Exception as exc :
            # Report the failure instead of silently discarding it; the caller
            # loop then continues with the next market.
            print('update_evolution_market failed for', market, ':', exc)
# Iterate over the whitelist of markets (one table name per line) and
# refresh each market's `_evolution` summary row.
with open(valid_MARKETS, 'r') as f :
    for cnt, line in enumerate(f) :
        market = line.rstrip('\n')  # strip the trailing newline only
        if debug_level >= 1 : print(str(cnt+1),'. ', market )
        update_evolution_market(market)
#
#
# prep_MarketQuery = 'SELECT id, last_price, buy_vs_sell, volume FROM `market_name` ORDER BY ID DESC LIMIT 1;'
# prep_MarketResult = cur.execute(prep_MarketQuery.replace('market_name', market))
# if prep_MarketResult == 1 :
# row = cur.fetchone()
# if debug_level >= 2 : print('row : ', row )
#
# last_id = row[0]
# last_price = float(row[1])
# last_BuyVsSell = float( row[2] )
# last_Vol = float(row[3])
#
#
#
# ######### Updating / CREATING the rows MARKETS to the _evolution table #################
# prep_evolutionSelect = "select market, pr_ch_1m from _evolution where market = 'market_name' ;"
# if debug_level >= 3 : print('prep_evolutionSelect --->', prep_evolutionSelect )
# try :
# prep_evolutionResult = cur.execute(prep_evolutionSelect.replace('market_name', market))
# if debug_level >= 3 : print('prep_evolutionResult --> ', prep_evolutionResult )
#
# ##### If there is no row in evolution table, insert one
# if prep_evolutionResult == 0 :
# if debug_level >= 3 :
# print('there is no such row in the evolution table. THIS WILL BE CREATED RIGHT NOW !' )
# createRowQuery = QUERIES['evolution_initial_insert']
# if debug_level >= 3 :
# print(' createRowQuery --> ', createRowQuery)
# createRowResult = cur.execute( createRowQuery , (market ) )
# if debug_level >= 3 :
# print('=== createRowResult ', createRowResult)
# connection.commit()
#
#
#
# ##### If there is a table UPDATE the existing values in the row market :
# elif prep_evolutionResult == 1 :
# if debug_level >= 3 :
# print('We will update all the values up to the corresponding times')
#
# first_market_query = cur.execute(QUERIES['first_market_row'].replace('market_name' , market) )
# market_first_id = cur.fetchone()[0]
# if debug_level >= 3 : print('market_first_id = ', market_first_id )
#
# last_market_query = cur.execute(QUERIES['last_market_row'].replace('market_name' , market) )
# market_last_id = cur.fetchone()[0]
# if debug_level >= 3 : print('market_last_id = ', market_last_id )
#
# max_time_diff = market_last_id - market_first_id
#
# ##### DO THE UPDATES :
# i = 0
# PR_CH, BvS_CH = [], []
# upd_ch_Str = ''
# while Times[i] <= max_time_diff :
# if debug_level >= 3 : print(Times[i],' ', Periods[i])
# value_behind = market_last_id-Times[i]
# if debug_level >= 3 : print('value of the price / BuyvsSell behind = ', value_behind )
#
# #### FIRST SELECT OLD VALUES : ###############
# price_and_buy_vs_sell_select = cur.execute( QUERIES['custom_price_and_buy_vs_sell_market'].replace('market_name', market), value_behind )
# ######## PRICE & BUY_VS_SELL CHANGE #########
# if debug_level >= 4 : print(' price_and_buy_vs_sell_select ----------->', price_and_buy_vs_sell_select )
# rowa = cur.fetchone()
# previous_price, previous_buy_vs_sell = float(rowa[0]), float(rowa[1])
# if debug_level >= 3 : print('previous_price, previous_buy_vs_sell = ', previous_price, previous_buy_vs_sell )
#
# priceChange = round( (( float(last_price) - previous_price ) / previous_price) * 100 , 2)
# buy_vs_sellChange = round( (( float(last_BuyVsSell) - previous_buy_vs_sell ) / previous_buy_vs_sell) * 100 , 2)
#
# if debug_level >= 3 :
# print('previous_price = ', previous_price, ' the most recent price = ', last_price , ' priceChange = ' , priceChange )
# print('previous_buy_vs_sell = ', previous_buy_vs_sell, ' the most recent buy_vs_sell = ', last_BuyVsSell , ' buy_vs_sellChange = ' , buy_vs_sellChange )
#
# PR_CH.append(priceChange)
# BvS_CH.append(buy_vs_sellChange)
# upd_ch_Str += 'pr_ch_'+str(Periods[i])+' = %s,'
#
#
# i+=1
#
# if debug_level >= 3 : print('PR_CH = ', PR_CH ,' BvS_CH = ', BvS_CH)
#
# ########## CONSTRUCTING THE UPDATE QUERY , WE WILL UPDATE ALL Fields ONLY ONCE ! Like in example bellow :
# ## UPDATE `_evolution` SET pr_ch_2m=1.23, pr_ch_5m=-0.78 WHERE market='BTC-1ST';
#
# upd_BvS_Str = upd_ch_Str.replace('pr_ch_', 'BvsS_ch_')
# upd_BvS_Str = upd_BvS_Str.rstrip(',')
# update_Str = upd_ch_Str + upd_BvS_Str
# # print('update_Str : ', update_Str, '\n')
# update_evolution_Query = "UPDATE `_evolution` SET " + update_Str + " WHERE market='market_name';"
#
# if debug_level >= 3 : print('update_evolution_Query : ', update_evolution_Query)
# VALUES_tuple = tuple(PR_CH) + tuple(BvS_CH)
#
# if debug_level >= 3 : print('VALUES tuple :::: ', VALUES_tuple )
# update_evolution_market = cur.execute(update_evolution_Query.replace('market_name', market) , VALUES_tuple )
#
#
#
#
#
#
# except :
# if debug_level >= 3 :
# print('To complete afterwards')
# Commit all inserts/updates performed above in a single transaction.
connection.commit()
t3 = time.time()
# Log how long the whole update pass took (ms), then a separator line.
file_append_with_text(update_evolution_table_log, str(date)+ ' _evolution table UPDATE took ' + str(round((t3-t2)*1000,2)) + ' ms' )
file_append_with_text(update_evolution_table_log, '---'*30 )
connection.close()
| 51.548638
| 190
| 0.530571
| 1,441
| 13,248
| 4.563498
| 0.129771
| 0.057786
| 0.067518
| 0.06326
| 0.909519
| 0.909519
| 0.909519
| 0.909519
| 0.903285
| 0.903285
| 0
| 0.016137
| 0.354469
| 13,248
| 256
| 191
| 51.75
| 0.752806
| 0.569897
| 0
| 0
| 0
| 0
| 0.111978
| 0.028412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014925
| false
| 0.014925
| 0.074627
| 0
| 0.089552
| 0.029851
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5bac72a378e411fc11d06f0cd1fa9428a642f70a
| 66
|
py
|
Python
|
acq4/analysis/tools/poissonScore/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 47
|
2015-01-05T16:18:10.000Z
|
2022-03-16T13:09:30.000Z
|
acq4/analysis/tools/poissonScore/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 48
|
2015-04-19T16:51:41.000Z
|
2022-03-31T14:48:16.000Z
|
acq4/analysis/tools/poissonScore/__init__.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 32
|
2015-01-15T14:11:49.000Z
|
2021-07-15T13:44:52.000Z
|
from __future__ import print_function
from .poissonScore import *
| 22
| 37
| 0.848485
| 8
| 66
| 6.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 38
| 33
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
f348a58e791f17d20237fb104fe99ee9fe0419f5
| 170
|
py
|
Python
|
Important-Backend/pluscodedecoder.py
|
taaaahahaha/SIH-Ignite
|
91cc84f34825fc5c0cf8c1ef2e2698b1020a1ae1
|
[
"MIT"
] | null | null | null |
Important-Backend/pluscodedecoder.py
|
taaaahahaha/SIH-Ignite
|
91cc84f34825fc5c0cf8c1ef2e2698b1020a1ae1
|
[
"MIT"
] | null | null | null |
Important-Backend/pluscodedecoder.py
|
taaaahahaha/SIH-Ignite
|
91cc84f34825fc5c0cf8c1ef2e2698b1020a1ae1
|
[
"MIT"
] | null | null | null |
# Decode an Open Location Code ("Plus Code") string into coordinates.
# Requires the third-party `openlocationcode` package.
import openlocationcode
# The return value is discarded -- presumably this file is a usage
# scratchpad. Per the sample output below, decode() appears to yield
# [lat_lo, lng_lo, lat_hi, lng_hi, lat_center, lng_center, code_length]
# -- TODO confirm against the openlocationcode documentation.
openlocationcode.decode('8F6CCQCW+2F')
# [34.42, 8.796125, 34.420125, 8.79625, 34.4200625, 8.7961875, 10]
# openlocationcode.decode('7JFJ4V47+PH')
| 42.5
| 66
| 0.764706
| 23
| 170
| 5.652174
| 0.695652
| 0.338462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.322785
| 0.070588
| 170
| 4
| 67
| 42.5
| 0.5
| 0.605882
| 0
| 0
| 0
| 0
| 0.169231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f35132a119852ddb054ec81ac665a34cd0fdf0e9
| 167
|
py
|
Python
|
pyllusion/psychopy/__init__.py
|
RebeccaHirst/Pyllusion
|
9944076e38bced0eabb49c607482b71809150bdb
|
[
"MIT"
] | null | null | null |
pyllusion/psychopy/__init__.py
|
RebeccaHirst/Pyllusion
|
9944076e38bced0eabb49c607482b71809150bdb
|
[
"MIT"
] | null | null | null |
pyllusion/psychopy/__init__.py
|
RebeccaHirst/Pyllusion
|
9944076e38bced0eabb49c607482b71809150bdb
|
[
"MIT"
] | null | null | null |
"""
Pyllusion submodule.
"""
from .psychopy_line import psychopy_line
from .psychopy_circles import psychopy_circle
from .psychopy_rectangle import psychopy_rectangle
| 23.857143
| 50
| 0.844311
| 20
| 167
| 6.75
| 0.45
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095808
| 167
| 6
| 51
| 27.833333
| 0.89404
| 0.11976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f35722427c11be156da9bffcb3b983df53536ac3
| 324
|
py
|
Python
|
asserts/sourse/test_base_app.py
|
gresm/pygame_february
|
e6a59ba3a98d1d12bbbf13e30e676fd52721abb6
|
[
"Apache-2.0"
] | 2
|
2021-04-07T12:47:47.000Z
|
2021-04-08T10:40:02.000Z
|
asserts/sourse/test_base_app.py
|
gresm/pygame_february
|
e6a59ba3a98d1d12bbbf13e30e676fd52721abb6
|
[
"Apache-2.0"
] | null | null | null |
asserts/sourse/test_base_app.py
|
gresm/pygame_february
|
e6a59ba3a98d1d12bbbf13e30e676fd52721abb6
|
[
"Apache-2.0"
] | 1
|
2021-04-04T14:57:41.000Z
|
2021-04-04T14:57:41.000Z
|
from unittest import TestCase
class TestBaseApp(TestCase):
    """Placeholder unit-test suite for a ``BaseApp`` class.

    Every case below is an empty stub (``pass``): the suite enumerates
    the methods that still need coverage without asserting anything yet.
    """

    def test_run(self):
        # Stub -- no assertions implemented yet.
        pass

    def test_loop(self):
        # Stub -- no assertions implemented yet.
        pass

    def test_handle_input(self):
        # Stub -- no assertions implemented yet.
        pass

    def test_check_events(self):
        # Stub -- no assertions implemented yet.
        pass

    def test_handle_event(self):
        # Stub -- no assertions implemented yet.
        pass

    def test_key_pressed(self):
        # Stub -- no assertions implemented yet.
        pass
| 14.727273
| 32
| 0.617284
| 41
| 324
| 4.634146
| 0.463415
| 0.221053
| 0.289474
| 0.394737
| 0.221053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.317901
| 324
| 21
| 33
| 15.428571
| 0.859729
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.428571
| 0.071429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f377c94809000c9e51ccf4374552bce10f13dffb
| 70
|
py
|
Python
|
example/test_all.py
|
waiteperspectives/Morelia
|
e6e6180d688c6bdf538f6b43a97755c76329646a
|
[
"MIT"
] | 17
|
2015-11-18T02:49:47.000Z
|
2019-12-22T08:46:26.000Z
|
example/test_all.py
|
waiteperspectives/Morelia
|
e6e6180d688c6bdf538f6b43a97755c76329646a
|
[
"MIT"
] | 230
|
2015-04-28T16:48:20.000Z
|
2022-03-25T13:01:07.000Z
|
example/test_all.py
|
waiteperspectives/Morelia
|
e6e6180d688c6bdf538f6b43a97755c76329646a
|
[
"MIT"
] | 8
|
2015-04-16T07:45:35.000Z
|
2019-06-20T17:09:49.000Z
|
from test_acceptance import * # noqa
from test_unit import * # noqa
| 23.333333
| 37
| 0.742857
| 10
| 70
| 5
| 0.6
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 70
| 2
| 38
| 35
| 0.892857
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f3c988bf69533063a1949c4c935878ce6cc8183c
| 80
|
py
|
Python
|
bert/etl/security.py
|
jbcurtin/bert
|
956e1647b590ac13b679579231b085895778d807
|
[
"MIT"
] | 2
|
2019-08-28T21:39:50.000Z
|
2019-12-17T10:53:28.000Z
|
bert/etl/security.py
|
jbcurtin/bert
|
956e1647b590ac13b679579231b085895778d807
|
[
"MIT"
] | 19
|
2019-09-04T21:19:12.000Z
|
2021-03-28T22:10:32.000Z
|
bert/etl/security.py
|
jbcurtin/bert
|
956e1647b590ac13b679579231b085895778d807
|
[
"MIT"
] | 1
|
2019-08-28T21:39:53.000Z
|
2019-08-28T21:39:53.000Z
|
import enum
class AccessLevel(enum.Enum):
    """Closed set of access levels supported by the ETL layer."""

    # Canned ACL string granting world read access.
    PUBLIC_READ: str = 'public-read'
| 16
| 36
| 0.725
| 11
| 80
| 5.181818
| 0.636364
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1625
| 80
| 4
| 37
| 20
| 0.850746
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
344feefc2f2389cc0134f488b74560650e430245
| 17,895
|
py
|
Python
|
donkeycar/parts/ml/keras.py
|
alanwells/donkey
|
8ea6d255266923fa819e04bc7c6b37800184bb22
|
[
"MIT"
] | null | null | null |
donkeycar/parts/ml/keras.py
|
alanwells/donkey
|
8ea6d255266923fa819e04bc7c6b37800184bb22
|
[
"MIT"
] | null | null | null |
donkeycar/parts/ml/keras.py
|
alanwells/donkey
|
8ea6d255266923fa819e04bc7c6b37800184bb22
|
[
"MIT"
] | 1
|
2018-11-07T12:06:09.000Z
|
2018-11-07T12:06:09.000Z
|
'''
pilots.py
Methods to create, use, save and load pilots. Pilots
contain the highlevel logic used to determine the angle
and throttle of a vehicle. Pilots can include one or more
models to help direct the vehicles motion.
'''
import os
import numpy as np
import keras
from ... import utils
import donkeycar as dk
from donkeycar import utils
import pdb
class KerasPilot():
    """Common base for Keras-backed pilots: model loading and training."""

    def load(self, model_path):
        """Replace self.model with the Keras model saved at *model_path*."""
        self.model = keras.models.load_model(model_path)

    def train(self, train_gen, val_gen,
              saved_model_path, epochs=500, steps=100, train_split=0.8):
        """
        train_gen: generator that yields an array of images an array of
        """
        # Persist the weights to saved_model_path each time val_loss improves.
        checkpoint = keras.callbacks.ModelCheckpoint(saved_model_path,
                                                     monitor='val_loss',
                                                     verbose=1,
                                                     save_best_only=True,
                                                     mode='min')
        # Abort training once val_loss stops improving for 5 epochs.
        stopper = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                min_delta=.0005,
                                                patience=5,
                                                verbose=1,
                                                mode='auto')
        history = self.model.fit_generator(
            train_gen,
            steps_per_epoch=steps,
            epochs=epochs,
            verbose=1,
            validation_data=val_gen,
            callbacks=[checkpoint, stopper],
            validation_steps=steps * (1.0 - train_split))
        return history
class KerasCategorical(KerasPilot):
    """Pilot with a binned (categorical) steering head and a throttle head."""

    def __init__(self, model=None, *args, **kwargs):
        super(KerasCategorical, self).__init__(*args, **kwargs)
        # Use the supplied model when given, otherwise build the default net.
        self.model = model if model else default_categorical()

    def run(self, img_arr):
        """Predict (steering_angle, throttle) for a single image."""
        batch = img_arr.reshape((1,) + img_arr.shape)  # add a batch axis
        angle_binned, throttle = self.model.predict(batch)
        # Collapse the binned softmax output back to a scalar angle.
        return utils.linear_unbin(angle_binned), throttle[0][0]
class KerasCategoricalCropped(KerasPilot):
    """Like KerasCategorical, but crops the top 60 pixel rows first."""

    def __init__(self, model=None, *args, **kwargs):
        super(KerasCategoricalCropped, self).__init__(*args, **kwargs)
        self.model = model if model else categorical_cropped()

    def run(self, img_arr):
        """Predict (steering_angle, throttle) from a cropped single image."""
        cropped = img_arr[60:, :]  # drop the top 60 pixel rows
        batch = cropped.reshape((1,) + cropped.shape)  # add a batch axis
        angle_binned, throttle = self.model.predict(batch)
        # Collapse the binned softmax output back to a scalar angle.
        return utils.linear_unbin(angle_binned), throttle[0][0]
class KerasLinear(KerasPilot):
    """Pilot with separate linear steering and throttle outputs."""

    def __init__(self, model=None, num_outputs=None, *args, **kwargs):
        super(KerasLinear, self).__init__(*args, **kwargs)
        if model:
            self.model = model
        elif num_outputs is not None:
            # Build an n-headed linear network when an output count is given.
            self.model = default_n_linear(num_outputs)
        else:
            self.model = default_linear()

    def run(self, img_arr):
        """Predict (steering, throttle) for a single image."""
        batch = img_arr.reshape((1,) + img_arr.shape)  # add a batch axis
        predictions = self.model.predict(batch)
        # predictions[0] -> steering head, predictions[1] -> throttle head.
        return predictions[0][0][0], predictions[1][0][0]
class CommaLinear(KerasPilot):
    """Pilot using the comma.ai steering network (steering only)."""

    def __init__(self, model=None, num_outputs=None, *args, **kwargs):
        # NOTE(review): num_outputs is accepted but never used -- kept only
        # for signature compatibility with KerasLinear.
        super(CommaLinear, self).__init__(*args, **kwargs)
        self.model = model if model else comma_linear()

    def run(self, img_arr):
        """Predict steering for a single image; throttle is a constant."""
        batch = img_arr.reshape((1,) + img_arr.shape)  # add a batch axis
        prediction = self.model.predict(batch)
        steering = prediction[0][0]
        # comma model is steering only, so throttle is hardcoded.
        return steering, 0.5
def default_categorical():
    """Build the default pilot net for 120x160 RGB input: five conv layers,
    a 100/50 dense stack, a 15-bin softmax steering head and a relu
    throttle head, compiled with adam.

    Fix: the old import list pulled in `merge`, `MaxPooling2D`, `Reshape`,
    `BatchNormalization` and `Activation`, none of which were used -- and
    `merge` no longer exists in Keras 2, so the import itself failed
    there. Only the layers actually used are imported now.
    """
    from keras.layers import Input, Dense, Convolution2D, Dropout, Flatten
    from keras.models import Model

    # Input shape matches the camera.py resolution, RGB.
    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in
    # Conv stack: 24/32/64 features; stride-2 layers shrink the spatial size.
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)  # flatten to 1D for the dense head
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)  # drop 10% of the neurons to limit overfitting
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)
    # Steering: 15-way softmax over angle bins.
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)
    # Throttle: single non-negative continuous output.
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy',
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .001})
    return model
def default_linear():
    """Build a pilot net with two continuous regression heads (angle and
    throttle) for 120x160 RGB input, compiled with adam and equally
    weighted MSE losses.

    Fix: drop the unused `merge`, `MaxPooling2D`, `Reshape`,
    `BatchNormalization` and `Activation` imports -- `merge` does not
    exist in Keras 2, so the old import line failed there.
    """
    from keras.layers import Input, Dense, Convolution2D, Dropout, Flatten
    from keras.models import Model

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    # Conv stack: 24/32/64 features; stride-2 layers shrink the spatial size.
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)  # flatten to 1D for the dense head
    x = Dense(100, activation='linear')(x)
    x = Dropout(.1)(x)  # drop 10% of the neurons to limit overfitting
    x = Dense(50, activation='linear')(x)
    x = Dropout(.1)(x)
    # Continuous steering angle output.
    angle_out = Dense(1, activation='linear', name='angle_out')(x)
    # Continuous throttle output.
    throttle_out = Dense(1, activation='linear', name='throttle_out')(x)
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error',
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})
    return model
def default_relu():
    """Like default_linear but with relu heads/dense layers, rmsprop and
    heavily angle-weighted MSE losses.

    Fix: drop the unused `merge`, `MaxPooling2D`, `Reshape`,
    `BatchNormalization` and `Activation` imports -- `merge` does not
    exist in Keras 2, so the old import line failed there.
    """
    from keras.layers import Input, Dense, Convolution2D, Dropout, Flatten
    from keras.models import Model

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    # Conv stack: 24/32/64 features; stride-2 layers shrink the spatial size.
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)  # flatten to 1D for the dense head
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)  # drop 10% of the neurons to limit overfitting
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)
    # Non-negative steering angle output.
    angle_out = Dense(1, activation='relu', name='angle_out')(x)
    # Non-negative throttle output.
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='rmsprop',
                  loss={'angle_out': 'mean_squared_error',
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .001})
    return model
def nvidia_categorical():
    """Build the NVIDIA-style net: batch-normalized input, five conv
    layers, a 1164/100/50 dense stack, a 15-bin softmax steering head and
    a relu throttle head, compiled with adam.

    Fix: import only the layers actually used (`merge`, `MaxPooling2D`
    and `Reshape` were unused, and `merge` no longer exists in Keras 2,
    so the old import failed there). `BatchNormalization` IS used here.
    """
    from keras.layers import Input, Dense, Convolution2D, BatchNormalization, Dropout, Flatten
    from keras.models import Model

    # Input shape matches the camera.py resolution, RGB.
    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in
    x = BatchNormalization(epsilon=0.001, axis=1)(x)
    # Conv stack: 24/32/64 features; stride-2 layers shrink the spatial size.
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)  # flatten to 1D for the dense head
    x = Dense(1164, activation='relu')(x)
    x = Dropout(.1)(x)  # drop 10% of the neurons to limit overfitting
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)
    # Steering: 15-way softmax over angle bins.
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)
    # Throttle: single non-negative continuous output.
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy',
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': 0.5})
    return model
def categorical_cropped():
    """Build the categorical pilot net for pre-cropped 60x160 RGB input
    (the caller crops the top 60 rows off the 120-row camera frame).
    Same heads/losses as default_categorical but with "same" padding.

    Fix: the old import list pulled in `merge`, `MaxPooling2D`, `Reshape`,
    `BatchNormalization` and `Activation`, none of which were used -- and
    `merge` no longer exists in Keras 2, so the import itself failed
    there. Only the layers actually used are imported now.
    """
    from keras.layers import Input, Dense, Convolution2D, Dropout, Flatten
    from keras.models import Model

    # Input is the bottom 60 rows of the camera frame, RGB.
    img_in = Input(shape=(60, 160, 3), name='img_in')
    x = img_in
    # Conv stack with "same" padding to preserve the reduced input height.
    x = Convolution2D(24, (5,5), strides=(2,2), padding="same", activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), padding="same", activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), padding="same", activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), padding="same", activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), padding="same", activation='relu')(x)
    x = Flatten(name='flattened')(x)  # flatten to 1D for the dense head
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)  # drop 10% of the neurons to limit overfitting
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)
    # Steering: 15-way softmax over angle bins.
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)
    # Throttle: single non-negative continuous output.
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy',
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .001})
    return model
def default_n_linear(num_outputs):
    """Build a pilot net with *num_outputs* linear heads named
    'n_outputs<i>'. Input is cropped in-graph (top 60 pixel rows removed)
    and normalized to [-1, 1] before the conv stack; compiled adam/mse.

    Fix: drop the unused `merge`, `MaxPooling2D`, `Reshape`,
    `BatchNormalization` and `Activation` imports -- `merge` does not
    exist in Keras 2, so the old import line failed there.
    """
    from keras.layers import Input, Dense, Convolution2D, Dropout, Flatten, Cropping2D, Lambda
    from keras.models import Model

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    x = Cropping2D(cropping=((60,0), (0,0)))(x)  # trim 60 pixels off top
    x = Lambda(lambda x: x/127.5 - 1.)(x)  # normalize and re-center
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)  # flatten to 1D for the dense head
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)  # drop 10% of the neurons to limit overfitting
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)
    # One independent linear head per requested output.
    outputs = []
    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(x))
    model = Model(inputs=[img_in], outputs=outputs)
    model.compile(optimizer='adam',
                  loss='mse')
    return model
def comma_linear():
    """Build the comma.ai steering net: in-graph pixel normalization,
    three conv layers with ELU activations, a 512-unit dense layer and a
    single linear steering output, compiled with adam/mse."""
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten, Lambda, ELU
    from keras.layers.convolutional import Convolution2D

    ch, row, col = 3, 120, 160  # camera format

    # Layers in forward order; the Lambda maps pixels from [0, 255] to [-1, 1].
    stack = [
        Lambda(lambda x: x/127.5 - 1.,
               input_shape=(row, col, ch),
               output_shape=(row, col, ch)),
        Convolution2D(16, (8, 8), strides=(4, 4), padding="same"),
        ELU(),
        Convolution2D(32, (5, 5), strides=(2, 2), padding="same"),
        ELU(),
        Convolution2D(64, (5, 5), strides=(2, 2), padding="same"),
        Flatten(),
        Dropout(.2),
        ELU(),
        Dense(512),
        Dropout(.5),
        ELU(),
        Dense(1),
    ]

    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.compile(optimizer="adam", loss="mse")
    return model
| 47.34127
| 236
| 0.601565
| 2,266
| 17,895
| 4.654899
| 0.120035
| 0.022279
| 0.058305
| 0.030338
| 0.821767
| 0.811244
| 0.805176
| 0.796265
| 0.784225
| 0.755783
| 0
| 0.047217
| 0.278067
| 17,895
| 378
| 237
| 47.34127
| 0.769255
| 0.224364
| 0
| 0.614504
| 0
| 0
| 0.06838
| 0.005226
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064886
| false
| 0
| 0.129771
| 0
| 0.259542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34761e254f2cd259d2e56d742bfbf5bd6555b33d
| 33
|
py
|
Python
|
twads/__init__.py
|
jdgillespie91/twitter-ads
|
9485fd2a196e47b012bb1209634a76b89732dfca
|
[
"MIT"
] | null | null | null |
twads/__init__.py
|
jdgillespie91/twitter-ads
|
9485fd2a196e47b012bb1209634a76b89732dfca
|
[
"MIT"
] | null | null | null |
twads/__init__.py
|
jdgillespie91/twitter-ads
|
9485fd2a196e47b012bb1209634a76b89732dfca
|
[
"MIT"
] | null | null | null |
from .main import Client # noqa
| 16.5
| 32
| 0.727273
| 5
| 33
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 33
| 1
| 33
| 33
| 0.923077
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
caeae76e7965a56ae806a43727a368925d059f6e
| 234
|
py
|
Python
|
code/node/models/__init__.py
|
Intelligent-Systems-Phystech/2022-Project-109
|
14ff4c844afd817e37c803af87c95fabc505fb10
|
[
"MIT"
] | null | null | null |
code/node/models/__init__.py
|
Intelligent-Systems-Phystech/2022-Project-109
|
14ff4c844afd817e37c803af87c95fabc505fb10
|
[
"MIT"
] | null | null | null |
code/node/models/__init__.py
|
Intelligent-Systems-Phystech/2022-Project-109
|
14ff4c844afd817e37c803af87c95fabc505fb10
|
[
"MIT"
] | null | null | null |
from .cde import NeuralCde # noqa: F401
from .eegnet import EegNet # noqa: F401
#from .erpcov_ts_lr import erpcov_ts_lr # noqa: F401
from .lmu import LmuLstm # noqa: F401
from .ode_lstm import OdeLstm # noqa: F401
#import numpy
| 33.428571
| 53
| 0.74359
| 37
| 234
| 4.567568
| 0.432432
| 0.236686
| 0.284024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078534
| 0.183761
| 234
| 6
| 54
| 39
| 0.806283
| 0.452991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b190b2bab3f735dbd056881bf99ddc6a573d34a
| 26
|
py
|
Python
|
segmentation_models_pytorch/unet_3D/__init__.py
|
bartoszptak/segmentation_models.pytorch
|
7f443f7ae39a58841adce1f3a7973d6f4bcd052a
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/unet_3D/__init__.py
|
bartoszptak/segmentation_models.pytorch
|
7f443f7ae39a58841adce1f3a7973d6f4bcd052a
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/unet_3D/__init__.py
|
bartoszptak/segmentation_models.pytorch
|
7f443f7ae39a58841adce1f3a7973d6f4bcd052a
|
[
"MIT"
] | null | null | null |
from .model import Unet_3D
| 26
| 26
| 0.846154
| 5
| 26
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.115385
| 26
| 1
| 26
| 26
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b6f8f779eb0f6eae6d667985ba6793ea818f112
| 141
|
py
|
Python
|
semana_03/django/produtos/views.py
|
luispaulojr/cursoPython
|
24aaa73741508986d7f747be8f3822889be81025
|
[
"MIT"
] | null | null | null |
semana_03/django/produtos/views.py
|
luispaulojr/cursoPython
|
24aaa73741508986d7f747be8f3822889be81025
|
[
"MIT"
] | null | null | null |
semana_03/django/produtos/views.py
|
luispaulojr/cursoPython
|
24aaa73741508986d7f747be8f3822889be81025
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def metodoProdutos(request):
    """Render the product index page.

    :param request: the incoming Django ``HttpRequest``.
    :return: ``HttpResponse`` produced by rendering the template.
    """
    # NOTE(review): template path is 'produtox/...' while the app directory is
    # named 'produtos' — looks like a typo; confirm against the templates dir
    # before changing, since the string is load-bearing at runtime.
    return render(request, 'produtox/index.html')
| 28.2
| 49
| 0.780142
| 18
| 141
| 6.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 141
| 5
| 49
| 28.2
| 0.894309
| 0.163121
| 0
| 0
| 0
| 0
| 0.162393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1b95aa9e811447e64c036c9cc15164619556c939
| 165
|
py
|
Python
|
retrobiocat_web/__init__.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 9
|
2020-12-01T16:33:02.000Z
|
2022-01-19T20:02:42.000Z
|
retrobiocat_web/__init__.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 4
|
2020-10-02T14:38:32.000Z
|
2021-08-02T09:23:58.000Z
|
retrobiocat_web/__init__.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 6
|
2021-01-14T07:48:36.000Z
|
2022-03-20T17:34:27.000Z
|
import datetime
from retrobiocat_web.version import version
__version__ = version
from retrobiocat_web.retro.generation.network_generation.network import Network
| 20.625
| 79
| 0.866667
| 20
| 165
| 6.8
| 0.45
| 0.220588
| 0.264706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09697
| 165
| 7
| 80
| 23.571429
| 0.912752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59f80c094a13e43dfd8dea3fd281c8c0ba20d523
| 197
|
py
|
Python
|
tccli/services/bm/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/bm/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/bm/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from tccli.services.bm.bm_client import register_arg
from tccli.services.bm.bm_client import get_actions_info
from tccli.services.bm.bm_client import AVAILABLE_VERSION_LIST
| 39.4
| 62
| 0.822335
| 32
| 197
| 4.8125
| 0.53125
| 0.175325
| 0.331169
| 0.37013
| 0.642857
| 0.642857
| 0.642857
| 0
| 0
| 0
| 0
| 0.005556
| 0.086294
| 197
| 4
| 63
| 49.25
| 0.85
| 0.106599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
941ac4e51bbf4b819863cb15fa660c554db5d2d8
| 35
|
py
|
Python
|
rpyg/__init__.py
|
mimiflynn/pyrpg
|
f61904c0ccd336ec1ab2a408d7a9add6a5c433b5
|
[
"MIT"
] | null | null | null |
rpyg/__init__.py
|
mimiflynn/pyrpg
|
f61904c0ccd336ec1ab2a408d7a9add6a5c433b5
|
[
"MIT"
] | 3
|
2018-05-31T16:15:03.000Z
|
2018-06-05T19:02:34.000Z
|
rpyg/__init__.py
|
mimiflynn/pyrpg
|
f61904c0ccd336ec1ab2a408d7a9add6a5c433b5
|
[
"MIT"
] | null | null | null |
from rpyg.engine.core import Rpyg
| 11.666667
| 33
| 0.8
| 6
| 35
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 2
| 34
| 17.5
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
942183719279e597d730d2d4344702f539100b6a
| 4,983
|
py
|
Python
|
blaze/chrome/devtools.py
|
muralisr/blaze
|
e0d183af441cfe63ba1346cd0d6d8ac76ff494ca
|
[
"MIT"
] | 5
|
2020-12-16T03:13:59.000Z
|
2022-03-06T07:16:39.000Z
|
blaze/chrome/devtools.py
|
muralisr/blaze
|
e0d183af441cfe63ba1346cd0d6d8ac76ff494ca
|
[
"MIT"
] | 9
|
2020-09-25T23:25:59.000Z
|
2022-03-11T23:45:14.000Z
|
blaze/chrome/devtools.py
|
muralisr/blaze
|
e0d183af441cfe63ba1346cd0d6d8ac76ff494ca
|
[
"MIT"
] | 3
|
2019-10-16T21:22:07.000Z
|
2020-07-21T13:38:22.000Z
|
""" This module implements methods interacting with Chrome DevTools """
import json
import os
import subprocess
import sys
import tempfile
from typing import Optional
from blaze.action.policy import Policy
from blaze.config.client import ClientEnvironment
from blaze.config.config import Config
from blaze.logger import logger
from blaze.mahimahi import MahiMahiConfig
from .har import har_from_json, Har
def capture_har_in_replay_server(
    url: str,
    config: Config,
    client_env: ClientEnvironment,
    policy: Optional[Policy] = None,
    cache_time: Optional[int] = None,
    user_data_dir: Optional[str] = None,
    extract_critical_requests: Optional[bool] = False,
) -> Har:
    """
    Spawn a headless-chrome HAR capturer inside a Mahimahi replay shell and
    return the parsed HAR for *url*. The page must have been recorded into
    ``config.env_config.replay_dir`` before this is called.

    :raises ValueError: if no replay directory is configured.
    """
    log = logger.with_namespace("capture_har_in_replay_server")
    if not config.env_config or not config.env_config.replay_dir:
        raise ValueError("replay_dir must be specified")

    effective_policy = policy or Policy.from_dict({})
    mm_config = MahiMahiConfig(
        config=config, policy=effective_policy, client_environment=client_env
    )

    with tempfile.TemporaryDirectory() as share_dir:
        policy_path = os.path.join(share_dir, "policy.json")
        har_path = os.path.join(share_dir, "har.json")
        trace_path = os.path.join(share_dir, "trace_file")

        # Materialize the push policy for the capturer to read.
        with open(policy_path, "w") as out:
            log.debug("writing push policy file", policy_file=policy_path)
            out.write(json.dumps(effective_policy.as_dict))

        # Materialize the link trace consumed by the replay shell.
        with open(trace_path, "w") as out:
            log.debug("writing trace file", trace_file=trace_path)
            out.write(mm_config.formatted_trace_file)

        # configure the HAR capturer
        capture_cmd = mm_config.har_capture_cmd(
            share_dir=share_dir,
            har_output_file_name="har.json",
            policy_file_name="policy.json",
            link_trace_file_name="trace_file",
            capture_url=url,
            cache_time=cache_time,
            user_data_dir=user_data_dir,
            extract_critical_requests=extract_critical_requests,
        )

        # spawn the HAR capturer process (bounded at 300s)
        log.debug("spawning har capturer", url=url, cmd=capture_cmd)
        proc = subprocess.run(
            capture_cmd, stdout=sys.stderr, stderr=sys.stderr, timeout=300
        )
        proc.check_returncode()

        with open(har_path, "r") as result:
            return har_from_json(result.read())
def capture_si_in_replay_server(
    url: str,
    config: Config,
    client_env: ClientEnvironment,
    policy: Optional[Policy] = None,
    cache_time: Optional[int] = None,
    user_data_dir: Optional[str] = None,
    extract_critical_requests: Optional[bool] = False,
) -> float:
    """
    capture_si spawns a headless chrome instance inside a replay shell using
    the specified Mahimahi config and returns the speed index (SI) measured
    while loading the given URL. The webpage needs to have been recorded
    before calling this method.

    :param url: page to load inside the replay shell.
    :param config: global Config; ``config.env_config.replay_dir`` must be set.
    :param client_env: client environment used to shape the Mahimahi shell.
    :param policy: optional push/preload Policy; defaults to an empty policy.
    :param cache_time: optional cache lifetime forwarded to the capturer.
    :param user_data_dir: optional chrome user-data directory.
    :param extract_critical_requests: forwarded critical-request flag.
    :return: the captured speed index as a float.
    :raises ValueError: if no replay_dir is configured.
    :raises subprocess.CalledProcessError: if the capturer exits non-zero.
    :raises subprocess.TimeoutExpired: if the capture exceeds 300 seconds.
    """
    log = logger.with_namespace("capture_si_in_replay_server")
    if not config.env_config or not config.env_config.replay_dir:
        raise ValueError("replay_dir must be specified")

    policy = policy or Policy.from_dict({})
    mahimahi_config = MahiMahiConfig(config=config, policy=policy, client_environment=client_env)

    with tempfile.TemporaryDirectory() as temp_dir:
        policy_file = os.path.join(temp_dir, "policy.json")
        output_file = os.path.join(temp_dir, "si.json")
        trace_file = os.path.join(temp_dir, "trace_file")

        with open(policy_file, "w") as f:
            log.debug("writing push policy file", policy_file=policy_file)
            f.write(json.dumps(policy.as_dict))

        with open(trace_file, "w") as f:
            log.debug("writing trace file", trace_file=trace_file)
            f.write(mahimahi_config.formatted_trace_file)

        # configure the SI (speed index) capturer
        cmd = mahimahi_config.si_capture_cmd(
            share_dir=temp_dir,
            si_output_file_name="si.json",
            policy_file_name="policy.json",
            link_trace_file_name="trace_file",
            capture_url=url,
            cache_time=cache_time,
            user_data_dir=user_data_dir,
            extract_critical_requests=extract_critical_requests,
        )

        # spawn the SI capturer process; bound it with the same 300s timeout
        # used by capture_har_in_replay_server so a hung capture cannot block
        # forever (the original call had no timeout).
        log.debug("spawning SI capturer", url=url, cmd=cmd)
        si_capture_proc = subprocess.run(
            cmd, stdout=sys.stderr, stderr=sys.stderr, timeout=300
        )
        si_capture_proc.check_returncode()

        with open(output_file, "r") as f:
            return float(f.read())
| 38.929688
| 97
| 0.692153
| 675
| 4,983
| 4.887407
| 0.195556
| 0.049106
| 0.020006
| 0.025462
| 0.842073
| 0.80388
| 0.788724
| 0.788724
| 0.788724
| 0.788724
| 0
| 0.000777
| 0.224764
| 4,983
| 127
| 98
| 39.23622
| 0.853223
| 0.168372
| 0
| 0.622222
| 0
| 0
| 0.087255
| 0.01348
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.133333
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
945c4fb23a2432481d6edb642d0cdf70926877c8
| 171
|
py
|
Python
|
python/8kyu/pick_a_set_of_first_elements.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | 3
|
2021-06-08T01:57:13.000Z
|
2021-06-26T10:52:47.000Z
|
python/8kyu/pick_a_set_of_first_elements.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | null | null | null |
python/8kyu/pick_a_set_of_first_elements.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | 2
|
2021-06-10T21:20:13.000Z
|
2021-06-30T10:13:26.000Z
|
"""Kata url: https://www.codewars.com/kata/572b77262bedd351e9000076."""
from typing import List
def first(seq: List[int], n: int = 1) -> List[int]:
    """Return a list of the first ``n`` items of ``seq``.

    Defaults to a single-element prefix; an ``n`` larger than ``len(seq)``
    simply yields a copy of the whole list.
    """
    prefix = seq[:n]
    return prefix
| 19
| 71
| 0.666667
| 25
| 171
| 4.56
| 0.72
| 0.122807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131034
| 0.152047
| 171
| 8
| 72
| 21.375
| 0.655172
| 0.380117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
946bdf8246dab017eabdf2063218f70f474666b2
| 27
|
py
|
Python
|
brian2/codegen/runtime/GSLweave_rt/__init__.py
|
rgerkin/brian2
|
34761a58b0d4c2275194e648449419b3dd73286b
|
[
"BSD-2-Clause"
] | 2
|
2020-03-20T13:30:19.000Z
|
2020-03-20T13:30:57.000Z
|
brian2/codegen/runtime/GSLweave_rt/__init__.py
|
rgerkin/brian2
|
34761a58b0d4c2275194e648449419b3dd73286b
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/codegen/runtime/GSLweave_rt/__init__.py
|
rgerkin/brian2
|
34761a58b0d4c2275194e648449419b3dd73286b
|
[
"BSD-2-Clause"
] | null | null | null |
from .GSLweave_rt import *
| 13.5
| 26
| 0.777778
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9480e31abe67aa3ee41e23dd8246e47620979bef
| 130
|
py
|
Python
|
testing/ejemplo/config.py
|
rauljrz/curso_python
|
f241125f0a51c39899f5d59537dca9e7b4c53489
|
[
"Apache-2.0"
] | null | null | null |
testing/ejemplo/config.py
|
rauljrz/curso_python
|
f241125f0a51c39899f5d59537dca9e7b4c53489
|
[
"Apache-2.0"
] | null | null | null |
testing/ejemplo/config.py
|
rauljrz/curso_python
|
f241125f0a51c39899f5d59537dca9e7b4c53489
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
def carpeta_ssh():
    """Return the path of the user's ssh configuration directory (``~/.ssh``)."""
    home = Path.home()
    return home / ".ssh"
| 26
| 55
| 0.707692
| 18
| 130
| 5.055556
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161538
| 130
| 5
| 56
| 26
| 0.834862
| 0.361538
| 0
| 0
| 0
| 0
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
849b41cadb555aabaf7db3f7f686222807c3c6f5
| 21
|
py
|
Python
|
cadence/langs/__init__.py
|
tomaarsen/cadence
|
c6878c6224aac6c26efc9dbd59a1de551df85e8f
|
[
"MIT"
] | 12
|
2021-03-29T17:41:26.000Z
|
2022-02-03T07:30:05.000Z
|
cadence/langs/__init__.py
|
tomaarsen/cadence
|
c6878c6224aac6c26efc9dbd59a1de551df85e8f
|
[
"MIT"
] | 1
|
2021-03-30T11:12:44.000Z
|
2021-03-30T14:56:50.000Z
|
cadence/langs/__init__.py
|
tomaarsen/cadence
|
c6878c6224aac6c26efc9dbd59a1de551df85e8f
|
[
"MIT"
] | 3
|
2021-03-29T18:52:27.000Z
|
2022-01-15T06:50:21.000Z
|
from .langs import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
84c878711ca48396e667dba1ae087abf0eea4bba
| 12,022
|
py
|
Python
|
src/cm/migrations/0001_initial.py
|
didorothy/Consulting
|
f8943f5275c75a1ca1a1bf2b36743e47ba921625
|
[
"BSD-3-Clause"
] | null | null | null |
src/cm/migrations/0001_initial.py
|
didorothy/Consulting
|
f8943f5275c75a1ca1a1bf2b36743e47ba921625
|
[
"BSD-3-Clause"
] | null | null | null |
src/cm/migrations/0001_initial.py
|
didorothy/Consulting
|
f8943f5275c75a1ca1a1bf2b36743e47ba921625
|
[
"BSD-3-Clause"
] | 1
|
2022-03-06T21:16:52.000Z
|
2022-03-06T21:16:52.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the 'cm' app.

    Creates the seven core tables (customer, contact, project, time entry,
    invoice, invoice line item, payment) and records the frozen ORM state
    South uses to reconstruct the models in later migrations.
    """

    def forwards(self, orm):
        """Apply the migration: create every 'cm' table and emit create signals."""
        # Adding model 'Customer'
        db.create_table('cm_customer', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('address1', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
            ('address2', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('state', self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2, null=True, blank=True)),
            ('postal_code', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('cm', ['Customer'])

        # Adding model 'Contact'
        db.create_table('cm_contact', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('customer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Customer'])),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('cm', ['Contact'])

        # Adding model 'Project'
        db.create_table('cm_project', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('customer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Customer'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('estimate', self.gf('django.db.models.fields.DecimalField')(max_digits=20, decimal_places=4)),
            ('rate', self.gf('django.db.models.fields.DecimalField')(default='45.0', max_digits=20, decimal_places=2)),
            ('complete', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('cm', ['Project'])

        # Adding model 'TimeEntry'
        db.create_table('cm_timeentry', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Project'])),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('start', self.gf('django.db.models.fields.DateTimeField')()),
            ('stop', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('invoice_line_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.InvoiceLineItem'], null=True, blank=True)),
        ))
        db.send_create_signal('cm', ['TimeEntry'])

        # Adding model 'Invoice'
        db.create_table('cm_invoice', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invoice_number', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('customer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Customer'])),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Project'], null=True, blank=True)),
            ('invoice_date', self.gf('django.db.models.fields.DateField')()),
            ('invoice_total', self.gf('django.db.models.fields.DecimalField')(max_digits=18, decimal_places=2)),
            ('paid', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('cm', ['Invoice'])

        # Adding model 'InvoiceLineItem'
        db.create_table('cm_invoicelineitem', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invoice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Invoice'])),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=18, decimal_places=2)),
            ('price', self.gf('django.db.models.fields.DecimalField')(max_digits=18, decimal_places=2)),
        ))
        db.send_create_signal('cm', ['InvoiceLineItem'])

        # Adding model 'Payment'
        db.create_table('cm_payment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invoice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cm.Invoice'])),
            ('payment_type', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('transaction_number', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('date_received', self.gf('django.db.models.fields.DateField')()),
            ('pay_date', self.gf('django.db.models.fields.DateField')()),
            ('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=18, decimal_places=2)),
        ))
        db.send_create_signal('cm', ['Payment'])

    def backwards(self, orm):
        """Reverse the migration: drop every table created in forwards()."""
        # Deleting model 'Customer'
        db.delete_table('cm_customer')

        # Deleting model 'Contact'
        db.delete_table('cm_contact')

        # Deleting model 'Project'
        db.delete_table('cm_project')

        # Deleting model 'TimeEntry'
        db.delete_table('cm_timeentry')

        # Deleting model 'Invoice'
        db.delete_table('cm_invoice')

        # Deleting model 'InvoiceLineItem'
        db.delete_table('cm_invoicelineitem')

        # Deleting model 'Payment'
        db.delete_table('cm_payment')

    # South's frozen snapshot of the app's models at this migration point
    # (auto-generated; keep token-stable so later migrations diff cleanly).
    models = {
        'cm.contact': {
            'Meta': {'object_name': 'Contact'},
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Customer']"}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        'cm.customer': {
            'Meta': {'object_name': 'Customer'},
            'address1': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
            'address2': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
        },
        'cm.invoice': {
            'Meta': {'ordering': "('-invoice_date',)", 'object_name': 'Invoice'},
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Customer']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice_date': ('django.db.models.fields.DateField', [], {}),
            'invoice_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'invoice_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'}),
            'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Project']", 'null': 'True', 'blank': 'True'})
        },
        'cm.invoicelineitem': {
            'Meta': {'ordering': "('invoice', 'id')", 'object_name': 'InvoiceLineItem'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Invoice']"}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'})
        },
        'cm.payment': {
            'Meta': {'ordering': "('pay_date',)", 'object_name': 'Payment'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'}),
            'date_received': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Invoice']"}),
            'pay_date': ('django.db.models.fields.DateField', [], {}),
            'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'transaction_number': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'cm.project': {
            'Meta': {'object_name': 'Project'},
            'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Customer']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'estimate': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '4'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rate': ('django.db.models.fields.DecimalField', [], {'default': "'45.0'", 'max_digits': '20', 'decimal_places': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'cm.timeentry': {
            'Meta': {'object_name': 'TimeEntry'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice_line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.InvoiceLineItem']", 'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Project']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'stop': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    # Apps whose models are fully described by this migration.
    complete_apps = ['cm']
| 62.614583
| 154
| 0.58526
| 1,342
| 12,022
| 5.132638
| 0.084948
| 0.11266
| 0.195122
| 0.278746
| 0.799797
| 0.791376
| 0.77381
| 0.744048
| 0.686702
| 0.644599
| 0
| 0.014449
| 0.194061
| 12,022
| 192
| 155
| 62.614583
| 0.69646
| 0.031526
| 0
| 0.286624
| 0
| 0
| 0.489508
| 0.301256
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012739
| false
| 0
| 0.025478
| 0
| 0.057325
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1708893273dfec468ba0752033c479f85d9c81aa
| 19
|
py
|
Python
|
neo_backend/api/views/__init__.py
|
what-digital/blockselfie-bacckend
|
231ffd92b1fc570a58ec5058d7f04d065c3866ba
|
[
"MIT"
] | 2
|
2018-11-04T17:51:32.000Z
|
2018-11-05T14:21:41.000Z
|
neo_backend/api/views/__init__.py
|
what-digital/blockselfie-backend
|
231ffd92b1fc570a58ec5058d7f04d065c3866ba
|
[
"MIT"
] | null | null | null |
neo_backend/api/views/__init__.py
|
what-digital/blockselfie-backend
|
231ffd92b1fc570a58ec5058d7f04d065c3866ba
|
[
"MIT"
] | null | null | null |
from .sc import *
| 6.333333
| 17
| 0.631579
| 3
| 19
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 19
| 2
| 18
| 9.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ca63a0e350d108aa4c218f1721388238b1b8c7fa
| 11,035
|
py
|
Python
|
iridauploader/tests/parsers/directory/test_sample_parser.py
|
COMBAT-SARS-COV-2/irida-uploader
|
b9d04d187d6a5a9fdcaef5b27135965ffac99db0
|
[
"Apache-2.0"
] | 7
|
2019-01-25T16:56:11.000Z
|
2021-01-12T15:32:08.000Z
|
iridauploader/tests/parsers/directory/test_sample_parser.py
|
COMBAT-SARS-COV-2/irida-uploader
|
b9d04d187d6a5a9fdcaef5b27135965ffac99db0
|
[
"Apache-2.0"
] | 80
|
2019-01-29T14:54:26.000Z
|
2022-03-25T18:51:51.000Z
|
iridauploader/tests/parsers/directory/test_sample_parser.py
|
COMBAT-SARS-COV-2/irida-uploader
|
b9d04d187d6a5a9fdcaef5b27135965ffac99db0
|
[
"Apache-2.0"
] | 9
|
2019-03-14T09:58:05.000Z
|
2022-01-06T20:14:45.000Z
|
import unittest
from os import path
from unittest.mock import patch
import iridauploader.parsers.directory.sample_parser as sample_parser
from iridauploader.parsers.exceptions import SampleSheetError
import iridauploader.model as model
# Directory containing this test module; used to locate fixture files.
path_to_module = path.abspath(path.dirname(__file__))
# dirname can yield '' in unusual invocation contexts; fall back to the
# current directory so path.join keeps producing usable paths.
# (`if not s` is the idiomatic emptiness test, replacing `len(s) == 0`.)
if not path_to_module:
    path_to_module = '.'
class TestVerifySampleSheetFileNamesInFileList(unittest.TestCase):
    """Tests for verify_sample_sheet_file_names_in_file_list.

    Verifies the parser accepts a run-directory listing that matches the
    sample sheet and raises SampleSheetError when expected files are missing.
    """

    def setUp(self):
        print("\nStarting " + self.__module__ + ": " + self._testMethodName)

    def test_valid(self):
        # Every file the sheet names is present in the listing: must not raise.
        sheet = path.join(path_to_module, "fake_dir_data", "SampleList_simple.csv")
        listing = ["file_1.fastq.gz", "file_2.fastq.gz"]
        sample_parser.verify_sample_sheet_file_names_in_file_list(sheet, listing)

    def test_file_names_do_not_match_paired_end(self):
        # Reverse read file is missing from the listing: must raise.
        sheet = path.join(path_to_module, "fake_dir_data", "SampleList_simple.csv")
        listing = ["file_1.fastq.gz", "file_a.fastq.gz"]
        with self.assertRaises(SampleSheetError):
            sample_parser.verify_sample_sheet_file_names_in_file_list(sheet, listing)

    def test_file_names_do_not_match_single_end(self):
        # Single-end sheet whose file is absent from the listing: must raise.
        sheet = path.join(path_to_module, "fake_dir_data", "no_reverse.csv")
        listing = ["file_a.fastq.gz"]
        with self.assertRaises(SampleSheetError):
            sample_parser.verify_sample_sheet_file_names_in_file_list(sheet, listing)
class TestBuildSampleListFromSampleSheetWithAbsPath(unittest.TestCase):
    """
    test parsing the list of samples from a sample sheet

    Covers the happy path (relative and absolute file names) and the two
    failure modes where forward/reverse reads named in the sheet are missing.
    """

    def setUp(self):
        print("\nStarting " + self.__module__ + ": " + self._testMethodName)

    def test_valid(self):
        """
        Given a valid sample sheet, parse correctly
        :return:
        """
        sheet_file = path.join(path_to_module, "fake_dir_data",
                               "SampleList_simple.csv")
        file_path_1 = path.join(path_to_module,
                                "fake_dir_data", "file_1.fastq.gz")
        file_path_2 = path.join(path_to_module,
                                "fake_dir_data", "file_2.fastq.gz")

        res = sample_parser.build_sample_list_from_sample_sheet_with_abs_path(sheet_file)

        # Check we have 1 sample
        self.assertEqual(len(res), 1)
        # Check if data is correct
        self.assertEqual(res[0].sample_name, "my-sample-1")
        self.assertEqual(res[0].get_uploadable_dict()["sample_project"], "75")
        self.assertEqual(res[0].get_uploadable_dict()["File_Forward"], "file_1.fastq.gz")
        self.assertEqual(res[0].get_uploadable_dict()["File_Reverse"], "file_2.fastq.gz")
        self.assertEqual(res[0].sequence_file.file_list[0], file_path_1)
        self.assertEqual(res[0].sequence_file.file_list[1], file_path_2)

    @patch("iridauploader.parsers.directory.sample_parser._parse_samples")
    def test_valid_full_file_path(self, mock_parse_samples):
        """
        Given a valid sample sheet with full file paths, parse correctly
        :return:
        """
        sheet_file = path.join(path_to_module, "fake_dir_data",
                               "SampleList_simple.csv")
        file_path_1 = path.join(path_to_module,
                                "fake_dir_data", "file_1.fastq.gz")
        file_path_2 = path.join(path_to_module,
                                "fake_dir_data", "file_2.fastq.gz")
        sample_list = [
            model.Sample(
                sample_name='my-sample-1',
                description="",
                sample_number=0,
                # NOTE(review): this literal is a *set* of 2-tuples, not a
                # dict, despite the samp_dict name — confirm model.Sample
                # accepts this shape intentionally.
                samp_dict={
                    ('sample_project', '75'),
                    ('File_Forward', path.abspath(file_path_1)),
                    ('File_Reverse', path.abspath(file_path_2))
                }
            )
        ]
        mock_parse_samples.return_value = sample_list

        res = sample_parser.build_sample_list_from_sample_sheet_with_abs_path(sheet_file)
        mock_parse_samples.assert_called_with(sheet_file)

        # Check we have 1 sample
        self.assertEqual(len(res), 1)
        # Check if data is correct
        self.assertEqual(res[0].sample_name, "my-sample-1")
        self.assertEqual(res[0].get_uploadable_dict()["sample_project"], "75")
        self.assertEqual(res[0].get_uploadable_dict()["File_Forward"], path.abspath(file_path_1))
        self.assertEqual(res[0].get_uploadable_dict()["File_Reverse"], path.abspath(file_path_2))
        self.assertEqual(res[0].sequence_file.file_list[0], file_path_1)
        self.assertEqual(res[0].sequence_file.file_list[1], file_path_2)

    def test_no_forward_read(self):
        """
        No Valid files were found with names given in sample sheet
        :return:
        """
        directory = path.join(path_to_module, "fake_dir_data")
        file_path = path.join(directory, "list_no_forward.csv")
        with self.assertRaises(SampleSheetError):
            # Unused `res =` binding removed (flake8 F841): the call is
            # expected to raise, so the assignment could never be used.
            sample_parser.build_sample_list_from_sample_sheet_with_abs_path(file_path)

    def test_no_reverse_read(self):
        """
        The file list in the sample sheet is invalid
        :return:
        """
        directory = path.join(path_to_module, "fake_dir_data")
        file_path = path.join(directory, "list_no_reverse.csv")
        with self.assertRaises(SampleSheetError):
            # Unused `res =` binding removed (flake8 F841), as above.
            sample_parser.build_sample_list_from_sample_sheet_with_abs_path(file_path)
class TestBuildSampleListFromSampleSheetNoVerify(unittest.TestCase):
    """
    Tests for sample_parser.build_sample_list_from_sample_sheet_no_verify.
    These sample lists are built without verifying that the files exist on
    disk, but the positive cases are still checked here.
    """

    def setUp(self):
        print("\nStarting " + self.__module__ + ": " + self._testMethodName)

    def test_valid(self):
        """A valid sheet parses into one sample carrying the bare file names."""
        sheet_file = path.join(path_to_module, "fake_dir_data",
                               "SampleList_simple.csv")
        forward_name = "file_1.fastq.gz"
        reverse_name = "file_2.fastq.gz"

        samples = sample_parser.build_sample_list_from_sample_sheet_no_verify(sheet_file)

        # Exactly one sample is expected from this sheet.
        self.assertEqual(len(samples), 1)
        sample = samples[0]
        self.assertEqual(sample.sample_name, "my-sample-1")
        self.assertEqual(sample.get_uploadable_dict()["sample_project"], "75")
        self.assertEqual(sample.get_uploadable_dict()["File_Forward"], forward_name)
        self.assertEqual(sample.get_uploadable_dict()["File_Reverse"], reverse_name)
        self.assertEqual(sample.sequence_file.file_list[0], forward_name)
        self.assertEqual(sample.sequence_file.file_list[1], reverse_name)
class TestOnlySingleOrPairedInSampleList(unittest.TestCase):
    """
    Tests for sample_parser.only_single_or_paired_in_sample_list: whether a
    sample list is exclusively single-end or exclusively paired-end.
    """

    def setUp(self):
        print("\nStarting " + self.__module__ + ": " + self._testMethodName)

    def _result_for_sheet(self, sheet_name):
        # Build a sample list from the named fixture sheet and classify it.
        sheet_path = path.join(path_to_module, "fake_dir_data", sheet_name)
        samples = sample_parser.build_sample_list_from_sample_sheet_with_abs_path(sheet_path)
        return sample_parser.only_single_or_paired_in_sample_list(samples)

    def test_mixed_paired_and_single_reads(self):
        """A sheet mixing paired-end and single-end entries is not homogeneous."""
        self.assertFalse(self._result_for_sheet("list_mixed.csv"))

    def test_only_single_end_reads(self):
        """A sheet with only single-end entries is homogeneous."""
        self.assertTrue(self._result_for_sheet("no_reverse.csv"))

    def test_only_paired_end_reads(self):
        """A sheet with only paired-end entries is homogeneous."""
        self.assertTrue(self._result_for_sheet("SampleList.csv"))
class TestParseSamples(unittest.TestCase):
    """
    Tests for sample_parser._parse_samples: validity or invalidity of the
    samples parsed from a sheet file.
    """

    def setUp(self):
        print("\nStarting " + self.__module__ + ": " + self._testMethodName)

    def test_valid(self):
        """
        Given a valid sample sheet, parse correctly
        :return:
        """
        sheet_file = path.join(path_to_module, "fake_dir_data",
                               "SampleList_simple.csv")
        file_name_1 = "file_1.fastq.gz"
        file_name_2 = "file_2.fastq.gz"

        res = sample_parser._parse_samples(sheet_file)

        # Check we have 1 sample
        self.assertEqual(len(res), 1)
        # Check if data is correct
        self.assertEqual(res[0].sample_name, "my-sample-1")
        self.assertEqual(res[0].get_uploadable_dict()["sample_project"], "75")
        self.assertEqual(res[0].get_uploadable_dict()["File_Forward"], file_name_1)
        self.assertEqual(res[0].get_uploadable_dict()["File_Reverse"], file_name_2)

    def test_no_forward_read(self):
        """
        Parsing the no_forward.csv fixture must raise SampleSheetError.
        :return:
        """
        directory = path.join(path_to_module, "fake_dir_data")
        file_path = path.join(directory, "no_forward.csv")

        with self.assertRaises(SampleSheetError):
            # Return value deliberately discarded: the call is expected to raise.
            sample_parser._parse_samples(file_path)

    def test_no_reverse_read(self):
        """
        A sheet without a reverse read still parses; File_Reverse is empty.
        :return:
        """
        directory = path.join(path_to_module, "fake_dir_data")
        file_path = path.join(directory, "no_reverse.csv")

        res = sample_parser._parse_samples(file_path)

        # This should have an empty file reverse
        self.assertEqual(res[0]["File_Reverse"], "")

    def test_no_reverse_read_with_comma(self):
        """
        A sheet line with a trailing comma and no reverse read still parses;
        File_Reverse is empty.
        :return:
        """
        directory = path.join(path_to_module, "fake_dir_data")
        file_path = path.join(directory, "no_reverse_with_comma.csv")

        res = sample_parser._parse_samples(file_path)

        # This should have an empty file reverse
        self.assertEqual(res[0]["File_Reverse"], "")

    def test_no_read_files_in_list(self):
        """
        Parsing the no_read_files.csv fixture must raise SampleSheetError.
        :return:
        """
        directory = path.join(path_to_module, "fake_dir_data")
        file_path = path.join(directory, "no_read_files.csv")

        with self.assertRaises(SampleSheetError):
            # Return value deliberately discarded: the call is expected to raise.
            sample_parser._parse_samples(file_path)
| 37.920962
| 111
| 0.659085
| 1,421
| 11,035
| 4.751583
| 0.097115
| 0.045024
| 0.063981
| 0.067536
| 0.839307
| 0.81561
| 0.813537
| 0.794135
| 0.794135
| 0.794135
| 0
| 0.010741
| 0.240689
| 11,035
| 290
| 112
| 38.051724
| 0.795083
| 0.113276
| 0
| 0.639241
| 0
| 0
| 0.125773
| 0.02249
| 0
| 0
| 0
| 0
| 0.240506
| 1
| 0.132911
| false
| 0
| 0.037975
| 0
| 0.202532
| 0.031646
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ca888d85992f8704708691da1b48ab497161d8ce
| 128
|
py
|
Python
|
wallpaper/monitor/generic.py
|
gabbpuy/wallpaper
|
ec5fdeeb92d1a6285e1ac2ec3b0164929b7ea305
|
[
"BSD-2-Clause"
] | 1
|
2021-06-01T21:24:42.000Z
|
2021-06-01T21:24:42.000Z
|
wallpaper/monitor/generic.py
|
gabbpuy/wallpaper
|
ec5fdeeb92d1a6285e1ac2ec3b0164929b7ea305
|
[
"BSD-2-Clause"
] | null | null | null |
wallpaper/monitor/generic.py
|
gabbpuy/wallpaper
|
ec5fdeeb92d1a6285e1ac2ec3b0164929b7ea305
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from .monitor import Monitor
def get_monitors(size):
    """Return a one-element tuple containing a single generic monitor of the given size."""
    generic = Monitor("Generic", size, size, 1, 0)
    return (generic,)
| 18.285714
| 48
| 0.648438
| 18
| 128
| 4.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.179688
| 128
| 6
| 49
| 21.333333
| 0.752381
| 0.164063
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ca992bf92d5d1b8735d16f79e78a5bc00a1c2a61
| 114
|
py
|
Python
|
db_conn/query/sc_soccer/__init__.py
|
szkkteam/db_conn
|
06cf0bc98b92ce542e7475afdd33eb5eb9f27645
|
[
"MIT"
] | null | null | null |
db_conn/query/sc_soccer/__init__.py
|
szkkteam/db_conn
|
06cf0bc98b92ce542e7475afdd33eb5eb9f27645
|
[
"MIT"
] | null | null | null |
db_conn/query/sc_soccer/__init__.py
|
szkkteam/db_conn
|
06cf0bc98b92ce542e7475afdd33eb5eb9f27645
|
[
"MIT"
] | null | null | null |
from .tables import *
from .insert import *
from .update import *
from .select import *
from .complex import *
| 22.8
| 22
| 0.710526
| 15
| 114
| 5.4
| 0.466667
| 0.493827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201754
| 114
| 5
| 23
| 22.8
| 0.89011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04a44ded7b0346c2d6cca43cf66bbd67e1c16e35
| 16,404
|
py
|
Python
|
tests/unit/dataactvalidator/test_b9_object_class_program_activity.py
|
COEJKnight/one
|
6a5f8cd9468ab368019eb2597821b7837f74d9e2
|
[
"CC0-1.0"
] | 1
|
2018-10-29T12:54:44.000Z
|
2018-10-29T12:54:44.000Z
|
tests/unit/dataactvalidator/test_b9_object_class_program_activity.py
|
COEJKnight/one
|
6a5f8cd9468ab368019eb2597821b7837f74d9e2
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_b9_object_class_program_activity.py
|
COEJKnight/one
|
6a5f8cd9468ab368019eb2597821b7837f74d9e2
|
[
"CC0-1.0"
] | null | null | null |
from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactcore.factories.domain import ProgramActivityFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'b9_object_class_program_activity'
def test_column_headers(database):
    """The validation query must expose all expected identifying columns."""
    required = {'row_number', 'agency_identifier', 'main_account_code',
                'program_activity_name', 'program_activity_code'}
    reported = set(query_columns(_FILE, database))
    assert required.issubset(reported)
def test_success(database):
    """ Testing valid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular
    A-11. """
    rows = [
        ObjectClassProgramActivityFactory(row_number=number, agency_identifier='test', main_account_code='test',
                                          program_activity_name='test', program_activity_code='test')
        for number in (1, 2)
    ]
    activity = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                      account_number='test', program_activity_name='test',
                                      program_activity_code='test')
    assert number_of_errors(_FILE, database, models=rows + [activity]) == 0
def test_success_fiscal_year(database):
    """ Testing valid name for FY that matches with budget_year"""
    row = ObjectClassProgramActivityFactory(row_number=1, submission_id='1', agency_identifier='test',
                                            main_account_code='test', program_activity_name='test',
                                            program_activity_code='test')
    # One program activity per budget year; ids are suffixed per year.
    activities = [
        ProgramActivityFactory(budget_year=year, agency_id='test' + tag, allocation_transfer_id='test' + tag,
                               account_number='test' + tag, program_activity_name='test' + tag,
                               program_activity_code='test' + tag)
        for year, tag in ((2016, ''), (2017, '2'))
    ]
    submission = SubmissionFactory(submission_id='1', reporting_fiscal_year='2017')
    assert number_of_errors(_FILE, database, models=[row] + activities, submission=submission) == 0
def test_failure_fiscal_year(database):
    """ Testing invalid name for FY, not matches with budget_year"""
    row = ObjectClassProgramActivityFactory(row_number=1, submission_id='1', agency_identifier='test4',
                                            main_account_code='test4', program_activity_name='test4',
                                            program_activity_code='test4')
    # One program activity per budget year 2016-2019; ids are suffixed per year.
    activities = [
        ProgramActivityFactory(budget_year=year, agency_id='test' + tag, allocation_transfer_id='test' + tag,
                               account_number='test' + tag, program_activity_name='test' + tag,
                               program_activity_code='test' + tag)
        for year, tag in ((2016, ''), (2017, '2'), (2018, '3'), (2019, '4'))
    ]
    submission = SubmissionFactory(submission_id='1', reporting_fiscal_year='2017')
    assert number_of_errors(_FILE, database, models=[row] + activities, submission=submission) == 1
def test_success_unknown_value(database):
    """ Testing valid Unknown/other program activity name with '0000' code """
    row = ObjectClassProgramActivityFactory(row_number=1, agency_identifier='test', main_account_code='test',
                                            program_activity_name='Unknown/Other', program_activity_code='0000')
    activity = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                      account_number='test', program_activity_name='test',
                                      program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[row, activity]) == 0
def test_success_ignore_blank_program_activity_name(database):
    """ Testing program activity name validation to ignore blanks if monetary sum is 0 """
    # Row with a blank program_activity_name and every monetary field set to 0;
    # the rule should skip name validation for such a row (0 errors expected).
    op = ObjectClassProgramActivityFactory(row_number=1, beginning_period_of_availa=2016, agency_identifier='test',
                                           main_account_code='test', program_activity_name='',
                                           program_activity_code='test',
                                           deobligations_recov_by_pro_cpe=0, gross_outlay_amount_by_pro_cpe=0,
                                           gross_outlay_amount_by_pro_fyb=0, gross_outlays_delivered_or_cpe=0,
                                           gross_outlays_delivered_or_fyb=0, gross_outlays_undelivered_cpe=0,
                                           gross_outlays_undelivered_fyb=0, obligations_delivered_orde_cpe=0,
                                           obligations_delivered_orde_fyb=0, obligations_incurred_by_pr_cpe=0,
                                           obligations_undelivered_or_cpe=0, obligations_undelivered_or_fyb=0,
                                           ussgl480100_undelivered_or_cpe=0, ussgl480100_undelivered_or_fyb=0,
                                           ussgl480200_undelivered_or_cpe=0, ussgl480200_undelivered_or_fyb=0,
                                           ussgl483100_undelivered_or_cpe=0, ussgl483200_undelivered_or_cpe=0,
                                           ussgl487100_downward_adjus_cpe=0, ussgl487200_downward_adjus_cpe=0,
                                           ussgl488100_upward_adjustm_cpe=0, ussgl488200_upward_adjustm_cpe=0,
                                           ussgl490100_delivered_orde_cpe=0, ussgl490100_delivered_orde_fyb=0,
                                           ussgl490200_delivered_orde_cpe=0, ussgl490800_authority_outl_cpe=0,
                                           ussgl490800_authority_outl_fyb=0, ussgl493100_delivered_orde_cpe=0,
                                           ussgl497100_downward_adjus_cpe=0, ussgl497200_downward_adjus_cpe=0,
                                           ussgl498100_upward_adjustm_cpe=0, ussgl498200_upward_adjustm_cpe=0)
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[op, pa]) == 0
def test_success_ignore_case(database):
    """ Testing program activity validation to ignore case """
    # Name is upper-cased on the row but lower-cased on the reference activity.
    row = ObjectClassProgramActivityFactory(row_number=1, beginning_period_of_availa=2016, agency_identifier='test',
                                            main_account_code='test', program_activity_name='TEST',
                                            program_activity_code='test')
    activity = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                      account_number='test', program_activity_name='test',
                                      program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[row, activity]) == 0
def test_failure_program_activity_name(database):
    """ Testing invalid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular
    A-11. """
    # Both rows carry a wrong name; one uses a normal code, one the special '0000'.
    rows = [
        ObjectClassProgramActivityFactory(row_number=1, agency_identifier='test', main_account_code='test',
                                          program_activity_name='test_wrong', program_activity_code=code)
        for code in ('test', '0000')
    ]
    activity = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                      account_number='test', program_activity_name='test',
                                      program_activity_code='test')
    assert number_of_errors(_FILE, database, models=rows + [activity]) == 1
def test_failure_program_activity_code(database):
    """ Testing invalid program activity code: a wrong code for a matching name, and a non-'0000' code
    paired with the special 'Unknown/Other' name. Exactly one error is expected. """
    op_1 = ObjectClassProgramActivityFactory(row_number=1, agency_identifier='test', main_account_code='test',
                                             program_activity_name='test', program_activity_code='test_wrong')
    op_2 = ObjectClassProgramActivityFactory(row_number=1, agency_identifier='test', main_account_code='test',
                                             program_activity_name='Unknown/Other', program_activity_code='123456')
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[op_1, op_2, pa]) == 1
def test_failure_empty_activity_name(database):
    """ Testing program activity name validation to not ignore blanks if monetary sum is not 0 """
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    # one monetary amount not 0
    op = ObjectClassProgramActivityFactory(row_number=1, beginning_period_of_availa=2016, agency_identifier='test',
                                           main_account_code='test', program_activity_name='',
                                           program_activity_code='test',
                                           deobligations_recov_by_pro_cpe=2, gross_outlay_amount_by_pro_cpe=0,
                                           gross_outlay_amount_by_pro_fyb=0, gross_outlays_delivered_or_cpe=0,
                                           gross_outlays_delivered_or_fyb=0, gross_outlays_undelivered_cpe=0,
                                           gross_outlays_undelivered_fyb=0, obligations_delivered_orde_cpe=0,
                                           obligations_delivered_orde_fyb=0, obligations_incurred_by_pr_cpe=0,
                                           obligations_undelivered_or_cpe=0, obligations_undelivered_or_fyb=0,
                                           ussgl480100_undelivered_or_cpe=0, ussgl480100_undelivered_or_fyb=0,
                                           ussgl480200_undelivered_or_cpe=0, ussgl480200_undelivered_or_fyb=0,
                                           ussgl483100_undelivered_or_cpe=0, ussgl483200_undelivered_or_cpe=0,
                                           ussgl487100_downward_adjus_cpe=0, ussgl487200_downward_adjus_cpe=0,
                                           ussgl488100_upward_adjustm_cpe=0, ussgl488200_upward_adjustm_cpe=0,
                                           ussgl490100_delivered_orde_cpe=0, ussgl490100_delivered_orde_fyb=0,
                                           ussgl490200_delivered_orde_cpe=0, ussgl490800_authority_outl_cpe=0,
                                           ussgl490800_authority_outl_fyb=0, ussgl493100_delivered_orde_cpe=0,
                                           ussgl497100_downward_adjus_cpe=0, ussgl497200_downward_adjus_cpe=0,
                                           ussgl498100_upward_adjustm_cpe=0, ussgl498200_upward_adjustm_cpe=0)
    assert number_of_errors(_FILE, database, models=[op, pa]) == 1
    # several monetary amounts not 0
    op = ObjectClassProgramActivityFactory(row_number=1, beginning_period_of_availa=2016, agency_identifier='test',
                                           main_account_code='test', program_activity_name='',
                                           program_activity_code='test',
                                           deobligations_recov_by_pro_cpe=2, gross_outlay_amount_by_pro_cpe=0,
                                           gross_outlay_amount_by_pro_fyb=0, gross_outlays_delivered_or_cpe=0,
                                           gross_outlays_delivered_or_fyb=0, gross_outlays_undelivered_cpe=0,
                                           gross_outlays_undelivered_fyb=0, obligations_delivered_orde_cpe=0,
                                           obligations_delivered_orde_fyb=-2, obligations_incurred_by_pr_cpe=0,
                                           obligations_undelivered_or_cpe=0, obligations_undelivered_or_fyb=0,
                                           ussgl480100_undelivered_or_cpe=0, ussgl480100_undelivered_or_fyb=0,
                                           ussgl480200_undelivered_or_cpe=0, ussgl480200_undelivered_or_fyb=-0.4,
                                           ussgl483100_undelivered_or_cpe=0, ussgl483200_undelivered_or_cpe=0,
                                           ussgl487100_downward_adjus_cpe=0.4, ussgl487200_downward_adjus_cpe=0,
                                           ussgl488100_upward_adjustm_cpe=0, ussgl488200_upward_adjustm_cpe=0,
                                           ussgl490100_delivered_orde_cpe=0, ussgl490100_delivered_orde_fyb=0,
                                           ussgl490200_delivered_orde_cpe=0, ussgl490800_authority_outl_cpe=0,
                                           ussgl490800_authority_outl_fyb=0, ussgl493100_delivered_orde_cpe=0,
                                           ussgl497100_downward_adjus_cpe=0, ussgl497200_downward_adjus_cpe=0,
                                           ussgl498100_upward_adjustm_cpe=0, ussgl498200_upward_adjustm_cpe=0)
    assert number_of_errors(_FILE, database, models=[op, pa]) == 1
    # all monetary amounts not 0
    op = ObjectClassProgramActivityFactory(row_number=1, beginning_period_of_availa=2016, agency_identifier='test',
                                           main_account_code='test', program_activity_name='',
                                           program_activity_code='test',
                                           deobligations_recov_by_pro_cpe=2, gross_outlay_amount_by_pro_cpe=100,
                                           gross_outlay_amount_by_pro_fyb=-0.00003, gross_outlays_delivered_or_cpe=10,
                                           gross_outlays_delivered_or_fyb=5, gross_outlays_undelivered_cpe=5,
                                           gross_outlays_undelivered_fyb=5, obligations_delivered_orde_cpe=5,
                                           obligations_delivered_orde_fyb=-2, obligations_incurred_by_pr_cpe=5,
                                           obligations_undelivered_or_cpe=5, obligations_undelivered_or_fyb=5,
                                           ussgl480100_undelivered_or_cpe=5, ussgl480100_undelivered_or_fyb=5,
                                           ussgl480200_undelivered_or_cpe=5, ussgl480200_undelivered_or_fyb=-0.4,
                                           ussgl483100_undelivered_or_cpe=5, ussgl483200_undelivered_or_cpe=5,
                                           ussgl487100_downward_adjus_cpe=0.4, ussgl487200_downward_adjus_cpe=5,
                                           ussgl488100_upward_adjustm_cpe=5, ussgl488200_upward_adjustm_cpe=5,
                                           ussgl490100_delivered_orde_cpe=5, ussgl490100_delivered_orde_fyb=5,
                                           ussgl490200_delivered_orde_cpe=5, ussgl490800_authority_outl_cpe=5,
                                           ussgl490800_authority_outl_fyb=5, ussgl493100_delivered_orde_cpe=5,
                                           ussgl497100_downward_adjus_cpe=5, ussgl497200_downward_adjus_cpe=5,
                                           ussgl498100_upward_adjustm_cpe=5, ussgl498200_upward_adjustm_cpe=5)
    assert number_of_errors(_FILE, database, models=[op, pa]) == 1
| 71.321739
| 119
| 0.623506
| 1,702
| 16,404
| 5.548766
| 0.091657
| 0.028801
| 0.072427
| 0.053579
| 0.846569
| 0.804638
| 0.794896
| 0.792143
| 0.792143
| 0.763342
| 0
| 0.071058
| 0.305962
| 16,404
| 229
| 120
| 71.633188
| 0.758454
| 0.04377
| 0
| 0.624204
| 0
| 0
| 0.042235
| 0.004735
| 0
| 0
| 0
| 0
| 0.076433
| 1
| 0.063694
| false
| 0
| 0.025478
| 0
| 0.089172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04a5a695a5fcabe7083d7e5122e03dfb9ec66d54
| 2,775
|
py
|
Python
|
python/problem8.py
|
arthurshmidt/euler
|
de3676364722241e7ef454b7dd47dc2692279114
|
[
"MIT"
] | null | null | null |
python/problem8.py
|
arthurshmidt/euler
|
de3676364722241e7ef454b7dd47dc2692279114
|
[
"MIT"
] | null | null | null |
python/problem8.py
|
arthurshmidt/euler
|
de3676364722241e7ef454b7dd47dc2692279114
|
[
"MIT"
] | null | null | null |
# The four adjacent digits in the 1000-digit number that have the greatest
# product are 9 × 9 × 8 × 9 = 5832.
#
# 73167176531330624919225119674426574742355349194934
# 96983520312774506326239578318016984801869478851843
# 85861560789112949495459501737958331952853208805511
# 12540698747158523863050715693290963295227443043557
# 66896648950445244523161731856403098711121722383113
# 62229893423380308135336276614282806444486645238749
# 30358907296290491560440772390713810515859307960866
# 70172427121883998797908792274921901699720888093776
# 65727333001053367881220235421809751254540594752243
# 52584907711670556013604839586446706324415722155397
# 53697817977846174064955149290862569321978468622482
# 83972241375657056057490261407972968652414535100474
# 82166370484403199890008895243450658541227588666881
# 16427171479924442928230863465674813919123162824586
# 17866458359124566529476545682848912883142607690042
# 24219022671055626321111109370544217506941658960408
# 07198403850962455444362981230987879927244284909188
# 84580156166097919133875499200524063689912560717606
# 05886116467109405077541002256983155200055935729725
# 71636269561882670428252483600823257530420752963450
#
# Find the thirteen adjacent digits in the 1000-digit number that have the greatest
# product. What is the value of this product?
def largest_adjacent_product(digits, window=13):
    """Return (product, start_index) of the `window` adjacent digits of `digits`
    with the greatest product.

    Fixes an off-by-one in the original scan: the last valid start index is
    len(digits) - window, so the range bound must be len(digits) - window + 1
    (the original `range(0, len(digits) - 13)` skipped the final window).
    """
    best_product = -1
    best_index = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i:i + window]:
            product *= int(ch)
        if product > best_product:
            best_product = product
            best_index = i
    return best_product, best_index


if __name__ == "__main__":
    num = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
    print(len(num))
    product, index = largest_adjacent_product(num, 13)
    string_num = num[index:index + 13]
    print(f"index: {index}, number: {string_num}, product: {product}")
| 57.8125
| 1,012
| 0.868829
| 131
| 2,775
| 18.335878
| 0.48855
| 0.014988
| 0.009992
| 0.01582
| 0.049958
| 0.049958
| 0.049958
| 0.049958
| 0.049958
| 0.049958
| 0
| 0.808045
| 0.095135
| 2,775
| 47
| 1,013
| 59.042553
| 0.147352
| 0.452252
| 0
| 0.117647
| 0
| 0
| 0.713137
| 0.670241
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04a6a60e4ed36e0b207d2c0550af17a2348ddd4b
| 2,040
|
py
|
Python
|
python/tests/explainer/perpendicular_bisector.py
|
geometer/sandbox
|
373ec96e69df76744a19b51f7caa865cbc6b58cd
|
[
"Apache-2.0"
] | 6
|
2020-04-19T11:26:18.000Z
|
2021-06-21T18:42:51.000Z
|
python/tests/explainer/perpendicular_bisector.py
|
geometer/sandbox
|
373ec96e69df76744a19b51f7caa865cbc6b58cd
|
[
"Apache-2.0"
] | 31
|
2020-04-21T17:24:39.000Z
|
2020-08-27T15:59:12.000Z
|
python/tests/explainer/perpendicular_bisector.py
|
geometer/sandbox
|
373ec96e69df76744a19b51f7caa865cbc6b58cd
|
[
"Apache-2.0"
] | null | null | null |
from sandbox import Scene
from sandbox.property import PointsCollinearityProperty, SameOrOppositeSideProperty
from .base import ExplainerTest
class ThreePointsOnPerpendicularBisectorCollinearity1(ExplainerTest):
    """C, D, E each equidistant from A and B: collinearity of C, D, E must be explained."""

    def createScene(self):
        scene = Scene()
        A, B, C = scene.nondegenerate_triangle(labels=('A', 'B', 'C')).points
        D = scene.free_point(label='D')
        E = scene.free_point(label='E')
        # Constrain each of C, D, E to be equidistant from A and B.
        for point in (C, D, E):
            A.segment(point).congruent_constraint(B.segment(point))
        return scene

    def test(self):
        A, B, C, D, E = (self.scene.get(label) for label in ('A', 'B', 'C', 'D', 'E'))
        collinear = PointsCollinearityProperty(C, D, E, True)
        opposite_side = SameOrOppositeSideProperty(D.segment(E), A, B, False)
        self.assertIn(collinear, self.explainer.context)
        self.assertNotIn(opposite_side, self.explainer.context)
        self.assertEqual(len(self.explainer.explanation(collinear).reason.all_premises), 6)
class ThreePointsOnPerpendicularBisectorCollinearity2(ExplainerTest):
    """Same scene as variant 1, but with D != E constrained: the side property is explained too."""

    def createScene(self):
        scene = Scene()
        A, B, C = scene.nondegenerate_triangle(labels=('A', 'B', 'C')).points
        D = scene.free_point(label='D')
        E = scene.free_point(label='E')
        # Constrain each of C, D, E to be equidistant from A and B.
        for point in (C, D, E):
            A.segment(point).congruent_constraint(B.segment(point))
        D.not_equal_constraint(E)
        return scene

    def test(self):
        A, B, C, D, E = (self.scene.get(label) for label in ('A', 'B', 'C', 'D', 'E'))
        collinear = PointsCollinearityProperty(C, D, E, True)
        opposite_side = SameOrOppositeSideProperty(D.segment(E), A, B, False)
        self.assertIn(collinear, self.explainer.context)
        self.assertIn(opposite_side, self.explainer.context)
| 37.090909
| 87
| 0.642647
| 252
| 2,040
| 5.142857
| 0.198413
| 0.083333
| 0.092593
| 0.125
| 0.704475
| 0.704475
| 0.704475
| 0.704475
| 0.704475
| 0.704475
| 0
| 0.007524
| 0.218137
| 2,040
| 54
| 88
| 37.777778
| 0.805016
| 0
| 0
| 0.8
| 0
| 0
| 0.009804
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.244444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04c79fd8181fb7085179cad9a55e285699a28f9b
| 48
|
py
|
Python
|
brutelogger/__init__.py
|
giuse/brutelogger
|
1979576b7d78f2e35cdf12b936d912783f7a1d34
|
[
"MIT"
] | 1
|
2021-05-14T10:21:35.000Z
|
2021-05-14T10:21:35.000Z
|
brutelogger/__init__.py
|
giuse/brutelogger
|
1979576b7d78f2e35cdf12b936d912783f7a1d34
|
[
"MIT"
] | null | null | null |
brutelogger/__init__.py
|
giuse/brutelogger
|
1979576b7d78f2e35cdf12b936d912783f7a1d34
|
[
"MIT"
] | null | null | null |
from brutelogger.brutelogger import BruteLogger
| 24
| 47
| 0.895833
| 5
| 48
| 8.6
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.977273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04cc1c4ea7eddf281db797b317f9a62bf677dac7
| 74
|
py
|
Python
|
src/bitstream_python/__init__.py
|
devast8a/bitstream
|
b834b31083bfff0d42c508363bd97fba78161d92
|
[
"MIT"
] | null | null | null |
src/bitstream_python/__init__.py
|
devast8a/bitstream
|
b834b31083bfff0d42c508363bd97fba78161d92
|
[
"MIT"
] | 2
|
2016-02-27T04:49:57.000Z
|
2016-02-27T04:50:58.000Z
|
src/bitstream_python/__init__.py
|
devast8a/bitstream_python
|
b834b31083bfff0d42c508363bd97fba78161d92
|
[
"MIT"
] | null | null | null |
from bitstream import *
import types_python_primitives
import types_numpy
| 18.5
| 30
| 0.878378
| 10
| 74
| 6.2
| 0.7
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 3
| 31
| 24.666667
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04ea8f3034f0d15cde9f6b3005d6282d7a547dee
| 32
|
py
|
Python
|
littleweb/__init__.py
|
lujin123/asynclittle
|
b215113d63a99b1ece0948acf860d405e45d74c2
|
[
"Apache-2.0"
] | 1
|
2019-06-20T15:55:49.000Z
|
2019-06-20T15:55:49.000Z
|
littleweb/__init__.py
|
lujin123/asynclittle
|
b215113d63a99b1ece0948acf860d405e45d74c2
|
[
"Apache-2.0"
] | null | null | null |
littleweb/__init__.py
|
lujin123/asynclittle
|
b215113d63a99b1ece0948acf860d405e45d74c2
|
[
"Apache-2.0"
] | null | null | null |
# Created by lujin at 10/12/2017
| 32
| 32
| 0.75
| 7
| 32
| 3.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.296296
| 0.15625
| 32
| 1
| 32
| 32
| 0.592593
| 0.9375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6e4bb1815f48df5f13bb1d170b1b0b61834b332
| 154
|
py
|
Python
|
zzr_mailer/content/models/__init__.py
|
AHRJ/mailer
|
a16f66b45b782676d5138c5102734573fded8a48
|
[
"MIT"
] | null | null | null |
zzr_mailer/content/models/__init__.py
|
AHRJ/mailer
|
a16f66b45b782676d5138c5102734573fded8a48
|
[
"MIT"
] | 10
|
2021-07-09T08:26:33.000Z
|
2022-03-31T07:17:52.000Z
|
zzr_mailer/content/models/__init__.py
|
AHRJ/mailer
|
a16f66b45b782676d5138c5102734573fded8a48
|
[
"MIT"
] | null | null | null |
from .advertisement import Advertisement # noqa
from .article import Article # noqa
from .journal import Journal # noqa
from .news import News # noqa
| 30.8
| 48
| 0.766234
| 20
| 154
| 5.9
| 0.35
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 154
| 4
| 49
| 38.5
| 0.936508
| 0.123377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b6e577df84a5880325f470701d06927e94aa872f
| 292
|
py
|
Python
|
molecule/resources/tests/default_pyenv_root.py
|
hurricanehrndz/ansible-pyenv
|
fd1db76d1baf98ba70d801725097a752071fcda3
|
[
"MIT"
] | null | null | null |
molecule/resources/tests/default_pyenv_root.py
|
hurricanehrndz/ansible-pyenv
|
fd1db76d1baf98ba70d801725097a752071fcda3
|
[
"MIT"
] | null | null | null |
molecule/resources/tests/default_pyenv_root.py
|
hurricanehrndz/ansible-pyenv
|
fd1db76d1baf98ba70d801725097a752071fcda3
|
[
"MIT"
] | null | null | null |
def get_pyenv_root():
return "/usr/local/pyenv"
def get_user():
return "root"
def get_group():
return "root"
def get_rc_file():
return "/etc/profile.d/pyenv.sh"
def get_python_test_case():
return "3.9.0", True
def get_venv_test_case():
return "neovim", True
| 12.695652
| 36
| 0.650685
| 46
| 292
| 3.869565
| 0.521739
| 0.202247
| 0.146067
| 0.179775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012931
| 0.205479
| 292
| 22
| 37
| 13.272727
| 0.75431
| 0
| 0
| 0.166667
| 0
| 0
| 0.19863
| 0.078767
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
8e116a521cc2e4cd114d40fbf4676381c32fce31
| 188
|
py
|
Python
|
backend/db/entities/laporan/shift.py
|
R-N/sistem_gaji_vue_thrift
|
9ba800b4d8e7849e2c6c4016cb32633caab087be
|
[
"MIT"
] | null | null | null |
backend/db/entities/laporan/shift.py
|
R-N/sistem_gaji_vue_thrift
|
9ba800b4d8e7849e2c6c4016cb32633caab087be
|
[
"MIT"
] | null | null | null |
backend/db/entities/laporan/shift.py
|
R-N/sistem_gaji_vue_thrift
|
9ba800b4d8e7849e2c6c4016cb32633caab087be
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm import reconstructor
from .base import DbLaporanEntity
from ..mixin import MxShiftBase, MxCommited
class DbShift(MxCommited, MxShiftBase, DbLaporanEntity):
pass
| 20.888889
| 56
| 0.81383
| 20
| 188
| 7.65
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132979
| 188
| 8
| 57
| 23.5
| 0.93865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
f3e9fd893919c29001a1515fb29a637eb4f50c48
| 131
|
py
|
Python
|
generate_zip.py
|
tormak/zipper-swapper
|
ecd342d82dcc762ea71022fa03868c82759d157e
|
[
"Apache-2.0"
] | null | null | null |
generate_zip.py
|
tormak/zipper-swapper
|
ecd342d82dcc762ea71022fa03868c82759d157e
|
[
"Apache-2.0"
] | null | null | null |
generate_zip.py
|
tormak/zipper-swapper
|
ecd342d82dcc762ea71022fa03868c82759d157e
|
[
"Apache-2.0"
] | null | null | null |
import zipfile
import sys
with zipfile.ZipFile('test.zip', 'a') as file_zip:
file_zip.write('12.txt')
file_zip.write('2')
| 18.714286
| 50
| 0.687023
| 22
| 131
| 3.954545
| 0.590909
| 0.241379
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.152672
| 131
| 6
| 51
| 21.833333
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0.122137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6d133ca2afa0576d2090debedeb9334e295f706c
| 3,934
|
py
|
Python
|
panel_disconnect.py
|
RentadroneCL/model-definition
|
9dab1f1a808a1efc54d64144745277396c145ff7
|
[
"MIT"
] | 2
|
2020-01-22T19:54:16.000Z
|
2020-02-07T12:20:17.000Z
|
panel_disconnect.py
|
RentadroneCL/model-definition
|
9dab1f1a808a1efc54d64144745277396c145ff7
|
[
"MIT"
] | 4
|
2020-06-03T00:27:22.000Z
|
2020-07-15T17:15:23.000Z
|
panel_disconnect.py
|
RentadroneCL/model-definition
|
9dab1f1a808a1efc54d64144745277396c145ff7
|
[
"MIT"
] | 1
|
2020-01-21T22:38:22.000Z
|
2020-01-21T22:38:22.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 13:55:42 2020
@author: dlsaavedra
"""
import numpy as np
def disconnect(image, boxes, obj_thresh = 0.5, area_min = 400, merge = 0, z_thresh = 1.8):
new_boxes = []
for num, box in enumerate(boxes):
xmin = box.xmin + merge
xmax = box.xmax - merge
ymin = box.ymin + merge
ymax = box.ymax - merge
if xmin > 0 and ymin > 0 and xmax < image.shape[1] and ymax < image.shape[0] and box.get_score() > obj_thresh:
area = (ymax - ymin)*(xmax - xmin)
z_score = np.sum(image[np.int(ymin):np.int(ymax), np.int(xmin):np.int(xmax)]) / area
if area > area_min:
box.z_score = z_score
new_boxes.append(box)
#boxes_area_score[str(num)] = {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score' : score, 'area' : area}
mean_score = np.mean([box.z_score for box in new_boxes])
sd_score = np.std([box.z_score for box in new_boxes])
new_boxes = [box for box in new_boxes if (box.z_score - mean_score)/sd_score > z_thresh]
for box in new_boxes:
z_score = (box.z_score - mean_score)/sd_score
box.classes[0] = min((z_score-z_thresh)*0.5/(3-z_thresh)+ 0.5, 1)
box.score = -1
return new_boxes
def disconnect_plot(image, boxes, obj_thresh = 0.5, area_min = 400, merge = 0, z_thresh = 1.8):
new_boxes = []
for num, box in enumerate(boxes):
xmin = box.xmin + merge
xmax = box.xmax - merge
ymin = box.ymin + merge
ymax = box.ymax - merge
if xmin > 0 and ymin > 0 and xmax < image.shape[1] and ymax < image.shape[0] and box.get_score() > obj_thresh:
area = (ymax - ymin)*(xmax - xmin)
z_score = np.sum(image[np.int(ymin):np.int(ymax), np.int(xmin):np.int(xmax)]) / area
if area > area_min:
box.z_score = z_score
new_boxes.append(box)
#boxes_area_score[str(num)] = {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score' : score, 'area' : area}
mean_score = np.mean([box.z_score for box in new_boxes])
sd_score = np.std([box.z_score for box in new_boxes])
normal_score = ([box.z_score for box in new_boxes] - mean_score)/sd_score
# plt.figure()
# _ = plt.hist(normal_score, bins='auto') # arguments are passed to np.histogram
# plt.title("Histogram with 'auto' bins")
# plt.show()
#
# plt.figure()
# mean = np.mean([boxes_area_score[i]['area'] for i in boxes_area_score])
# sd = np.std([boxes_area_score[i]['area'] for i in boxes_area_score])
# normal = ([boxes_area_score[i]['area'] for i in boxes_area_score] - mean)/sd
# _ = plt.hist(normal, bins='auto') # arguments are passed to np.histogram
# plt.title("Histogram with 'auto' bins")
# plt.show()
new_boxes = [box for box in new_boxes if (box.z_score - mean_score)/sd_score > z_thresh]
for box in new_boxes:
z_score = (box.z_score - mean_score)/sd_score
box.classes[0] = min((z_score-z_thresh)*0.5/(3-z_thresh)+ 0.5, 1)
colors = plt.cm.brg(np.linspace(0, 1, 21)).tolist()
plt.figure(figsize=(10,6))
plt.imshow(I,cmap = 'gray')
current_axis = plt.gca()
for box in new_boxes:
color = colors[2]
#boxes_area_score[key]['score_norm'] = (boxes_area_score[key]['score'] - mean) / sd
#z_score = (box.score - mean_score) / sd_score
#z_score = (boxes_area_score[key]['area'] )
### Escribe el z-score
#if z_score > 1:
current_axis.text((box.xmin + box.xmax)/2,
(box.ymin+ box.ymax)/2,
'%.2f' % box.classes[0], size='x-large',
color='white', bbox={'facecolor':color, 'alpha':1.0})
return new_boxes
| 34.208696
| 134
| 0.577529
| 603
| 3,934
| 3.595357
| 0.177446
| 0.063653
| 0.045664
| 0.050738
| 0.756458
| 0.728782
| 0.718635
| 0.718635
| 0.707103
| 0.707103
| 0
| 0.023835
| 0.274784
| 3,934
| 114
| 135
| 34.508772
| 0.736067
| 0.283172
| 0
| 0.735849
| 0
| 0
| 0.012195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.018868
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6d2b1536c336b2641205033f93ad4d4f5e9297c7
| 133
|
py
|
Python
|
mindefuse/problem/secret/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | null | null | null |
mindefuse/problem/secret/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | 1
|
2019-08-22T19:51:12.000Z
|
2019-08-22T19:51:12.000Z
|
mindefuse/problem/secret/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.7
from .secret import Secret
from .secret_types import SecretTypes
from .secret_factory import SecretFactory
| 22.166667
| 41
| 0.819549
| 19
| 133
| 5.631579
| 0.631579
| 0.280374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.112782
| 133
| 5
| 42
| 26.6
| 0.889831
| 0.172932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
edb5aac8197239c588429007ced012e9659b4a1f
| 196
|
py
|
Python
|
vcauth/admin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
vcauth/admin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
vcauth/admin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.models import User
from vcauth.user_admin import CustomUserAdmin
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
| 24.5
| 45
| 0.841837
| 27
| 196
| 6.074074
| 0.481481
| 0.121951
| 0.207317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086735
| 196
| 7
| 46
| 28
| 0.916201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6121c390b795f28167daa872759644b141c80430
| 515
|
py
|
Python
|
uk_election_timetables/elections/__init__.py
|
DemocracyClub/uk-election-timetables
|
2541f9e5050a393906bafa2b70709fe650de3f32
|
[
"MIT"
] | 2
|
2020-11-14T15:56:56.000Z
|
2021-01-11T11:11:09.000Z
|
uk_election_timetables/elections/__init__.py
|
DemocracyClub/uk-election-timetables
|
2541f9e5050a393906bafa2b70709fe650de3f32
|
[
"MIT"
] | 12
|
2020-11-18T20:27:43.000Z
|
2021-12-15T10:47:01.000Z
|
uk_election_timetables/elections/__init__.py
|
DemocracyClub/uk-election-timetables
|
2541f9e5050a393906bafa2b70709fe650de3f32
|
[
"MIT"
] | null | null | null |
from uk_election_timetables.elections.scottish_parliament import *
from uk_election_timetables.elections.senedd_cymru import *
from uk_election_timetables.elections.greater_london_assembly import *
from uk_election_timetables.elections.northern_ireland_assembly import *
from uk_election_timetables.elections.local import *
from uk_election_timetables.elections.uk_parliament import *
from uk_election_timetables.elections.police_and_crime_commissioner import *
from uk_election_timetables.elections.mayor import *
| 57.222222
| 76
| 0.891262
| 66
| 515
| 6.560606
| 0.318182
| 0.110855
| 0.258661
| 0.443418
| 0.789838
| 0.713626
| 0.443418
| 0
| 0
| 0
| 0
| 0
| 0.062136
| 515
| 8
| 77
| 64.375
| 0.89648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
612a97a90be2b64abb318b18fd0688833b207c63
| 166
|
py
|
Python
|
01-Deploy-To-AWS/sls-flask-ml-test/utils/__init__.py
|
amitkml/TSAI-DeepVision-EVA4.0-Phase-2
|
f9e232b3eb6ce20f522136523e79208ed85a1f28
|
[
"MIT"
] | 1
|
2021-03-21T08:45:05.000Z
|
2021-03-21T08:45:05.000Z
|
01-Deploy-To-AWS/sls-flask-ml-test/utils/__init__.py
|
amitkml/TSAI-DeepVision-EVA4.0-Phase-2
|
f9e232b3eb6ce20f522136523e79208ed85a1f28
|
[
"MIT"
] | null | null | null |
01-Deploy-To-AWS/sls-flask-ml-test/utils/__init__.py
|
amitkml/TSAI-DeepVision-EVA4.0-Phase-2
|
f9e232b3eb6ce20f522136523e79208ed85a1f28
|
[
"MIT"
] | null | null | null |
from .imagenet_utils import idx2label
from .model_utils import transform_image, classify_image
from .logger import setup_logger
from .upload_utils import allowed_file
| 41.5
| 56
| 0.873494
| 24
| 166
| 5.75
| 0.583333
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006667
| 0.096386
| 166
| 4
| 57
| 41.5
| 0.913333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b63f1f74164c085ea50cbab7a7c7b3c78e2d8ec3
| 20
|
py
|
Python
|
pymm/__init__.py
|
lancekindle/pymm
|
77f0cfc32a6819781cf61e3c311f1340406e3fba
|
[
"MIT"
] | 12
|
2015-09-26T07:52:49.000Z
|
2021-07-09T00:26:09.000Z
|
pymm/__init__.py
|
lancekindle/pymm
|
77f0cfc32a6819781cf61e3c311f1340406e3fba
|
[
"MIT"
] | 2
|
2015-10-06T12:54:25.000Z
|
2015-10-29T17:50:34.000Z
|
pymm/__init__.py
|
lancekindle/pymm
|
77f0cfc32a6819781cf61e3c311f1340406e3fba
|
[
"MIT"
] | 7
|
2015-10-05T13:22:26.000Z
|
2021-09-03T18:32:41.000Z
|
from .pymm import *
| 10
| 19
| 0.7
| 3
| 20
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fcacfa149dcd871ed680e984e72f46019265f5af
| 3,437
|
py
|
Python
|
backend/tests/test_passport.py
|
ryanmahan/police-data-trust
|
e001e699adbc416f26a4f8fa64d56a1ef17e76fb
|
[
"MIT"
] | null | null | null |
backend/tests/test_passport.py
|
ryanmahan/police-data-trust
|
e001e699adbc416f26a4f8fa64d56a1ef17e76fb
|
[
"MIT"
] | null | null | null |
backend/tests/test_passport.py
|
ryanmahan/police-data-trust
|
e001e699adbc416f26a4f8fa64d56a1ef17e76fb
|
[
"MIT"
] | null | null | null |
import pytest
from backend.database.models.passport_request import RequestStatus
from backend.database.models.types.enums import UserRole
from ..database import PassportRequest, User
from .conftest import example_password
def test_create_request(client, db_session, example_user):
res = client.post(
"api/v1/passportRequests",
json={
"role": UserRole.PASSPORT,
"user_id": example_user.id,
},
)
print(res)
db_request = db_session.query(PassportRequest).filter(PassportRequest.user_id == example_user.id).first()
assert (db_request is not None)
assert res.status_code == 200
def test_double_request(client, db_session, example_user):
res = client.post(
"api/v1/passportRequests",
json={
"role": UserRole.PASSPORT,
"user_id": example_user.id,
},
)
db_count = db_session.query(PassportRequest).filter(PassportRequest.user_id == example_user.id).first()
assert (db_count is not None)
assert res.status_code == 200
res = client.post(
"api/v1/passportRequests",
json={
"role": UserRole.PASSPORT,
"user_id": example_user.id,
},
)
db_count = db_session.query(PassportRequest).filter(PassportRequest.user_id == example_user.id).count()
assert db_count == 1
assert res.status_code == 400
def test_approve_request(client, db_session, example_user, admin_user):
request = PassportRequest(
user_id = example_user.id,
role = UserRole.PASSPORT
)
db_session.add(request);
db_session.commit();
login_res = client.post(
"api/v1/auth/login",
json = {
"email": admin_user.email,
"password": example_password,
}
)
res = client.put(
"api/v1/passportRequests/{}/status".format(request.id),
json={
"status": RequestStatus.APPROVED,
},
headers={
"Authorization": "Bearer {0}".format(login_res.json["access_token"])
},
)
db_user = User.get(example_user.id)
assert res.status_code == 200
assert db_user.role == UserRole.PASSPORT
def test_deny_request(client, db_session, example_user, admin_user):
request = PassportRequest(
user_id = example_user.id,
role = UserRole.PASSPORT
)
db_session.add(request);
db_session.commit();
login_res = client.post(
"api/v1/auth/login",
json = {
"email": admin_user.email,
"password": example_password,
}
)
res = client.put(
"api/v1/passportRequests/{}/status".format(request.id),
json={
"status": RequestStatus.DENIED,
},
headers={
"Authorization": "Bearer {0}".format(login_res.json["access_token"])
},
)
db_user = User.get(example_user.id)
assert res.status_code == 200
assert db_user.role == UserRole.PUBLIC
def test_get_request(db_session, client, admin_user, example_user):
request = PassportRequest(
user_id = example_user.id,
role = UserRole.PASSPORT
)
db_session.add(request);
db_session.commit();
login_res = client.post(
"api/v1/auth/login",
json={
"email": admin_user.email,
"password": example_password,
}
)
res = client.get(
"api/v1/passportRequests/{0}".format(request.id),
headers={
"Authorization": "Bearer {0}".format(login_res.json["access_token"])
},
)
assert res.status_code == 200
assert res.json["user_id"] == example_user.id
assert res.json["role"] == request.role
| 26.037879
| 109
| 0.664533
| 421
| 3,437
| 5.218527
| 0.15677
| 0.060082
| 0.071006
| 0.077378
| 0.79381
| 0.781065
| 0.76832
| 0.76832
| 0.7401
| 0.7401
| 0
| 0.011709
| 0.20483
| 3,437
| 132
| 110
| 26.037879
| 0.79217
| 0
| 0
| 0.566372
| 0
| 0
| 0.120128
| 0.04712
| 0
| 0
| 0
| 0
| 0.115044
| 1
| 0.044248
| false
| 0.221239
| 0.044248
| 0
| 0.088496
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
fcdbcb6e97797fdf0895dab3bb21f3eb00c82cfa
| 2,988
|
gyp
|
Python
|
Deps/libmikmod-gyp/libmikmod.gyp
|
Marisa-Chan/surreal
|
fd8136209941347a4f83645c461a0249dc85a281
|
[
"Artistic-1.0"
] | 4
|
2016-12-22T01:42:44.000Z
|
2018-09-21T22:13:23.000Z
|
Deps/libmikmod-gyp/libmikmod.gyp
|
Marisa-Chan/surreal
|
fd8136209941347a4f83645c461a0249dc85a281
|
[
"Artistic-1.0"
] | null | null | null |
Deps/libmikmod-gyp/libmikmod.gyp
|
Marisa-Chan/surreal
|
fd8136209941347a4f83645c461a0249dc85a281
|
[
"Artistic-1.0"
] | null | null | null |
{
"variables": {
"library%": "shared_library",
"mikmod_dir%": "../libmikmod"
},
"target_defaults": {
"include_dirs": [
"include",
"<(mikmod_dir)/include"
],
"defines": [
"HAVE_CONFIG_H"
],
"cflags": [
"-Wall",
"-finline-functions",
"-funroll-loops",
"-ffast-math"
],
"target_conditions": [
["OS == 'win'", {
"defines": [ "WIN32" ]
}],
["OS != 'win'", {
"defines": [ "unix" ]
}],
["_type == 'shared_library' and OS == 'win'", {
"defines": [ "DLL_EXPORTS" ]
}],
["_type == 'shared_library' and OS == 'linux'", {
"cflags": [ "-fPIC" ]
}]
],
"default_configuration": "Release",
"configurations": {
"Debug": {
"defines": [ "MIKMOD_DEBUG" ],
"cflags": [ "-g3", "-Werror" ],
"msvs_settings": {
"VCCLCompilerTool": {
"RuntimeLibrary": 3
}
}
},
"Release": {
"cflags": [ "-g", "-O2" ],
"msvs_settings": {
"VCCLCompilerTool": {
"RuntimeLibrary": 2
}
}
}
}
},
"targets": [
{
"target_name": "mikmod",
"type": "<(library)",
"product_dir": "../../System",
"sources": [
"<(mikmod_dir)/drivers/drv_nos.c",
"<(mikmod_dir)/drivers/drv_raw.c",
"<(mikmod_dir)/drivers/drv_stdout.c",
"<(mikmod_dir)/drivers/drv_wav.c",
"<(mikmod_dir)/loaders/load_669.c",
"<(mikmod_dir)/loaders/load_amf.c",
"<(mikmod_dir)/loaders/load_asy.c",
"<(mikmod_dir)/loaders/load_dsm.c",
"<(mikmod_dir)/loaders/load_far.c",
"<(mikmod_dir)/loaders/load_gdm.c",
"<(mikmod_dir)/loaders/load_gt2.c",
"<(mikmod_dir)/loaders/load_it.c",
"<(mikmod_dir)/loaders/load_imf.c",
"<(mikmod_dir)/loaders/load_m15.c",
"<(mikmod_dir)/loaders/load_med.c",
"<(mikmod_dir)/loaders/load_mod.c",
"<(mikmod_dir)/loaders/load_mtm.c",
"<(mikmod_dir)/loaders/load_okt.c",
"<(mikmod_dir)/loaders/load_s3m.c",
"<(mikmod_dir)/loaders/load_stm.c",
"<(mikmod_dir)/loaders/load_stx.c",
"<(mikmod_dir)/loaders/load_ult.c",
"<(mikmod_dir)/loaders/load_uni.c",
"<(mikmod_dir)/loaders/load_xm.c",
"<(mikmod_dir)/mmio/mmalloc.c",
"<(mikmod_dir)/mmio/mmerror.c",
"<(mikmod_dir)/mmio/mmio.c",
"<(mikmod_dir)/playercode/mdriver.c",
"<(mikmod_dir)/playercode/mdreg.c",
"<(mikmod_dir)/playercode/mdulaw.c",
"<(mikmod_dir)/playercode/mloader.c",
"<(mikmod_dir)/playercode/mlreg.c",
"<(mikmod_dir)/playercode/mlutil.c",
"<(mikmod_dir)/playercode/mplayer.c",
"<(mikmod_dir)/playercode/munitrk.c",
"<(mikmod_dir)/playercode/mwav.c",
"<(mikmod_dir)/playercode/npertab.c",
"<(mikmod_dir)/playercode/sloader.c",
"<(mikmod_dir)/playercode/virtch.c",
"<(mikmod_dir)/playercode/virtch2.c",
"<(mikmod_dir)/playercode/virtch_common.c"
],
"all_dependent_settings": {
"include_dirs": [
"include",
"<(mikmod_dir)/include"
]
},
"conditions": [
["OS != 'win'", {
"libraries": [ "-lm" ]
}]
]
}
]
}
| 24.491803
| 52
| 0.568942
| 333
| 2,988
| 4.837838
| 0.312312
| 0.24581
| 0.248293
| 0.211049
| 0.399752
| 0.04221
| 0
| 0
| 0
| 0
| 0
| 0.005831
| 0.196452
| 2,988
| 121
| 53
| 24.694215
| 0.66514
| 0
| 0
| 0.189655
| 0
| 0
| 0.677711
| 0.469545
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e36d20a0a23b9c0c70b4721dede26bafed354f4
| 5,211
|
py
|
Python
|
ckan/ckanext-spatial/ckanext/spatial/tests/functional/test_package.py
|
lorenzoeusepi77/ckan
|
3a620e9c81ba1750d12941c02184092d507f71df
|
[
"Apache-2.0"
] | null | null | null |
ckan/ckanext-spatial/ckanext/spatial/tests/functional/test_package.py
|
lorenzoeusepi77/ckan
|
3a620e9c81ba1750d12941c02184092d507f71df
|
[
"Apache-2.0"
] | null | null | null |
ckan/ckanext-spatial/ckanext/spatial/tests/functional/test_package.py
|
lorenzoeusepi77/ckan
|
3a620e9c81ba1750d12941c02184092d507f71df
|
[
"Apache-2.0"
] | null | null | null |
import json
from nose.tools import assert_equals
from ckan.model import Session
from ckan.lib.helpers import url_for
try:
import ckan.new_tests.helpers as helpers
import ckan.new_tests.factories as factories
except ImportError:
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
from ckanext.spatial.model import PackageExtent
from ckanext.spatial.geoalchemy_common import legacy_geoalchemy
from ckanext.spatial.tests.base import SpatialTestBase
class TestSpatialExtra(SpatialTestBase, helpers.FunctionalTestBase):
def test_spatial_extra(self):
app = self._get_test_app()
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
dataset = factories.Dataset(user=user)
offset = url_for(controller='package', action='edit', id=dataset['id'])
res = app.get(offset, extra_environ=env)
form = res.forms[1]
form['extras__0__key'] = u'spatial'
form['extras__0__value'] = self.geojson_examples['point']
res = helpers.submit_and_follow(app, form, env, 'save')
assert 'Error' not in res, res
package_extent = Session.query(PackageExtent) \
.filter(PackageExtent.package_id == dataset['id']).first()
geojson = json.loads(self.geojson_examples['point'])
assert_equals(package_extent.package_id, dataset['id'])
if legacy_geoalchemy:
assert_equals(Session.scalar(package_extent.the_geom.x),
geojson['coordinates'][0])
assert_equals(Session.scalar(package_extent.the_geom.y),
geojson['coordinates'][1])
assert_equals(Session.scalar(package_extent.the_geom.srid),
self.db_srid)
else:
from sqlalchemy import func
assert_equals(
Session.query(func.ST_X(package_extent.the_geom)).first()[0],
geojson['coordinates'][0])
assert_equals(
Session.query(func.ST_Y(package_extent.the_geom)).first()[0],
geojson['coordinates'][1])
assert_equals(package_extent.the_geom.srid, self.db_srid)
def test_spatial_extra_edit(self):
app = self._get_test_app()
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
dataset = factories.Dataset(user=user)
offset = url_for(controller='package', action='edit', id=dataset['id'])
res = app.get(offset, extra_environ=env)
form = res.forms[1]
form['extras__0__key'] = u'spatial'
form['extras__0__value'] = self.geojson_examples['point']
res = helpers.submit_and_follow(app, form, env, 'save')
assert 'Error' not in res, res
res = app.get(offset, extra_environ=env)
form = res.forms[1]
form['extras__0__key'] = u'spatial'
form['extras__0__value'] = self.geojson_examples['polygon']
res = helpers.submit_and_follow(app, form, env, 'save')
assert 'Error' not in res, res
package_extent = Session.query(PackageExtent) \
.filter(PackageExtent.package_id == dataset['id']).first()
assert_equals(package_extent.package_id, dataset['id'])
if legacy_geoalchemy:
assert_equals(
Session.scalar(package_extent.the_geom.geometry_type),
'ST_Polygon')
assert_equals(
Session.scalar(package_extent.the_geom.srid),
self.db_srid)
else:
from sqlalchemy import func
assert_equals(
Session.query(
func.ST_GeometryType(package_extent.the_geom)).first()[0],
'ST_Polygon')
assert_equals(package_extent.the_geom.srid, self.db_srid)
def test_spatial_extra_bad_json(self):
app = self._get_test_app()
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
dataset = factories.Dataset(user=user)
offset = url_for(controller='package', action='edit', id=dataset['id'])
res = app.get(offset, extra_environ=env)
form = res.forms[1]
form['extras__0__key'] = u'spatial'
form['extras__0__value'] = u'{"Type":Bad Json]'
res = helpers.webtest_submit(form, extra_environ=env, name='save')
assert 'Error' in res, res
assert 'Spatial' in res
assert 'Error decoding JSON object' in res
def test_spatial_extra_bad_geojson(self):
app = self._get_test_app()
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
dataset = factories.Dataset(user=user)
offset = url_for(controller='package', action='edit', id=dataset['id'])
res = app.get(offset, extra_environ=env)
form = res.forms[1]
form['extras__0__key'] = u'spatial'
form['extras__0__value'] = u'{"Type":"Bad_GeoJSON","a":2}'
res = helpers.webtest_submit(form, extra_environ=env, name='save')
assert 'Error' in res, res
assert 'Spatial' in res
assert 'Error creating geometry' in res
| 35.209459
| 79
| 0.625216
| 633
| 5,211
| 4.908373
| 0.157978
| 0.058577
| 0.035404
| 0.064371
| 0.812037
| 0.797554
| 0.753138
| 0.753138
| 0.710332
| 0.710332
| 0
| 0.005929
| 0.255613
| 5,211
| 147
| 80
| 35.44898
| 0.79505
| 0
| 0
| 0.697248
| 0
| 0
| 0.108233
| 0.005373
| 0
| 0
| 0
| 0
| 0.201835
| 1
| 0.036697
| false
| 0
| 0.12844
| 0
| 0.174312
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e4807ed7d647e37b97a0870abc07079c3523802
| 65
|
py
|
Python
|
windowsbuild/MSVC2017/python/3.7.7/Lib/site-packages/shiboken2_generator/__init__.py
|
Tech-XCorp/visit-deps
|
23e2bd534bf9c332d6b7d32310495f1f65b1b936
|
[
"BSD-3-Clause"
] | null | null | null |
windowsbuild/MSVC2017/python/3.7.7/Lib/site-packages/shiboken2_generator/__init__.py
|
Tech-XCorp/visit-deps
|
23e2bd534bf9c332d6b7d32310495f1f65b1b936
|
[
"BSD-3-Clause"
] | null | null | null |
windowsbuild/MSVC2017/python/3.7.7/Lib/site-packages/shiboken2_generator/__init__.py
|
Tech-XCorp/visit-deps
|
23e2bd534bf9c332d6b7d32310495f1f65b1b936
|
[
"BSD-3-Clause"
] | null | null | null |
__version__ = "5.14.2.3"
__version_info__ = (5, 14, 2.3, "", "")
| 21.666667
| 39
| 0.569231
| 11
| 65
| 2.545455
| 0.545455
| 0.214286
| 0.285714
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 0.153846
| 65
| 2
| 40
| 32.5
| 0.327273
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e72af6c4aabdc9305c193502b0bdc6784946984
| 128
|
py
|
Python
|
app/app/calc.py
|
JosephKithome/recipeApi
|
98497f8bf6e9a36a50c26cbc573b0b2fdab826d3
|
[
"MIT"
] | null | null | null |
app/app/calc.py
|
JosephKithome/recipeApi
|
98497f8bf6e9a36a50c26cbc573b0b2fdab826d3
|
[
"MIT"
] | null | null | null |
app/app/calc.py
|
JosephKithome/recipeApi
|
98497f8bf6e9a36a50c26cbc573b0b2fdab826d3
|
[
"MIT"
] | null | null | null |
def add(x, y):
"""Sum two numbers"""
return x + y
def subtract(x, y):
"""Subtract two numbers"""
return x - y
| 14.222222
| 30
| 0.53125
| 20
| 128
| 3.4
| 0.45
| 0.117647
| 0.470588
| 0.5
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.296875
| 128
| 8
| 31
| 16
| 0.755556
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
1e8434aad3565280ab8d9cfa93aba80060cb6017
| 23,147
|
py
|
Python
|
unit_tests/web_tests/test_web_config.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/web_tests/test_web_config.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/web_tests/test_web_config.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | null | null | null |
if __name__ == "__main__" or __name__ == "test_web_config":
__package__ = 'cloudscheduler.unit_tests.web_tests'
import unittest
import sys
from . import web_test_setup_cleanup as wtsc
from . import web_test_assertions_v2 as wta
from . import web_test_page_objects as pages
from . import web_test_helpers as helpers
class TestWebConfigCommon(unittest.TestCase):
    """A class for the config tests that should be repeated in all iterations.

    Concrete subclasses provide ``cls.driver`` and ``cls.gvar`` (via
    ``wtsc.setup``) before this class's ``setUpClass`` runs.
    """
    @classmethod
    def setUpClass(cls):
        # cls.driver / cls.gvar are attached by the subclass's setUpClass.
        cls.page = pages.ConfigPage(cls.driver, cls.gvar['address'])
        cls.oversize = cls.gvar['oversize']
    def setUp(self):
        self.page.get_homepage()
        self.page.click_top_nav('System Config')
    def _check_valid_update(self, get_value, set_value, new_value):
        """Save a condor_poller.py setting, verify it stuck, then restore it.

        get_value/set_value are bound ConfigPage accessors for the setting;
        new_value is the string to save. All tests in this file must reverse
        any saved action, so the original value is written back at the end.
        """
        self.page.click_side_button('condor_poller.py')
        original = get_value()
        set_value(new_value)
        self.page.click_update_config()
        self.assertFalse(self.page.error_message_displayed())
        self.page.click_top_nav('System Config')
        self.assertEqual(get_value(), new_value)
        # reverse
        set_value(original)
        self.page.click_update_config()
    def _check_invalid_update(self, get_value, set_value, bad_value):
        """Try to save an invalid value; verify an error is shown and the
        value was not stored (nothing to reverse on rejection)."""
        self.page.click_side_button('condor_poller.py')
        set_value(bad_value)
        self.page.click_update_config()
        self.assertTrue(self.page.error_message_displayed())
        self.page.click_top_nav('System Config')
        self.assertNotEqual(get_value(), bad_value)
    def test_web_config_find(self):
        # Finds the config page
        pass
    # All tests in this file should reverse themselves if they perform any saved
    # action
    # Tests are organized by file, because each one is unique
    # Currently, the tested files are:
    # condor_poller.py
    # Tests for condor_poller.py
    def test_web_config_update_condor_poller_batch_commit_size(self):
        # Changes the condor poller's batch commit size and reverses it
        self._check_valid_update(self.page.get_value_batch_commit_size,
                                 self.page.type_batch_commit_size, '40')
    def test_web_config_update_condor_poller_batch_commit_size_float(self):
        # Tries to change the condor poller's batch commit size to a float
        self._check_invalid_update(self.page.get_value_batch_commit_size,
                                   self.page.type_batch_commit_size, '50.5')
    def test_web_config_update_condor_poller_batch_commit_size_string(self):
        # Tries to change the condor poller's batch commit size to a string
        self._check_invalid_update(self.page.get_value_batch_commit_size,
                                   self.page.type_batch_commit_size, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_batch_commit_size_too_big(self):
        # Tries to change the condor poller's batch commit size to an int that's too large for the database
        self._check_invalid_update(self.page.get_value_batch_commit_size,
                                   self.page.type_batch_commit_size,
                                   str(self.oversize['int_11']))
    def test_web_config_update_condor_poller_delete_cycle_interval(self):
        # Changes the condor poller's delete cycle interval (and reverses it)
        self._check_valid_update(self.page.get_value_delete_cycle_interval,
                                 self.page.type_delete_cycle_interval, '5')
    def test_web_config_update_condor_poller_delete_cycle_interval_float(self):
        # Tries to change the condor poller's delete cycle interval to a float
        self._check_invalid_update(self.page.get_value_delete_cycle_interval,
                                   self.page.type_delete_cycle_interval, '1.5')
    def test_web_config_update_condor_poller_delete_cycle_interval_string(self):
        # Tries to change the condor poller's delete cycle interval to a string
        self._check_invalid_update(self.page.get_value_delete_cycle_interval,
                                   self.page.type_delete_cycle_interval, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_delete_cycle_interval_too_big(self):
        # Tries to change the condor poller's delete cycle interval to an int that's too big for the database
        # BUG FIX: the original used assertFalse(value, msg), which treated the
        # oversize value as a failure *message* and never compared the two; the
        # helper performs the intended assertNotEqual.
        self._check_invalid_update(self.page.get_value_delete_cycle_interval,
                                   self.page.type_delete_cycle_interval,
                                   str(self.oversize['bigint_20']))
    def test_web_config_update_condor_poller_log_level(self):
        # Changes the condor poller's log level and reverses it
        self._check_valid_update(self.page.get_text_log_level,
                                 self.page.select_log_level, 'WARNING')
    def test_web_config_update_condor_poller_retire_interval(self):
        # Changes the condor poller's retire interval and reverses it
        self._check_valid_update(self.page.get_value_retire_interval,
                                 self.page.type_retire_interval, '1024')
    def test_web_config_update_condor_poller_retire_interval_float(self):
        # Tries to change the condor poller's retire interval to a float
        self._check_invalid_update(self.page.get_value_retire_interval,
                                   self.page.type_retire_interval, '1200.5')
    def test_web_config_update_condor_poller_retire_interval_string(self):
        # Tries to change the condor poller's retire interval to a string
        self._check_invalid_update(self.page.get_value_retire_interval,
                                   self.page.type_retire_interval, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_retire_interval_too_big(self):
        # Tries to change the condor poller's retire interval to an int that's too big for the database
        # BUG FIX: the original navigated via self.driver.click_top_nav(...)
        # instead of self.page; the helper uses the page object throughout.
        self._check_invalid_update(self.page.get_value_retire_interval,
                                   self.page.type_retire_interval,
                                   str(self.oversize['int_11']))
    def test_web_config_update_condor_poller_retire_off(self):
        # Changes the condor poller's retire attribute and reverses it
        self.page.click_side_button('condor_poller.py')
        self.page.click_retire_off()
        self.page.click_update_config()
        self.assertFalse(self.page.error_message_displayed())
        # TODO: Assertion?
        #self.assertEqual(self.page.get_value_batch_commit_size(), '40')
        # reverse
        self.page.click_retire_off()
        self.page.click_update_config()
    def test_web_config_update_condor_poller_sleep_interval_command(self):
        # Changes the condor poller's command sleep interval and reverses it
        self._check_valid_update(self.page.get_value_sleep_interval_command,
                                 self.page.type_sleep_interval_command, '16')
    def test_web_config_update_condor_poller_sleep_interval_command_float(self):
        # Tries to change the condor poller's command sleep interval to a float
        self._check_invalid_update(self.page.get_value_sleep_interval_command,
                                   self.page.type_sleep_interval_command, '15.5')
    def test_web_config_update_condor_poller_sleep_interval_command_string(self):
        # Tries to change the condor poller's command sleep interval to a string
        self._check_invalid_update(self.page.get_value_sleep_interval_command,
                                   self.page.type_sleep_interval_command, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_sleep_interval_command_too_big(self):
        # Tries to change the condor poller's command sleep interval to one that's too big for the database
        self._check_invalid_update(self.page.get_value_sleep_interval_command,
                                   self.page.type_sleep_interval_command,
                                   str(self.oversize['int_11']))
    def test_web_config_update_condor_poller_sleep_interval_condor_gsi(self):
        # Changes the condor poller's GSI sleep interval and reverses it
        self._check_valid_update(self.page.get_value_sleep_interval_condor_gsi,
                                 self.page.type_sleep_interval_condor_gsi, '4096')
    def test_web_config_update_condor_poller_sleep_interval_condor_gsi_float(self):
        # Tries to change the condor poller's GSI sleep interval to a float
        self._check_invalid_update(self.page.get_value_sleep_interval_condor_gsi,
                                   self.page.type_sleep_interval_condor_gsi, '3600.5')
    def test_web_config_update_condor_poller_sleep_interval_condor_gsi_string(self):
        # Tries to change the condor poller's GSI sleep interval to a string
        self._check_invalid_update(self.page.get_value_sleep_interval_condor_gsi,
                                   self.page.type_sleep_interval_condor_gsi, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_sleep_interval_condor_gsi_too_big(self):
        # Tries to change the condor poller's GSI sleep interval to an int that's too big for the database
        self._check_invalid_update(self.page.get_value_sleep_interval_condor_gsi,
                                   self.page.type_sleep_interval_condor_gsi,
                                   str(self.oversize['int_11']))
    def test_web_config_update_condor_poller_sleep_interval_job(self):
        # Changes the condor poller's job sleep interval and reverses it
        self._check_valid_update(self.page.get_value_sleep_interval_job,
                                 self.page.type_sleep_interval_job, '16')
    def test_web_config_update_condor_poller_sleep_interval_job_float(self):
        # Tries to change the condor poller's job sleep interval to a float
        self._check_invalid_update(self.page.get_value_sleep_interval_job,
                                   self.page.type_sleep_interval_job, '15.5')
    def test_web_config_update_condor_poller_sleep_interval_job_string(self):
        # Tries to change the condor poller's job sleep interval to a string
        self._check_invalid_update(self.page.get_value_sleep_interval_job,
                                   self.page.type_sleep_interval_job, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_sleep_interval_job_too_big(self):
        # Tries to change the condor poller's job sleep interval to an int that's too big for the database
        self._check_invalid_update(self.page.get_value_sleep_interval_job,
                                   self.page.type_sleep_interval_job,
                                   str(self.oversize['int_11']))
    def test_web_config_update_condor_poller_sleep_interval_machine(self):
        # Changes the condor poller's machine sleep interval and reverses it
        self._check_valid_update(self.page.get_value_sleep_interval_machine,
                                 self.page.type_sleep_interval_machine, '16')
    def test_web_config_update_condor_poller_sleep_interval_machine_float(self):
        # Tries to change the condor poller's machine sleep interval to a float
        self._check_invalid_update(self.page.get_value_sleep_interval_machine,
                                   self.page.type_sleep_interval_machine, '15.5')
    def test_web_config_update_condor_poller_sleep_interval_machine_string(self):
        # Tries to change the condor poller's machine sleep interval to a string
        self._check_invalid_update(self.page.get_value_sleep_interval_machine,
                                   self.page.type_sleep_interval_machine, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_sleep_interval_machine_too_big(self):
        # Tries to change the condor poller's machine sleep interval to an int that's too big for the database
        self._check_invalid_update(self.page.get_value_sleep_interval_machine,
                                   self.page.type_sleep_interval_machine,
                                   str(self.oversize['int_11']))
    def test_web_config_update_condor_poller_sleep_interval_worker_gsi(self):
        # Changes the condor poller's worker GSI sleep interval and reverses it
        self._check_valid_update(self.page.get_value_sleep_interval_worker_gsi,
                                 self.page.type_sleep_interval_worker_gsi, '4096')
    def test_web_config_update_condor_poller_sleep_interval_worker_gsi_float(self):
        # Tries to change the condor poller's worker GSI sleep interval to a float
        self._check_invalid_update(self.page.get_value_sleep_interval_worker_gsi,
                                   self.page.type_sleep_interval_worker_gsi, '3600.5')
    def test_web_config_update_condor_poller_sleep_interval_worker_gsi_string(self):
        # Tries to change the condor poller's worker GSI sleep interval to a string
        # BUG FIX: the original asserted against the *condor* GSI getter here
        # (copy-paste error); the helper checks the worker GSI value.
        self._check_invalid_update(self.page.get_value_sleep_interval_worker_gsi,
                                   self.page.type_sleep_interval_worker_gsi, 'invalid-web-test')
    @unittest.skip("No apparent maximum size")
    def test_web_config_update_condor_poller_sleep_interval_worker_gsi_too_big(self):
        # Tries to change the condor poller's worker GSI sleep interval to an int that's too big for the database
        self._check_invalid_update(self.page.get_value_sleep_interval_worker_gsi,
                                   self.page.type_sleep_interval_worker_gsi,
                                   str(self.oversize['int_11']))
    @classmethod
    def tearDownClass(cls):
        # Tear down the browser session created by the subclass's wtsc.setup.
        wtsc.cleanup(cls)
class TestWebConfigSuperUserFirefox(TestWebConfigCommon):
    """A class to test config operations via the web interface, in Firefox, with a super user."""
    @classmethod
    def setUpClass(cls):
        try:
            wtsc.setup(cls, 2, ['config'], browser='firefox')
            super().setUpClass()
            print("\nConfig Tests:")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # pass through untouched; setup failures are still cleaned up and
            # re-raised.
            print("Error in test setup")
            super().tearDownClass()
            raise
class TestWebConfigSuperUserChromium(TestWebConfigCommon):
    """A class to test config operations via the web interface, in Chromium, with a super user."""
    @classmethod
    def setUpClass(cls):
        try:
            wtsc.setup(cls, 2, ['config'], browser='chromium')
            super().setUpClass()
            print("\nConfig Tests (Chromium):")
        except Exception:
            # Narrowed from a bare `except:`; cleanup then re-raise so the
            # runner still reports the setup failure.
            print("Error in test setup")
            super().tearDownClass()
            raise
class TestWebConfigSuperUserOpera(TestWebConfigCommon):
    """A class to test config operations via the web interface, in Opera, with a super user."""
    @classmethod
    def setUpClass(cls):
        try:
            wtsc.setup(cls, 2, ['config'], browser='opera')
            super().setUpClass()
            print("\nConfig Tests (Opera):")
        except Exception:
            # Narrowed from a bare `except:`; cleanup then re-raise so the
            # runner still reports the setup failure.
            print("Error in test setup")
            super().tearDownClass()
            raise
class TestWebConfigSuperUserChrome(TestWebConfigCommon):
    """A class to test config operations via the web interface, in Chrome, with a super user."""
    @classmethod
    def setUpClass(cls):
        try:
            wtsc.setup(cls, 2, ['config'], browser='chrome')
            super().setUpClass()
            print("\nConfig Tests (Chrome):")
        except Exception:
            # Narrowed from a bare `except:`; cleanup then re-raise so the
            # runner still reports the setup failure.
            print("Error in test setup")
            super().tearDownClass()
            raise
if __name__ == "__main__":
    # Let the shared helper pick which browser suites to run from argv,
    # then execute them with a verbose text runner.
    test_classes = [
        TestWebConfigSuperUserFirefox,
        TestWebConfigSuperUserChromium,
        TestWebConfigSuperUserOpera,
        TestWebConfigSuperUserChrome,
    ]
    suite = helpers.parse_command_line_arguments(sys.argv, test_classes, False)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 50.760965
| 112
| 0.723722
| 3,116
| 23,147
| 5.053915
| 0.059371
| 0.118364
| 0.093282
| 0.053086
| 0.898209
| 0.882906
| 0.860554
| 0.815405
| 0.802451
| 0.793434
| 0
| 0.007396
| 0.182183
| 23,147
| 455
| 113
| 50.872527
| 0.824511
| 0.143993
| 0
| 0.531792
| 0
| 0
| 0.099362
| 0.001773
| 0
| 0
| 0
| 0.002198
| 0.196532
| 1
| 0.121387
| false
| 0.00289
| 0.017341
| 0
| 0.153179
| 0.023121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1eb8c42283005adaaac6f07c7c289aa0b7535c66
| 171
|
py
|
Python
|
python/testData/inspections/AddCallSuperSingleStarParamInSuperInit_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/AddCallSuperSingleStarParamInSuperInit_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/AddCallSuperSingleStarParamInSuperInit_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A:
    # NOTE(review): per the surrounding metadata this file is IntelliJ
    # inspection test data (AddCallSuperSingleStarParamInSuperInit_after.py),
    # i.e. the expected output of a quick-fix — the code shape is intentional.
    def __init__(self, *, kw_only, optional_kw_only=None):
        # Both parameters are keyword-only (bare `*`); body is a deliberate no-op.
        pass
class B(A):
    def __init__(self, *, kw_only):
        # Forwards only the required keyword-only argument;
        # `optional_kw_only` keeps its default (None) from A.__init__.
        super().__init__(kw_only=kw_only)
| 24.428571
| 58
| 0.631579
| 26
| 171
| 3.461538
| 0.461538
| 0.333333
| 0.177778
| 0.266667
| 0.4
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22807
| 171
| 7
| 59
| 24.428571
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
1ec860624124c8102d6b14fdef152255cdd04425
| 146
|
py
|
Python
|
gits/preprocess/crawlers/crawler.py
|
BabyCakes13/GlacierImagePredictor
|
07665dee1a6f77cf5b10da81df5cca880e2e8551
|
[
"Apache-2.0"
] | null | null | null |
gits/preprocess/crawlers/crawler.py
|
BabyCakes13/GlacierImagePredictor
|
07665dee1a6f77cf5b10da81df5cca880e2e8551
|
[
"Apache-2.0"
] | 38
|
2021-03-08T16:03:56.000Z
|
2021-06-28T12:46:22.000Z
|
gits/preprocess/crawlers/crawler.py
|
BabyCakes13/GlacierImagePredictor
|
07665dee1a6f77cf5b10da81df5cca880e2e8551
|
[
"Apache-2.0"
] | null | null | null |
class Crawler:
    """Base crawler holding a root location; crawl() and crawl_into()
    are no-ops here, presumably meant to be overridden by subclasses
    (TODO confirm against the concrete crawlers in this package)."""
    def __init__(self, root):
        # root: the starting point for the crawl (type not shown here).
        self._root = root
    def crawl(self):
        # Intentionally empty in the base class.
        pass
    def crawl_into(self):
        # Intentionally empty in the base class.
        pass
| 14.6
| 29
| 0.554795
| 18
| 146
| 4.166667
| 0.5
| 0.213333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.356164
| 146
| 9
| 30
| 16.222222
| 0.797872
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.285714
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
1ed31225d65e2bb025d8d6baff027959ecb0a34a
| 43
|
py
|
Python
|
optimus/validation/__init__.py
|
ctsit/optimus
|
e073edf36228c78bb7fb5a863a8808c47bf53496
|
[
"Apache-2.0"
] | null | null | null |
optimus/validation/__init__.py
|
ctsit/optimus
|
e073edf36228c78bb7fb5a863a8808c47bf53496
|
[
"Apache-2.0"
] | 4
|
2017-05-16T15:49:56.000Z
|
2018-09-18T20:02:24.000Z
|
optimus/validation/__init__.py
|
ctsit/optimus
|
e073edf36228c78bb7fb5a863a8808c47bf53496
|
[
"Apache-2.0"
] | 3
|
2017-05-16T13:59:50.000Z
|
2018-10-01T14:19:53.000Z
|
from optimus.validation.validator import *
| 21.5
| 42
| 0.837209
| 5
| 43
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ed32df848a71f3683fc504fca07c621c5d5a412
| 59,022
|
py
|
Python
|
tests/test_compiler.py
|
Bakuriu/feanor-csv
|
e69929ae333d118771db4f3a91067d3b5ed84bf3
|
[
"Apache-2.0"
] | 1
|
2018-10-05T02:03:35.000Z
|
2018-10-05T02:03:35.000Z
|
tests/test_compiler.py
|
Bakuriu/feanor-csv
|
e69929ae333d118771db4f3a91067d3b5ed84bf3
|
[
"Apache-2.0"
] | 30
|
2018-07-09T20:37:53.000Z
|
2018-10-05T07:04:55.000Z
|
tests/test_compiler.py
|
Bakuriu/feanor-csv
|
e69929ae333d118771db4f3a91067d3b5ed84bf3
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from feanor.builtin import BuiltInLibrary
from feanor.dsl.ast import *
from feanor.dsl.compiler import *
from feanor.dsl.types import *
from feanor.library import MockLibrary
from feanor.schema import *
class TestTypeInferencer(unittest.TestCase):
def setUp(self):
self.library = BuiltInLibrary({}, random)
self.inferencer = TypeInferencer(self.library.compatibility())
def test_can_infer_type_of_a_type_name_node(self):
got = self.inferencer.infer(TypeNameNode.of('int'))
self.assertEqual(SimpleType('int'), got)
def test_can_infer_type_of_a_type_name_node_with_producer(self):
got = self.inferencer.infer(TypeNameNode.of('int', 'fixed'))
self.assertEqual(SimpleType('int'), got)
def test_inferring_type_of_type_name_node_sets_info_value(self):
tree = TypeNameNode.of('int')
self.inferencer.infer(tree)
self.assertEqual({'type': SimpleType('int')}, tree.info)
def test_inferring_type_of_type_name_node_with_producer_sets_info_value(self):
tree = TypeNameNode.of('int', 'fixed')
self.inferencer.infer(tree)
self.assertEqual({'type': SimpleType('int')}, tree.info)
def test_can_infer_type_of_a_reference_node(self):
expected_type = SimpleType('int')
inferencer = TypeInferencer(self.library.compatibility(), env={'a': expected_type})
got = inferencer.infer(ReferenceNode.of('a'))
self.assertEqual(expected_type, got)
def test_inferring_type_of_reference_node_sets_info_value(self):
expected_type = SimpleType('int')
tree = ReferenceNode.of('a')
inferencer = TypeInferencer(self.library.compatibility(), env={'a': expected_type})
inferencer.infer(tree)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_call(self):
expected_type = SimpleType('int')
arg_type = SimpleType('float')
env = {'a': arg_type}
inferencer = TypeInferencer(self.library.compatibility(), env=env,
func_env={'func': ([arg_type], expected_type)})
got = inferencer.infer(CallNode.of('func', [ReferenceNode.of('a')]))
self.assertEqual(expected_type, got)
def test_inferring_type_of_call_node_sets_info_value(self):
expected_type = SimpleType('int')
arg_type = SimpleType('float')
env = {'a': arg_type}
arg_node = ReferenceNode.of('a')
tree = CallNode.of('func', [arg_node])
inferencer = TypeInferencer(self.library.compatibility(), env=env,
func_env={'func': ([arg_type], expected_type)})
inferencer.infer(tree)
self.assertEqual({'type': arg_type}, arg_node.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_call_when_called_with_compatible_argument(self):
expected_type = SimpleType('int')
arg_type = SimpleType('float')
env = {'a': SimpleType('int')}
inferencer = TypeInferencer(self.library.compatibility(), env=env,
func_env={'func': ([arg_type], expected_type)})
got = inferencer.infer(CallNode.of('func', [ReferenceNode.of('a')]))
self.assertEqual(expected_type, got)
def test_can_infer_type_of_merge(self):
inferencer = TypeInferencer(compatibility=SimpleCompatibility(upperbound=lambda x, y: y))
got = inferencer.infer(BinaryOpNode.of('+', TypeNameNode.of('int'), TypeNameNode.of('float')))
expected_type = SimpleType('float')
self.assertEqual(expected_type, got)
def test_inferring_type_of_merge_sets_info_value(self):
left_arg = TypeNameNode.of('int')
right_arg = TypeNameNode.of('float')
tree = BinaryOpNode.of('+', left_arg, right_arg)
left_arg_type = SimpleType('int')
right_arg_type = SimpleType('float')
expected_type = SimpleType('float')
inferencer = TypeInferencer(compatibility=SimpleCompatibility(lambda x, y: y))
inferencer.infer(tree)
self.assertEqual({'type': left_arg_type}, left_arg.info)
self.assertEqual({'type': right_arg_type}, right_arg.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_choice(self):
got = self.inferencer.infer(BinaryOpNode.of('|', TypeNameNode.of('int'), TypeNameNode.of('float')))
expected_type = ChoiceType([SimpleType('int'), SimpleType('float')])
self.assertEqual(expected_type, got)
self.assertEqual(1, got.num_outputs)
def test_inferring_type_of_choice_sets_info_value(self):
left_arg = TypeNameNode.of('int')
right_arg = TypeNameNode.of('float')
tree = BinaryOpNode.of('|', left_arg, right_arg)
left_arg_type = SimpleType('int')
right_arg_type = SimpleType('float')
expected_type = ChoiceType([left_arg_type, right_arg_type])
inferencer = TypeInferencer(compatibility=lambda x, y: True)
inferencer.infer(tree)
self.assertEqual({'type': left_arg_type}, left_arg.info)
self.assertEqual({'type': right_arg_type}, right_arg.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_nested_choice(self):
left_inner_choice = BinaryOpNode.of('|', TypeNameNode.of('int'), TypeNameNode.of('float'))
right_inner_choice = BinaryOpNode.of('|', TypeNameNode.of('string'), TypeNameNode.of('float'))
tree = BinaryOpNode.of('|', left_inner_choice, right_inner_choice)
got = self.inferencer.infer(tree)
expected_type = ChoiceType([SimpleType('int'), SimpleType('float'), SimpleType('string'), SimpleType('float')])
self.assertEqual(expected_type, got)
self.assertEqual(1, got.num_outputs)
def test_inferring_type_of_nested_choice_sets_info_value(self):
left_arg = TypeNameNode.of('int')
right_arg = TypeNameNode.of('float')
left_inner_choice = BinaryOpNode.of('|', left_arg, right_arg)
left_arg_2 = TypeNameNode.of('string')
right_arg_2 = TypeNameNode.of('float')
right_inner_choice = BinaryOpNode.of('|', left_arg_2, right_arg_2)
tree = BinaryOpNode.of('|', left_inner_choice, right_inner_choice)
left_arg_type = SimpleType('int')
right_arg_type = SimpleType('float')
left_arg_type_2 = SimpleType('string')
right_arg_type_2 = SimpleType('float')
left_inner_choice_type = ChoiceType([left_arg_type, right_arg_type])
right_inner_choice_type = ChoiceType([left_arg_type_2, right_arg_type_2])
expected_type = ChoiceType([left_arg_type, right_arg_type, left_arg_type_2, right_arg_type_2])
self.inferencer.infer(tree)
self.assertEqual({'type': left_arg_type}, left_arg.info)
self.assertEqual({'type': right_arg_type}, right_arg.info)
self.assertEqual({'type': left_arg_type_2}, left_arg_2.info)
self.assertEqual({'type': right_arg_type_2}, right_arg_2.info)
self.assertEqual({'type': left_inner_choice_type}, left_inner_choice.info)
self.assertEqual({'type': right_inner_choice_type}, right_inner_choice.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_concatenation(self):
got = self.inferencer.infer(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('float')))
expected_type = ParallelType([SimpleType('int'), SimpleType('float')])
self.assertEqual(expected_type, got)
self.assertEqual(2, got.num_outputs)
def test_inferring_type_of_concatenation_sets_info_value(self):
left_arg = TypeNameNode.of('int')
right_arg = TypeNameNode.of('float')
tree = BinaryOpNode.of('.', left_arg, right_arg)
left_arg_type = SimpleType('int')
right_arg_type = SimpleType('float')
expected_type = ParallelType([left_arg_type, right_arg_type])
self.inferencer.infer(tree)
self.assertEqual({'type': left_arg_type}, left_arg.info)
self.assertEqual({'type': right_arg_type}, right_arg.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_assignment(self):
got = self.inferencer.infer(AssignNode.of(TypeNameNode.of('int'), 'a'))
self.assertEqual(SimpleType('int'), got)
def test_inferring_type_of_assignment_sets_info_value(self):
expr = TypeNameNode.of('int')
tree = AssignNode.of(expr, 'a')
self.inferencer.infer(tree)
expected_type = SimpleType('int')
self.assertEqual({'type': expected_type}, expr.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_simple_projection(self):
env = {'a': ParallelType([SimpleType('int'), SimpleType('float'), SimpleType('string')])}
inferencer = TypeInferencer(self.library.compatibility(), env=env)
got = inferencer.infer(ProjectionNode.of(ReferenceNode.of('a'), 1))
self.assertEqual(SimpleType('float'), got)
def test_inferring_type_of_simple_projection_sets_info_value(self):
expr_type = ParallelType([SimpleType('int'), SimpleType('float'), SimpleType('string')])
env = {'a': expr_type}
expr = ReferenceNode.of('a')
tree = ProjectionNode.of(expr, 1)
inferencer = TypeInferencer(self.library.compatibility(), env=env)
inferencer.infer(tree)
expected_type = SimpleType('float')
self.assertEqual({'type': expr_type}, expr.info)
self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_projection_with_multiple_indices(self):
env = {'a': ParallelType([SimpleType('int'), SimpleType('float'), SimpleType('string')])}
inferencer = TypeInferencer(self.library.compatibility(), env=env)
got = inferencer.infer(ProjectionNode.of(ReferenceNode.of('a'), 1, 2))
self.assertEqual(ParallelType([SimpleType('float'), SimpleType('string')]), got)
def test_inferring_type_of_projection_with_multiple_indices_sets_info_value(self):
expr_type = ParallelType([SimpleType('int'), SimpleType('float'), SimpleType('string')])
env = {'a': expr_type}
expr = ReferenceNode.of('a')
tree = ProjectionNode.of(expr, 1, 2)
inferencer = TypeInferencer(self.library.compatibility(), env=env)
inferencer.infer(tree)
expected_type = ParallelType([SimpleType('float'), SimpleType('string')])
self.assertEqual({'type': expr_type}, expr.info)
self.assertEqual({'type': expected_type}, tree.info)
    def test_can_infer_type_of_merge_with_multiple_columns(self):
        """Column-wise '+' of two parallel types merges per-column; the
        SimpleCompatibility here always keeps the left-hand type."""
        left_inner_types = [SimpleType('int'), SimpleType('float')]
        left_ty = ParallelType(left_inner_types)
        right_inner_types = [SimpleType('int'), SimpleType('int')]
        right_ty = ParallelType(right_inner_types)
        env = {
            'a': left_ty,
            'b': right_ty,
        }
        left_expr = ReferenceNode.of('a')
        right_expr = ReferenceNode.of('b')
        # lambda x, y: x — merge resolves to the left operand's type.
        inferencer = TypeInferencer(compatibility=SimpleCompatibility(lambda x, y: x), env=env)
        got = inferencer.infer(BinaryOpNode.of('+', left_expr, right_expr))
        expected_type = ParallelType([SimpleType('int'), SimpleType('float')])
        self.assertEqual(expected_type, got)
        self.assertEqual(2, expected_type.num_outputs)
    def test_inferring_type_of_merge_with_multiple_columns_sets_info_value(self):
        """The merge also annotates operand nodes and the result node."""
        left_inner_types = [SimpleType('int'), SimpleType('float')]
        left_ty = ParallelType(left_inner_types)
        right_inner_types = [SimpleType('int'), SimpleType('int')]
        right_ty = ParallelType(right_inner_types)
        env = {
            'a': left_ty,
            'b': right_ty,
        }
        left_expr = ReferenceNode.of('a')
        right_expr = ReferenceNode.of('b')
        tree = BinaryOpNode.of('+', left_expr, right_expr)
        inferencer = TypeInferencer(compatibility=SimpleCompatibility(lambda x, y: x), env=env)
        inferencer.infer(tree)
        expected_type = ParallelType([SimpleType('int'), SimpleType('float')])
        self.assertEqual({'type': left_ty}, left_expr.info)
        self.assertEqual({'type': right_ty}, right_expr.info)
        self.assertEqual({'type': expected_type}, tree.info)
def test_can_infer_type_of_let_expression(self):
got = self.inferencer.infer(LetNode.of([('a', TypeNameNode.of('int'))], ReferenceNode.of('a')))
self.assertEqual(SimpleType('int'), got)
    def test_inferring_type_of_let_expression_sets_info_value(self):
        """Let inference annotates the assignment, the reference, and the
        whole let node with the bound type."""
        assignment = AssignNode.of(TypeNameNode.of('int'), 'a')
        reference = ReferenceNode.of('a')
        tree = LetNode([assignment], reference)
        self.inferencer.infer(tree)
        self.assertEqual({'type': SimpleType('int')}, assignment.info)
        self.assertEqual({'type': SimpleType('int')}, reference.info)
        self.assertEqual({'type': SimpleType('int')}, tree.info)
def test_can_infer_type_of_simple_expr(self):
got = self.inferencer.infer(SimpleExprNode.of(LiteralNode.of(10)))
self.assertEqual(SimpleType('int'), got)
    def test_inferring_type_of_simple_expr_sets_info_value(self):
        """Simple-expr inference annotates both the literal and the wrapper."""
        literal = LiteralNode.of(10)
        tree = SimpleExprNode.of(literal)
        self.inferencer.infer(tree)
        self.assertEqual({'type': SimpleType('int')}, literal.info)
        self.assertEqual({'type': SimpleType('int')}, tree.info)
    def test_can_infer_type_of_simple_expr_with_list(self):
        """A list literal infers to a ParallelType with one entry per element."""
        got = self.inferencer.infer(SimpleExprNode.of(LiteralNode.of([1,2,3])))
        self.assertEqual(ParallelType(3*[SimpleType('int')]), got)
    def test_inferring_type_of_simple_expr_with_list_sets_info_value(self):
        """List-literal inference annotates literal and wrapper identically."""
        literal = LiteralNode.of([1, 2, 3])
        tree = SimpleExprNode.of(literal)
        self.inferencer.infer(tree)
        self.assertEqual({'type': ParallelType(3*[SimpleType('int')])}, literal.info)
        self.assertEqual({'type': ParallelType(3*[SimpleType('int')])}, tree.info)
    def test_can_infer_type_of_simple_expr_with_string(self):
        """A string literal infers to the simple 'string' type."""
        got = self.inferencer.infer(SimpleExprNode.of(LiteralNode.of("ciao")))
        self.assertEqual(SimpleType('string'), got)
    def test_inferring_type_of_simple_expr_with_string_sets_info_value(self):
        """String-literal inference annotates literal and wrapper identically."""
        literal = LiteralNode.of("ciao")
        tree = SimpleExprNode.of(literal)
        self.inferencer.infer(tree)
        self.assertEqual({'type': SimpleType('string')}, literal.info)
        self.assertEqual({'type': SimpleType('string')}, tree.info)
    def test_raises_error_when_merging_incompatible_types(self):
        """Merging int with string is rejected by the default compatibility."""
        with self.assertRaises(TypeError):
            self.inferencer.infer(BinaryOpNode.of('+', TypeNameNode.of('int'), TypeNameNode.of('string')))
    def test_raises_error_when_merging_incompatible_types_with_more_columns(self):
        """Incompatibility in any one column of a parallel merge is an error."""
        left_expr = ReferenceNode.of('a')
        right_expr = ReferenceNode.of('b')
        # Second column pairs string with int — incompatible.
        left_ty = ParallelType([SimpleType('int'), SimpleType('string')])
        right_ty = ParallelType([SimpleType('int'), SimpleType('int')])
        env = {
            'a': left_ty,
            'b': right_ty,
        }
        with self.assertRaises(TypeError):
            inferencer = TypeInferencer(self.library.compatibility(), env=env)
            inferencer.infer(BinaryOpNode.of('+', left_expr, right_expr))
    def test_raises_error_when_reassigning_same_name(self):
        """Rebinding an already-bound name is rejected in every nesting combination."""
        # assign inside assign
        with self.assertRaises(TypeError):
            inferencer = TypeInferencer(self.library.compatibility())
            inferencer.infer(AssignNode.of(AssignNode.of(TypeNameNode.of('int'), 'a'), 'a'))
        # let inside assign
        with self.assertRaises(TypeError):
            inferencer = TypeInferencer(self.library.compatibility())
            inferencer.infer(AssignNode.of(LetNode.of([('a', TypeNameNode.of('int'))], TypeNameNode.of('int')), 'a'))
        # assign inside let
        with self.assertRaises(TypeError):
            inferencer = TypeInferencer(self.library.compatibility())
            inferencer.infer(LetNode.of([('a', AssignNode.of(TypeNameNode.of('int'), 'a'))], TypeNameNode.of('int')))
        # let inside let
        with self.assertRaises(TypeError):
            inferencer = TypeInferencer(self.library.compatibility())
            inferencer.infer(LetNode.of([('a', LetNode.of([('a', TypeNameNode.of('int'))], TypeNameNode.of('float')))],
                                        TypeNameNode.of('int')))
def test_raises_error_if_projecting_on_a_non_composite_node(self):
with self.assertRaises(TypeError):
self.inferencer.infer(ProjectionNode.of(TypeNameNode.of('int'), (0, 1)))
    def test_raises_error_if_projecting_indices_outside_output_dimension(self):
        """Out-of-range projection indices raise with a specific message."""
        with self.assertRaises(TypeError) as ctx:
            self.inferencer.infer(
                ProjectionNode.of(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int')), 2))
        self.assertEqual('Indices out of range for projection', str(ctx.exception))
    def test_raises_error_if_function_called_with_incorrect_number_of_arguments(self):
        """Arity mismatch on a declared function raises a descriptive error."""
        with self.assertRaises(TypeError) as ctx:
            inferencer = TypeInferencer(self.library.compatibility(),
                                        func_env={'ciao': ([SimpleType('int')], SimpleType('int'))})
            inferencer.infer(CallNode.of('ciao', [TypeNameNode.of('int'), TypeNameNode.of('float')]))
        self.assertEqual('Incorrect number of arguments to function ciao: 2 instead of 1', str(ctx.exception))
    def test_raises_error_if_function_called_with_unassignable_argument_type(self):
        # FIXME: compatibility is symmetric. So here we have no way of properly type checking a function call
        with self.assertRaises(TypeError) as ctx:
            inferencer = TypeInferencer(self.library.compatibility(),
                                        func_env={'ciao': ([SimpleType('int')], SimpleType('int'))})
            inferencer.infer(CallNode.of('ciao', [TypeNameNode.of('float')]))
        self.assertEqual('Incompatible types for argument 0 of ciao: float instead of int', str(ctx.exception))
    def test_raises_error_if_function_called_with_incompatible_argument_type(self):
        """A plainly incompatible argument type is rejected with its position."""
        with self.assertRaises(TypeError) as ctx:
            inferencer = TypeInferencer(self.library.compatibility(),
                                        func_env={'ciao': ([SimpleType('int')], SimpleType('int'))})
            inferencer.infer(CallNode.of('ciao', [TypeNameNode.of('string')]))
        self.assertEqual('Incompatible types for argument 0 of ciao: string instead of int', str(ctx.exception))
    def test_raises_error_if_function_called_does_not_exist(self):
        """Calling an undeclared function raises ValueError, not TypeError."""
        with self.assertRaises(ValueError) as ctx:
            inferencer = TypeInferencer(self.library.compatibility())
            inferencer.infer(CallNode.of('ciao', [TypeNameNode.of('string')]))
        self.assertEqual("Unknown function 'ciao'", str(ctx.exception))
    def test_raises_error_if_merge_on_incompatible_parallel_types(self):
        """Parallel-vs-parallel incompatibility names both types in the error."""
        with self.assertRaises(TypeError) as ctx:
            inferencer = TypeInferencer(self.library.compatibility(),
                                        env={
                                            'a': ParallelType([SimpleType('int')]),
                                            'b': ParallelType([SimpleType('string')])
                                        })
            inferencer.infer(BinaryOpNode.of('+', ReferenceNode.of('a'), ReferenceNode.of('b')))
        self.assertEqual("Incompatible types for merge: Parallel(int) and Parallel(string)", str(ctx.exception))
    def test_raises_error_if_merge_on_incompatible_types(self):
        """Parallel-vs-simple incompatibility names both types in the error."""
        with self.assertRaises(TypeError) as ctx:
            inferencer = TypeInferencer(self.library.compatibility(),
                                        env={'a': ParallelType([SimpleType('int')]), 'b': SimpleType('float')})
            inferencer.infer(BinaryOpNode.of('+', ReferenceNode.of('a'), ReferenceNode.of('b')))
        self.assertEqual("Incompatible types for merge: Parallel(int) and float", str(ctx.exception))
    def test_raises_error_if_trying_to_infer_type_of_literal_outside_simple_expr(self):
        """Bare literals are only legal inside a SimpleExprNode wrapper."""
        with self.assertRaises(TypeError) as ctx:
            self.inferencer.infer(LiteralNode.of(5))
        self.assertEqual("This expression can only appear inside a simple expression: 5", str(ctx.exception))
class TestPairBasedCompatibility(unittest.TestCase):
    """Tests for PairBasedCompatibility: compatibility/assignability derived
    from explicitly declared upper-bound tuples of type names.

    Conventions exercised below:
    - ``is_assignable_to`` is reflexive for identical types and follows the
      declared direction (left assignable to right, not vice versa).
    - ``is_compatible`` is symmetric but holds only for declared tuples.
    """
    def _make_compat(self, pairs):
        """Build a PairBasedCompatibility from a set of upper-bound tuples."""
        compat = PairBasedCompatibility()
        compat.add_upperbounds(pairs)
        return compat
    # --- SimpleType cases ---
    def test_simple_type_is_assignable_to_itself_always(self):
        compatibility = self._make_compat(set())
        self.assertTrue(compatibility.is_assignable_to(SimpleType('int'), SimpleType('int')))
    def test_simple_type_is_not_compatible_to_itself_if_not_provided(self):
        compatibility = self._make_compat(set())
        self.assertFalse(compatibility.is_compatible(SimpleType('int'), SimpleType('int')))
    def test_simple_type_is_compatible_with_itself_if_provided(self):
        compatibility = self._make_compat({('int', 'int')})
        self.assertTrue(compatibility.is_compatible(SimpleType('int'), SimpleType('int')))
    def test_simple_type_is_compatible_with_other_type_if_provided(self):
        # Compatibility is symmetric: checked in both directions.
        compatibility = self._make_compat({('int', 'float')})
        self.assertTrue(compatibility.is_compatible(SimpleType('int'), SimpleType('float')))
        self.assertTrue(compatibility.is_compatible(SimpleType('float'), SimpleType('int')))
    def test_simple_type_is_assignable_to_other_type_if_provided(self):
        compatibility = self._make_compat({('int', 'float')})
        self.assertTrue(compatibility.is_assignable_to(SimpleType('int'), SimpleType('float')))
    def test_simple_type_is_not_assignable_to_a_lowerbound(self):
        # Assignability is directional: float -> int is not declared.
        compatibility = self._make_compat({('int', 'float')})
        self.assertFalse(compatibility.is_assignable_to(SimpleType('float'), SimpleType('int')))
    # --- ParallelType cases ---
    def test_parallel_type_type_is_assignable_to_itself_always(self):
        compatibility = self._make_compat(set())
        left = ParallelType([SimpleType('int'), SimpleType('float')])
        right = ParallelType([SimpleType('int'), SimpleType('float')])
        self.assertTrue(compatibility.is_assignable_to(left, right))
    def test_parallel_type_type_is_not_compatible_to_itself_if_not_provided(self):
        compatibility = self._make_compat(set())
        left = ParallelType([SimpleType('int'), SimpleType('float')])
        right = ParallelType([SimpleType('int'), SimpleType('float')])
        self.assertFalse(compatibility.is_compatible(left, right))
    def test_parallel_type_type_is_compatible_with_itself_if_provided(self):
        compatibility = self._make_compat({('int', 'int'), ('float', 'float')})
        left = ParallelType([SimpleType('int'), SimpleType('float')])
        right = ParallelType([SimpleType('int'), SimpleType('float')])
        self.assertTrue(compatibility.is_compatible(left, right))
    def test_parallel_type_type_is_compatible_with_other_type_if_provided(self):
        compatibility = self._make_compat({('int', 'float', 'string')})
        left = ParallelType([SimpleType('int'), SimpleType('float')])
        right = ParallelType([SimpleType('float'), SimpleType('string')])
        self.assertTrue(compatibility.is_compatible(left, right))
        self.assertTrue(compatibility.is_compatible(right, left))
    def test_parallel_type_type_is_assignable_to_other_type_if_provided(self):
        compatibility = self._make_compat({('int', 'float', 'float')})
        left = ParallelType([SimpleType('int'), SimpleType('float')])
        right = ParallelType([SimpleType('float'), SimpleType('float')])
        self.assertTrue(compatibility.is_assignable_to(left, right))
    def test_parallel_type_type_is_not_assignable_to_a_lowerbound(self):
        compatibility = self._make_compat({('int', 'float', 'float')})
        left = ParallelType([SimpleType('int'), SimpleType('float')])
        right = ParallelType([SimpleType('float'), SimpleType('float')])
        self.assertFalse(compatibility.is_assignable_to(right, left))
    # --- ChoiceType cases ---
    def test_choice_type_type_is_assignable_to_itself_always(self):
        compatibility = self._make_compat(set())
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = ChoiceType([SimpleType('int'), SimpleType('float')])
        self.assertTrue(compatibility.is_assignable_to(left, right))
    def test_choice_type_type_is_not_compatible_to_itself_if_not_provided(self):
        compatibility = self._make_compat(set())
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = ChoiceType([SimpleType('int'), SimpleType('float')])
        self.assertFalse(compatibility.is_compatible(left, right))
    def test_choice_type_type_is_not_compatible_with_itself_if_provided_only_self_compatibilities(self):
        # Choice requires cross-alternative pairs, not just per-type self-pairs.
        compatibility = self._make_compat({('int', 'int'), ('float', 'float')})
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = ChoiceType([SimpleType('int'), SimpleType('float')])
        self.assertFalse(compatibility.is_compatible(left, right))
    def test_choice_type_type_is_compatible_with_itself_if_provided_all_pairs(self):
        compatibility = self._make_compat({('int', 'int', 'float', 'float')})
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = ChoiceType([SimpleType('int'), SimpleType('float')])
        self.assertTrue(compatibility.is_compatible(left, right))
    def test_choice_type_type_is_compatible_with_other_type_if_provided_all_pairs(self):
        compatibility = self._make_compat({('int', 'float', 'float', 'string', 'string')})
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = SimpleType('string')
        self.assertTrue(compatibility.is_compatible(left, right))
        self.assertTrue(compatibility.is_compatible(right, left))
    def test_choice_type_type_is_not_compatible_with_other_type_if_provided_only_some_pairs(self):
        compatibility = self._make_compat({('int', 'float'), ('float', 'string')})
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = SimpleType('string')
        self.assertFalse(compatibility.is_compatible(left, right))
        self.assertFalse(compatibility.is_compatible(right, left))
    def test_choice_type_type_is_assignable_to_other_type_if_provided(self):
        compatibility = self._make_compat({('int', 'float', 'float')})
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = ChoiceType([SimpleType('float'), SimpleType('float')])
        self.assertTrue(compatibility.is_assignable_to(left, right))
    def test_choice_type_type_is_not_assignable_to_a_lowerbound(self):
        compatibility = self._make_compat({('int', 'float', 'float')})
        left = ChoiceType([SimpleType('int'), SimpleType('float')])
        right = ChoiceType([SimpleType('float'), SimpleType('float')])
        self.assertFalse(compatibility.is_assignable_to(right, left))
    # --- mixed composite cases ---
    def test_parallel_type_can_be_compatible_with_choice_type(self):
        compatibility = self._make_compat({('int', 'int', 'float', 'float')})
        left_type = ChoiceType([ParallelType(2 * [SimpleType('int')]), ParallelType(2 * [SimpleType('float')])])
        right_type = ParallelType(2 * [SimpleType('float')])
        self.assertTrue(compatibility.is_compatible(left_type, right_type))
        self.assertTrue(compatibility.is_compatible(right_type, left_type))
    def test_simple_is_compatible_with_choice_type(self):
        compatibility = self._make_compat({('int', 'int', 'float', 'float')})
        left_type = SimpleType('int')
        right_type = ChoiceType([SimpleType('int'), SimpleType('float')])
        self.assertTrue(compatibility.is_compatible(left_type, right_type))
        self.assertTrue(compatibility.is_compatible(right_type, left_type))
    # --- AnyType cases ---
    def test_any_type_is_compatible_with_anything(self):
        compatibility = self._make_compat(set())
        self.assertTrue(compatibility.is_compatible(AnyType(), SimpleType('int')))
        self.assertTrue(compatibility.is_compatible(SimpleType('int'), AnyType()))
    def test_can_assign_anything_to_any_type(self):
        compatibility = self._make_compat(set())
        self.assertTrue(compatibility.is_assignable_to(SimpleType('int'), AnyType()))
    def test_any_type_cannot_be_assigned_to_int(self):
        # AnyType is a top type for assignment targets, not sources.
        compatibility = self._make_compat(set())
        self.assertFalse(compatibility.is_assignable_to(AnyType(), SimpleType('int')))
    def test_any_type_can_be_assigned_to_any_type(self):
        compatibility = self._make_compat(set())
        self.assertTrue(compatibility.is_assignable_to(AnyType(), AnyType()))
class TestDefaultCompatibility(unittest.TestCase):
    """Tests for the compatibility object produced by BuiltInLibrary, seeded
    with int/int and float/float upper bounds in setUp."""
    def setUp(self):
        self.compatibility = BuiltInLibrary({}, random).compatibility()
        self.compatibility.add_upperbounds({('int', 'int'), ('float', 'float')})
    def test_identical_simple_types_are_compatible(self):
        self.assertTrue(self.compatibility.is_compatible(SimpleType('int'), SimpleType('int')))
    def test_composite_types_of_same_class_with_one_identical_type_are_compatible(self):
        for cls in (ChoiceType, ParallelType):
            self.assertTrue(
                self.compatibility.is_compatible(cls(2 * [SimpleType('int')]), cls(2 * [SimpleType('int')])))
    def test_two_non_compatible_simple_types_are_not_compatible(self):
        self.assertFalse(self.compatibility.is_compatible(SimpleType('int'), SimpleType('string')))
    def test_two_different_compatible_simple_types_are_compatible(self):
        self.assertTrue(self.compatibility.is_compatible(SimpleType('int'), SimpleType('float')))
    def test_composite_types_of_same_class_with_one_different_compatible_type_are_compatible(self):
        for cls in (ChoiceType, ParallelType):
            self.assertTrue(
                self.compatibility.is_compatible(cls([SimpleType('int')] * 2), cls([SimpleType('float')] * 2)))
    def test_composite_types_of_same_class_with_one_different_non_compatible_type_are_not_compatible(self):
        for cls in (ChoiceType, ParallelType):
            self.assertFalse(
                self.compatibility.is_compatible(cls([SimpleType('int')] * 2), cls([SimpleType('string')] * 2)))
    def test_composite_types_of_different_class_with_one_identical_type_are_not_compatible(self):
        # Choice(int, int) is never compatible with Parallel(int, int) and vice versa.
        arg = SimpleType('int')
        for first in [ChoiceType, ParallelType]:
            for second in [sec for sec in (ChoiceType, ParallelType) if sec != first]:
                self.assertFalse(self.compatibility.is_compatible(first([arg, arg]), second([arg, arg])))
    def test_composite_types_with_multiple_incompatible_types_are_not_compatible(self):
        for cls in (ChoiceType, ParallelType):
            self.assertFalse(self.compatibility.is_compatible(cls([SimpleType('int'), SimpleType('string')]),
                                                              cls([SimpleType('int'), SimpleType('not-string')])))
    def test_parallel_type_with_one_dimension_is_compatible_with_simple_type(self):
        self.assertTrue(self.compatibility.is_compatible(ParallelType([SimpleType('int')]), SimpleType('int')))
        # symmetric case:
        self.assertTrue(self.compatibility.is_compatible(SimpleType('int'), ParallelType([SimpleType('int')])))
    def test_parallel_type_with_more_than_one_dimension_is_not_compatible_with_simple_type(self):
        self.assertFalse(self.compatibility.is_compatible(ParallelType([SimpleType('int')] * 2), SimpleType('int')))
        # symmetric case:
        self.assertFalse(self.compatibility.is_compatible(SimpleType('int'), ParallelType([SimpleType('int')] * 2)))
    def test_choice_type_with_one_dimension_is_compatible_with_simple_type(self):
        self.assertTrue(self.compatibility.is_compatible(ChoiceType([SimpleType('int')]), SimpleType('int')))
        # symmetric case:
        # NOTE(review): this uses a 2-alternative ChoiceType unlike the first
        # assertion's single-alternative one — presumably both count as
        # "one output dimension"; confirm this asymmetry is intended.
        self.assertTrue(self.compatibility.is_compatible(SimpleType('int'), ChoiceType(2 * [SimpleType('int')])))
    def test_choice_type_with_more_than_one_dimension_is_not_compatible_with_simple_type(self):
        self.assertFalse(
            self.compatibility.is_compatible(ChoiceType([ParallelType(2 * [SimpleType('int')])]), SimpleType('int')))
        # symmetric case:
        self.assertFalse(
            self.compatibility.is_compatible(SimpleType('int'), ChoiceType([ParallelType(2 * [SimpleType('int')])])))
class TestCompiler(unittest.TestCase):
    """Tests for Compiler: translating ASTs into Schema objects.

    Expected schemas are built by hand; generated names follow the compiler's
    counters ('producer#N', 'column#N', 'transformer#N'), so assertion order
    mirrors compilation order.
    """
    def setUp(self):
        self.compiler = Compiler(MockLibrary())
    def test_can_compile_a_type_name_node_with_no_config(self):
        """A bare type name compiles to one producer wired to one column."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(TypeNameNode.of('int'))
        self.assertEqual(schema, got)
    def test_compiling_a_type_name_node_with_no_config_sets_info_value(self):
        """Compilation annotates the node with type, names, and assignment."""
        tree = TypeNameNode.of('int')
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': None,
            'in_names': [],
            'out_names': ['producer#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_a_type_name_node_with_producer_no_config(self):
        """An explicit producer name overrides the type as the producer type."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='fixed')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(TypeNameNode.of('int', 'fixed'))
        self.assertEqual(schema, got)
    def test_compiling_a_type_name_node_with_producer_no_config_sets_info_value(self):
        """Info still reports the declared type even with a custom producer."""
        tree = TypeNameNode.of('int', 'fixed')
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': None,
            'in_names': [],
            'out_names': ['producer#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_a_type_name_node_with_config(self):
        """A config dict is forwarded to the generated producer."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='int', config={'min': 10})
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(TypeNameNode.of('int', config={'min': 10}))
        self.assertEqual(schema, got)
    def test_compiling_a_type_name_node_with_config_sets_info_value(self):
        tree = TypeNameNode.of('int', config={'min': 10})
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': None,
            'in_names': [],
            'out_names': ['producer#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_a_type_name_node_with_producer_config(self):
        """Producer name and config can be combined on one node."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='fixed', config={'value': 10})
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(TypeNameNode.of('int', 'fixed', config={'value': 10}))
        self.assertEqual(schema, got)
    def test_compiling_a_type_name_node_with_producer_config_sets_info_value(self):
        tree = TypeNameNode.of('int', 'fixed', config={'value': 10})
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': None,
            'in_names': [],
            'out_names': ['producer#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_an_assignment_of_a_type_name(self):
        """Assignment renames the output column to the assigned name."""
        schema = Schema()
        schema.add_column('a')
        schema.add_producer('producer#0', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(AssignNode.of(TypeNameNode.of('int'), 'a'))
        self.assertEqual(schema, got)
    def test_compiling_an_assignment_of_a_type_name_sets_info_value(self):
        """Assignment records 'assigned_name' and routes names through it."""
        tree = AssignNode.of(TypeNameNode.of('int'), 'a')
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': 'a',
            'in_names': ['producer#0'],
            'out_names': ['a'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_concatenation_of_two_type_names(self):
        """'.' concatenation yields two independent producer/column chains."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_column('column#1')
        schema.add_producer('producer#0', type='int')
        schema.add_producer('producer#1', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        schema.add_transformer('transformer#1', inputs=['producer#1'], outputs=['column#1'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int')))
        self.assertEqual(schema, got)
    def test_compiling_concatenation_of_two_type_names_sets_info_value(self):
        """Concatenation produces a ParallelType and pass-through names."""
        tree = BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int'))
        self.compiler.compile(tree)
        expected_info = {
            'type': ParallelType([SimpleType('int'), SimpleType('int')]), 'assigned_name': None,
            'in_names': ['producer#0', 'producer#1'],
            'out_names': ['producer#0', 'producer#1'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_choice_of_two_type_names(self):
        """'|' choice funnels both producers through a ChoiceTransformer."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='int')
        schema.add_producer('producer#1', type='int')
        # Equal 0.5/0.5 weights: each alternative is equally likely.
        schema.add_transformer('transformer#0', inputs=['producer#0', 'producer#1'], outputs=['transformer#0'],
                               transformer=ChoiceTransformer(2, 0.5, 0.5))
        schema.add_transformer('transformer#1', inputs=['transformer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(BinaryOpNode.of('|', TypeNameNode.of('int'), TypeNameNode.of('int')))
        self.assertEqual(schema, got)
    def test_compiling_choice_of_two_type_names_sets_info_value(self):
        """Choice yields a ChoiceType and a single transformer out-name."""
        tree = BinaryOpNode.of('|', TypeNameNode.of('int'), TypeNameNode.of('int'))
        self.compiler.compile(tree)
        expected_info = {
            'type': ChoiceType([SimpleType('int'), SimpleType('int')]), 'assigned_name': None,
            'out_names': ['transformer#0'],
            'in_names': ['producer#0', 'producer#1'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_merge_of_two_type_names(self):
        """'+' merge funnels both producers through a MergeTransformer."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='int')
        schema.add_producer('producer#1', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0', 'producer#1'],
                               outputs=['transformer#0#0'],
                               transformer=MergeTransformer(2))
        schema.add_transformer('transformer#1', inputs=['transformer#0#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(BinaryOpNode.of('+', TypeNameNode.of('int'), TypeNameNode.of('int')))
        self.assertEqual(schema, got)
    def test_compiling_merge_of_two_type_names_sets_info_value(self):
        """Merge of two ints collapses to a single SimpleType('int')."""
        tree = BinaryOpNode.of('+', TypeNameNode.of('int'), TypeNameNode.of('int'))
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': None,
            'in_names': ['producer#0', 'producer#1'],
            'out_names': ['transformer#0#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_reference(self):
        """A reference reuses the assigned producer instead of creating one."""
        schema = Schema()
        schema.add_column('a')
        schema.add_column('column#1')
        schema.add_producer('producer#0', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                               transformer=IdentityTransformer(1))
        # The reference taps the same producer#0 into a second column.
        schema.add_transformer('transformer#1', inputs=['producer#0'], outputs=['column#1'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(
            BinaryOpNode.of(
                '.',
                AssignNode.of(TypeNameNode.of('int'), 'a'),
                ReferenceNode.of('a')
            )
        )
        self.assertEqual(schema, got)
    def test_compiling_reference_sets_info_value(self):
        """Reference compilation reports both the alias and the producer."""
        tree = BinaryOpNode.of('.', AssignNode.of(TypeNameNode.of('int'), 'a'), ReferenceNode.of('a'))
        self.compiler.compile(tree)
        expected_info = {
            'type': ParallelType([SimpleType('int'), SimpleType('int')]),
            'in_names': ['a', 'producer#0'],
            'out_names': ['a', 'producer#0'],
            'assigned_name': None,
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_with_two_references_same_values(self):
        """Two references to one assignment share a single producer."""
        schema = Schema()
        schema.add_column('a')
        schema.add_column('column#1')
        schema.add_column('column#2')
        schema.add_producer('producer#0', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                               transformer=IdentityTransformer(1))
        schema.add_transformer('transformer#1', inputs=['producer#0'], outputs=['column#1'],
                               transformer=IdentityTransformer(1))
        schema.add_transformer('transformer#2', inputs=['producer#0'], outputs=['column#2'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(
            BinaryOpNode.of(
                '.',
                AssignNode.of(TypeNameNode.of('int'), 'a'),
                BinaryOpNode.of(
                    '.',
                    ReferenceNode.of('a'),
                    ReferenceNode.of('a')
                )
            ))
        self.assertEqual(schema, got)
    def test_compiling_with_two_references_same_values_sets_info_value(self):
        """Both references surface the same producer name in the info lists."""
        tree = BinaryOpNode.of(
            '.',
            AssignNode.of(TypeNameNode.of('int'), 'a'),
            BinaryOpNode.of(
                '.',
                ReferenceNode.of('a'),
                ReferenceNode.of('a'),
            )
        )
        self.compiler.compile(tree)
        expected_info = {
            'type': ParallelType([SimpleType('int'), SimpleType('int'), SimpleType('int')]),
            'assigned_name': None,
            'in_names': ['a', 'producer#0', 'producer#0'],
            'out_names': ['a', 'producer#0', 'producer#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_an_assignment_of_two_type_names(self):
        """Assigning a concatenation suffixes the name per column: a#0, a#1."""
        schema = Schema()
        schema.add_column('a#0')
        schema.add_column('a#1')
        schema.add_producer('producer#0', type='int')
        schema.add_producer('producer#1', type='float')
        schema.add_transformer('transformer#0', inputs=['producer#0', 'producer#1'], outputs=['a#0', 'a#1'],
                               transformer=IdentityTransformer(2))
        got = self.compiler.compile(
            AssignNode.of(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('float')), 'a'))
        self.assertEqual(schema, got)
    def test_compiling_an_assignment_of_two_type_names_sets_info_value(self):
        tree = AssignNode.of(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('float')), 'a')
        self.compiler.compile(tree)
        expected_info = {
            'type': ParallelType([SimpleType('int'), SimpleType('float')]),
            'assigned_name': 'a',
            'in_names': ['producer#0', 'producer#1'],
            'out_names': ['a#0', 'a#1'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_projection_of_concatenation(self):
        """Projection keeps both producers but wires only the kept column."""
        schema = Schema()
        schema.add_column('column#0')
        schema.add_producer('producer#0', type='int')
        # producer#1 is still created even though index 0 discards it.
        schema.add_producer('producer#1', type='float')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['column#0'],
                               transformer=IdentityTransformer(1))
        got = self.compiler.compile(
            ProjectionNode.of(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('float')), 0))
        self.assertEqual(schema, got)
    def test_compiling_projection_of_concatenation_sets_info_values(self):
        """Projection narrows out_names to the selected producer only."""
        tree = ProjectionNode.of(BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('float')), 0)
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': None,
            'in_names': ['producer#0', 'producer#1'],
            'out_names': ['producer#0'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_double_assignment(self):
        """Nested assignment chains identity transformers a -> b; only the
        outermost name becomes a column."""
        schema = Schema()
        schema.add_column('b')
        schema.add_producer('producer#0', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                               transformer=IdentityTransformer(1))
        schema.add_transformer('transformer#1', inputs=['a'], outputs=['b'], transformer=IdentityTransformer(1))
        got = self.compiler.compile(AssignNode.of(AssignNode.of(TypeNameNode.of('int'), 'a'), 'b'))
        self.assertEqual(schema, got)
    def test_compiling_double_assignment_sets_info_value(self):
        """Outer assignment's info reads from the inner alias 'a'."""
        tree = AssignNode.of(AssignNode.of(TypeNameNode.of('int'), 'a'), 'b')
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': 'b',
            'in_names': ['a'],
            'out_names': ['b'],
        }
        self.assertEqual(expected_info, tree.info)
    def test_can_compile_triple_assignment(self):
        """Three nested assignments chain a -> b -> c identity transformers."""
        schema = Schema()
        schema.add_column('c')
        schema.add_producer('producer#0', type='int')
        schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                               transformer=IdentityTransformer(1))
        schema.add_transformer('transformer#1', inputs=['a'], outputs=['b'], transformer=IdentityTransformer(1))
        schema.add_transformer('transformer#2', inputs=['b'], outputs=['c'], transformer=IdentityTransformer(1))
        got = self.compiler.compile(AssignNode.of(AssignNode.of(AssignNode.of(TypeNameNode.of('int'), 'a'), 'b'), 'c'))
        self.assertEqual(schema, got)
    def test_compiling_triple_assignment_sets_info_value(self):
        tree = AssignNode.of(AssignNode.of(AssignNode.of(TypeNameNode.of('int'), 'a'), 'b'), 'c')
        self.compiler.compile(tree)
        expected_info = {
            'type': SimpleType('int'),
            'assigned_name': 'c',
            'in_names': ['b'],
            'out_names': ['c'],
        }
        self.assertEqual(expected_info, tree.info)
def test_can_compile_concatenation_with_assignment_inside(self):
schema = Schema()
schema.add_column('a')
schema.add_column('column#1')
schema.add_producer('producer#0', type='int')
schema.add_producer('producer#1', type='float')
schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
transformer=IdentityTransformer(1))
schema.add_transformer('transformer#1', inputs=['producer#1'], outputs=['column#1'],
transformer=IdentityTransformer(1))
got = self.compiler.compile(
BinaryOpNode.of('.', AssignNode.of(TypeNameNode.of('int'), 'a'), TypeNameNode.of('float')))
self.assertEqual(schema, got)
def test_compiling_concatenation_with_assignment_inside_sets_info_value(self):
tree = BinaryOpNode.of('.', AssignNode.of(TypeNameNode.of('int'), 'a'), TypeNameNode.of('float'))
self.compiler.compile(tree)
expected_info = {
'type': ParallelType([SimpleType('int'), SimpleType('float')]),
'assigned_name': None,
'in_names': ['a', 'producer#1'],
'out_names': ['a', 'producer#1'],
}
self.assertEqual(expected_info, tree.info)
def test_can_compile_concatenation_with_assignment_inside_on_concat(self):
    """Assigning a name to a concatenation fans it out to indexed columns a#0, a#1."""
    schema = Schema()
    schema.add_column('a#0')
    schema.add_column('a#1')
    schema.add_column('column#2')
    schema.add_producer('producer#0', type='int')
    schema.add_producer('producer#1', type='int')
    schema.add_producer('producer#2', type='float')
    schema.add_transformer('transformer#0', inputs=['producer#0', 'producer#1'], outputs=['a#0', 'a#1'],
                           transformer=IdentityTransformer(2))
    schema.add_transformer('transformer#1', inputs=['producer#2'], outputs=['column#2'],
                           transformer=IdentityTransformer(1))
    got = self.compiler.compile(BinaryOpNode.of('.', AssignNode.of(
        BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int')), 'a'), TypeNameNode.of('float')))
    self.assertEqual(schema, got)
def test_compiling_concatenation_with_assignment_inside_on_concat_sets_info_value(self):
    """Root info of a nested concat/assign tree lists all three parallel types and fanned-out names."""
    tree = BinaryOpNode.of('.', AssignNode.of(
        BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int')), 'a'), TypeNameNode.of('float'))
    self.compiler.compile(tree)
    expected_info = {
        'type': ParallelType([SimpleType('int'), SimpleType('int'), SimpleType('float')]),
        'assigned_name': None,
        'in_names': ['a#0', 'a#1', 'producer#2'],
        'out_names': ['a#0', 'a#1', 'producer#2'],
    }
    self.assertEqual(expected_info, tree.info)
def test_can_compile_simple_let_expression(self):
    """A let-binding plus a reference reuses the bound producer for both outputs."""
    schema = Schema()
    schema.add_column('column#0')
    schema.add_producer('producer#0', type='int')
    # Both transformers read from the same producer: the binding does not
    # duplicate the value source.
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                           transformer=IdentityTransformer(1))
    schema.add_transformer('transformer#1', inputs=['producer#0'], outputs=['column#0'],
                           transformer=IdentityTransformer(1))
    got = self.compiler.compile(LetNode.of([('a', TypeNameNode.of('int'))], ReferenceNode.of('a')))
    self.assertEqual(schema, got)
def test_can_compile_expression_with_type_config(self):
    """A type's config dict (e.g. {'min': 10}) is carried through to the producer."""
    schema = Schema()
    schema.add_column('column#0')
    schema.add_producer('producer#0', type='int', config={'min': 10})
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'],
                           transformer=IdentityTransformer(1))
    schema.add_transformer('transformer#1', inputs=['producer#0'], outputs=['column#0'],
                           transformer=IdentityTransformer(1))
    got = self.compiler.compile(
        LetNode.of([('a', TypeNameNode.of('int', config={'min': 10}))], ReferenceNode.of('a')))
    self.assertEqual(schema, got)
def test_when_compiling_multiple_expressions_number_of_outputs_per_expression_is_taken_into_account(self):
    """Explicit column_names are matched positionally to each expression's outputs."""
    schema = Schema()
    schema.add_column('INTERO')
    schema.add_column('FLOAT')
    schema.add_producer('producer#0', type='int')
    schema.add_producer('producer#1', type='float')
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['INTERO'],
                           transformer=IdentityTransformer(1))
    schema.add_transformer('transformer#1', inputs=['producer#1'], outputs=['FLOAT'],
                           transformer=IdentityTransformer(1))
    expr = BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('float'))
    got = self.compiler.compile(expr, column_names=['INTERO', 'FLOAT'])
    self.assertEqual(schema, got)
def test_when_providing_less_than_the_number_of_columns_values_are_selected(self):
    """With fewer column_names than values, only the matching named value ('a') is kept."""
    schema = Schema()
    schema.add_column('a')
    schema.add_producer('producer#0', type='int')
    schema.add_producer('producer#1', type='float')
    # Only the float branch (assigned to 'a') gets a transformer; the unnamed
    # int producer is left unconnected to any column.
    schema.add_transformer('transformer#0', inputs=['producer#1'], outputs=['a'],
                           transformer=IdentityTransformer(1))
    expr = BinaryOpNode.of('.', TypeNameNode.of('int'), AssignNode.of(TypeNameNode.of('float'), 'a'))
    got = self.compiler.compile(expr, column_names=['a'])
    self.assertEqual(schema, got)
def test_can_compile_call_node(self):
    """A registered library function call compiles to a FunctionalTransformer stage."""
    library = MockLibrary()
    func = lambda x: x
    library.register_function('ciao', func, [SimpleType('string')], SimpleType('string'))
    compiler = Compiler(library)
    schema = Schema()
    schema.add_column('a')
    schema.add_producer('producer#0', type='string')
    # The call's result is produced under the transformer's own name and then
    # routed to column 'a' by an identity stage.
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['transformer#0'], transformer=FunctionalTransformer(func))
    schema.add_transformer('transformer#1', inputs=['transformer#0'], outputs=['a'], transformer=IdentityTransformer(1))
    expr = CallNode.of('ciao', [TypeNameNode.of('string')])
    got = compiler.compile(expr, column_names=['a'])
    self.assertEqual(schema, got)
def test_can_compile_a_simple_expression(self):
    """A literal compiles to a 'fixed' producer configured with the literal's value."""
    schema = Schema()
    schema.add_column('a')
    schema.add_producer('producer#0', type='fixed', config={'value': 5})
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'], transformer=IdentityTransformer(1))
    expr = SimpleExprNode.of(LiteralNode.of(5))
    got = self.compiler.compile(expr, column_names=['a'])
    self.assertEqual(schema, got)
def test_can_compile_a_reference_to_an_env_var(self):
    """A reference to a registered environment variable compiles to a 'fixed' producer."""
    library = MockLibrary()
    library.register_variable('ciao', 5, SimpleType('int'))
    compiler = Compiler(library)
    schema = Schema()
    schema.add_column('a')
    schema.add_producer('producer#0', type='fixed', config={'value': 5})
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'], transformer=IdentityTransformer(1))
    expr = ReferenceNode.of('ciao')
    got = compiler.compile(expr, column_names=['a'])
    self.assertEqual(schema, got)
def test_multiple_references_to_env_var_do_not_create_more_producers(self):
    """Referencing the same env var twice reuses one producer for both columns."""
    library = MockLibrary()
    library.register_variable('ciao', 5, SimpleType('int'))
    compiler = Compiler(library)
    schema = Schema()
    schema.add_column('a')
    schema.add_column('b')
    # A single producer#0 feeds both transformers: the variable is cached.
    schema.add_producer('producer#0', type='fixed', config={'value': 5})
    schema.add_transformer('transformer#0', inputs=['producer#0'], outputs=['a'], transformer=IdentityTransformer(1))
    schema.add_transformer('transformer#1', inputs=['producer#0'], outputs=['b'], transformer=IdentityTransformer(1))
    expr = BinaryOpNode.of('.', ReferenceNode.of('ciao'), ReferenceNode.of('ciao'))
    got = compiler.compile(expr, column_names=['a', 'b'])
    self.assertEqual(schema, got)
def test_example_with_merge(self):
    """The '+' operator merges two single-output operands through a MergeTransformer."""
    schema = Schema()
    schema.add_column('transformer#1#0')
    schema.add_producer('producer#0', type='int')
    schema.add_producer('producer#1', type='int')
    schema.add_producer('producer#2', type='float')
    schema.add_transformer('transformer#0', inputs=['producer#1'], outputs=['a'],
                           transformer=IdentityTransformer(1))
    # The merge combines the anonymous int and the assigned 'a' into a single
    # auto-named output column.
    schema.add_transformer('transformer#1', inputs=['producer#0', 'a'], outputs=['transformer#1#0'],
                           transformer=MergeTransformer(2))
    expr = BinaryOpNode.of('.',
                           BinaryOpNode.of('+', TypeNameNode.of('int'), AssignNode.of(TypeNameNode.of('int'), 'a')),
                           TypeNameNode.of('float'))
    got = self.compiler.compile(expr, column_names=['transformer#1#0'])
    self.assertEqual(schema, got)
def test_example_with_merge_incompatible_num_of_outputs(self):
    """Merging a 1-output operand with a 2-output operand raises TypeError."""
    expr = BinaryOpNode.of(
        '+',
        TypeNameNode.of('int'),
        BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int'))
    )
    with self.assertRaises(TypeError):
        self.compiler.compile(expr, column_names=['transformer#1#0'])
def test_example_with_merge_incompatible_num_of_outputs_more_outputs(self):
    """Merging a 3-output operand with a 2-output operand also raises TypeError."""
    expr = BinaryOpNode.of(
        '+',
        BinaryOpNode.of('.', TypeNameNode.of('int'), BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int'))),
        BinaryOpNode.of('.', TypeNameNode.of('int'), TypeNameNode.of('int'))
    )
    with self.assertRaises(TypeError):
        self.compiler.compile(expr, column_names=['transformer#1#0'])
def test_raises_error_with_invalid_binary_operator(self):
    """An unsupported binary operator ('<') is rejected with TypeError."""
    with self.assertRaises(TypeError):
        self.compiler.compile(BinaryOpNode.of('<', TypeNameNode.of('int'), TypeNameNode.of('float')))
def test_raises_error_when_too_many_columns_specified(self):
    """Passing more column names than produced values raises TypeError with a clear message."""
    with self.assertRaises(TypeError) as ctx:
        self.compiler.compile(TypeNameNode.of('int'), column_names=['A', 'B'])
    self.assertEqual('defined 2 columns but only 1 values produced.', str(ctx.exception))
def test_raises_error_if_reference_is_not_defined_and_not_a_constant(self):
    """An unresolved reference raises KeyError carrying the missing name."""
    with self.assertRaises(KeyError) as ctx:
        self.compiler.compile(ReferenceNode.of('@non_existent'), column_names=['A'])
    self.assertEqual("'@non_existent'", str(ctx.exception))
| 51.547598
| 138
| 0.652011
| 6,610
| 59,022
| 5.570045
| 0.037065
| 0.049785
| 0.038785
| 0.023222
| 0.89937
| 0.872019
| 0.838231
| 0.800532
| 0.760416
| 0.701939
| 0
| 0.007365
| 0.206296
| 59,022
| 1,144
| 139
| 51.592657
| 0.778572
| 0.003982
| 0
| 0.574037
| 0
| 0
| 0.091209
| 0
| 0
| 0
| 0
| 0.000874
| 0.182556
| 1
| 0.134888
| false
| 0
| 0.007099
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
94a44723921718171c92b4b68b7e734d4fc957ed
| 749
|
py
|
Python
|
app/admin/forms.py
|
lbgutierrez/kimble
|
35a5eb9a6899bd5840dbf88060cadbb60c52f946
|
[
"Apache-2.0"
] | null | null | null |
app/admin/forms.py
|
lbgutierrez/kimble
|
35a5eb9a6899bd5840dbf88060cadbb60c52f946
|
[
"Apache-2.0"
] | null | null | null |
app/admin/forms.py
|
lbgutierrez/kimble
|
35a5eb9a6899bd5840dbf88060cadbb60c52f946
|
[
"Apache-2.0"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms.fields.core import StringField
from wtforms.widgets import TextArea
from wtforms.validators import DataRequired, Length
class CategoryForm(FlaskForm):
    """Admin form for creating or editing a category.

    Fields: required name (<= 64 chars), required alias (<= 20 chars),
    and a free-text description rendered as a textarea.
    """

    name = StringField("Nombre", validators=[DataRequired(), Length(1, 64)])
    alias = StringField("Alias", validators=[DataRequired(), Length(1, 20)])
    description = StringField("Descripcion", widget=TextArea())
class SubcategoryForm(FlaskForm):
    """Admin form for creating or editing a subcategory.

    Unlike `CategoryForm`, the description here is required and limited
    to 64 characters.
    """

    name = StringField("Nombre", validators=[DataRequired(), Length(1, 64)])
    alias = StringField("Alias", validators=[DataRequired(), Length(1, 20)])
    description = StringField(
        "Descripcion",
        widget=TextArea(),
        validators=[DataRequired(), Length(1, 64)],
    )
| 53.5
| 113
| 0.708945
| 75
| 749
| 7.066667
| 0.346667
| 0.203774
| 0.264151
| 0.273585
| 0.662264
| 0.603774
| 0.603774
| 0.603774
| 0.603774
| 0.603774
| 0
| 0.02381
| 0.158879
| 749
| 14
| 113
| 53.5
| 0.81746
| 0
| 0
| 0.333333
| 0
| 0
| 0.058667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
94e36ba1cc9f80daa04caae769647906fb7dfdb7
| 130
|
py
|
Python
|
redirector/schemas.py
|
dyakovri/redirector-api
|
cd576d9b27b4de5777dd2112e9e355e8585dc44e
|
[
"BSD-2-Clause"
] | 1
|
2022-03-23T17:56:15.000Z
|
2022-03-23T17:56:15.000Z
|
redirector/schemas.py
|
dyakovri/redirector-api
|
cd576d9b27b4de5777dd2112e9e355e8585dc44e
|
[
"BSD-2-Clause"
] | null | null | null |
redirector/schemas.py
|
dyakovri/redirector-api
|
cd576d9b27b4de5777dd2112e9e355e8585dc44e
|
[
"BSD-2-Clause"
] | null | null | null |
from pydantic import BaseModel
from pydantic.networks import AnyHttpUrl
class NewRedirectUrl(BaseModel):
    """Request body for creating a redirect entry."""
    # Destination of the redirect; pydantic validates it as an HTTP(S) URL.
    url_to: AnyHttpUrl
| 18.571429
| 40
| 0.823077
| 15
| 130
| 7.066667
| 0.666667
| 0.226415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 130
| 6
| 41
| 21.666667
| 0.946429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
94f80617de0ce79422651cb5bc79ab954d675f56
| 11,154
|
py
|
Python
|
2021/2021_09b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
2021/2021_09b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
2021/2021_09b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
from functools import reduce
input = """9876543234679310943456798433456798998764321357921025689921987899896498799923491297654545679876212347
6987675036678939874567987012567897899975532467892334567890996789789989679895989398743236789865101456
5598983124589598765689765423678956789876543568943565678999765395679876598789678987654345998764313689
4349994235695329976789998634678946789987987679757678989398653234598767459654567998767499899875424568
3298987656795419899896459745989134568998998789968789699219772123987654398753456899989989689986576789
2126598787896998788902349897991013457899999997989994567909765244598875697542346789798764593987897897
1012349898999887567893956999543234967967988656799323459898954355699876989331238996659875691298998956
4123467999598765456789897987654347899659876545678912398767896466789987979210349965545996789399659235
3234598985439878567896789198795456789898765434189329987546789998994699867991467894534987899985432123
4347899876567989678965994349987897899909996521034998675323597899123989659889578943023498999875321014
5456987997689299989254789498798998999919989992129876543213456789239877545678989432145999998989432345
6567896798792109892123569987679999998898767889234997854324597994399765434689996565236789987698544456
7898945689899998763012458976597989987784345678949998985434989219987654323456899854345699996597655678
8949239796958899954124567895435678996543234567897889876599878998698765435678998765496978989498966899
9932198965346789867258789932124567987655123458976778987988769876569876646889019887989869979329987957
9893987896234568954345898743245798998766234567894567899875457987421987756799934999876649868912398946
8789876789195689875457999655466799679879845678913348987654346987630298867897895988965432946794459435
7653245678989799876767899867578954568989658799101234999869234598541349998976789876894321987895568910
9654134789678986988979989878989243456799767895312349898998945987676556789345698765789210398996678999
8765245696567895399898979999599012367899898986423498776566899999887687990296987984694331239989989888
9954346897379964219787767893478943479901999597434569653465678999998798921989896593789452398978998767
0976557898298965398656456794569894998919589439765698542324599689929899439976789432599543987767999656
2998678999997897987642347895698769867898478929876987321015689599845996598765678953498959876656897545
9859789899886789998756468998789655456797569999989996432326796498656789987654567894997899965545998968
7643996798654567949898979659897643245698979989994987543689895329868999998765678949876778964234899879
5432345697543458957999989545998732124569998967943198964599943210979098969876899129875467996446789989
8584557987654569767899993123989841034979987654599979989679985341989197854987989039654346789677899899
7675678998765699898988932034976432149898998863278954393989876832698986543298968998765956799898998789
8776789439897789909977794255987543298787899985369893212399989764567997432129456789979899989949987699
9899899524989896429865689356898655398656799876456789105679899895678987521012359891398797778932397569
9987978939878987898754578967898786499545892998968994323456789989989498432154467910987676567891986478
9876568998767898999743679879969897987656901239879895544678999879897599543265998934977567456789765399
7765459876546999898654798989456998998767893446989796665678998768789987654399899949765432345996996989
6984345995437898759995987892399999439978998669995689776889997656667998969989789899986645567895789878
5493234989425789647889876891987899321989998778934578987999876543459549998875569789998786688934899767
4321049875414678936979965789976678910198989989323469998967987652368932987654414579989887999023987656
5493959954323469324767894679864567891987878993212378999545698710456891099843203458976998942125998543
7989898965434568913456893589653456789876567992105567894434789322567992129874212367895459993349879432
8979767996576679102378921098732345898676456789213458943226798763456789298765673456789345989659765310
9865656889677889293467892129641237899545345698924567899012999654567899349876654597891299978978987821
8654345679898999989679953498432356789321234567897698978929898765678998956987875698910987567899398932
7543234578929898878989769976544578996532356878998789567998769888789987897898986789321297478921239543
5432143456919657667899898989757689987653479989019893478987847999898796789949997996548396567890198656
8961012367898943456789987698768789998954568998929989569876435445989654678929898987657987678954239967
7642123456976432367893297569899898769765679456998678978987321334678965799898769598767998999876349879
8843234568965321245892195479901989859887894367899568989765410123457896893799654329878999899987456989
9754545678976432496789989567899876543998999578965467999876923245569987932679954212989898789998567894
9898758789876545789897678998967989862369998679754345699989874356998898921569896102398767678999978943
9998767894988656896935569549459898973456899789643234987998765459876789932499789213988654589989899432
8789978943299767965423478921298767895569964996532146986799878598765999893987689929876542679878798921
9689989652129878987314567890989856789678953987844299875989989679754666789986567898986321299767687899
6567897541012989796205679999976545679999654598765987654678998798673245678965438957895410987954576778
4489995432123497654317895798765432459898767679876798763567899899542134589875312346689929996543134567
3235789543235698785456954349876721248789878789989899874698999998753234696543201234567898987654235678
2124678965346789896787893212987210187678989892198942976789998769876545987654415345698976798966547899
1014589878456893987899954301297321234589899999977893988894987456998668998765623466989765429987858943
2123578989569902398978975212976542475789789998756789199953986568989889679876734569879954312398969652
3234567897698943469869865323987643567898699999547894349992197689679995566987655698767893202459878943
4345978998997899598756998764598764678987569898769976998989999796598754324499878999859994312378989965
5656899569866968965431279879689985789998498769878989876865778965469843212347989898948975459459999876
6787932499754357896542456998789699899886329856989999765954567894345954353456798797837988678969878997
7898953987643239919757567899897543998765498745899886644212678901234969754668997655126898789998868998
8999654996544128929898978934998631349877899635789765432103789212349898975678986543235679892987657899
9998969875431017999939989325698752356989998523489876643214897423598787896999697655356799921098545989
9997978996652126789129893216799763467899896412678999765625996545987676569896598766587898943985432877
9886899429863245679399789109999878978998765324569769889436789679876543456789439898698987899874321466
8765678910964376899987698998784989989549995445678945996547899798965432387696429999789556789766440355
7654569899875487999896587899543494399929987678799434987667934977994321234597998998995445678954321234
6543456789989568998785476998932359239898998989893223699788999866789410165789896987654324479765535445
5432567899987678997665365767899498998767999699932104567999987654789421256899765698985212356986787568
4321256789999789886543214456798997987656789569543213469765498765678933345998754109876323567897898679
5434345678998998765432102349987856798545993498994999578954329878989654658987653212987434878998929989
6565656799567899876545214498986534987656789597789878989865912989498768767899864324598546789989939999
7676768923456999987756725987995423898967896986678767993999894994239989878998975435987659896765798989
8787879734567898998998999876789545789989954965483458912987789892129796989787896745898789975454447678
9898989656789987889999987494899656789195899754312379909876556789097645692546999856789897654322334589
8969398797996545978899976323678967991024789876106567899985434567998732101235678969897998854310123459
7654249899975323456789985214567898942195678998217879999876524567986544212346989989976799965924265678
8652135987976896568999953107998929769989899999356989998765213456987656434587896492455678999895696799
9543299876989987689659864315789319898878989876467999989874301345699786547998954321234589987689989892
7654987664698998796549874323498901997659878987578997779765432456789987856899967432355678996578678921
9965799543567899987856975434567892987543656898989986569876753697897598767977898645698789975457568910
9899898632356792198977896565678969876532345689995987432987884789966449878956789876899899764325457891
6678997653458999299989979876799349865431334678953294321098765789654323989345999988967998955212345789
4599298964578998988998968987891234986210123689964498753129877897654312993234789999654987742101234699
3989129765679997667987899998910129876433234589876569876534988998973209894365678919869876543232348789
2878939876799896543566999879321236997645345679987893997699999529994698765489799201978997654343469892
3467899987986789432355698765432345698987456789298932398988965410989989876569895412399989765499598921
5679979999895678921234569989543456789998987891019643599877896329879879999678976523989878979988987932
6789568987784569990146678998656567899969898989998754988756899499764768998789897949876767989877656893
9893499976543478989236789239767898998756789878899869876645798987653456799898789498765456799765346789
6912987665421299879345678949879989976545698765789979865434687898432567895965689398754345678974235699
5439876543210987769967899999989567895431987543695491984323456989943458954397899999665265667895127678
6545997654521976458899910989996468986532398654599392395664677979894568965989959876543123456789024568
7666798766439894346778929878987347897747498765678989987775899866789789999878943997651016567892123456
8789899876598789234567898969876456798856569876789778998986798754989899989766959898764323456789236768
9898999987697655139879987655987898949987899987894566989987986543478999876745898759877467897897345679
8967998799798743016791098943498929956798999898913455678999876542359998765636789542976578949985498789
7649876549899752145892987632349547897899498769101234589212987656767899443323498931987989539876569893
8432987632999863236789876545678956789902349854213455678903498987898954321014567890198994321987689912"""
# Parse the height map: (x, y) -> digit height.
points = dict()
for y, line in enumerate(input.splitlines()):
    for x, c in enumerate(line):
        points[(x, y)] = int(c)

# Repeatedly pick any remaining point below height 9 and flood-fill its basin
# (heights of 9 are walls that separate basins).
basin_sizes = []
while True:
    basin_seed = next((p for p, h in points.items() if h < 9), None)
    if basin_seed is None:
        # no more basins
        break
    basin = {basin_seed}
    not_basin = set()
    # Stack-based fill; visit order is irrelevant for basin membership.
    # Marking points as part of the basin at push time (not pop time) avoids
    # duplicate stack entries, and list.pop() is O(1) vs the O(n) pop(0)
    # of a list used as a FIFO queue.
    stack = [basin_seed]
    while stack:
        px, py = stack.pop()
        for dx, dy in ((0, -1), (1, 0), (0, 1), (-1, 0)):
            neighbour = (px + dx, py + dy)
            if neighbour in basin or neighbour in not_basin:
                continue
            # Off-grid lookups default to 10, i.e. treated as walls.
            if points.get(neighbour, 10) < 9:
                basin.add(neighbour)
                stack.append(neighbour)
            else:
                not_basin.add(neighbour)
    basin_sizes.append(len(basin))
    # Remove the filled basin so the seed search never revisits it.
    for p in basin:
        del points[p]

# Answer: product of the three largest basin sizes.
print(
    reduce(
        lambda a, b: a * b,
        sorted(basin_sizes, reverse=True)[:3]
    )
)
| 79.106383
| 111
| 0.948718
| 259
| 11,154
| 40.745174
| 0.606178
| 0.006633
| 0.006823
| 0.000758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.93477
| 0.039268
| 11,154
| 140
| 112
| 79.671429
| 0.050019
| 0.001255
| 0
| 0
| 0
| 0
| 0.906716
| 0.897827
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007519
| 0
| 0.007519
| 0.007519
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a21f190e1efc65909a85eda327b1f918c610e978
| 20,943
|
py
|
Python
|
src/nanopie/services/http/base.py
|
michaelawyu/nanopie
|
32e84f072d03a6b3eff33920c929546c81969a64
|
[
"Apache-2.0"
] | 1
|
2020-09-02T22:34:10.000Z
|
2020-09-02T22:34:10.000Z
|
src/nanopie/services/http/base.py
|
michaelawyu/nanopie
|
32e84f072d03a6b3eff33920c929546c81969a64
|
[
"Apache-2.0"
] | null | null | null |
src/nanopie/services/http/base.py
|
michaelawyu/nanopie
|
32e84f072d03a6b3eff33920c929546c81969a64
|
[
"Apache-2.0"
] | null | null | null |
"""This module includes the base class for nanopie HTTP services.
"""
from abc import abstractmethod
from typing import Callable, Dict, Optional
from ..base import RPCService
from .foundation import HTTPFoundationHandler
from ...handler import SimpleHandler
from .io import HTTPEndpoint
from .methods import HTTPMethods
from ...serialization.http import HTTPSerializationHandler
from ...serialization.helpers import JSONSerializationHelper
class HTTPService(RPCService):
"""The base class for all HTTP services."""
def __init__(
    self,
    serialization_helper: Optional["SerializationHelper"] = None,
    *args,
    **kwargs
):
    """Initializes an HTTP service.

    Args:
        serialization_helper (SerializationHelper, Optional): The default
            serialization helper for all endpoints. When omitted, a fresh
            `JSONSerializationHelper` is created for this service.
        *args: Other positional arguments. See `RPCService`.
        **kwargs: Other keyword arguments. See `RPCService`.
    """
    # Create the default helper at call time: a default-argument instance
    # (`= JSONSerializationHelper()`) is evaluated once at class definition
    # and would be silently shared by every HTTPService ever constructed.
    if serialization_helper is None:
        serialization_helper = JSONSerializationHelper()
    super().__init__(*args, serialization_helper=serialization_helper, **kwargs)
@abstractmethod
def add_endpoint(self, endpoint: "HTTPEndpoint", **kwargs):
    """Registers `endpoint` with the service; see `RPCService.add_endpoint`."""
def _rest_endpoint(
    self,
    name: str,
    rule: str,
    method: str,
    serialization_handler: Optional["SerializationHandler"] = None,
    authn_handler: Optional["AuthenticationHandler"] = None,
    logging_handler: Optional["LoggingHandler"] = None,
    tracing_handler: Optional["TracingHandler"] = None,
    extras: Optional[Dict] = None,
    **options
):
    """Adds a RESTful endpoint.

    Builds the handler chain (authn -> logging -> tracing -> serialization ->
    user function) and returns a decorator that registers the decorated
    function as the endpoint's request processor.

    Args:
        name (str): The name of the endpoint.
        rule (str): The rule associated with the endpoint.
        method (str): The HTTP method associated with the endpoint.
        serialization_handler (SerializationHandler, Optional): The
            serialization handler for this endpoint.
        authn_handler (AuthenticationHandler, Optional): The
            authentication handler for this endpoint.
        logging_handler (LoggingHandler, Optional): The logging handler
            for this endpoint.
        tracing_handler (TracingHandler, Optional): The tracing handler
            for this endpoint.
        extras (Dict, Optional): Additional information about the endpoint.
        **options: Other keyword arguments for configuring this endpoint.
            They vary according to the transport used.
    """
    entrypoint = HTTPFoundationHandler(max_content_length=self.max_content_length)
    chain_tail = entrypoint
    # A handler supplied for this endpoint takes precedence over the
    # service-wide default; each stage is appended in a fixed order.
    for endpoint_handler, service_handler in (
        (authn_handler, self.authn_handler),
        (logging_handler, self.logging_handler),
        (tracing_handler, self.tracing_handler),
    ):
        chosen = endpoint_handler if endpoint_handler else service_handler
        if chosen:
            chain_tail = chain_tail.add_route(name=name, handler=chosen)
    if serialization_handler:
        chain_tail = chain_tail.add_route(name=name, handler=serialization_handler)

    def wrapper(func):
        # The user function is the final stage of the chain.
        chain_tail.add_route(name=name, handler=SimpleHandler(func=func))
        self.add_endpoint(
            HTTPEndpoint(
                name=name,
                rule=rule,
                method=method,
                entrypoint=entrypoint,
                extras=extras,
            ),
            **options,
        )
        return func

    return wrapper
def create(
    self,
    name: str,
    rule: str,
    data_cls: "ModelMetaCls",
    headers_cls: Optional["ModelMetaCls"] = None,
    query_args_cls: Optional["ModelMetaCls"] = None,
    authn_handler: Optional["AuthenticationHandler"] = None,
    logging_handler: Optional["LoggingHandler"] = None,
    tracing_handler: Optional["TracingHandler"] = None,
    extras: Optional[Dict] = None,
    **options
):
    """The decorator for adding a CREATE (HTTP POST) endpoint.

    Args:
        name (str): The name of the endpoint.
        rule (str): The rule associated with the endpoint.
        data_cls (ModelMetaCls): The data model for the request
            payload (body).
        headers_cls (ModelMetaCls, Optional): The data model for the headers
            of the request.
        query_args_cls (ModelMetaCls, Optional): The data model for the
            query arguments in the URI of the request.
        authn_handler (AuthenticationHandler, Optional): The
            authentication handler for this endpoint.
        logging_handler (LoggingHandler, Optional): The logging handler
            for this endpoint.
        tracing_handler (TracingHandler, Optional): The tracing handler
            for this endpoint.
        extras (Dict, Optional): Additional information about the endpoint.
        **options: Other keyword arguments for configuring this endpoint.
            They vary according to the transport used.

    Usage:
        ```Python
        @svc.create(name="create_user", rule="/users/", ...)
        def create_user():
            # Custom logic
        ```
    """
    return self._rest_endpoint(
        name=name,
        rule=rule,
        method=HTTPMethods.POST,
        serialization_handler=HTTPSerializationHandler(
            headers_cls=headers_cls,
            query_args_cls=query_args_cls,
            data_cls=data_cls,
            serialization_helper=self.serialization_helper,
        ),
        authn_handler=authn_handler,
        logging_handler=logging_handler,
        tracing_handler=tracing_handler,
        extras=extras,
        **options
    )
def add_create_endpoint(self, *args, func: Callable, **kwargs):
    """Adds a CREATE endpoint without decorator syntax.

    Args:
        func (Callable): The function to process the request.
        *args: Other positional arguments. See the method `create`.
        **kwargs: Other keyword arguments. See the method `create`.
    """
    decorator = self.create(*args, **kwargs)
    return decorator(func)
def get(
    self,
    name: str,
    rule: str,
    data_cls: Optional["ModelMetaCls"] = None,
    headers_cls: Optional["ModelMetaCls"] = None,
    query_args_cls: Optional["ModelMetaCls"] = None,
    authn_handler: Optional["AuthenticationHandler"] = None,
    logging_handler: Optional["LoggingHandler"] = None,
    tracing_handler: Optional["TracingHandler"] = None,
    extras: Optional[Dict] = None,
    **options
):
    """The decorator for adding a GET endpoint.

    Args:
        name (str): The name of the endpoint.
        rule (str): The rule associated with the endpoint.
        data_cls (ModelMetaCls, Optional): The data model for the request
            payload (body).
        headers_cls (ModelMetaCls, Optional): The data model for the headers
            of the request.
        query_args_cls (ModelMetaCls, Optional): The data model for the
            query arguments in the URI of the request.
        authn_handler (AuthenticationHandler, Optional): The
            authentication handler for this endpoint.
        logging_handler (LoggingHandler, Optional): The logging handler
            for this endpoint.
        tracing_handler (TracingHandler, Optional): The tracing handler
            for this endpoint.
        extras (Dict, Optional): Additional information about the endpoint.
        **options: Other keyword arguments for configuring this endpoint.
            They vary according to the transport used.

    Usage:
        ```Python
        @svc.get(name="get_user", rule="/users/{int:user_id}", ...)
        def get_user(user_id):
            # Custom logic
        ```
    """
    return self._rest_endpoint(
        name=name,
        rule=rule,
        method=HTTPMethods.GET,
        serialization_handler=HTTPSerializationHandler(
            headers_cls=headers_cls,
            query_args_cls=query_args_cls,
            data_cls=data_cls,
            serialization_helper=self.serialization_helper,
        ),
        authn_handler=authn_handler,
        logging_handler=logging_handler,
        tracing_handler=tracing_handler,
        extras=extras,
        **options
    )
def add_get_endpoint(self, *args, func: Callable, **kwargs):
    """Adds a GET endpoint without decorator syntax.

    Args:
        func (Callable): The function to process the request.
        *args: Other positional arguments. See the method `get`.
        **kwargs: Other keyword arguments. See the method `get`.
    """
    decorator = self.get(*args, **kwargs)
    return decorator(func)
def update(
    self,
    name: str,
    rule: str,
    data_cls: "ModelMetaCls",
    headers_cls: Optional["ModelMetaCls"] = None,
    query_args_cls: Optional["ModelMetaCls"] = None,
    authn_handler: Optional["AuthenticationHandler"] = None,
    logging_handler: Optional["LoggingHandler"] = None,
    tracing_handler: Optional["TracingHandler"] = None,
    extras: Optional[Dict] = None,
    **options
):
    """The decorator for adding an UPDATE (HTTP PATCH) endpoint.

    Args:
        name (str): The name of the endpoint.
        rule (str): The rule associated with the endpoint.
        data_cls (ModelMetaCls): The data model for the request
            payload (body).
        headers_cls (ModelMetaCls, Optional): The data model for the headers
            of the request.
        query_args_cls (ModelMetaCls, Optional): The data model for the
            query arguments in the URI of the request.
        authn_handler (AuthenticationHandler, Optional): The
            authentication handler for this endpoint.
        logging_handler (LoggingHandler, Optional): The logging handler
            for this endpoint.
        tracing_handler (TracingHandler, Optional): The tracing handler
            for this endpoint.
        extras (Dict, Optional): Additional information about the endpoint.
        **options: Other keyword arguments for configuring this endpoint.
            They vary according to the transport used.

    Usage:
        ```Python
        @svc.update(name="update_user", rule="/users/{int:user_id}", ...)
        def update_user(user_id):
            # Custom logic
        ```
    """
    return self._rest_endpoint(
        name=name,
        rule=rule,
        method=HTTPMethods.PATCH,
        serialization_handler=HTTPSerializationHandler(
            headers_cls=headers_cls,
            query_args_cls=query_args_cls,
            data_cls=data_cls,
            serialization_helper=self.serialization_helper,
        ),
        authn_handler=authn_handler,
        logging_handler=logging_handler,
        tracing_handler=tracing_handler,
        extras=extras,
        **options
    )
def add_update_endpoint(self, *args, func: Callable, **kwargs):
"""Adds an UPDATE endpoint.
Args:
func (Callable): The function to process the request.
*args: Other positional arguments. See the method `update`.
**kwargs: Other keyword arguments. See the method `update`.
"""
return self.update(*args, **kwargs)(func)
def delete(
self,
name: str,
rule: str,
data_cls: Optional["ModelMetaCls"] = None,
headers_cls: Optional["ModelMetaCls"] = None,
query_args_cls: Optional["ModelMetaCls"] = None,
authn_handler: Optional["AuthenticationHandler"] = None,
logging_handler: Optional["LoggingHandler"] = None,
tracing_handler: Optional["TracingHandler"] = None,
extras: Optional[Dict] = None,
**options
):
"""The decorator for adding a DELETE method.
Args:
name (str): The name of the endpoint.
rule (str): The rule associated with the endpoint.
data_cls (ModelMetaCls, Optional): The data model for the request
payload (body).
headers_cls (ModelMetaCls, Optional): The data model for the headers
of the request.
query_args_cls (ModelMetaCls, Optional): The data model for the
query arguments in the URI of the request.
authn_handler (AuthenticationHandler, Optional): The
authentication handler for this endpoint.
logging_handler (LoggingHandler, Optional): The logging handler
for this endpoint.
tracing_handler (TracingHandler, Optional): The tracing handler
for this endpoint.
extras (Dict, Optional): Additional information about the endpoint.
**options: Other keyword arguments for configuring this endpoint.
They vary according to the transport used.
Usage:
```Python
@svc.delete(name="delete_user", rule="/users/{int:user_id}", ...)
def delete_user(user_id):
# Custom logic
```
"""
serialization_handler = HTTPSerializationHandler(
headers_cls=headers_cls,
query_args_cls=query_args_cls,
data_cls=data_cls,
serialization_helper=self.serialization_helper,
)
return self._rest_endpoint(
name=name,
rule=rule,
method=HTTPMethods.DELETE,
serialization_handler=serialization_handler,
authn_handler=authn_handler,
logging_handler=logging_handler,
tracing_handler=tracing_handler,
extras=extras,
**options
)
def add_delete_endpoint(self, *args, func: Callable, **kwargs):
"""Adds a DELETE endpoint.
Args:
func (Callable): The function to process the request.
*args: Other positional arguments. See the method `delete`.
**kwargs: Other keyword arguments. See the method `delete`.
"""
return self.delete(*args, **kwargs)(func)
def list(
self,
name: str,
rule: str,
data_cls: Optional["ModelMetaCls"] = None,
headers_cls: Optional["ModelMetaCls"] = None,
query_args_cls: Optional["ModelMetaCls"] = None,
authn_handler: Optional["AuthenticationHandler"] = None,
logging_handler: Optional["LoggingHandler"] = None,
tracing_handler: Optional["TracingHandler"] = None,
extras: Optional[Dict] = None,
**options
):
"""The decorator for adding a LIST method.
Args:
name (str): The name of the endpoint.
rule (str): The rule associated with the endpoint.
data_cls (ModelMetaCls, Optional): The data model for the request
payload (body).
headers_cls (ModelMetaCls, Optional): The data model for the headers
of the request.
query_args_cls (ModelMetaCls, Optional): The data model for the
query arguments in the URI of the request.
authn_handler (AuthenticationHandler, Optional): The
authentication handler for this endpoint.
logging_handler (LoggingHandler, Optional): The logging handler
for this endpoint.
tracing_handler (TracingHandler, Optional): The tracing handler
for this endpoint.
extras (Dict, Optional): Additional information about the endpoint.
**options: Other keyword arguments for configuring this endpoint.
They vary according to the transport used.
Usage:
```Python
@svc.list(name="list_users", rule="/users/", ...)
def list_users():
# Custom logic
```
"""
serialization_handler = HTTPSerializationHandler(
headers_cls=headers_cls,
query_args_cls=query_args_cls,
data_cls=data_cls,
serialization_helper=self.serialization_helper,
)
return self._rest_endpoint(
name=name,
rule=rule,
method=HTTPMethods.GET,
serialization_handler=serialization_handler,
authn_handler=authn_handler,
logging_handler=logging_handler,
tracing_handler=tracing_handler,
extras=extras,
**options
)
def add_list_endpoint(self, *args, func, **kwargs):
"""Adds a LIST endpoint.
Args:
func (Callable): The function to process the request.
*args: Other positional arguments. See the method `list`.
**kwargs: Other keyword arguments. See the method `list`.
"""
return self.list(*args, **kwargs)(func)
def custom(
self,
name: str,
rule: str,
verb: str,
method: str,
data_cls: Optional["ModelMetaCls"] = None,
headers_cls: Optional["ModelMetaCls"] = None,
query_args_cls: Optional["ModelMetaCls"] = None,
authn_handler: Optional["AuthenticationHandler"] = None,
logging_handler: Optional["LoggingHandler"] = None,
tracing_handler: Optional["TracingHandler"] = None,
extras: Optional[Dict] = None,
**options
):
"""The decorator for adding a custom method.
Args:
name (str): The name of the endpoint.
rule (str): The rule associated with the endpoint.
verb (str): The HTTP verb associated with the endpoint.
data_cls (ModelMetaCls, Optional): The data model for the request
payload (body).
headers_cls (ModelMetaCls, Optional): The data model for the headers
of the request.
query_args_cls (ModelMetaCls, Optional): The data model for the
query arguments in the URI of the request.
authn_handler (AuthenticationHandler, Optional): The
authentication handler for this endpoint.
logging_handler (LoggingHandler, Optional): The logging handler
for this endpoint.
tracing_handler (TracingHandler, Optional): The tracing handler
for this endpoint.
extras (Dict, Optional): Additional information about the endpoint.
**options: Other keyword arguments for configuring this endpoint.
They vary according to the transport used.
Usage:
```Python
@svc.custom(name="verify_user",
rule="/users/{int:user_id}",
verb="GET",
...)
def verify_user(user_id):
# Custom logic
```
"""
if rule.endswith("/"):
rule = rule[:-1]
rule = "{}:{}".format(rule, verb)
if method not in HTTPMethods.supported_methods:
raise ValueError(
"{} is not a supported HTTP method ({}).".format(
method, HTTPMethods.supported_methods
)
)
serialization_handler = HTTPSerializationHandler(
headers_cls=headers_cls,
query_args_cls=query_args_cls,
data_cls=data_cls,
serialization_helper=self.serialization_helper,
)
return self._rest_endpoint(
name=name,
rule=rule,
method=method,
serialization_handler=serialization_handler,
authn_handler=authn_handler,
logging_handler=logging_handler,
tracing_handler=tracing_handler,
extras=extras,
**options
)
def add_custom_endpoint(self, *args, func: Callable, **kwargs):
""" "Adds a custom endpoint.
Args:
func (Callable): The function to process the request.
*args: Other positional arguments. See the method `custom`.
**kwargs: Other keyword arguments. See the method `custom`.
"""
self.custom(*args, **kwargs)(func)
| 38.855288
| 86
| 0.600917
| 2,074
| 20,943
| 5.924783
| 0.066538
| 0.034912
| 0.023438
| 0.037598
| 0.835693
| 0.81307
| 0.806071
| 0.777832
| 0.761556
| 0.736003
| 0
| 0.00007
| 0.318484
| 20,943
| 538
| 87
| 38.927509
| 0.860856
| 0.428974
| 0
| 0.661654
| 0
| 0
| 0.064159
| 0.014399
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06015
| false
| 0.003759
| 0.033835
| 0
| 0.146617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf74d45a88cd276b17089e1d4afc5bb933924be4
| 41
|
py
|
Python
|
models/__init__.py
|
mlunax/shrtner-api
|
373e08863145b40ec7e52eccddd911da3f79540a
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
mlunax/shrtner-api
|
373e08863145b40ec7e52eccddd911da3f79540a
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
mlunax/shrtner-api
|
373e08863145b40ec7e52eccddd911da3f79540a
|
[
"MIT"
] | null | null | null |
from .redirect_model import RedirectModel
| 41
| 41
| 0.902439
| 5
| 41
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
44ba6ab507bee531594dc45e9b92cf3753e9c32c
| 179
|
py
|
Python
|
paulmann/__init__.py
|
mjekovec2/paulmann-lights
|
b41b9caef611db7461d62fcf17bc7de71ec4e0bd
|
[
"MIT"
] | 1
|
2020-10-13T09:42:01.000Z
|
2020-10-13T09:42:01.000Z
|
paulmann/__init__.py
|
mjekovec2/paulmann-lights
|
b41b9caef611db7461d62fcf17bc7de71ec4e0bd
|
[
"MIT"
] | 1
|
2021-12-26T01:54:41.000Z
|
2022-01-03T07:43:20.000Z
|
paulmann/__init__.py
|
mjekovec2/paulmann-lights
|
b41b9caef611db7461d62fcf17bc7de71ec4e0bd
|
[
"MIT"
] | 2
|
2020-04-21T11:39:30.000Z
|
2021-12-28T20:40:52.000Z
|
from paulmann.paulmann import Paulmann
from paulmann.models import State, Info
from paulmann.exceptions import PaulmannAuthenticationError, PaulmannConnectionError, PaulmannError
| 44.75
| 99
| 0.882682
| 18
| 179
| 8.777778
| 0.555556
| 0.227848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083799
| 179
| 3
| 100
| 59.666667
| 0.963415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
44c69b81a108f1cde0b86a8f52fe4db2f7891714
| 335
|
py
|
Python
|
app/main/__init__.py
|
DuncanArani/NEWS
|
b6cbd2794d5b3f71f66bcfb7feac1510d02c410c
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
DuncanArani/NEWS
|
b6cbd2794d5b3f71f66bcfb7feac1510d02c410c
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
DuncanArani/NEWS
|
b6cbd2794d5b3f71f66bcfb7feac1510d02c410c
|
[
"MIT"
] | null | null | null |
# Blueprint for this package's views: 'main' is the blueprint's name, and
# __name__ lets Flask locate the package on disk.
from flask import Blueprint
main = Blueprint('main',__name__)
# Imported after `main` is defined — views and errors presumably register
# handlers against this blueprint; verify in those modules.
from . import views,errors
| 55.833333
| 244
| 0.8
| 53
| 335
| 4.90566
| 0.490566
| 0.230769
| 0.196154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003559
| 0.161194
| 335
| 5
| 245
| 67
| 0.921708
| 0.722388
| 0
| 0
| 0
| 0
| 0.043956
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
78097666ad3a75f21be90197ad3b55483be0b372
| 106
|
py
|
Python
|
lectures/code/dict_del.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 4
|
2015-08-10T17:46:55.000Z
|
2020-04-18T21:09:03.000Z
|
lectures/code/dict_del.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | null | null | null |
lectures/code/dict_del.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 2
|
2019-04-24T03:31:02.000Z
|
2019-05-13T07:36:06.000Z
|
>>> d = {1: 'one', 2: 'two', 3: 'three'}
>>> del d[1]
>>> d
{2: 'two', 3: 'three'}
>>> d.clear()
>>> d
{}
| 13.25
| 40
| 0.358491
| 18
| 106
| 2.111111
| 0.5
| 0.105263
| 0.263158
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 0.216981
| 106
| 7
| 41
| 15.142857
| 0.385542
| 0
| 0
| 0.285714
| 0
| 0
| 0.179245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
788d2f19cccfe2382f259ab8e0e6e4a79001b1f8
| 155
|
py
|
Python
|
src/processor/processing_node.py
|
PearCoding/SpriteRecourceCompiler
|
34dcd9175f92e580705a2f07998046a05a19329b
|
[
"MIT"
] | 1
|
2016-04-16T21:33:58.000Z
|
2016-04-16T21:33:58.000Z
|
src/processor/processing_node.py
|
PearCoding/SpriteRecourceCompiler
|
34dcd9175f92e580705a2f07998046a05a19329b
|
[
"MIT"
] | null | null | null |
src/processor/processing_node.py
|
PearCoding/SpriteRecourceCompiler
|
34dcd9175f92e580705a2f07998046a05a19329b
|
[
"MIT"
] | null | null | null |
class ProcessingNode:
    """Base interface for a single processing step applied to an image.

    Subclasses must implement `execute` and may override `dependencies`
    to declare other nodes that should run first.
    """
    def execute(self, processor, img):
        """Apply this node's processing to `img`; must be overridden.

        Args:
            processor: The processor driving this node (opaque here).
            img: The image to process.

        Raises:
            NotImplementedError: Always, on the base class.
        """
        raise NotImplementedError()
    def dependencies(self, processor):
        """Return the nodes this one depends on; none by default."""
        return []
| 22.142857
| 38
| 0.664516
| 14
| 155
| 7.357143
| 0.785714
| 0.252427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245161
| 155
| 6
| 39
| 25.833333
| 0.880342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
78ab2a69ced3931fa292f1e6e726c05b71d01b38
| 703
|
py
|
Python
|
app/email.py
|
kilonzijnr/personal-blog
|
c26258a2b5494c0621d663a258213a6990f45e4e
|
[
"MIT"
] | null | null | null |
app/email.py
|
kilonzijnr/personal-blog
|
c26258a2b5494c0621d663a258213a6990f45e4e
|
[
"MIT"
] | null | null | null |
app/email.py
|
kilonzijnr/personal-blog
|
c26258a2b5494c0621d663a258213a6990f45e4e
|
[
"MIT"
] | null | null | null |
from flask_mail import Message
from flask import render_template
from .import mail
def mail_message(subject,template,to,**kwargs):
    """Render a template pair (.txt/.html) and email it to `to`.

    Args:
        subject: The email subject line.
        template: Base template path; the ".txt" and ".html" variants are
            rendered as the plain-text body and HTML body respectively.
        to: Recipient email address (single address).
        **kwargs: Context variables passed through to both templates.
    """
    # NOTE(review): sender address is hard-coded; consider moving it to
    # configuration.
    sender_email = 'vkilonzi05@gmail.com'
    email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + ".txt",**kwargs)
    email.html = render_template(template + ".html",**kwargs)
    mail.send(email)
def notification_message(subject, template, to, **kwargs):
    """Send a notification email.

    Behaviorally identical to `mail_message`: renders the ".txt" and ".html"
    variants of `template` and sends them to `to`.

    Args:
        subject: The email subject line.
        template: Base template path; ".txt" and ".html" variants are rendered.
        to: Recipient email address (single address).
        **kwargs: Context variables passed through to both templates.
    """
    # The original body was a line-for-line copy of mail_message; delegate
    # instead so the sending logic lives in one place.
    return mail_message(subject, template, to, **kwargs)
| 35.15
| 66
| 0.71835
| 87
| 703
| 5.666667
| 0.252874
| 0.141988
| 0.178499
| 0.097363
| 0.815416
| 0.815416
| 0.815416
| 0.815416
| 0.815416
| 0.815416
| 0
| 0.006667
| 0.146515
| 703
| 19
| 67
| 37
| 0.815
| 0
| 0
| 0.666667
| 0
| 0
| 0.082504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78b60f967f9cb54900f2792e459f29d96c09a302
| 7,971
|
py
|
Python
|
Graphs.py
|
KrishnarajT/IP-Project-Youtube-Video-Downloader
|
12f3e829f82ee96fdc0ec68c79665f85f3725007
|
[
"MIT"
] | 1
|
2020-11-22T06:49:23.000Z
|
2020-11-22T06:49:23.000Z
|
Graphs.py
|
KrishnarajT/IP-Project-Youtube-Video-Downloader
|
12f3e829f82ee96fdc0ec68c79665f85f3725007
|
[
"MIT"
] | 5
|
2021-04-06T18:39:13.000Z
|
2022-03-12T00:55:47.000Z
|
Graphs.py
|
KrishnarajT/IP-Project-Youtube-Video-Downloader
|
12f3e829f82ee96fdc0ec68c79665f85f3725007
|
[
"MIT"
] | 2
|
2020-11-12T13:25:19.000Z
|
2020-11-12T13:33:29.000Z
|
# here is an example graph
import numpy as np
import File_IO as fio
import matplotlib.pyplot as plt
# All These functions plot the graph, and then save the figure in a folder.
# These figues are then accessed by main.py to show in the statistics page.
def plot_views_vs_videos():
    """
    Plots a bar graph of views per video and saves it in Assets/Graphs as png.
    None -> None
    """
    views_list = fio.read.get_views()
    # Start a fresh figure: this module calls every plot function in sequence,
    # and pyplot's implicit current figure would otherwise accumulate all
    # earlier plots into each saved image.
    plt.figure()
    plt.xlabel('Videos')
    plt.ylabel('views')
    plt.bar(range(len(views_list)), views_list, color='green')
    plt.savefig('Assets/Graphs/views_bar_graph.png')
    plt.close()
def plot_views_vs_videos_line():
    """
    Plots a line graph of views per video (capped at 3e6) and saves it in
    Assets/Graphs as png.
    None -> None
    """
    # Cap outliers so a single viral video does not flatten the rest of the line.
    views_list = [min(v, 3e6) for v in fio.read.get_views()]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.xlabel('Videos')
    plt.ylabel('views')
    plt.plot(np.arange(len(views_list)), views_list, color='green')
    plt.savefig('Assets/Graphs/views_line_graph.png')
    plt.close()
def plot_views_vs_videos_hist():
    """
    Plots a histogram of videos by view count (15 bins of width 1000) and
    saves it in Assets/Graphs as png.
    None -> None
    """
    views_list = fio.read.get_views()
    # 15 fixed-width bins. The old code also computed a running maximum of
    # the views that was never used; that dead code has been removed.
    bins = [i * 1000 for i in range(15)]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.hist(views_list, bins, histtype='bar', rwidth=0.8, color='yellow')
    plt.xlabel('Views')
    plt.ylabel('Number of videos')
    plt.title('number of videos with views')
    plt.savefig('Assets/Graphs/views_hist_graph.png')
    plt.close()
def plot_likes_vs_videos_hist():
    """
    Plots a histogram of videos by like count (15 bins of width 1000) and
    saves it in Assets/Graphs as png.
    None -> None
    """
    # Missing like counts come back as None; treat them as zero.
    video_likes = [v if v is not None else 0 for v in fio.read.get_likes()]
    # 15 fixed-width bins; the unused running-maximum computation was removed.
    bins = [i * 1000 for i in range(15)]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.hist(video_likes, bins, histtype='bar', rwidth=0.8, color='yellow')
    # NOTE(review): x-axis label says 'Views' but the data is likes — looks
    # like a copy-paste; left unchanged to preserve output text.
    plt.xlabel('Views')
    plt.ylabel('Number of videos')
    plt.title('Number of videos vs likes')
    plt.savefig('Assets/Graphs/likes_hist_graph.png')
    plt.close()
def plot_ratings_vs_videos():
    """
    Plots a line graph of ratings per video and saves it in Assets/Graphs
    as png.
    None -> None
    """
    # Missing ratings come back as None; treat them as 0.0.
    video_ratings = [
        r if r is not None else 0.0 for r in fio.read.get_ratings()
    ]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.xlabel('Videos')
    plt.ylabel('Ratings')
    # Saved as png in Assets/Graphs/ so other files can access it.
    plt.plot(np.arange(len(video_ratings)), video_ratings, color='red')
    plt.savefig('Assets/Graphs/ratings_bar_graph.png')
    plt.close()
def plot_ratings_vs_videos_hist():
    """
    Plots a histogram of videos by rating (8 bins of width 0.25 starting at
    3.0) and saves it in Assets/Graphs as png.
    None -> None
    """
    # Missing ratings come back as None; treat them as 0.0.
    video_ratings = [
        r if r is not None else 0.0 for r in fio.read.get_ratings()
    ]
    # 8 bins of width 0.25 from 3.0; the leftover debug print of the bin
    # edges has been removed.
    bins = [3 + i * 0.25 for i in range(8)]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.hist(video_ratings, bins, histtype='bar', rwidth=0.8, color='pink')
    plt.xlabel('Views')
    plt.ylabel('Number of videos')
    plt.title('Number of videos vs ratings')
    plt.savefig('Assets/Graphs/ratings_hist_graph.png')
    plt.close()
def plot_likes_vs_videos():
    """
    Plots a line graph of likes per video (missing values as 0.0, capped at
    5e5) and saves it in Assets/Graphs as png.
    None -> None
    """
    # Normalize in one pass: None -> 0.0, then cap outliers at 5e5 (the
    # original used two separate index loops for this).
    video_likes = [
        min(v if v is not None else 0.0, 5e5) for v in fio.read.get_likes()
    ]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.xlabel('Videos')
    plt.ylabel('likes')
    plt.plot(np.arange(len(video_likes)), video_likes, color='green')
    plt.savefig('Assets/Graphs/likes_line_graph.png')
    plt.close()
def plot_dislikes_vs_videos():
    """
    Plots a bar graph of dislikes per video (missing values as 0.0, capped
    at 6e3) and saves it in Assets/Graphs as png.
    None -> None
    """
    # Normalize in one pass: None -> 0.0, then cap outliers at 6e3.
    video_dislikes = [
        min(v if v is not None else 0.0, 6e3)
        for v in fio.read.get_dislikes()
    ]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.xlabel('Videos')
    plt.ylabel('dislikes')
    plt.bar(np.arange(len(video_dislikes)), video_dislikes, color='green')
    plt.savefig('Assets/Graphs/dislikes_bar_graph.png')
    plt.close()
def plot_dislikes_vs_videos_hist():
    """
    Plots a histogram of videos by dislike count (15 bins of width 1000) and
    saves it in Assets/Graphs as png.
    None -> None
    """
    # Missing dislike counts come back as None; treat them as zero.
    video_dislikes = [
        v if v is not None else 0 for v in fio.read.get_dislikes()
    ]
    # 15 fixed-width bins; the unused running-maximum computation was removed.
    bins = [i * 1000 for i in range(15)]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.hist(video_dislikes, bins, histtype='bar', rwidth=0.8, color='yellow')
    plt.xlabel('Views')
    plt.ylabel('Number of videos')
    plt.title('Number of videos vs dislikes')
    plt.savefig('Assets/Graphs/dislikes_hist_graph.png')
    plt.close()
def plot_categories_vs_videos():
    """
    Plots a bar graph of the number of videos per category and saves it in
    Assets/Graphs as png.
    None -> None
    """
    video_categories = fio.read.get_categories()
    cat_list = ['Education', 'Science & Technology', 'Music', 'Autos & Vehicles', 'Entertainment', 'Howto & Style', 'People & Blogs']
    cat_list_disp = ['Education', 'Science', 'Music', 'Vehicles', 'Entertain', 'How-to', 'People']
    # Count every video for every category. The previous nested loop started
    # the inner scan at index i (range(i, len(video_categories))), which
    # silently skipped the first i videos for category i and under-counted
    # later categories.
    sorted_cats = [video_categories.count(cat) for cat in cat_list]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    # Saved as png in Assets/Graphs/ so other files can access it.
    plt.bar(cat_list_disp, sorted_cats, color='orange')
    plt.savefig('Assets/Graphs/categories_bar_graph.png')
    plt.close()
def plot_categories_vs_videos_pie():
    """
    Plots a pie chart of the share of videos per category and saves it in
    Assets/Graphs as png.
    None -> None
    """
    video_categories = fio.read.get_categories()
    cat_list = ['Education', 'Science & Technology', 'Music', 'Autos & Vehicles', 'Entertainment', 'Howto & Style', 'People & Blogs']
    cat_list_disp = ['Education', 'Science', 'Music', 'Vehicles', 'Entertain', 'How-to', 'People']
    # Count every video for every category. The previous nested loop started
    # the inner scan at index i, skipping the first i videos for category i
    # and under-counting later categories.
    sorted_cats = [video_categories.count(cat) for cat in cat_list]
    # Fresh figure so earlier plots saved by this module do not bleed in.
    plt.figure()
    plt.pie(sorted_cats, labels=cat_list_disp, explode=(0.1, 0, 0, 0, 0, 0, 0), shadow=True)
    plt.savefig('Assets/Graphs/categories_pie_chart.png')
    plt.close()
# Module-level side effect: generate and save every graph up front so the
# statistics page can load the pre-rendered PNGs (see header comment).
plot_views_vs_videos()
plot_ratings_vs_videos_hist()
plot_views_vs_videos_hist()
plot_views_vs_videos_line()
plot_ratings_vs_videos()
plot_likes_vs_videos()
plot_likes_vs_videos_hist()
plot_dislikes_vs_videos()
plot_dislikes_vs_videos_hist()
plot_categories_vs_videos()
plot_categories_vs_videos_pie()
| 31.505929
| 130
| 0.711078
| 1,364
| 7,971
| 4.008065
| 0.102639
| 0.05707
| 0.02195
| 0.034205
| 0.872691
| 0.792574
| 0.773185
| 0.711359
| 0.702579
| 0.697823
| 0
| 0.010241
| 0.16698
| 7,971
| 252
| 131
| 31.630952
| 0.813102
| 0.306988
| 0
| 0.52
| 0
| 0
| 0.182037
| 0.072037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073333
| false
| 0
| 0.02
| 0
| 0.093333
| 0.006667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78ba7c9b0000987d9dad7d9a89b0b4459b1456c9
| 171
|
py
|
Python
|
PythonModules/SlicerVmtkCommonLib/__init__.py
|
jcfr/SlicerExtension-VMTK
|
bd031746a645744f150de343f128203ce747ee57
|
[
"Apache-2.0"
] | null | null | null |
PythonModules/SlicerVmtkCommonLib/__init__.py
|
jcfr/SlicerExtension-VMTK
|
bd031746a645744f150de343f128203ce747ee57
|
[
"Apache-2.0"
] | null | null | null |
PythonModules/SlicerVmtkCommonLib/__init__.py
|
jcfr/SlicerExtension-VMTK
|
bd031746a645744f150de343f128203ce747ee57
|
[
"Apache-2.0"
] | null | null | null |
# import the vmtk common libs
from Helper import *
from LevelSetSegmentationLogic import *
from VesselnessFilteringLogic import *
from CenterlineComputationLogic import *
| 28.5
| 40
| 0.842105
| 17
| 171
| 8.470588
| 0.588235
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128655
| 171
| 5
| 41
| 34.2
| 0.966443
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15501f2e452f86b41baded5e1a494db3bbe7ea02
| 11,547
|
py
|
Python
|
data_log/tests/test_dungeon_log.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 66
|
2017-09-11T04:46:00.000Z
|
2021-03-13T00:02:42.000Z
|
data_log/tests/test_dungeon_log.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 133
|
2017-09-24T21:28:59.000Z
|
2021-04-02T10:35:31.000Z
|
data_log/tests/test_dungeon_log.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 28
|
2017-08-30T19:04:32.000Z
|
2020-11-16T04:09:00.000Z
|
from bestiary.models import Level, GameItem
from data_log import models
from .test_log_views import BaseLogTest
class CairosLogTests(BaseLogTest):
    """Tests for parsing BattleDungeonResult_V2 packets into DungeonLog rows.

    Each test replays a recorded game-API JSON fixture via `_do_log` and
    asserts on the created `models.DungeonLog` and its related drop records.
    """
    # Reference data the parser resolves against (items, levels, monsters).
    fixtures = ['test_game_items', 'test_levels', 'test_summon_monsters']
    def test_dungeon_result(self):
        self._do_log('BattleDungeonResult_V2/giants_b10_rune_drop.json')
        self.assertEqual(models.DungeonLog.objects.count(), 1)
        log = models.DungeonLog.objects.first()
        self.assertIsNotNone(log.success)
        self.assertIsNotNone(log.clear_time)
    def test_level_parsed_correctly(self):
        # Three different dungeons: Giants (8001), Dragon (9001), Hall of
        # Dark (1001) — each should map to the right com2us_id and floor.
        self._do_log('BattleDungeonResult_V2/giants_b5_unknown_scroll.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.level.dungeon.com2us_id, 8001)
        self.assertEqual(log.level.floor, 5)
        self._do_log('BattleDungeonResult_V2/dragon_b10_rune_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.level.dungeon.com2us_id, 9001)
        self.assertEqual(log.level.floor, 10)
        self._do_log('BattleDungeonResult_V2/hall_of_dark_small_essence_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.level.dungeon.com2us_id, 1001)
        self.assertEqual(log.level.floor, 10)
    def test_dungeon_failed(self):
        self._do_log('BattleDungeonResult_V2/giants_b10_failed.json')
        log = models.DungeonLog.objects.first()
        self.assertFalse(log.success)
    def test_dungeon_success(self):
        self._do_log('BattleDungeonResult_V2/giants_b10_rune_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertTrue(log.success)
    def test_dungeon_rune_drop(self):
        self._do_log('BattleDungeonResult_V2/giants_b10_rune_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.runes.count(), 1)
    def test_dungeon_item_drop(self):
        self._do_log('BattleDungeonResult_V2/giants_b10_harmony_drop.json')
        log = models.DungeonLog.objects.first()
        # Expect Mana, Energy, Crystal, and Craft Item
        self.assertEqual(log.items.count(), 4)
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CURRENCY, item__com2us_id=1).exists())
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CURRENCY, item__com2us_id=102).exists())
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CURRENCY, item__com2us_id=103).exists())
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CRAFT_STUFF, item__com2us_id=4001).exists())
    def test_hoh_ignored(self):
        # Hall of Heroes packets should not produce a DungeonLog at all.
        self._do_log('BattleDungeonResult_V2/hoh_light_rakshasa.json')
        log = models.DungeonLog.objects.first()
        self.assertIsNone(log)
    def test_essence_drop(self):
        self._do_log('BattleDungeonResult_V2/hall_of_dark_small_essence_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.items.filter(item__category=GameItem.CATEGORY_ESSENCE).count(), 1)
        item_drop = log.items.filter(item__category=GameItem.CATEGORY_ESSENCE).first()
        self.assertEqual(item_drop.item.com2us_id, 11005)
        self.assertEqual(item_drop.quantity, 5)
    def test_summon_scroll_drop(self):
        self._do_log('BattleDungeonResult_V2/giants_b5_unknown_scroll.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.items.filter(item__category=GameItem.CATEGORY_SUMMON_SCROLL).count(), 1)
        item_drop = log.items.filter(item__category=GameItem.CATEGORY_SUMMON_SCROLL).first()
        self.assertEqual(item_drop.item.com2us_id, 1)
        self.assertEqual(item_drop.quantity, 7)
    def test_secret_dungeon_drop(self):
        self._do_log('BattleDungeonResult_V2/hall_of_light_b1_howl_secret_dungeon_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.secret_dungeons.count(), 1)
        sd_drop = log.secret_dungeons.first()
        self.assertEqual(sd_drop.level.dungeon.com2us_id, 1051)
    def test_monster_drop(self):
        self._do_log('BattleDungeonResult_V2/giants_b5_rainbowmon_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.monsters.count(), 1)
    def test_artifact_drop(self):
        self._do_log('BattleDungeonResult_V2/punisher_b5_artifact_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.artifacts.count(), 1)
    def test_conversion_stone_drop(self):
        self._do_log('BattleDungeonResult_V2/punisher_b5_conversion_stone_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.items.count(), 2)
        item_drop = log.items.get(item__category=GameItem.CATEGORY_ARTIFACT_CRAFT)
        self.assertEqual(item_drop.quantity, 8)
class ScenarioLogTests(BaseLogTest):
    """Tests for scenario battles, which log in two packets.

    Unlike Cairos dungeons, a scenario produces a BattleScenarioStart packet
    (which creates the DungeonLog with `success` unset) followed by a
    BattleScenarioResult packet (which fills in the outcome and drops).
    """
    # Reference data the parser resolves against (items, levels, monsters).
    fixtures = ['test_game_items', 'test_levels', 'test_summon_monsters']
    def test_scenario_start(self):
        self._do_log('BattleScenarioStart/garen_normal_b1.json')
        self.assertEqual(models.DungeonLog.objects.count(), 1)
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.battle_key, 1)
        self.assertIsNone(log.success)
    def test_scenario_result(self):
        # Start + result should update the same log row, not create a second.
        self._do_log('BattleScenarioStart/garen_normal_b1.json')
        self._do_log('BattleScenarioResult/garen_normal_rune_drop.json')
        self.assertEqual(models.DungeonLog.objects.count(), 1)
        log = models.DungeonLog.objects.first()
        self.assertIsNotNone(log.success)
        self.assertIsNotNone(log.clear_time)
    def test_level_parsed_correctly(self):
        # Covers all three difficulties: normal, hard, and hell.
        self._do_log('BattleScenarioStart/garen_normal_b1.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.level.dungeon.com2us_id, 1)
        self.assertEqual(log.level.difficulty, Level.DIFFICULTY_NORMAL)
        self.assertEqual(log.level.floor, 1)
        self._do_log('BattleScenarioStart/kabir_ruins_hard_b1.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.level.dungeon.com2us_id, 3)
        self.assertEqual(log.level.difficulty, Level.DIFFICULTY_HARD)
        self.assertEqual(log.level.floor, 1)
        self._do_log('BattleScenarioStart/faimon_hell_b3.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.level.dungeon.com2us_id, 9)
        self.assertEqual(log.level.difficulty, Level.DIFFICULTY_HELL)
        self.assertEqual(log.level.floor, 3)
    def test_scenario_failed(self):
        self._do_log('BattleScenarioStart/garen_normal_b1.json')
        self._do_log('BattleScenarioResult/garen_normal_failed.json')
        log = models.DungeonLog.objects.first()
        self.assertFalse(log.success)
    def test_scenario_success(self):
        self._do_log('BattleScenarioStart/garen_normal_b1.json')
        self._do_log('BattleScenarioResult/garen_normal_rune_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertTrue(log.success)
    def test_scenario_rune_drop(self):
        self._do_log('BattleScenarioStart/garen_normal_b1.json')
        self._do_log('BattleScenarioResult/garen_normal_rune_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.runes.count(), 1)
    def test_scenario_monster_drop(self):
        self._do_log('BattleScenarioStart/faimon_hell_b1.json')
        self._do_log('BattleScenarioResult/faimon_hell_monster_drop.json')
        log = models.DungeonLog.objects.first()
        self.assertEqual(log.monsters.count(), 1)
    def test_scenario_item_drop(self):
        self._do_log('BattleScenarioStart/faimon_hell_b1.json')
        self._do_log('BattleScenarioResult/faimon_hell_craft_drop.json')
        log = models.DungeonLog.objects.first()
        # Expect Mana, Energy, and Craft Item
        self.assertEqual(log.items.count(), 3)
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CURRENCY, item__com2us_id=102).exists())
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CURRENCY, item__com2us_id=103).exists())
        self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CRAFT_STUFF, item__com2us_id=1003).exists())
class DimensionHoleTests(BaseLogTest):
fixtures = ['test_game_items', 'test_levels']
def test_dungeon_result(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/beast_men_b1_rune_drop.json')
self.assertEqual(models.DungeonLog.objects.count(), 1)
log = models.DungeonLog.objects.first()
self.assertIsNotNone(log.success)
self.assertIsNotNone(log.clear_time)
def test_practice_not_logged(self):
view = self._do_log('BattleDimensionHoleDungeonResult_V2/ellunia_b1_practice.json')
self.assertEqual(view.status_code, 200)
self.assertEqual(models.DungeonLog.objects.count(), 0)
def test_level_parsed_correctly(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/beast_men_b1_rune_drop.json')
log = models.DungeonLog.objects.latest()
self.assertEqual(log.level.dungeon.com2us_id, 3101)
self.assertEqual(log.level.floor, 1)
self._do_log('BattleDimensionHoleDungeonResult_V2/ellunia_b3_rune_ore_drop.json')
log = models.DungeonLog.objects.latest()
self.assertEqual(log.level.dungeon.com2us_id, 1202)
self.assertEqual(log.level.floor, 3)
self._do_log('BattleDimensionHoleDungeonResult_V2/sanctuary_b3_rune_drop.json')
log = models.DungeonLog.objects.latest()
self.assertEqual(log.level.dungeon.com2us_id, 1101)
self.assertEqual(log.level.floor, 3)
def test_success(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/ellunia_b3_rune_ore_drop.json')
log = models.DungeonLog.objects.latest()
self.assertTrue(log.success)
def test_failed(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/sanctuary_b4_failed.json')
log = models.DungeonLog.objects.latest()
self.assertFalse(log.success)
def test_ancient_rune_drop(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/beast_men_b1_rune_drop.json')
log = models.DungeonLog.objects.first()
self.assertEqual(log.runes.count(), 1)
rune = log.runes.first()
self.assertTrue(rune.ancient)
def test_item_drop(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/ellunia_b3_rune_ore_drop.json')
log = models.DungeonLog.objects.first()
self.assertEqual(log.items.count(), 2)
self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CURRENCY, item__com2us_id=102).exists())
self.assertTrue(log.items.filter(item__category=GameItem.CATEGORY_CRAFT_STUFF, item__com2us_id=9002).exists())
def test_ancient_enchant_gem_drop(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/sanctuary_b5_gem_drop.json')
log = models.DungeonLog.objects.first()
self.assertEqual(log.rune_crafts.count(), 1)
craft = log.rune_crafts.first()
self.assertEqual(craft.type, models.DungeonRuneCraftDrop.CRAFT_ANCIENT_GEM)
def test_ancient_grindstone_drop(self):
self._do_log('BattleDimensionHoleDungeonResult_V2/forest_b5_grind_drop.json')
log = models.DungeonLog.objects.first()
self.assertEqual(log.rune_crafts.count(), 1)
craft = log.rune_crafts.first()
self.assertEqual(craft.type, models.DungeonRuneCraftDrop.CRAFT_ANCIENT_GRINDSTONE)
| 47.130612
| 118
| 0.729454
| 1,426
| 11,547
| 5.596774
| 0.107994
| 0.095853
| 0.047362
| 0.114021
| 0.883724
| 0.850019
| 0.80228
| 0.747149
| 0.705049
| 0.660068
| 0
| 0.019545
| 0.162553
| 11,547
| 244
| 119
| 47.32377
| 0.805791
| 0.006928
| 0
| 0.535354
| 0
| 0
| 0.199581
| 0.189288
| 0
| 0
| 0
| 0
| 0.378788
| 1
| 0.151515
| false
| 0
| 0.015152
| 0
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1552a6105e4adeba0130c66f46fab3a30af9b9ee
| 38
|
py
|
Python
|
newfile.py
|
Alexander-Goossens/cs3240-labdemo
|
505b768dc6fc33f4f6c247aa07be847d8d6863c9
|
[
"MIT"
] | null | null | null |
newfile.py
|
Alexander-Goossens/cs3240-labdemo
|
505b768dc6fc33f4f6c247aa07be847d8d6863c9
|
[
"MIT"
] | null | null | null |
newfile.py
|
Alexander-Goossens/cs3240-labdemo
|
505b768dc6fc33f4f6c247aa07be847d8d6863c9
|
[
"MIT"
] | null | null | null |
print("New file with the new branch")
| 19
| 37
| 0.736842
| 7
| 38
| 4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 2
| 37
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
157d528f821e02dc87dbc1ef7d308c6c6a3f83f6
| 32
|
py
|
Python
|
ofdft_ml/statslib/loader/__init__.py
|
HamletWantToCode/ofdft-ml
|
4115405b6f530cdf8956d0b5b353569ce7c09496
|
[
"MIT"
] | 6
|
2019-01-16T07:00:27.000Z
|
2022-03-18T07:09:25.000Z
|
ofdft_ml/statslib/loader/__init__.py
|
HamletWantToCode/ofdft-ml
|
4115405b6f530cdf8956d0b5b353569ce7c09496
|
[
"MIT"
] | null | null | null |
ofdft_ml/statslib/loader/__init__.py
|
HamletWantToCode/ofdft-ml
|
4115405b6f530cdf8956d0b5b353569ce7c09496
|
[
"MIT"
] | null | null | null |
from .loader import Model_loader
| 32
| 32
| 0.875
| 5
| 32
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
158e0a71c912ad047f0146c304338dd50130a4d5
| 59
|
py
|
Python
|
simy/record/__init__.py
|
faical-yannick-congo/similarity
|
4b447a69294e89eb573af16e1153ede0cbdb3b9e
|
[
"MIT"
] | null | null | null |
simy/record/__init__.py
|
faical-yannick-congo/similarity
|
4b447a69294e89eb573af16e1153ede0cbdb3b9e
|
[
"MIT"
] | 10
|
2019-05-01T13:50:30.000Z
|
2019-05-09T18:11:24.000Z
|
simy/record/__init__.py
|
faical-yannick-congo/similarity
|
4b447a69294e89eb573af16e1153ede0cbdb3b9e
|
[
"MIT"
] | 2
|
2019-05-01T13:47:34.000Z
|
2019-05-01T14:03:52.000Z
|
from .CalculationRelaxStatic import CalculationRelaxStatic
| 29.5
| 58
| 0.915254
| 4
| 59
| 13.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 59
| 2
| 58
| 29.5
| 0.981818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15bb4a5ec76ee9218457fc33cedac638ef0a923a
| 27,553
|
py
|
Python
|
testing/test_util.py
|
matthewrsj/clisync
|
d89a0b2840494cc6622d45bc046b0589d0023e2e
|
[
"Apache-2.0"
] | null | null | null |
testing/test_util.py
|
matthewrsj/clisync
|
d89a0b2840494cc6622d45bc046b0589d0023e2e
|
[
"Apache-2.0"
] | null | null | null |
testing/test_util.py
|
matthewrsj/clisync
|
d89a0b2840494cc6622d45bc046b0589d0023e2e
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import stat
import ConfigParser
from StringIO import StringIO
import unittest
from climesync import util
from mock import patch, MagicMock
class UtilTest(unittest.TestCase):
def test_ts_error_no_error(self):
ts_objects = [{"example": "object"}]
assert not util.ts_error(*ts_objects)
def test_ts_error_error(self):
ts_objects = [{"example": "object"}, {"error": 404}]
assert util.ts_error(*ts_objects)
@patch("climesync.util.codecs.open")
@patch("climesync.util.os.chmod")
@patch("climesync.util.os.path")
def test_create_config_default_path(self, mock_path, mock_chmod,
mock_open):
default_path = "~/.climesyncrc"
fullpath = "/path/to/config"
open_args = ["w", "utf-8-sig"]
chmod_args = [stat.S_IRUSR | stat.S_IWUSR]
mock_path.expanduser.return_value = fullpath
util.create_config(path=default_path)
mock_open.assert_called_with(fullpath, *open_args)
mock_chmod.assert_called_with(fullpath, *chmod_args)
@patch("climesync.util.codecs.open")
@patch("climesync.util.os.chmod")
@patch("climesync.util.os.path")
def test_create_config_provided_path(self, mock_path, mock_chmod,
mock_open):
provided_path = "~/.config/climesync/config"
fullpath = "/path/to/config"
open_args = ["w", "utf-8-sig"]
chmod_args = [stat.S_IRUSR | stat.S_IWUSR]
mock_path.expanduser.return_value = fullpath
util.create_config(path=provided_path)
mock_open.assert_called_with(fullpath, *open_args)
mock_chmod.assert_called_with(fullpath, *chmod_args)
@patch("climesync.util.ConfigParser.RawConfigParser")
@patch("climesync.util.codecs.open")
@patch("climesync.util.os.path")
def test_read_config_path_exists(self, mock_path, mock_open, _):
fullpath = "/path/to/config"
mock_path.expanduser.return_value = fullpath
mock_path.isfile.return_value = True
mock_file = MagicMock()
mock_open.return_value.__enter__.return_value = mock_file
mock_configparser = util.read_config()
mock_configparser.readfp.assert_called_with(mock_file)
@patch("climesync.util.ConfigParser.RawConfigParser")
@patch("climesync.util.os.path")
def test_read_config_path_not_exist(self, mock_path, _):
fullpath = "/path/to/config"
mock_path.expanduser.return_value = fullpath
mock_path.isfile.return_value = False
mock_configparser = util.read_config()
mock_configparser.read.assert_not_called()
@patch("climesync.util.codecs.open")
@patch("climesync.util.ConfigParser.RawConfigParser")
@patch("climesync.util.os.path")
def test_read_config_parsing_error(self, mock_path, mock_rawconfigparser,
_):
fullpath = "/path/to/config"
mock_path.expanduser.return_value = fullpath
mock_path.isfile.return_value = True
mock_parser = MagicMock()
mock_parser.readfp.side_effect = ConfigParser.ParsingError("")
mock_rawconfigparser.return_value = mock_parser
result = util.read_config()
assert result is None
@patch("climesync.util.create_config")
@patch("climesync.util.read_config")
@patch("climesync.util.codecs.open")
def test_write_config_file_exists(self, mock_open, mock_read_config,
mock_create_config):
section_name = "climesync"
path = "~/.climesyncrc"
key = "key"
value = "value"
mock_config = MagicMock()
mock_config.sections.return_value = [section_name]
mock_file = MagicMock(spec=file)
mock_open.return_value.__enter__.return_value = mock_file
mock_read_config.return_value = mock_config
util.write_config(key, value, path=path)
mock_create_config.assert_not_called()
mock_config.set.assert_called_with(section_name, key, value)
mock_config.add_section.assert_not_called()
mock_config.write.assert_called_with(mock_file)
@patch("climesync.util.create_config")
@patch("climesync.util.read_config")
@patch("climesync.util.codecs.open")
def test_write_config_file_not_exist(self, mock_open, mock_read_config,
mock_create_config):
section_name = "climesync"
path = "~/.climesyncrc"
key = "key"
value = "value"
mock_config = MagicMock()
mock_config.sections.return_value = []
mock_config.set.side_effect = [ConfigParser.NoSectionError(""), None]
mock_read_config.return_value = mock_config
util.write_config(key, value, path=path)
mock_create_config.assert_called_with(path)
mock_config.add_section.assert_called_with(section_name)
@patch("climesync.util.create_config")
@patch("climesync.util.read_config")
def test_write_config_read_error(self, mock_read_config,
mock_create_config):
path = "~/.climesyncrc"
key = "key"
value = "value"
mock_read_config.return_value = None
util.write_config(key, value, path=path)
mock_create_config.assert_not_called()
@patch("climesync.util.os.path.exists")
def test_session_exists_true(self, mock_exists):
mock_exists.return_value = True
assert util.session_exists()
@patch("climesync.util.os.path.exists")
def test_session_exists_false(self, mock_exists):
mock_exists.return_value = False
assert not util.session_exists()
@patch("climesync.util.session_exists")
@patch("climesync.util.codecs.open")
def test_read_session(self, mock_open, mock_session_exists):
mock_session_object = {
"start_date": "2015-03-14",
"start_time": "09:26",
"project": "px",
"issue_uri": "https://github.com/org/px/issues/42/",
"user": "test"
}
mock_session_file_lines = [
"start_date: 2015-03-14",
"start_time: 09:26",
"project: px",
"issue_uri: https://github.com/org/px/issues/42/",
"user: test"
]
mock_file = MagicMock(spec=file)
mock_file.readlines.return_value = mock_session_file_lines
mock_open.return_value.__enter__.return_value = mock_file
mock_session_exists.return_value = True
result = util.read_session()
assert result == mock_session_object
@patch("climesync.util.session_exists")
@patch("climesync.util.codecs.open")
def test_read_session_no_session(self, mock_open, mock_session_exists):
mock_session_exists.return_value = False
util.read_session()
assert not mock_open.mock_calls
@patch("climesync.util.session_exists")
@patch("climesync.util.codecs.open")
def test_create_session(self, mock_open, mock_session_exists):
mock_session_object = {
"start_date": "2015-03-14",
"start_time": "09:26",
"project": "px",
"issue_uri": "https://github.com/org/px/issues/42/",
"user": "test"
}
mock_file = MagicMock(spec=file)
mock_open.return_value.__enter__.return_value = mock_file
mock_session_exists.return_value = False
util.create_session(mock_session_object)
for k, v in mock_session_object.iteritems():
# Assert that the session data was written to the file
assert any(k in args[0] and v in args[0]
for _, args, __ in mock_file.mock_calls)
@patch("climesync.util.session_exists")
@patch("climesync.util.codecs.open")
def test_create_session_existing_session(self, mock_open,
mock_session_exists):
mock_session_exists.return_value = True
util.create_session({})
mock_open.assert_not_called()
@patch("climesync.util.session_exists")
@patch("climesync.util.os.remove")
def test_clear_session(self, mock_remove, mock_session_exists):
mock_session_exists.return_value = True
util.clear_session()
assert mock_remove.mock_calls
@patch("climesync.util.session_exists")
@patch("climesync.util.os.remove")
def test_clear_session_no_session(self, mock_remove, mock_session_exists):
mock_session_exists.return_value = False
util.clear_session()
assert not mock_remove.mock_calls
def test_construct_clock_out_time(self):
mocked_session = {
"start_date": "2016-03-14",
"start_time": "03:14",
"project": "px",
"activities": "dev docs",
"user": "test"
}
mocked_now = datetime(2016, 3, 14, 4, 14)
mocked_revisions = {"project": "py"}
expected = {
"duration": 3600,
"date_worked": "2016-03-14",
"project": "py",
"activities": ["dev", "docs"],
"user": "test"
}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions, None)
assert result == expected
def test_construct_clock_out_time_no_revisions(self):
mocked_session = {
"start_date": "2016-03-14",
"start_time": "03:14",
"project": "px",
"activities": "dev docs",
"user": "test"
}
mocked_now = datetime(2016, 3, 14, 4, 14)
mocked_revisions = {}
expected = {
"duration": 3600,
"date_worked": "2016-03-14",
"project": "px",
"activities": ["dev", "docs"],
"user": "test"
}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions, None)
assert result == expected
def test_construct_clock_out_time_default_activity(self):
mocked_session = {
"start_date": "2016-03-14",
"start_time": "03:14",
"project": "px",
"user": "test"
}
mocked_now = datetime(2016, 3, 14, 4, 14)
mocked_revisions = {}
mocked_project = {"default_activity": "dev"}
expected = {
"duration": 3600,
"date_worked": "2016-03-14",
"project": "px",
"activities": ["dev"],
"user": "test"
}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions,
mocked_project)
assert result == expected
def test_construct_clock_out_time_invalid_project(self):
mocked_session = {
"start_date": "2016-03-14",
"start_time": "03:14",
"project": "px",
"user": "test"
}
mocked_now = datetime(2016, 3, 14, 4, 14)
mocked_revisions = {}
mocked_project = {"error": "error"}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions,
mocked_project)
assert result == {"error": "Invalid project"}
def test_construct_clock_out_time_no_session(self):
mocked_session = {}
mocked_now = datetime.now()
mocked_revisions = {}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions, None)
assert result == {"error": "No session data"}
def test_construct_clock_out_time_invalid_session(self):
mocked_session = {
"invalid": "session"
}
mocked_now = datetime.now()
mocked_revisions = {}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions, None)
assert result == {"error": "Invalid session data"}
def test_construct_clock_out_time_negative_delta(self):
mocked_session = {
"start_date": "2016-03-14",
"start_time": "03:14",
"project": "px",
"user": "test"
}
mocked_now = datetime(2016, 1, 1, 1, 1)
mocked_revisions = {}
result = util.construct_clock_out_time(mocked_session, mocked_now,
mocked_revisions, None)
assert result == {"error": "Invalid session date/time"}
@patch("climesync.util.sys.stdout", new_callable=StringIO)
def test_print_json_list(self, mock_stdout):
key = "key"
value = "value"
test_response = [{key: value}]
util.print_json(test_response)
assert "{}: {}".format(key, value) in mock_stdout.getvalue()
@patch("climesync.util.sys.stdout", new_callable=StringIO)
def test_print_json_dict(self, mock_stdout):
key = "key"
value = "value"
test_response = {key: value}
util.print_json(test_response)
assert "{}: {}".format(key, value) in mock_stdout.getvalue()
def test_is_time(self):
self.assertFalse(util.is_time("AhBm"))
self.assertFalse(util.is_time("hm"))
self.assertFalse(util.is_time("4h"))
self.assertFalse(util.is_time("10m"))
self.assertFalse(util.is_time("4hm"))
self.assertFalse(util.is_time("h4m"))
self.assertFalse(util.is_time("A4h10m"))
self.assertFalse(util.is_time("4h10mA"))
self.assertFalse(util.is_time("4h1A0m"))
self.assertFalse(util.is_time("4.0h10m"))
self.assertTrue(util.is_time("4h10m"))
self.assertTrue(util.is_time("222355h203402340m"))
self.assertTrue(util.is_time("0h10m"))
def test_to_readable_time(self):
self.assertEqual(util.to_readable_time(60), "0h1m")
self.assertEqual(util.to_readable_time(3600), "1h0m")
self.assertEqual(util.to_readable_time(1000), "0h16m")
@patch("climesync.util.raw_input")
def test_get_field_string(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "Prompt: "
mocked_input = "test input"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt)
assert value == mocked_input
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_string_empty(self, mock_raw_input):
prompt = "Prompt"
mocked_input = ["", "value"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt)
assert value == mocked_input[1]
assert mock_raw_input.call_count == 2
@patch("climesync.util.raw_input")
def test_get_field_string_optional(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "(Optional) Prompt: "
mocked_input = ""
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, optional=True)
assert value == ""
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_string_validated(self, mock_raw_input):
prompt = "Prompt"
validator = ["v1", "v2"]
mocked_input = "v1"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, validator=validator)
assert value == "v1"
@patch("climesync.util.raw_input")
def test_get_field_string_invalid(self, mock_raw_input):
prompt = "Prompt"
validator = ["v1", "v2"]
mocked_input = ["v3", "v2"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt, validator=validator)
assert value == "v2"
@patch("climesync.util.raw_input")
def test_get_field_bool_yes(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "(y/n) Prompt: "
mocked_input = "Y"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, field_type="?")
assert value
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_bool_no(self, mock_raw_input):
prompt = "Prompt"
mocked_input = "N"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, field_type="?")
assert not value
@patch("climesync.util.raw_input")
def test_get_field_bool_empty(self, mock_raw_input):
prompt = "Prompt"
mocked_input = ["", "yes"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt, field_type="?")
assert value
assert mock_raw_input.call_count == 2
@patch("climesync.util.raw_input")
def test_get_field_bool_invalid(self, mock_raw_input):
prompt = "Prompt"
mocked_input = ["maybe", "yes"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt, field_type="?")
assert value
assert mock_raw_input.call_count == 2
@patch("climesync.util.raw_input")
def test_get_field_bool_optional(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "(Optional) (y/N) Prompt: "
mocked_input = ""
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, optional=True, field_type="?")
assert value == ""
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_time(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "(Time input - <value>h<value>m) Prompt: "
mocked_input = "1h0m"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, field_type=":")
assert value == "1h0m"
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_time_invalid(self, mock_raw_input):
prompt = "Prompt"
mocked_input = ["1 hour", "1h0m"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt, field_type=":")
assert value == "1h0m"
@patch("climesync.util.raw_input")
def test_get_field_time_optional(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = \
"(Optional) (Time input - <value>h<value>m) Prompt: "
mocked_input = ""
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, optional=True, field_type=":")
assert value == ""
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_list(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "(Comma delimited) Prompt: "
mocked_input = " v1 , v2, v3,v4"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, field_type="!")
assert value == ["v1", "v2", "v3", "v4"]
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_list_single_value(self, mock_raw_input):
prompt = "Prompt"
mocked_input = "v1"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, field_type="!")
assert value == ["v1"]
@patch("climesync.util.raw_input")
def test_get_field_list_empty(self, mock_raw_input):
prompt = "Prompt"
mocked_input = ["", "v1, v2"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt, field_type="!")
assert value == ["v1", "v2"]
@patch("climesync.util.raw_input")
def test_get_field_list_optional(self, mock_raw_input):
prompt = "Prompt"
expected_formatted_prompt = "(Optional) (Comma delimited) Prompt: "
mocked_input = ""
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, optional=True, field_type="!")
assert value == ""
mock_raw_input.assert_called_with(expected_formatted_prompt)
@patch("climesync.util.raw_input")
def test_get_field_list_validated(self, mock_raw_input):
prompt = "Prompt"
validator = ["v1", "v2", "v3"]
mocked_input = "v1, v3"
mock_raw_input.return_value = mocked_input
value = util.get_field(prompt, field_type="!", validator=validator)
assert value == ["v1", "v3"]
@patch("climesync.util.raw_input")
def test_get_field_list_invalid(self, mock_raw_input):
prompt = "Prompt"
validator = ["v1", "v2", "v3"]
mocked_input = ["v1, v4, v2", "v1, v2"]
mock_raw_input.side_effect = mocked_input
value = util.get_field(prompt, field_type="!", validator=validator)
assert value == ["v1", "v2"]
@patch("climesync.util.raw_input")
def test_get_field_type_invalid(self, mock_raw_input):
prompt = "Prompt"
value = util.get_field(prompt, field_type="invalid")
assert value == ""
@patch("climesync.util.get_field")
def test_get_fields(self, mock_get_field):
fields = [
("strval", "String value", ["str", "val"]),
("*optstrval", "Optional string value"),
("?boolval", "Bool value"),
("*?optboolval", "Optional bool value"),
(":timeval", "Time value"),
("*:opttimeval", "Optional time value"),
("!listval", "List value", ["val1", "val2", "val3"]),
("*!optlistval", "Optional list value")
]
mocked_input = ["str", "", True, False, "1h0m", "0h30m",
["val1", "val2"], []]
expected_values = {
"strval": "str",
"boolval": True,
"optboolval": False,
"timeval": "1h0m",
"opttimeval": "0h30m",
"listval": ["val1", "val2"],
}
mock_get_field.side_effect = mocked_input
values = util.get_fields(fields)
assert values == expected_values
@patch("climesync.util.get_field")
@patch("climesync.util.read_config")
def test_add_kv_pair_redundant(self, mock_read_config, mock_get_field):
key = "redundant"
value = "value"
mock_config = MagicMock()
mock_config.has_option.return_value = True
mock_config.get.return_value = value
mock_read_config.return_value = mock_config
util.add_kv_pair(key, value)
mock_get_field.assert_not_called()
@patch("climesync.util.get_field")
@patch("climesync.util.write_config")
@patch("climesync.util.read_config")
def test_add_kv_pair_no(self, mock_read_config, mock_write_config,
mock_get_field):
key = "key"
value = "value"
path = "~/.climesyncrc"
mock_config = MagicMock()
mock_config.has_option.return_value = True
mock_config.get.return_value = None
mock_read_config.return_value = mock_config
mock_get_field.return_value = False
util.add_kv_pair(key, value, path)
mock_write_config.assert_not_called()
@patch("climesync.util.get_field")
@patch("climesync.util.write_config")
@patch("climesync.util.read_config")
def test_add_kv_pair(self, mock_read_config, mock_write_config,
mock_get_field):
key = "key"
value = "value"
path = "~/.climesyncrc"
mock_config = MagicMock()
mock_config.has_option.return_value = True
mock_config.get.return_value = None
mock_read_config.return_value = mock_config
mock_get_field.return_value = True
util.add_kv_pair(key, value, path)
mock_write_config.assert_called_with(key, value, path)
@patch("climesync.util.get_field")
def test_get_user_permissions(self, mock_get_field):
users = ["userone", "usertwo"]
expected_permissions = {
"userone": {
"member": True,
"spectator": False,
"manager": False
},
"usertwo": {
"member": True,
"spectator": True,
"manager": True
}
}
mocked_input = [True, False, False,
True, True, True]
mock_get_field.side_effect = mocked_input
permissions = util.get_user_permissions(users)
assert permissions == expected_permissions
def test_get_user_permissions_empty(self):
users = []
permissions = util.get_user_permissions(users)
assert not permissions
def test_fix_user_permissions(self):
permissions = {
"userzero": "0",
"userone": "1",
"usertwo": "2",
"userthree": "3",
"userfour": "4",
"userfive": "5",
"usersix": "6",
"userseven": "7"
}
fixed_permissions = {
"userzero": {"member": False, "spectator": False,
"manager": False},
"userone": {"member": False, "spectator": False, "manager": True},
"usertwo": {"member": False, "spectator": True, "manager": False},
"userthree": {"member": False, "spectator": True, "manager": True},
"userfour": {"member": True, "spectator": False, "manager": False},
"userfive": {"member": True, "spectator": False, "manager": True},
"usersix": {"member": True, "spectator": True, "manager": False},
"userseven": {"member": True, "spectator": True, "manager": True}
}
fixed = util.fix_user_permissions(permissions)
self.assertEqual(fixed, fixed_permissions)
def test_fix_args_optional(self):
args = {
"<angle_arg>": "value",
"--long-opt": "[list values]",
"UPPER_ARG": "True",
"--duration": "300",
"--blank-arg": None
}
expected_args = {
"angle_arg": "value",
"long_opt": ["list", "values"],
"upper_arg": True,
"duration": 300,
}
fixed_args = util.fix_args(args, True)
print fixed_args
print expected_args
assert fixed_args == expected_args
def test_fix_args_nonoptional(self):
args = {
"<angle_arg>": "value",
"--long-opt": "[list values]",
"UPPER_ARG": "True",
"--duration": "300",
"--blank-arg": None,
"invalidarg": None
}
expected_args = {
"angle_arg": "value",
"long_opt": ["list", "values"],
"upper_arg": True,
"duration": 300,
"blank_arg": None
}
fixed_args = util.fix_args(args, False)
assert fixed_args == expected_args
| 30.716834
| 79
| 0.60262
| 3,119
| 27,553
| 4.996153
| 0.080154
| 0.035937
| 0.078547
| 0.026952
| 0.836874
| 0.780466
| 0.749984
| 0.715716
| 0.688314
| 0.648271
| 0
| 0.017107
| 0.280768
| 27,553
| 896
| 80
| 30.751116
| 0.769239
| 0.001887
| 0
| 0.568289
| 0
| 0
| 0.166479
| 0.065275
| 0
| 0
| 0
| 0
| 0.139717
| 0
| null | null | 0
| 0.010989
| null | null | 0.009419
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec61dda6419c5d74844abdcf6c1b6669a0586893
| 28
|
py
|
Python
|
bento/bars/system/__init__.py
|
mrkgnao/bento
|
4c1a06e26a206e3d94a126add810d834a4046ba0
|
[
"MIT"
] | null | null | null |
bento/bars/system/__init__.py
|
mrkgnao/bento
|
4c1a06e26a206e3d94a126add810d834a4046ba0
|
[
"MIT"
] | null | null | null |
bento/bars/system/__init__.py
|
mrkgnao/bento
|
4c1a06e26a206e3d94a126add810d834a4046ba0
|
[
"MIT"
] | null | null | null |
from .cpu_bar import CPUBar
| 14
| 27
| 0.821429
| 5
| 28
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ec7a0a8fd394278db5dc2a415d6d8bca171e88f2
| 2,748
|
py
|
Python
|
Mock Object/Tweet Checker/OrderedMockObjects.py
|
ElizaLo/Software-Testing
|
80561a09ac7e41c64cdb3b3579b61e5130f6d34f
|
[
"MIT"
] | null | null | null |
Mock Object/Tweet Checker/OrderedMockObjects.py
|
ElizaLo/Software-Testing
|
80561a09ac7e41c64cdb3b3579b61e5130f6d34f
|
[
"MIT"
] | null | null | null |
Mock Object/Tweet Checker/OrderedMockObjects.py
|
ElizaLo/Software-Testing
|
80561a09ac7e41c64cdb3b3579b61e5130f6d34f
|
[
"MIT"
] | null | null | null |
import unittest
import pytest
from mock import Mock
import TweetChecker
class TweetTest(unittest.TestCase):
@pytest.mark.run('first')
def test_1(self):
mock_twitter = Mock()
TweetChecker.tweet(mock_twitter, "message")
mock_twitter.PostUpdate.assert_called_with("message")
@pytest.mark.run('second_to_last')
def test_3(self):
mock_twitter = Mock()
TweetChecker.tweet(mock_twitter, "Say hello")
mock_twitter.PostUpdate.assert_called_with("Say helo")
@pytest.mark.run('second')
def test_2(self):
mock_twitter = Mock()
TweetChecker.tweet(mock_twitter, "“Can’t repeat the past?…Why of course you can!” ― F. Scott Fitzgerald, The Great Gatsby")
mock_twitter.PostUpdate.assert_called_with("“Can’t repeat the past?…Why of course you can!” ― F. Scott Fitzgerald, The Great Gatsby")
@pytest.mark.run('last')
def test_4(self):
mock_twitter = Mock()
TweetChecker.tweet(mock_twitter, "Hello, it's me")
mock_twitter.PostUpdate.assert_called_with("Hello, it's me")
'''
def test_3(self):
mock_twitter = Mock()
TweetChecker.tweet(mock_twitter, "He smiled understandingly-much more than understandingly. It was one of those rare smiles with a quality of eternal reassurance in it, that you may come across four or five times in life. It faced--or seemed to face--the whole eternal world for an instant, and then concentrated on you with an irresistible prejudice in your favor. It understood you just as far as you wanted to be understood, believed in you as you would like to believe in yourself, and assured you that it had precisely the impression of you that, at your best, you hoped to convey. ― F. Scott Fitzgerald, The Great Gatsby")
mock_twitter.PostUpdate.assert_called_with("He smiled understandingly-much more than understandingly. It was one of those rare smiles with a quality of eternal reassurance in it, that you may come across four or five times in life. It faced--or seemed to face--the whole eternal world for an instant, and then concentrated on you with an irresistible prejudice in your favor. It understood you just as far as you wanted to be understood, believed in you as you would like to believe in yourself, and assured you that it had precisely the impression of you that, at your best, you hoped to convey. ― F. Scott Fitzgerald, The Great Gatsby")
def test_4(self):
mock_twitter = Mock()
TweetChecker.tweet(mock_twitter, "“He looked at her the way all women want to be looked at by a man.” ― F. Scott Fitzgerald, The Great Gatsby")
mock_twitter.PostUpdate.assert_called_with("message")
'''
if __name__ == '__main__':
    # Run the whole test suite when this file is executed directly.
    unittest.main()
| 53.882353
| 646
| 0.726346
| 427
| 2,748
| 4.592506
| 0.285714
| 0.100969
| 0.045895
| 0.058134
| 0.848037
| 0.848037
| 0.810301
| 0.784294
| 0.736359
| 0.735339
| 0
| 0.00271
| 0.194323
| 2,748
| 50
| 647
| 54.96
| 0.878049
| 0
| 0
| 0.148148
| 0
| 0.074074
| 0.237467
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.148148
| false
| 0
| 0.148148
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ecab81877859930e9054d8a942e9445b80cd23c0
| 68
|
py
|
Python
|
gpvdm_gui/gui/const_ver.py
|
roderickmackenzie/gpvdm
|
914fd2ee93e7202339853acaec1d61d59b789987
|
[
"BSD-3-Clause"
] | 12
|
2016-09-13T08:58:13.000Z
|
2022-01-17T07:04:52.000Z
|
gpvdm_gui/gui/const_ver.py
|
roderickmackenzie/gpvdm
|
914fd2ee93e7202339853acaec1d61d59b789987
|
[
"BSD-3-Clause"
] | 3
|
2017-11-11T12:33:02.000Z
|
2019-03-08T00:48:08.000Z
|
gpvdm_gui/gui/const_ver.py
|
roderickmackenzie/gpvdm
|
914fd2ee93e7202339853acaec1d61d59b789987
|
[
"BSD-3-Clause"
] | 6
|
2019-01-03T06:17:12.000Z
|
2022-01-01T15:59:00.000Z
|
def const_ver():
    """Return the gpvdm version string."""
    version = "v8.0"
    return version
def is_gpvdm_next():
    """Report whether this build is a gpvdm 'next' build (always False here)."""
    gpvdm_next = False
    return gpvdm_next
| 11.333333
| 20
| 0.705882
| 12
| 68
| 3.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.161765
| 68
| 5
| 21
| 13.6
| 0.754386
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ecb63eb2bf383c2b250e776e195296d530ee5ead
| 12,468
|
py
|
Python
|
qa/rpc-tests/dao-light-voting-cfund.py
|
mellowsharp/navcoin-core
|
6d4c580efa1e73791a18d8d2d8e9c9e90fd8e780
|
[
"MIT"
] | 1
|
2020-08-28T02:32:47.000Z
|
2020-08-28T02:32:47.000Z
|
qa/rpc-tests/dao-light-voting-cfund.py
|
mellowsharp/navcoin-core
|
6d4c580efa1e73791a18d8d2d8e9c9e90fd8e780
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/dao-light-voting-cfund.py
|
mellowsharp/navcoin-core
|
6d4c580efa1e73791a18d8d2d8e9c9e90fd8e780
|
[
"MIT"
] | 2
|
2020-09-06T20:02:00.000Z
|
2020-11-19T18:47:42.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class LightVotingTest(NavCoinTestFramework):
    """Tests the voting from light wallets"""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self, split=False):
        # Three nodes connected in a triangle; DAO debug logging on,
        # dandelion relay off so transactions propagate immediately.
        self.nodes = []
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 [["-debug=dao", "-dandelion=0"]] * 3)
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)

    def _broadcast_vote(self, vote_str, votingkey):
        """Broadcast a light-voting OP_RETURN transaction from node 2.

        Builds a raw transaction with *vote_str* as a zero-value data output
        plus a small '6a c1' output, rewrites the first hex byte to "08"
        (presumably selecting the vote transaction version — confirm against
        navcoin consensus rules), then funds, signs, broadcasts it and mines
        one block to the voting address so the vote is on-chain.
        """
        rawtx = self.nodes[2].createrawtransaction([], {vote_str: 0, '6ac1': 0.1})
        rawtx = "08" + rawtx[2:]
        fundedrawtx = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': votingkey})['hex']
        signedrawtx = self.nodes[2].signrawtransaction(fundedrawtx)['hex']
        self.nodes[2].sendrawtransaction(signedrawtx)
        self.nodes[2].generatetoaddress(1, votingkey)

    def _stake_blocks(self, count):
        """Stake *count* immature blocks on node 1."""
        for _ in range(count):
            self.stake_block(self.nodes[1], False)

    def _assert_proposal_votes(self, proposalid, yes, abstain, no):
        """Check the yes/abstain/no tallies of a proposal as seen by node 1."""
        proposal = self.nodes[1].getproposal(proposalid)
        assert_equal(proposal["votesYes"], yes)
        assert_equal(proposal["votesAbs"], abstain)
        assert_equal(proposal["votesNo"], no)

    def _assert_preq_votes(self, reqid, yes, abstain, no):
        """Check the yes/abstain/no tallies of a payment request on node 1."""
        preq = self.nodes[1].getpaymentrequest(reqid)
        assert_equal(preq["votesYes"], yes)
        assert_equal(preq["votesAbs"], abstain)
        assert_equal(preq["votesNo"], no)

    def run_test(self):
        # Get cfund parameters (value unused below, but exercises the RPC)
        blocks_per_voting_cycle = self.nodes[0].cfundstats()["consensus"]["blocksPerVotingCycle"]

        self.nodes[0].staking(False)
        self.nodes[1].staking(False)
        self.nodes[2].staking(False)

        activate_softfork(self.nodes[0], "coldstaking_v2")

        # Cold-staking setup: node 1 owns spend/stake keys, node 2 the voting key.
        votingkey = self.nodes[2].getnewaddress()
        coldstaking = self.nodes[0].getcoldstakingaddress(self.nodes[1].getnewaddress(), self.nodes[1].getnewaddress(), votingkey)

        self.nodes[0].sendtoaddress(votingkey, 100)
        for _ in range(6):
            self.nodes[0].sendtoaddress(coldstaking, 3000000)
        self.nodes[0].donatefund(100000)

        slow_gen(self.nodes[0], 10)
        time.sleep(3)

        proposalid0 = self.nodes[0].createproposal(self.nodes[0].getnewaddress(), 100, 3600, "test")["hash"]
        slow_gen(self.nodes[0], 1)

        start_new_cycle(self.nodes[0])

        reversed_hash = reverse_byte_str(proposalid0)

        # Vote payloads for the proposal (the 'c3' variants further below
        # target payment requests instead; the trailing byte before the hash
        # apparently selects yes/no/abstain/remove — c4/c5/c7/c8).
        vote_str = '6a' + 'c1' + 'c2' + 'c4' + '20' + reversed_hash
        voteno_str = '6a' + 'c1' + 'c2' + 'c5' + '20' + reversed_hash
        voteabs_str = '6a' + 'c1' + 'c2' + 'c7' + '20' + reversed_hash
        voterm_str = '6a' + 'c1' + 'c2' + 'c8' + '20' + reversed_hash

        # Cast an abstain vote with the voting key, then stake two blocks
        # whose coinbase carries the staker's own "no" vote.
        self._broadcast_vote(voteabs_str, votingkey)
        sync_blocks(self.nodes)

        self.nodes[1].coinbaseoutputs([self.nodes[1].createrawtransaction(
            [],
            {voteno_str: 0},
            "", 0)])

        self._stake_blocks(2)
        sync_blocks(self.nodes)

        # The light (abstain) vote wins over the coinbase "no" vote.
        self._assert_proposal_votes(proposalid0, 0, 2, 0)

        # remove abstain vote
        self._broadcast_vote(voterm_str, votingkey)
        time.sleep(3)
        sync_blocks(self.nodes)

        self._stake_blocks(2)
        sync_blocks(self.nodes)

        # Tallies stay as before the removal.
        self._assert_proposal_votes(proposalid0, 0, 2, 0)

        # start new cycle
        start_new_cycle(self.nodes[0])

        # stake 2 blocks with no vote
        self._stake_blocks(2)
        sync_blocks(self.nodes)

        # set vote to yes
        self._broadcast_vote(vote_str, votingkey)
        sync_blocks(self.nodes)
        time.sleep(3)

        # stake 2 blocks with yes vote
        self._stake_blocks(2)
        sync_blocks(self.nodes)

        self._assert_proposal_votes(proposalid0, 2, 0, 0)

        # start new cycle
        start_new_cycle(self.nodes[0])

        # 2 yes votes
        self._stake_blocks(2)
        sync_blocks(self.nodes)

        # remove votes
        self._broadcast_vote(voterm_str, votingkey)
        sync_blocks(self.nodes)
        time.sleep(3)

        self._stake_blocks(2)
        sync_blocks(self.nodes)

        self._assert_proposal_votes(proposalid0, 2, 0, 0)

        # start new cycle
        start_new_cycle(self.nodes[0])

        # Vote yes and stake enough blocks for the proposal to progress.
        self._broadcast_vote(vote_str, votingkey)
        sync_blocks(self.nodes)
        time.sleep(3)

        self._stake_blocks(10)

        # print(self.nodes[0].getproposal(proposalid0)['status'])

        paymentReqid0 = self.nodes[0].createpaymentrequest(proposalid0, 10, "preq test")["hash"]
        slow_gen(self.nodes[0], 1)

        reversed_hash = reverse_byte_str(paymentReqid0)

        # Same vote payloads, now with 'c3' targeting the payment request.
        vote_str = '6a' + 'c1' + 'c3' + 'c4' + '20' + reversed_hash
        voteno_str = '6a' + 'c1' + 'c3' + 'c5' + '20' + reversed_hash
        voteabs_str = '6a' + 'c1' + 'c3' + 'c7' + '20' + reversed_hash
        voterm_str = '6a' + 'c1' + 'c3' + 'c8' + '20' + reversed_hash

        # Abstain on the payment request, then stake two blocks.
        self._broadcast_vote(voteabs_str, votingkey)
        sync_blocks(self.nodes)

        self._stake_blocks(2)
        sync_blocks(self.nodes)

        self._assert_preq_votes(paymentReqid0, 0, 2, 0)

        # Remove the abstain vote; tallies must be unchanged.
        self._broadcast_vote(voterm_str, votingkey)
        time.sleep(3)
        sync_blocks(self.nodes)

        self._stake_blocks(2)
        sync_blocks(self.nodes)

        self._assert_preq_votes(paymentReqid0, 0, 2, 0)

        # start new cycle
        start_new_cycle(self.nodes[0])

        # stake 2 blocks with no vote
        self._stake_blocks(2)
        sync_blocks(self.nodes)

        # set vote to yes
        self._broadcast_vote(vote_str, votingkey)
        sync_blocks(self.nodes)
        time.sleep(3)

        # stake 2 blocks with yes vote
        self._stake_blocks(2)
        sync_blocks(self.nodes)

        self._assert_preq_votes(paymentReqid0, 2, 0, 0)

        # start new cycle
        start_new_cycle(self.nodes[0])

        # 2 yes votes
        self._stake_blocks(2)
        sync_blocks(self.nodes)

        # remove votes
        self._broadcast_vote(voterm_str, votingkey)
        sync_blocks(self.nodes)
        time.sleep(3)

        self._stake_blocks(2)
        sync_blocks(self.nodes)

        self._assert_preq_votes(paymentReqid0, 2, 0, 0)

    def stake_block(self, node, mature=True):
        """Stake a single block on *node* and return its hash.

        When *mature* is True, five extra blocks are generated afterwards so
        the staked output matures, and all nodes are synced.
        """
        # Get the current block count to check against while we wait for a stake
        blockcount = node.getblockcount()

        # Turn staking on
        node.staking(True)

        # wait for a new block to be mined
        while node.getblockcount() == blockcount:
            # print("waiting for a new block...")
            time.sleep(1)
        # We got one
        # print("found a new block...")

        # Turn staking off
        node.staking(False)

        # Get the staked block
        block_hash = node.getbestblockhash()

        # Only mature the blocks if we asked for it
        if mature:
            # Make sure the blocks are mature before we check the report
            slow_gen(node, 5, 0.5)
            self.sync_all()

        # return the block hash to the function caller
        return block_hash
if __name__ == '__main__':
    # Run the functional test when this file is executed directly.
    LightVotingTest().main()
| 41.838926
| 128
| 0.646214
| 1,515
| 12,468
| 5.208581
| 0.124752
| 0.183627
| 0.081105
| 0.077557
| 0.771892
| 0.763148
| 0.760233
| 0.760233
| 0.73134
| 0.73134
| 0
| 0.042301
| 0.209336
| 12,468
| 297
| 129
| 41.979798
| 0.758166
| 0.079082
| 0
| 0.741627
| 0
| 0
| 0.050944
| 0
| 0.038278
| 0
| 0
| 0
| 0.114833
| 1
| 0.019139
| false
| 0
| 0.014354
| 0
| 0.043062
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf03d9c33206f371612a12551af8879ea4a23748
| 3,546
|
py
|
Python
|
main.py
|
vedenev/receptive_field_research
|
0811daeef16f47ead99be56e6e497261d822c242
|
[
"MIT"
] | null | null | null |
main.py
|
vedenev/receptive_field_research
|
0811daeef16f47ead99be56e6e497261d822c242
|
[
"MIT"
] | null | null | null |
main.py
|
vedenev/receptive_field_research
|
0811daeef16f47ead99be56e6e497261d822c242
|
[
"MIT"
] | null | null | null |
import fire
DEFAULT_SCRIPT = "scripts_list"

# Script name -> module defining a function of exactly the same name.
# Modules are imported lazily (only when their script is requested) so that
# running one script does not pull in the dependencies of the others.
_SCRIPT_MODULES = {
    "experiment_field_size_vs_depth": "experiments",
    "experiment_field_size_vs_depth_res": "experiments",
    "plot_field_size_vs_depth": "visualization_utils",
    "experiment_field_size_vs_depth_thiner": "experiments",
    "experiment_field_size_by_forward_pass": "experiments",
    "experiment_field_size_by_forward_pass_for_shifted": "experiments",
    "plot_field_size_by_forward_pass": "visualization_utils",
    "experiment_field_size_by_forward_pass_constant": "experiments",
    "experiment_field_size_resnet50": "experiments",
    "experiment_field_size_vs_depth_res_decomposed_init": "experiments",
    "experiment_field_size_by_forward_pass_decomposed_init": "experiments",
    "plot_field_size_by_forward_pass_decomposed_init": "visualization_utils",
    "show_dataset": "visualization_utils",
    "experiment_field_size_vs_depth_dot_circular": "experiments",
    "experiment_field_size_resnet50_by_forward_pass": "experiments",
    "plot_field_size_resnet": "visualization_utils",
    "experiment_field_size_by_forward_pass_constant_output_image": "experiments",
}


def run_script(script: str = DEFAULT_SCRIPT) -> None:
    """
    Type python3 main.py to get list of script names
    """
    if script == DEFAULT_SCRIPT:
        from utils import print_scripts_list
        print_scripts_list()
        return

    module_name = _SCRIPT_MODULES.get(script)
    if module_name is None:
        print("unexpected script name, type python3 main.py to get help")
        return

    # Table-driven replacement for the original 17-branch elif chain: import
    # the owning module lazily and call the function named after the script.
    import importlib
    module = importlib.import_module(module_name)
    getattr(module, script)()
if __name__ == '__main__':
    # Expose run_script on the command line: `python3 main.py <script_name>`.
    fire.Fire(run_script)
| 49.943662
| 91
| 0.788494
| 464
| 3,546
| 5.400862
| 0.114224
| 0.172386
| 0.272945
| 0.150838
| 0.865922
| 0.836393
| 0.765363
| 0.610136
| 0.403033
| 0.184358
| 0
| 0.004743
| 0.167513
| 3,546
| 71
| 92
| 49.943662
| 0.844173
| 0.013536
| 0
| 0
| 0
| 0
| 0.208441
| 0.183175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0.393443
| 0.311475
| 0
| 0.327869
| 0.04918
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
bf27d8c9ec0a227958812eb7bf0c4130a1b0cafe
| 34
|
py
|
Python
|
src/modeci_mdf/interfaces/actr/__init__.py
|
singular-value/MDF
|
227216ffb2c9beea8539829b0b891196787d33ee
|
[
"Apache-2.0"
] | null | null | null |
src/modeci_mdf/interfaces/actr/__init__.py
|
singular-value/MDF
|
227216ffb2c9beea8539829b0b891196787d33ee
|
[
"Apache-2.0"
] | null | null | null |
src/modeci_mdf/interfaces/actr/__init__.py
|
singular-value/MDF
|
227216ffb2c9beea8539829b0b891196787d33ee
|
[
"Apache-2.0"
] | null | null | null |
from .exporter import actr_to_mdf
| 17
| 33
| 0.852941
| 6
| 34
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
170af1ca77ba8393a26f612ee7d389872a5429bb
| 26
|
py
|
Python
|
handlers/errors/__init__.py
|
roomdie/KingsEmpiresBot
|
6de6d033318fef1d243dc3347d91b67e84ee285a
|
[
"MIT"
] | null | null | null |
handlers/errors/__init__.py
|
roomdie/KingsEmpiresBot
|
6de6d033318fef1d243dc3347d91b67e84ee285a
|
[
"MIT"
] | null | null | null |
handlers/errors/__init__.py
|
roomdie/KingsEmpiresBot
|
6de6d033318fef1d243dc3347d91b67e84ee285a
|
[
"MIT"
] | null | null | null |
from . import retry_after
| 13
| 25
| 0.807692
| 4
| 26
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1717b9c87c8169fbcbf8eab32fe3d1122a46d802
| 24
|
py
|
Python
|
hycohanz/__init__.py
|
Pablo097/hycohanz
|
123cd6b270ea28fd6dd8b7e85c7af53d512717d4
|
[
"BSD-2-Clause"
] | null | null | null |
hycohanz/__init__.py
|
Pablo097/hycohanz
|
123cd6b270ea28fd6dd8b7e85c7af53d512717d4
|
[
"BSD-2-Clause"
] | null | null | null |
hycohanz/__init__.py
|
Pablo097/hycohanz
|
123cd6b270ea28fd6dd8b7e85c7af53d512717d4
|
[
"BSD-2-Clause"
] | 1
|
2022-03-03T15:41:57.000Z
|
2022-03-03T15:41:57.000Z
|
from .hycohanz import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.