Dataset columns (name: dtype):

- hexsha: string
- size: int64
- ext: string
- lang: string
- max_stars_repo_path: string
- max_stars_repo_name: string
- max_stars_repo_head_hexsha: string
- max_stars_repo_licenses: list
- max_stars_count: int64
- max_stars_repo_stars_event_min_datetime: string
- max_stars_repo_stars_event_max_datetime: string
- max_issues_repo_path: string
- max_issues_repo_name: string
- max_issues_repo_head_hexsha: string
- max_issues_repo_licenses: list
- max_issues_count: int64
- max_issues_repo_issues_event_min_datetime: string
- max_issues_repo_issues_event_max_datetime: string
- max_forks_repo_path: string
- max_forks_repo_name: string
- max_forks_repo_head_hexsha: string
- max_forks_repo_licenses: list
- max_forks_count: int64
- max_forks_repo_forks_event_min_datetime: string
- max_forks_repo_forks_event_max_datetime: string
- content: string
- avg_line_length: float64
- max_line_length: int64
- alphanum_fraction: float64
- qsc_code_num_words_quality_signal: int64
- qsc_code_num_chars_quality_signal: float64
- qsc_code_mean_word_length_quality_signal: float64
- qsc_code_frac_words_unique_quality_signal: float64
- qsc_code_frac_chars_top_2grams_quality_signal: float64
- qsc_code_frac_chars_top_3grams_quality_signal: float64
- qsc_code_frac_chars_top_4grams_quality_signal: float64
- qsc_code_frac_chars_dupe_5grams_quality_signal: float64
- qsc_code_frac_chars_dupe_6grams_quality_signal: float64
- qsc_code_frac_chars_dupe_7grams_quality_signal: float64
- qsc_code_frac_chars_dupe_8grams_quality_signal: float64
- qsc_code_frac_chars_dupe_9grams_quality_signal: float64
- qsc_code_frac_chars_dupe_10grams_quality_signal: float64
- qsc_code_frac_chars_replacement_symbols_quality_signal: float64
- qsc_code_frac_chars_digital_quality_signal: float64
- qsc_code_frac_chars_whitespace_quality_signal: float64
- qsc_code_size_file_byte_quality_signal: float64
- qsc_code_num_lines_quality_signal: float64
- qsc_code_num_chars_line_max_quality_signal: float64
- qsc_code_num_chars_line_mean_quality_signal: float64
- qsc_code_frac_chars_alphabet_quality_signal: float64
- qsc_code_frac_chars_comments_quality_signal: float64
- qsc_code_cate_xml_start_quality_signal: float64
- qsc_code_frac_lines_dupe_lines_quality_signal: float64
- qsc_code_cate_autogen_quality_signal: float64
- qsc_code_frac_lines_long_string_quality_signal: float64
- qsc_code_frac_chars_string_length_quality_signal: float64
- qsc_code_frac_chars_long_word_length_quality_signal: float64
- qsc_code_frac_lines_string_concat_quality_signal: float64
- qsc_code_cate_encoded_data_quality_signal: float64
- qsc_code_frac_chars_hex_words_quality_signal: float64
- qsc_code_frac_lines_prompt_comments_quality_signal: float64
- qsc_code_frac_lines_assert_quality_signal: float64
- qsc_codepython_cate_ast_quality_signal: float64
- qsc_codepython_frac_lines_func_ratio_quality_signal: float64
- qsc_codepython_cate_var_zero_quality_signal: bool
- qsc_codepython_frac_lines_pass_quality_signal: float64
- qsc_codepython_frac_lines_import_quality_signal: float64
- qsc_codepython_frac_lines_simplefunc_quality_signal: float64
- qsc_codepython_score_lines_no_logic_quality_signal: float64
- qsc_codepython_frac_lines_print_quality_signal: float64
- qsc_code_num_words: int64
- qsc_code_num_chars: int64
- qsc_code_mean_word_length: int64
- qsc_code_frac_words_unique: null
- qsc_code_frac_chars_top_2grams: int64
- qsc_code_frac_chars_top_3grams: int64
- qsc_code_frac_chars_top_4grams: int64
- qsc_code_frac_chars_dupe_5grams: int64
- qsc_code_frac_chars_dupe_6grams: int64
- qsc_code_frac_chars_dupe_7grams: int64
- qsc_code_frac_chars_dupe_8grams: int64
- qsc_code_frac_chars_dupe_9grams: int64
- qsc_code_frac_chars_dupe_10grams: int64
- qsc_code_frac_chars_replacement_symbols: int64
- qsc_code_frac_chars_digital: int64
- qsc_code_frac_chars_whitespace: int64
- qsc_code_size_file_byte: int64
- qsc_code_num_lines: int64
- qsc_code_num_chars_line_max: int64
- qsc_code_num_chars_line_mean: int64
- qsc_code_frac_chars_alphabet: int64
- qsc_code_frac_chars_comments: int64
- qsc_code_cate_xml_start: int64
- qsc_code_frac_lines_dupe_lines: int64
- qsc_code_cate_autogen: int64
- qsc_code_frac_lines_long_string: int64
- qsc_code_frac_chars_string_length: int64
- qsc_code_frac_chars_long_word_length: int64
- qsc_code_frac_lines_string_concat: null
- qsc_code_cate_encoded_data: int64
- qsc_code_frac_chars_hex_words: int64
- qsc_code_frac_lines_prompt_comments: int64
- qsc_code_frac_lines_assert: int64
- qsc_codepython_cate_ast: int64
- qsc_codepython_frac_lines_func_ratio: int64
- qsc_codepython_cate_var_zero: int64
- qsc_codepython_frac_lines_pass: int64
- qsc_codepython_frac_lines_import: int64
- qsc_codepython_frac_lines_simplefunc: int64
- qsc_codepython_score_lines_no_logic: int64
- qsc_codepython_frac_lines_print: int64
- effective: string
- hits: int64

Record layout: each record below is one table row. The metadata columns appear as labeled fields, `content` as a fenced code block, then `avg_line_length`, `max_line_length`, and `alphanum_fraction`, then the 41 `qsc_*_quality_signal` values and the 41 unsuffixed `qsc_*` values as comma-separated runs in the column order listed above, and finally `effective` and `hits`.
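A minimal sketch of filtering on these columns programmatically, assuming the split has been exported to a local Parquet file (the `sample.parquet` path is a placeholder, not something this card defines):

```python
import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("sample.parquet")

# Keep Python files that are mostly alphanumeric and carry little
# duplicated 5-gram content; both columns appear in the schema above.
mask = (df["alphanum_fraction"] > 0.5) & (
    df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5
)
print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]])
```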
---
hexsha: 0efb8720ad0645c0753d8551dba0dae3a4ea407e | size: 1223 | ext: py | lang: Python
max_stars: count 6; path decawave_1001_rjg/messages/dwm_config_response.py; repo Richard-Gemmell/decawave-1001-rjg; head 0ee70d1ca0a1a413ef4f634c0f3cd78a084e5c5f; licenses ["MIT"]; events 2019-08-05T22:16:38.000Z to 2021-01-16T02:45:26.000Z
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count 3; same path, repo, head, and licenses as max_stars; events 2021-01-16T02:45:29.000Z to 2022-03-26T21:38:44.000Z
content:
```python
from .dwm_response import DwmResponse


class DwmConfigResponse(DwmResponse):
    """Returned by a dwm_cfg_get request """
    def __init__(self, message: bytes):
        super().__init__(message)

    @property
    def anchor(self) -> bool:
        return (self[6] & 0x20) != 0

    @property
    def tag(self) -> bool:
        return not self.anchor

    @property
    def initiator(self) -> bool:
        return (self[6] & 0x10) != 0

    @property
    def bridge(self) -> bool:
        return (self[6] & 0x08) != 0

    @property
    def accelerometer_enabled(self) -> bool:
        return (self[6] & 0x04) != 0

    @property
    def two_way_ranging(self) -> bool:
        return (self[6] & 0x03) == 0

    @property
    def low_power_enabled(self) -> bool:
        return (self[5] & 0x80) != 0

    @property
    def location_engine_enabled(self) -> bool:
        return (self[5] & 0x40) != 0

    @property
    def led_enabled(self) -> bool:
        return (self[5] & 0x10) != 0

    @property
    def ble_enabled(self) -> bool:
        return (self[5] & 0x08) != 0

    @property
    def firmware_update_enabled(self) -> bool:
        return (self[5] & 0x04) != 0
```
avg_line_length: 23.519231 | max_line_length: 47 | alphanum_fraction: 0.551922
quality signals (41 `qsc_*_quality_signal` values, column order as above): 143, 1223, 4.566434, 0.34965, 0.185299, 0.235835, 0.275651, 0.355283, 0.199081, 0, 0, 0, 0, 0, 0.060096, 0.319706, 1223, 51, 48, 23.980392, 0.72476, 0.026983, 0, 0.297297, 0, 0, 0, 0, 0, 0, 0.035336, 0, 0, 1, 0.324324, false, 0, 0.027027, 0.297297, 0.675676, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 0, 0, 0, null, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0
effective: 0 | hits: 5
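The `DwmConfigResponse` row above decodes a DWM1001 configuration response by masking single bits of two payload bytes. A standalone sketch of that masking pattern; the byte values here are invented for illustration and are not taken from the Decawave protocol documentation:

```python
# Flags live in two bytes of the response: byte 6 packs node-type bits,
# byte 5 packs feature bits (mirroring the property definitions above).
payload = bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x24])  # illustrative

anchor = (payload[6] & 0x20) != 0            # bit 5 set -> anchor
initiator = (payload[6] & 0x10) != 0         # bit 4
two_way_ranging = (payload[6] & 0x03) == 0   # low two bits clear
led_enabled = (payload[5] & 0x10) != 0       # feature byte, bit 4
print(anchor, initiator, two_way_ranging, led_enabled)  # True False True True
```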
---
hexsha: 160042dee1e5fe5dd328de713281d4db7ca326be | size: 145 | ext: py | lang: Python
max_stars: count null; path app/command/command.py; repo monosloth/console; head a47e1479320a18a4b5716e87ee275985ebd5825f; licenses ["MIT"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from abc import ABCMeta, abstractmethod


class AbstractCommand(metaclass=ABCMeta):
    @abstractmethod
    def invoke(self, args):
        pass
```
avg_line_length: 18.125 | max_line_length: 41 | alphanum_fraction: 0.724138
quality signals (41 `qsc_*_quality_signal` values, column order as above): 15, 145, 7, 0.866667, 0.4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.206897, 145, 7, 42, 20.714286, 0.913043, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.2, false, 0.2, 0.2, 0, 0.6, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 161c2bf12e5d9cb871aeee5d901d0ff8bc60e052 | size: 116 | ext: py | lang: Python
max_stars: count null; path lenses/base.py; repo blandfort/mirror; head 70ae41fd151275d42506d07117aa2ea3ce59ad23; licenses ["MIT"]; events null to null
max_issues: count 6; same path, repo, head, and licenses as max_stars; events 2020-11-06T22:40:05.000Z to 2022-03-12T00:51:06.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from abc import ABC, abstractmethod


class Lens(ABC):
    @abstractmethod
    def show(self, rays):
        pass
```
avg_line_length: 11.6 | max_line_length: 35 | alphanum_fraction: 0.646552
quality signals (41 `qsc_*_quality_signal` values, column order as above): 14, 116, 5.357143, 0.785714, 0.453333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.275862, 116, 9, 36, 12.888889, 0.892857, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.2, false, 0.2, 0.2, 0, 0.6, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 16220350b3dec328bea79f8b9497f3e66d179f02 | size: 110 | ext: py | lang: Python
max_stars: count null; path setup.py; repo Luigi-PastorePica/FreeD; head e4267275b3edaefe3ca31bc38a5fc0fd3809cab0; licenses ["MIT"]; events null to null
max_issues: count 19; same path, repo, head, and licenses as max_stars; events 2020-09-30T02:57:33.000Z to 2020-11-15T21:09:14.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
# Pytest config setup
from setuptools import setup, find_packages
setup(name="src", packages=find_packages())
```
avg_line_length: 27.5 | max_line_length: 43 | alphanum_fraction: 0.8
quality signals (41 `qsc_*_quality_signal` values, column order as above): 15, 110, 5.733333, 0.666667, 0.27907, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 110, 3, 44, 36.666667, 0.868687, 0.172727, 0, 0, 0, 0, 0.033708, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.5, 0, 0.5, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 167fd28ab1c37a93d5946fbd63b263ad16f39097 | size: 48 | ext: py | lang: Python
max_stars: count 3; path tests/subcommands/test_local.py; repo jonathan-shemer/chenv; head e2b86b7a53031a35def1be21ece87a05d74d2919; licenses ["MIT"]; events 2020-10-15T07:46:48.000Z to 2021-09-06T20:49:05.000Z
max_issues: count 5; same path, repo, head, and licenses as max_stars; events 2021-01-27T11:47:12.000Z to 2021-08-30T08:49:37.000Z
max_forks: count 1; same path, repo, head, and licenses as max_stars; events 2022-03-15T09:29:19.000Z to 2022-03-15T09:29:19.000Z
content:
```python
"""Test cases for the `inputs.local` module."""
```
avg_line_length: 24 | max_line_length: 47 | alphanum_fraction: 0.666667
quality signals (41 `qsc_*_quality_signal` values, column order as above): 7, 48, 4.571429, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.125, 48, 1, 48, 48, 0.761905, 0.854167, 0, null, 0, null, 0, 0, null, 0, 0, 0, null, 1, null, true, 0, 0, null, null, null
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 16803067c001586cd5750fc35f100fa3a26c227a | size: 110 | ext: py | lang: Python
max_stars: count 3; path enthought/block_canvas/app/ui/configurable_import_ui.py; repo enthought/etsproxy; head 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347; licenses ["BSD-3-Clause"]; events 2016-12-09T06:05:18.000Z to 2018-03-01T13:00:29.000Z
max_issues: count 1; same path, repo, head, and licenses as max_stars; events 2020-12-02T00:51:32.000Z to 2020-12-02T08:48:55.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
# proxy module
from __future__ import absolute_import
from blockcanvas.app.ui.configurable_import_ui import *
```
avg_line_length: 27.5 | max_line_length: 55 | alphanum_fraction: 0.854545
quality signals (41 `qsc_*_quality_signal` values, column order as above): 15, 110, 5.8, 0.666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 110, 3, 56, 36.666667, 0.878788, 0.109091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 16853577094d5e05ac96edc65a412de8144e742c | size: 380 | ext: py | lang: Python
max_stars: count null; path bin/transformers/examples/transformers/data/__init__.py; repo shammur/news_categorization_english; head 58282c10f887a90932cbc0fd0dec0c556b98c19d; licenses ["Apache-2.0"]; events null to null
max_issues: count 19; same path, repo, head, and licenses as max_stars; events 2020-03-24T18:15:26.000Z to 2022-02-10T01:54:04.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from .processors import InputExample, InputFeatures, DataProcessor
from .processors import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features, glue_convert_examples_to_features_multiclass
from .processors import tokenize
from .metrics import is_sklearn_available

if is_sklearn_available():
    from .metrics import glue_compute_metrics
```
avg_line_length: 42.222222 | max_line_length: 162 | alphanum_fraction: 0.873684
quality signals (41 `qsc_*_quality_signal` values, column order as above): 49, 380, 6.346939, 0.489796, 0.135048, 0.192926, 0.135048, 0.186495, 0, 0, 0, 0, 0, 0, 0, 0.089474, 380, 8, 163, 47.5, 0.898844, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.833333, 0, 0.833333, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 0, 0, 0, null, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 169a33273363f372c46193cf1c0239a9a112a01b | size: 67 | ext: py | lang: Python
max_stars: count 1; path flambeau/misc/__init__.py; repo corenel/flambeau; head 3e90f37ba3d692af6df02da39907132ff9a490da; licenses ["MIT"]; events 2021-07-15T02:06:23.000Z to 2021-07-15T02:06:23.000Z
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from .config import OrderedEasyDict
from .util import AverageMeter
```
avg_line_length: 22.333333 | max_line_length: 35 | alphanum_fraction: 0.850746
quality signals (41 `qsc_*_quality_signal` values, column order as above): 8, 67, 7.125, 0.75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.119403, 67, 2, 36, 33.5, 0.966102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 16a046499822e44fd955911dec5e25b76f0587be | size: 8083 | ext: py | lang: Python
max_stars: count null; path tests/test_verification.py; repo 1beb/django-rest-framework-passwordless; head e5606e7d764aa4b951a0b7c643f76e27aec84556; licenses ["MIT"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from rest_framework import status
from rest_framework.authtoken.models import Token
from django.utils.translation import gettext_lazy as _
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from drfpasswordless.settings import api_settings, DEFAULTS
from drfpasswordless.utils import CallbackToken

User = get_user_model()


class AliasEmailVerificationTests(APITestCase):
    def setUp(self):
        api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'
        api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED = True

        self.url = reverse('drfpasswordless:auth_email')
        self.callback_url = reverse('drfpasswordless:auth_token')
        self.verify_url = reverse('drfpasswordless:verify_email')
        self.callback_verify = reverse('drfpasswordless:verify_token')
        self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
        self.email_verified_field_name = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME

    def test_email_unverified_to_verified_and_back(self):
        email = 'aaron@example.com'
        email2 = 'aaron2@example.com'
        data = {'email': email}

        # create a new user
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(**{self.email_field_name: email})
        self.assertNotEqual(user, None)
        self.assertEqual(getattr(user, self.email_verified_field_name), False)

        # Verify a token exists for the user, sign in and check verified again
        callback = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_AUTH, is_active=True).first()
        callback_data = {'email': email, 'token': callback}
        callback_response = self.client.post(self.callback_url, callback_data)
        self.assertEqual(callback_response.status_code, status.HTTP_200_OK)

        # Verify we got the token, then check and see that email_verified is now verified
        token = callback_response.data['token']
        self.assertEqual(token, Token.objects.get(user=user).key)

        # Refresh and see that the endpoint is now verified as True
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.email_verified_field_name), True)

        # Change email, should result in flag changing to false
        setattr(user, self.email_field_name, email2)
        user.save()
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.email_verified_field_name), False)

        # Verify
        self.client.force_authenticate(user)
        verify_response = self.client.post(self.verify_url)
        self.assertEqual(verify_response.status_code, status.HTTP_200_OK)

        # Refresh User
        user = User.objects.get(**{self.email_field_name: email2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.email_field_name), None)
        self.assertEqual(getattr(user, self.email_verified_field_name), False)

        # Post callback token back.
        verify_token = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_VERIFY, is_active=True).first()
        self.assertNotEqual(verify_token, None)
        verify_callback_response = self.client.post(self.callback_verify, {'email': email2, 'token': verify_token.key})
        self.assertEqual(verify_callback_response.status_code, status.HTTP_200_OK)

        # Refresh User
        user = User.objects.get(**{self.email_field_name: email2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.email_field_name), None)
        self.assertEqual(getattr(user, self.email_verified_field_name), True)

    def tearDown(self):
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']
        api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED = DEFAULTS['PASSWORDLESS_USER_MARK_MOBILE_VERIFIED']


class AliasMobileVerificationTests(APITestCase):
    def setUp(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = True
        api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'
        api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED = True

        self.url = reverse('drfpasswordless:auth_mobile')
        self.callback_url = reverse('drfpasswordless:auth_token')
        self.verify_url = reverse('drfpasswordless:verify_mobile')
        self.callback_verify = reverse('drfpasswordless:verify_token')
        self.mobile_field_name = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME
        self.mobile_verified_field_name = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME

    def test_mobile_unverified_to_verified_and_back(self):
        mobile = '+15551234567'
        mobile2 = '+15557654321'
        data = {'mobile': mobile}

        # create a new user
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(**{self.mobile_field_name: mobile})
        self.assertNotEqual(user, None)
        self.assertEqual(getattr(user, self.mobile_verified_field_name), False)

        # Verify a token exists for the user, sign in and check verified again
        callback = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_AUTH, is_active=True).first()
        callback_data = {'mobile': mobile, 'token': callback}
        callback_response = self.client.post(self.callback_url, callback_data)
        self.assertEqual(callback_response.status_code, status.HTTP_200_OK)

        # Verify we got the token, then check and see that email_verified is now verified
        token = callback_response.data['token']
        self.assertEqual(token, Token.objects.get(user=user).key)

        # Refresh and see that the endpoint is now verified as True
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.mobile_verified_field_name), True)

        # Change mobile, should result in flag changing to false
        setattr(user, self.mobile_field_name, '+15557654321')
        user.save()
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.mobile_verified_field_name), False)

        # Verify
        self.client.force_authenticate(user)
        verify_response = self.client.post(self.verify_url)
        self.assertEqual(verify_response.status_code, status.HTTP_200_OK)

        # Refresh User
        user = User.objects.get(**{self.mobile_field_name: mobile2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.mobile_field_name), None)
        self.assertEqual(getattr(user, self.mobile_verified_field_name), False)

        # Post callback token back.
        verify_token = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_VERIFY, is_active=True).first()
        self.assertNotEqual(verify_token, None)
        verify_callback_response = self.client.post(self.callback_verify, {'mobile': mobile2, 'token': verify_token.key})
        self.assertEqual(verify_callback_response.status_code, status.HTTP_200_OK)

        # Refresh User
        user = User.objects.get(**{self.mobile_field_name: mobile2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.mobile_field_name), None)
        self.assertEqual(getattr(user, self.mobile_verified_field_name), True)

    def tearDown(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']
        api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED = DEFAULTS['PASSWORDLESS_USER_MARK_MOBILE_VERIFIED']
```
avg_line_length: 49.588957 | max_line_length: 124 | alphanum_fraction: 0.732896
quality signals (41 `qsc_*_quality_signal` values, column order as above): 988, 8083, 5.714575, 0.11336, 0.047821, 0.073326, 0.04605, 0.855827, 0.821289, 0.792597, 0.684555, 0.659936, 0.652143, 0, 0.011754, 0.179018, 8083, 162, 125, 49.895062, 0.83906, 0.083632, 0, 0.584071, 0, 0, 0.082972, 0.05915, 0, 0, 0, 0, 0.283186, 1, 0.053097, false, 0.247788, 0.070796, 0, 0.141593, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 16b5146f7770ca10338c64c239f4aa5cdb1a768f | size: 41 | ext: py | lang: Python
max_stars: count null; path brawlbracket/__init__.py; repo TheLastBanana/BrawlBracket; head 1cad26b6499352b1b282388f4f76bfb4b2b6b4fe; licenses ["BSD-3-Clause"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from brawlbracket.app import runWebServer
```
avg_line_length: 41 | max_line_length: 41 | alphanum_fraction: 0.902439
quality signals (41 `qsc_*_quality_signal` values, column order as above): 5, 41, 7.4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.073171, 41, 1, 41, 41, 0.973684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 16ccb388d0926e791ea3ef4694c1e949e1b7561c | size: 24 | ext: py | lang: Python
max_stars: count null; path labuda/02.py; repo mallimuondu/python-homworks; head 352721a8e77d0b3bdb7a8a54197b6a04e1aec3c0; licenses ["MIT"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
x =lambda a, b : a * b
```
avg_line_length: 12 | max_line_length: 23 | alphanum_fraction: 0.458333
quality signals (41 `qsc_*_quality_signal` values, column order as above): 6, 24, 1.833333, 0.666667, 0.363636, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.375, 24, 2, 23, 12, 0.733333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0, 0, 0, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 1, 1, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 16de136b0f2928928edeea686aa2bde66ea9c0b2 | size: 67 | ext: py | lang: Python
max_stars: count null; path src/Division.py; repo leivapaola/Calculator; head 1d7e91f93c3f308c289e34c5872591bfd8bf7cdb; licenses ["MIT"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
def division(a, b):
    return "{:.9f}".format(float(a) / float(b))
```
avg_line_length: 33.5 | max_line_length: 47 | alphanum_fraction: 0.58209
quality signals (41 `qsc_*_quality_signal` values, column order as above): 11, 67, 3.545455, 0.727273, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017544, 0.149254, 67, 2, 47, 33.5, 0.666667, 0, 0, 0, 0, 0, 0.088235, 0, 0, 0, 0, 0, 0, 1, 0.5, false, 0, 0, 0.5, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0
effective: 0 | hits: 5
---
hexsha: bc7764509b165db406c8b95532bf38325a9d3493 | size: 61 | ext: py | lang: Python
max_stars: count null; path CodeWars/Python/6 kyu/Array.diff/main.py; repo opastushkov/codewars-solutions; head 0132a24259a4e87f926048318332dcb4d94858ca; licenses ["MIT"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
def array_diff(a, b):
    return [x for x in a if x not in b]
```
avg_line_length: 30.5 | max_line_length: 39 | alphanum_fraction: 0.622951
quality signals (41 `qsc_*_quality_signal` values, column order as above): 16, 61, 2.3125, 0.6875, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.278689, 61, 2, 39, 30.5, 0.840909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.5, false, 0, 0, 0.5, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0
effective: 0 | hits: 5
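The `array_diff` row above tests membership in `b` once per element of `a`, which is O(len(a) * len(b)) for lists. Converting `b` to a set first is the usual optimization; a sketch with the same observable behavior (assuming the elements of `b` are hashable):

```python
def array_diff_fast(a, b):
    # Set membership is O(1) on average, vs. O(len(b)) for a list.
    excluded = set(b)
    return [x for x in a if x not in excluded]

assert array_diff_fast([1, 2, 2, 2, 3], [2]) == [1, 3]
```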
---
hexsha: bc81221f69258c3819f8383ec54f19a6baf74f79 | size: 83 | ext: py | lang: Python
max_stars: count null; path afs/service/VLDBServiceError.py; repo chanke/afspy; head 525e7b3b53e58be515f11b83cc59ddb0765ef8e5; licenses ["BSD-2-Clause"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
from afs.util.AFSError import AFSError


class VLDBServiceError(AFSError):
    pass
```
avg_line_length: 16.6 | max_line_length: 38 | alphanum_fraction: 0.795181
quality signals (41 `qsc_*_quality_signal` values, column order as above): 10, 83, 6.6, 0.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.144578, 83, 4, 39, 20.75, 0.929577, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0.333333, 0.333333, 0, 0.666667, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: bc8f3762d005e5803bb9673c1c08bf3c62f12df2 | size: 15757 | ext: py | lang: Python
max_stars: count 11; path desktop/core/ext-py/guppy-0.1.10/guppy/heapy/pbhelp.py; repo kokosing/hue; head 2307f5379a35aae9be871e836432e6f45138b3d9; licenses ["Apache-2.0"]; events 2019-03-20T07:38:35.000Z to 2021-06-18T09:42:46.000Z
max_issues: count 4; same path; repo zks888/hue; head 93a8c370713e70b216c428caa2f75185ef809deb; licenses ["Apache-2.0"]; events 2021-03-11T04:02:00.000Z to 2022-03-27T08:31:56.000Z
max_forks: count 5; same path; repo zks888/hue; head 93a8c370713e70b216c428caa2f75185ef809deb; licenses ["Apache-2.0"]; events 2019-06-29T03:13:02.000Z to 2020-04-23T04:47:11.000Z
content:
```python
# AUTOMATICALLY GENERATED BY GENGUPPY
about="(iguppy.gsl.Text\nRecordingInter\np1\n(dp2\nS'tag_configs'\np3\n(dp4\nI0\n((S'spacing1'\np5\nI11\ntp6\n(S'font'\np7\n(S'times'\np8\nI24\nS'bold'\ntttp9\nsI1\n(g6\n(S'tabs'\np10\n(F23.5\nS'center'\np11\nF57\nS'left'\np12\nttp13\n(g7\n(g8\nI12\nS'bold'\ntp14\ntp15\ntp16\nsI2\n(g6\ng15\ntp17\nsI3\n(g6\n(g7\n(g8\nI12\ntp18\ntp19\ntp20\nsI4\n((g5\nI6\ntp21\ng13\ntp22\nsI5\n(g21\n(g7\n(g8\nI10\nS'italic'\ntttp23\nsI6\n(g21\n(g7\n(g8\nI10\ntttp24\nsI7\n(g21\ng19\ntp25\nsI8\n(g19\ntp26\nssS'_gsl_tk_geometry'\np27\nS'400x200'\np28\nsS'_gsl_title'\np29\nS'About Heapy Profile Browser'\np30\nsS'appends'\np31\n(lp32\nI0\naS'Heapy Profile Browser \\n'\np33\naI1\naS'\\t'\naI2\naS'Version'\np34\naI1\naS'\\t'\naI3\naS'0.1\\n'\np35\naI4\naS'\\t'\naI2\naS'Author'\np36\naI4\naS'\\t'\naI3\naS'Sverker Nilsson\\n'\np37\naI4\naS'\\t'\naI2\naS'Email'\np38\naI4\naS'\\t'\naI3\naS'sn@sncs.se\\n'\np39\naI4\naS'\\t'\naI2\naS'License'\np40\naI4\naS'\\t'\naI3\naS'MIT \\n'\np41\naI5\naS'Copyright (c) 2005--2008'\np42\naI6\naS' S. Nilsson Computer System AB Linkoping, Sweden '\np43\naI7\naS'\\n'\nasb."
help='(iguppy.gsl.Text\nRecordingInter\np1\n(dp2\nS\'tag_configs\'\np3\n(dp4\nI0\n((S\'spacing1\'\np5\nI10\ntp6\n(S\'font\'\np7\n(S\'times\'\np8\nI20\nS\'bold\'\ntttp9\nsI1\n(g6\n(g7\n(g8\nI12\nttp10\ntp11\nsI2\n((g5\nI6\ntp12\ng10\ntp13\nsI3\n((g5\nI9\ntp14\n(g7\n(g8\nI16\nS\'bold\'\ntttp15\nsI4\n(g10\ntp16\nsI5\n((S\'lmargin2\'\np17\nI36\ntp18\ng12\n(S\'tabs\'\np19\n(F97.5\nS\'center\'\np20\nF169\nS\'left\'\np21\nttp22\n(S\'lmargin1\'\np23\nI36\ntp24\n(g7\n(g8\nI12\nS\'bold\'\ntp25\ntp26\ntp27\nsI6\n(g18\ng12\ng24\ng26\ntp28\nsI7\n(g18\ng12\ng24\ng10\ntp29\nsI8\n(g22\ntp30\nsI9\n(g12\ng22\ntp31\nsI10\n(g18\ng24\ng10\ntp32\nsI11\n(g18\ng12\n(g19\n(F96\ng20\nF166\ng21\nttp33\ng24\ng26\ntp34\nsI12\n(g12\ng33\ntp35\nsI13\n(g18\ng12\n(g19\n(F71.5\ng20\nF117\ng21\nttp36\ng24\ng26\ntp37\nsI14\n(g36\ntp38\nsI15\n(g12\ng36\ntp39\nsI16\n(g18\ng24\n(g7\n(g8\nI10\nttp40\ntp41\nsI17\n(g18\n(g5\nI8\ntp42\ng24\ng26\ntp43\nsI18\n((g17\nI72\ntp44\n(g23\nI72\ntp45\ng10\ntp46\nsI19\n(g44\ng12\n(g19\n(F125.5\ng20\nF189\ng21\nttp47\ng45\ng26\ntp48\nsI20\n(g44\ng12\ng45\ng26\ntp49\nsI21\n(g44\ng12\ng45\ng10\ntp50\nsI22\n(g47\ntp51\nsI23\n(g12\ng47\ntp52\nsI24\n(g44\ng45\ng26\ntp53\nsI25\n(g44\ng12\n(g19\n(F116.5\ng20\nF171\ng21\nttp54\ng45\ng26\ntp55\nsI26\n(g54\ntp56\nsI27\n(g18\ng12\n(g19\n(F54.5\ng20\nF83\ng21\nttp57\ng24\ng26\ntp58\nsI28\n(g12\ng57\ntp59\nsI29\n(g14\ng10\ntp60\nsI30\n(g44\ng12\n(g19\n(F115.5\ng20\nF169\ng21\nttp61\ng45\ng26\ntp62\nsI31\n(g61\ntp63\nsI32\n(g12\ng61\ntp64\nsI33\n(g44\ng45\ng40\ntp65\nsI34\n(g44\ng12\n(g19\n(F111.5\ng20\nF161\ng21\nttp66\ng45\ng26\ntp67\nsI35\n(g66\ntp68\nsI36\n(g12\ng66\ntp69\nsI37\n(g18\ng42\ng24\ng10\ntp70\nssS\'_gsl_title\'\np71\nS\'Help for Heapy Profile Browser\'\np72\nsS\'appends\'\np73\n(lp74\nI0\naS\'Menus\\n\'\np75\naI1\naS\'Click on the dotted line at the top of a menu to "tear it off": a separate window containing the menu is created. \\n\'\np76\naI3\naS\'File Menu\\n\'\np77\naI5\naS\'\\t\'\naI6\naS\'New Profile Browser\'\np78\naI5\naS\'\\t\'\naI7\naS\'Create a new browser window with the same\\n\'\np79\naI8\naS\'\\t\\t\'\np80\naI7\naS\'file as the one opened in the current window. \\n\'\np81\naI9\naS\'\\t\'\naI6\naS\'Open Profile\'\np82\naI9\naS\'\\t\'\naI7\naS\'Open a profile data file in the current window.\\n\'\np83\naI9\naS\'\\t\'\naI6\naS\'Close Window\'\np84\naI9\naS\'\\t\'\naI7\naS\'Close the current window (exits from Tk if it\\n\'\np85\naI8\nag80\naI7\naS\'was the last browser window). \\n\'\np86\naI9\naS\'\\t\'\naI6\naS\'Clear Cache\'\np87\naI9\naS\'\\t\'\naI7\naS\'Clear the sample cache, releasing its memory.\\n\'\np88\naI8\nag80\naI7\naS\'The cache will be automatically filled again\\n\'\np89\naI8\nag80\naI7\naS\'when needed. \\n\'\np90\naI8\nag80\naI10\naS\'This command is a kind of temporary /\'\np91\naI7\naS\'\\n\'\naI8\nag80\naI10\naS\'experimental feature. I think the cache handling\'\np92\naI7\naS\'\\n\'\naI8\nag80\naI10\naS\'should be made automatic and less memory\'\np93\naI7\naS\'\\n\'\naI8\nag80\naI10\naS\'consuming. \'\np94\naI7\naS\'\\n\'\naI3\naS\'Pane Menu\\n\'\np95\naI11\naS\'\\t\'\naI6\naS\'Show Control Panel\'\np96\naI11\naS\'\\t\'\naI7\naS\'Show the control panel pane.\\n\'\np97\naI12\naS\'\\t\'\naI6\naS\'Show Graph\'\np98\naI12\naS\'\\t\'\naI7\naS\'Show the graph pane.\\n\'\np99\naI12\naS\'\\t\'\naI6\naS\'Show Table\'\np100\naI12\naS\'\\t\'\naI7\naS\'Show the table pane. 
\\n\'\np101\naI3\naS\'Graph Menu\\n\'\np102\naI13\naS\'\\t\'\naI6\naS\'Bars / Lines\'\np103\naI13\naS\'\\t\'\naI7\naS\'Choose whether the graph should be displayed using bars\\n\'\np104\naI14\nag80\naI7\naS\'or lines. \\n\'\np105\naI14\nag80\naI10\naS\'When using bars, the sample value (size or count) for\'\np106\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'different kinds of objects will be stacked on top of each\'\np107\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'other so the total height represents the total value of a\'\np108\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'sample. When using lines, each line represents the value\'\np109\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'for a single kind of object. The 10 largest values are\'\np110\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'shown in each sample point. Each kind has a particular\'\np111\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'color, choosen arbitrary but it is always the same color\'\np112\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'for the same kind. The remaing kinds, if any, are shown in\'\np113\naI7\naS\'\\n\'\naI14\nag80\naI10\naS\'black. \'\np114\naI7\naS\'\\n\'\naI15\naS\'\\t\'\naI6\naS\'Size / Count\'\np115\naI15\naS\'\\t\'\naI7\naS\'Choose whether the graph should display the size of\\n\'\np116\naI14\nag80\naI7\naS\'objects of a particular kind or the number of objects of\\n\'\np117\naI14\nag80\naI7\naS\'that kind. \\n\'\np118\naI14\nag80\naI16\naS\'(Note that this affects only the graph, the table will still\'\np119\naI7\naS\'\\n\'\naI14\nag80\naI16\naS\'choose size or kind as it were choosen in the table menu.)\'\np120\naI7\naS\'\\n\'\naI14\nag80\naI7\naS\'\\n\'\naI3\naS\'Table Menu\\n\'\np121\naI17\naS\'Header submenu\\n\'\np122\naI18\naS\'This menu has a choice of header for each column of the table. The data of each column is determined by the header of that column, as well as the headers of previous columns. So if you change the first column header (A/B), the data in that column will change as well as the data under the next header (Size/Count) and the ones that follow. \\n\'\np123\naI19\naS\'\\t\'\naI20\naS\'A / B\'\np124\naI19\naS\'\\t\'\naI21\naS\'Use the sample at the A or B marker in the graph.\\n\'\np125\naI22\nag80\naI18\naS\'The kinds of objects shown in the table under this\'\np126\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'column are taken from the 10 largest sample values\'\np127\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'at that point, in the same order as they are shown in\'\np128\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'the graph. The ordering in the graph depends on\'\np129\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'the choice of count or size in the graph menu.\'\np130\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'However, the table may show count or size\'\np131\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'independent from the choice in the graph. \'\np132\naI21\naS\'\\n\'\naI23\naS\'\\t\'\naI20\nag115\naI23\naS\'\\t\'\naI21\naS\'Show the size or count of the kinds of objects in\\n\'\np133\naI22\nag80\naI21\naS\'each row, taken from those choosen in the A / B\\n\'\np134\naI22\nag80\naI21\naS\'column. \\n\'\np135\naI23\naS\'\\t\'\naI20\naS\'%A:Tot / %B:Tot\'\np136\naI23\naS\'\\t\'\naI21\naS\'Show percentage of the Size / Count column,\\n\'\np137\naI22\nag80\naI21\naS\'relative to the total (size or count) at either the A or\\n\'\np138\naI22\nag80\naI21\naS\'B sample point. 
\\n\'\np139\naI23\naS\'\\t\'\naI20\naS\'Cumul /\'\np140\naI23\naS\'\\t\'\naI21\naS\'Show either a cumulative sum of the Size / Count\\n\'\np141\naI22\naS\'\\t\'\naI20\naS\'\'\naI24\naS\'A-B / B-A\'\np142\naI22\naS\'\\t\'\naI21\naS\'column, or the difference A-B or B-A. \\n\'\np143\naI22\nag80\naI18\naS\'The cumulative sum is taken by summing from the\'\np144\naI21\naS\'\\n\'\naI22\nag80\naI18\naS\'first table row down to the last row. \'\np145\naI21\naS\'\\n\'\naI23\naS\'\\t\'\naI20\nag136\naI23\naS\'\\t\'\naI21\naS\'Show percentage of the previous field, relative to\\n\'\np146\naI22\nag80\naI21\naS\'either the A or B total. \\n\'\np147\naI23\naS\'\\t\'\naI20\naS\'Kind\'\np148\naI23\naS\'\\t\'\naI21\naS\'Shows the kind of objects. This is currently the only\\n\'\np149\naI22\nag80\naI21\naS\'alternative for this column. The kind shown\\n\'\np150\naI22\nag80\naI21\naS\'corresponds to the color shown in the A / B\\n\'\np151\naI22\nag80\naI21\naS\'column. A special kind is <Other> which\\n\'\np152\naI22\nag80\naI21\naS\'summarizes the remaining data if there were more\\n\'\np153\naI22\nag80\naI21\naS\'than 10 different kinds in the sample. \\n\'\np154\naI17\naS\'Scrollbar submenu\\n\'\np155\naI25\naS\'\\t\'\naI20\naS\'Auto / On / Off\'\np156\naI25\naS\'\\t\'\naI21\naS\'Choose a scrollbar mode. The usual setting is Auto\\n\'\np157\naI26\nag80\naI21\naS\'which shows the scrollbar only when needed. \\n\'\np158\naI3\naS\'Window Menu\\n\'\np159\naI10\naS\'This menu lists the names of all open windows. Selecting one brings it to the top, deiconifying it if necessary. \\n\'\np160\naI3\naS\'Help Menu\\n\'\np161\naI27\naS\'\\t\'\naI6\naS\'About\'\np162\naI27\naS\'\\t\'\naI7\naS\'Version, author, email, copyright.\\n\'\np163\naI28\naS\'\\t\'\naI6\naS\'Help\'\np164\naI28\naS\'\\t\'\naI7\naS\'Open this help window. \\n\'\np165\naI0\naS\'Panes\\n\'\np166\naI1\naS\'There are 3 panes in the main window shown by default. At the top is the Control Panel, at the bottom left the Graph and at the bottom right the Table. \\n\'\np167\naI3\naS\'Control Panel Pane\\n\'\np168\naI29\naS\'This contains controls for the graph and the markers. It also has a quick-exit button and a collect button.\\n\'\np169\naI17\naS\'X / Y axis control\\n\'\np170\naI18\naS\'The two frames in the Control Panel having an X or Y button in the top left corner control each axis of the graph. The X, horizontal, axis shows the sample point. The Y axis shows either the size or count, as choosen in the Graph menu. \\n\'\np171\naI30\naS\'\\t\'\naI20\naS\'X / Y Button\'\np172\naI30\naS\'\\t\'\naI21\naS\'Brings up a menu, currently containing some buttons\\n\'\np173\naI31\nag80\naI21\naS\'that can also be accessed directly in the panel. \\n\'\np174\naI32\naS\'\\t\'\naI20\naS\'Grid button\'\np175\naI32\naS\'\\t\'\naI21\naS\'Select if the graph should show grid lines.\\n\'\np176\naI32\naS\'\\t\'\naI20\naS\'Range buttons\'\np177\naI32\naS\'\\t\'\naI21\naS\'Change the range that is shown in the displayed\\n\'\np178\naI31\naS\'\\t\'\naI20\naS\'\'\naI24\naS\'- / +\'\np179\naI31\naS\'\\t\'\naI21\naS\'portion of the graph. For each time + or - is pressed the\\n\'\np180\naI31\nag80\naI21\naS\'range will be stepped up or down in the sequence (1, 2,\\n\'\np181\naI31\nag80\naI21\naS\'5) and multiples thereoff. 
\\n\'\np182\naI32\naS\'\\t\'\naI20\naS\'Range field\'\np183\naI32\naS\'\\t\'\naI21\naS\'The current range is shown here, and a new range can\\n\'\np184\naI31\nag80\naI21\naS\'be entered by writing to this field and pressing Enter.\\n\'\np185\naI31\nag80\naI21\naS\'The format is an integer that may be followed by a\\n\'\np186\naI31\nag80\naI21\naS\'multiplier, K, M, G, or T, meaning that the value is\\n\'\np187\naI31\nag80\naI21\naS\'multipled by 1000, 1E6, 1E9, or 1E12 respectively.\\n\'\np188\naI31\nag80\naI21\naS\'The maximum range is 1T. \\n\'\np189\naI17\naS\'A / B sample control\\n\'\np190\naI18\naS\'Each of the frames showing A or B in the top left corner controls one of the sample markers. The current position is shown in the bottom left corner.\'\np191\naI33\naS\'(This is currently not an entry field - TODO - but the marker may be moved long distances by directly dragging it in the Graph frame.) \'\np192\naI18\naS\'\\n\'\naI34\naS\'\\t\'\naI20\naS\'- / + \'\np193\naI34\naS\'\\t\'\naI21\naS\'Step the marker one step to the left (-) or to the right (+).\\n\'\np194\naI35\nag80\naI18\naS\'The table will be updated to show new data if it was set\'\np195\naI21\naS\'\\n\'\naI35\nag80\naI18\naS\'to show such data that were dependent on the marker\'\np196\naI21\naS\'\\n\'\naI35\nag80\naI18\naS\'moved. \'\np197\naI21\naS\'\\n\'\naI35\nag80\naI18\naS\'The graph will show the new marker position. If the\'\np198\naI21\naS\'\\n\'\naI35\nag80\naI18\naS\'marker was outside of the displayed portion of the\'\np199\naI21\naS\'\\n\'\naI35\nag80\naI18\naS\'graph, the graph will scroll so the marker becomes\'\np200\naI21\naS\'\\n\'\naI35\nag80\naI18\naS\'visible. \'\np201\naI21\naS\'\\n\'\naI36\naS\'\\t\'\naI20\naS\'Track button\'\np202\naI36\naS\'\\t\'\naI21\naS\'Press to set the marker to the last sample in the file and\\n\'\np203\naI35\nag80\naI21\naS\'stay at the end as new samples are added. (New\\n\'\np204\naI35\nag80\naI21\naS\'samples are periodically read from the end of the file\\n\'\np205\naI35\nag80\naI21\naS\'when auto-collect is selected via the Collect button.) \\n\'\np206\naI35\nag80\naI18\naS\'Tracking is turned off when the marker is manually\'\np207\naI21\naS\'\\n\'\naI35\nag80\naI18\nag197\naI21\naS\'\\n\'\naI17\naS\'Exit button\\n\'\np208\naI18\naS\'Exits the program, a shortcut for the Exit command in the File menu.\\n\'\np209\naI17\naS\'Collect button\\n\'\np210\naI18\naS\'When selected, the browser will collect new samples from the current file, and will continue to do this periodically.\\n\'\np211\naI33\naS\'Currently it will check the file for new data once a second. \'\np212\naI18\naS\'\\n\'\naI3\naS\'Graph Pane\\n\'\np213\naI10\naS\'This pane shows the currently visible portion of the sample file. It can be scrolled via an horizontal scrollbar. The two markers are shown as buttons labeled A and B above the graph and with lines extending down in the graph. Markers can be moved by the mouse. \\n\'\np214\naI7\naS\'How to move the markers is hopefully quite self evident when tried out but I wrote up some details about it anyway.\\n\'\np215\naI17\naS\'Marker movement details\\n\'\np216\naI37\naS"Holding down the mouse button and moving the mouse moves the underlying marker. Klicking the mouse button over a marker without moving the mouse, selects the marker. While it is selected any movement of the mouse within the graph will move the marker with it. Klicking again anywhere in the graph will deselect the marker. 
If the marker can be moved, the cursor will be an arrow indicating the direction it can be moved, left or right or both. If the marker can not be moved in any direction, the cursor will show a circle or disc. The marker can not move outside the available samples. Moving the mouse outside of the graph also restricts the movement of the mouse, even if the mouse button is pressed. This is intentional so that the marker can be moved longer distances than the mouse can move. Moving the mouse to the right of the graph, the marker can only be moved to the right - moving back the mouse will not move the marker back until the mouse enters the graph area again. Similarly for the left side. Above or below the graph, the mouse will not move the marker at all but will show a circle to indicate that the mouse may be \'recirculated\' to move back into the graph. \\n"\np217\naI3\naS\'Table Pane\\n\'\np218\naI10\naS\'This pane shows a table based on the configuration set in the Table menu. The sample number and time stamp show in the header. \\n\'\np219\nasb.'
```
avg_line_length: 3151.4 | max_line_length: 14624 | alphanum_fraction: 0.727296
quality signals (41 `qsc_*_quality_signal` values, column order as above): 2947, 15757, 3.885646, 0.268069, 0.021308, 0.024976, 0.015719, 0.203039, 0.135272, 0.109161, 0.039473, 0.029168, 0.012226, 0, 0.123004, 0.08574, 15757, 4, 14625, 3939.25, 0.671873, 0.002221, 0, 0, 1, 2, 0.509097, 0.265394, 0, 0, 0, 0, 0, 1, 0, false, 0, 0, 0, 0, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 5
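The long `about` and `help` strings in the row above are protocol-0 pickle payloads that guppy's Profile Browser unpickles into GSL Text objects. The standard library can disassemble such a payload without executing it; a minimal sketch, assuming `about` has been bound to the first payload string from that module:

```python
import pickletools

# Protocol-0 pickles are printable ASCII, so the module-level string can be
# encoded directly. dis() prints the opcode stream and checks that the
# payload is well formed, without ever running the pickle's constructors.
pickletools.dis(about.encode("ascii"))
```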
---
hexsha: bcd4d0f4e63aaadd90ad4f85aee28cffb52c6ada | size: 150 | ext: py | lang: Python
max_stars: count 6; path io-cesium-ion/operators/__init__.py; repo AnalyticalGraphicsInc/ion-blender-exporter; head 7326dd24bed76baeb9894aa29282fa51ee4f6d38; licenses ["Apache-2.0"]; events 2020-06-25T11:47:57.000Z to 2022-02-02T01:33:51.000Z
max_issues: count 8; same path and head; repo CesiumGS/cesium-ion-blender-addon; licenses ["Apache-2.0"]; events 2019-05-29T13:16:09.000Z to 2019-06-25T18:46:18.000Z
max_forks: count 2; same path, repo, head, and licenses as max_stars; events 2019-07-16T07:56:34.000Z to 2019-10-23T08:20:44.000Z
content:
```python
from .token import (GetTokenOperator, ClearTokenOperator)
from .oauth import OAuthOperator
from .upload import ExportUploadOperator, ProgressOperator
```
avg_line_length: 37.5 | max_line_length: 58 | alphanum_fraction: 0.86
quality signals (41 `qsc_*_quality_signal` values, column order as above): 14, 150, 9.214286, 0.714286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.093333, 150, 3, 59, 50, 0.948529, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 4c01d71b16727db2c965b3f09775417a67ad7634 | size: 2343 | ext: py | lang: Python
max_stars: count null; path wepppy/nodb/mods/locations/lt/selectors/__init__.py; repo hwbeeson/wepppy; head 6358552df99853c75be8911e7ef943108ae6923e; licenses ["BSD-3-Clause"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
def all_hillslopes(landuse, soils):
    return list(landuse.domlc_d.keys())


def _identify_outcrop_mukeys(soils):
    outcrop_mukeys = []
    _soils = soils.subs_summary
    for top in _soils:
        desc = _soils[top]['desc'].lower()
        if 'melody-rock outcrop' in desc or 'ellispeak-rock outcrop' in desc:
            mukey = str(_soils[top]['mukey'])
            outcrop_mukeys.append(mukey)
    return outcrop_mukeys


def bare_or_sodgrass_or_bunchgrass_selector(landuse, soils):
    domlc_d = landuse.domlc_d
    topaz_ids = []
    for top in domlc_d:
        if domlc_d[top] in ['100', '101', '103']:
            topaz_ids.append(top)
    return topaz_ids


def not_shrub_and_not_outcrop_selector(landuse, soils):
    domlc_d = landuse.domlc_d
    domsoil_d = soils.domsoil_d
    outcrop_mukeys = _identify_outcrop_mukeys(soils)
    topaz_ids = []
    for top in domsoil_d:
        if str(domsoil_d[top]) not in outcrop_mukeys and domlc_d[top] != '104':
            topaz_ids.append(top)
    return topaz_ids


def shrub_and_not_outcrop_selector(landuse, soils):
    domlc_d = landuse.domlc_d
    domsoil_d = soils.domsoil_d
    outcrop_mukeys = _identify_outcrop_mukeys(soils)
    topaz_ids = []
    for top in domsoil_d:
        if str(domsoil_d[top]) not in outcrop_mukeys and domlc_d[top] == '104':
            topaz_ids.append(top)
    return topaz_ids


def not_shrub_selector(landuse, soils):
    domlc_d = landuse.domlc_d
    topaz_ids = []
    for top in domlc_d:
        if str(domlc_d[top]) != '104':
            topaz_ids.append(top)
    return topaz_ids


def shrub_selector(landuse, soils):
    domlc_d = landuse.domlc_d
    topaz_ids = []
    for top in domlc_d:
        if domlc_d[top] == '104':
            topaz_ids.append(top)
    return topaz_ids


def outcrop_selector(landuse, soils):
    domsoil_d = soils.domsoil_d
    outcrop_mukeys = _identify_outcrop_mukeys(soils)
    topaz_ids = []
    for top in domsoil_d:
        if domsoil_d[top] in outcrop_mukeys:
            topaz_ids.append(top)
    return topaz_ids


def not_outcrop_selector(landuse, soils):
    domsoil_d = soils.domsoil_d
    outcrop_mukeys = _identify_outcrop_mukeys(soils)
    topaz_ids = []
    for top in domsoil_d:
        if domsoil_d[top] not in outcrop_mukeys:
            topaz_ids.append(top)
    return topaz_ids
```
avg_line_length: 23.908163 | max_line_length: 79 | alphanum_fraction: 0.664533
quality signals (41 `qsc_*_quality_signal` values, column order as above): 332, 2343, 4.364458, 0.126506, 0.115942, 0.044168, 0.067633, 0.784679, 0.784679, 0.784679, 0.782609, 0.782609, 0.778468, 0, 0.011871, 0.244985, 2343, 97, 80, 24.154639, 0.807236, 0, 0, 0.621212, 0, 0, 0.030316, 0, 0, 0, 0, 0, 0, 1, 0.136364, false, 0, 0, 0.015152, 0.272727, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 4c1867059b33f1e1a63ceade160ac824779cd12f | size: 14 | ext: py | lang: Python
max_stars: count null; path tests/__init__.py; repo OrtnerMichael/magPyLib; head 4c7e7f56f6e0b915ec0e024c172c460fa80126e5; licenses ["BSD-2-Clause"]; events null to null
max_issues: count 1; same path, repo, head, and licenses as max_stars; events 2019-06-05T19:04:26.000Z to 2019-06-06T17:23:02.000Z
max_forks: count 2; same path, repo, head, and licenses as max_stars; events 2017-03-15T01:45:19.000Z to 2017-10-30T13:26:35.000Z
content:
```python
""" tests """
```
avg_line_length: 7 | max_line_length: 13 | alphanum_fraction: 0.357143
quality signals (41 `qsc_*_quality_signal` values, column order as above): 1, 14, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.214286, 14, 1, 14, 14, 0.454545, 0.357143, 0, null, 0, null, 0, 0, null, 0, 0, 0, null, 1, null, true, 0, 0, null, null, null
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 4c1b56281c919066ab5de3033752e934db4cf03d | size: 5052 | ext: py | lang: Python
max_stars: count 7; path homebytwo/routes/migrations/0042_activity_performance.py; repo drixselecta/homebytwo; head 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10; licenses ["MIT"]; events 2018-03-10T20:58:59.000Z to 2021-08-22T17:18:09.000Z
max_issues: count 69; same path and head; repo HomebyTwo/homebytwo; licenses ["MIT"]; events 2017-02-01T21:15:43.000Z to 2022-02-26T09:33:27.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
# Generated by Django 2.2.13 on 2020-07-02 08:00
import django.contrib.postgres.fields
from django.db import migrations, models

import homebytwo.routes.fields
import homebytwo.routes.models.activity


class Migration(migrations.Migration):

    dependencies = [
        ("routes", "0041_activity_streams"),
    ]

    operations = [
        migrations.RenameField(
            model_name="activity", old_name="totalup", new_name="total_elevation_gain",
        ),
        migrations.RemoveField(model_name="activityperformance", name="flat_param",),
        migrations.RemoveField(model_name="activityperformance", name="slope_param",),
        migrations.RemoveField(
            model_name="activityperformance", name="slope_squared_param",
        ),
        migrations.RemoveField(
            model_name="activityperformance", name="total_elevation_gain_param",
        ),
        migrations.RemoveField(model_name="activitytype", name="flat_param",),
        migrations.RemoveField(model_name="activitytype", name="slope_param",),
        migrations.RemoveField(model_name="activitytype", name="slope_squared_param",),
        migrations.RemoveField(
            model_name="activitytype", name="total_elevation_gain_param",
        ),
        migrations.AddField(
            model_name="activity",
            name="commute",
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="activityperformance",
            name="cv_scores",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.FloatField(),
                default=homebytwo.routes.models.activity.get_default_array,
                size=None,
            ),
        ),
        migrations.AddField(
            model_name="activityperformance",
            name="flat_parameter",
            field=models.FloatField(default=0.36),
        ),
        migrations.AddField(
            model_name="activityperformance",
            name="model_score",
            field=models.FloatField(default=0.0),
        ),
        migrations.AddField(
            model_name="activityperformance",
            name="regression_coefficients",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.FloatField(),
                default=homebytwo.routes.models.activity.get_default_array,
                size=None,
            ),
        ),
        migrations.AddField(
            model_name="activityperformance",
            name="gear_categories",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.CharField(max_length=50),
                default=homebytwo.routes.models.activity.get_default_category,
                size=None,
            ),
        ),
        migrations.AddField(
            model_name="activityperformance",
            name="workout_type_categories",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.CharField(max_length=50),
                default=homebytwo.routes.models.activity.get_default_category,
                size=None,
            ),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="gear_categories",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.CharField(max_length=50),
                default=homebytwo.routes.models.activity.get_default_category,
                size=None,
            ),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="workout_type_categories",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.CharField(max_length=50),
                default=homebytwo.routes.models.activity.get_default_category,
                size=None,
            ),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="flat_parameter",
            field=models.FloatField(default=0.36),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="max_gradient",
            field=models.FloatField(default=100.0),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="max_pace",
            field=models.FloatField(default=2.4),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="min_gradient",
            field=models.FloatField(default=-100.0),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="min_pace",
            field=models.FloatField(default=0.12),
        ),
        migrations.AddField(
            model_name="activitytype",
            name="regression_coefficients",
            field=homebytwo.routes.fields.NumpyArrayField(
                base_field=models.FloatField(),
                default=homebytwo.routes.models.activity.get_default_array,
                size=None,
            ),
        ),
    ]
```
avg_line_length: 36.608696 | max_line_length: 87 | alphanum_fraction: 0.590459
quality signals (41 `qsc_*_quality_signal` values, column order as above): 438, 5052, 6.616438, 0.182648, 0.074534, 0.119048, 0.139752, 0.857143, 0.824362, 0.735335, 0.675638, 0.557971, 0.557971, 0, 0.013964, 0.305424, 5052, 137, 88, 36.875912, 0.811912, 0.009105, 0, 0.738462, 1, 0, 0.15048, 0.032974, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.030769, 0, 0.053846, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 5
---
hexsha: 4c270e5d639bb7fb66446071cc39d32cf219dc39 | size: 22 | ext: py | lang: Python
max_stars: count null; path hello.py; repo fredyramix/profiles-rest-api; head 99681e6db15cf3cf661b5b6529e46dd3331a30af; licenses ["MIT"]; events null to null
max_issues: count null; same path, repo, head, and licenses as max_stars; events null to null
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
print ('Hola mundo')
```
avg_line_length: 11 | max_line_length: 21 | alphanum_fraction: 0.636364
quality signals (41 `qsc_*_quality_signal` values, column order as above): 3, 22, 4.666667, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.181818, 22, 1, 22, 22, 0.777778, 0, 0, 0, 0, 0, 0.47619, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0, 0, 0, 1
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 1, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1
effective: 0 | hits: 5
---
hexsha: 4c31429b48117a5a027c809869276ce590e0c35a | size: 100 | ext: py | lang: Python
max_stars: count 3; path enthought/units/unit_manipulation.py; repo enthought/etsproxy; head 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347; licenses ["BSD-3-Clause"]; events 2016-12-09T06:05:18.000Z to 2018-03-01T13:00:29.000Z
max_issues: count 1; same path, repo, head, and licenses as max_stars; events 2020-12-02T00:51:32.000Z to 2020-12-02T08:48:55.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
# proxy module
from __future__ import absolute_import
from scimath.units.unit_manipulation import *
```
avg_line_length: 25 | max_line_length: 45 | alphanum_fraction: 0.85
quality signals (41 `qsc_*_quality_signal` values, column order as above): 13, 100, 6.076923, 0.769231, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11, 100, 3, 46, 33.333333, 0.88764, 0.12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
raw signals (41 unsuffixed `qsc_*` values, column order as above): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 5
---
hexsha: 4c75990a6c182bf118e300d4d9403a81a80d9736 | size: 302 | ext: py | lang: Python
max_stars: count 1; path my_velov_assistant/conftest.py; repo thefifthagreement/my-velov; head 6688d22fde510dc93c1064bfb15ab556bb2e1f76; licenses ["MIT"]; events 2021-05-07T07:16:00.000Z to 2021-05-07T07:16:00.000Z
max_issues: count 7; same path, repo, head, and licenses as max_stars; events 2021-05-12T05:42:23.000Z to 2022-03-30T21:07:09.000Z
max_forks: count null; same path, repo, head, and licenses as max_stars; events null to null
content:
```python
import pytest
from my_velov_assistant.users.models import User
from my_velov_assistant.users.tests.factories import UserFactory


@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    settings.MEDIA_ROOT = tmpdir.strpath


@pytest.fixture
def user() -> User:
    return UserFactory()
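A usage note on the fixtures above: pytest injects them by parameter name, and media_storage applies to every test automatically because of autouse=True, so file uploads go to a throwaway tmpdir. A hypothetical companion test (not part of this conftest) would look like:

# Hypothetical test; pytest resolves `user` from the fixture above,
# and media_storage has already redirected MEDIA_ROOT to a tmpdir.
def test_user_fixture_returns_saved_user(user):
    assert user.pk is not None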
| 20.133333 | 64 | 0.791391 | 40 | 302 | 5.825 | 0.575 | 0.051502 | 0.094421 | 0.171674 | 0.214592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122517 | 302 | 14 | 65 | 21.571429 | 0.879245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.333333 | 0.111111 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 5 |
d5b5afca7471a7c1c0480c6a1575ef4a167f02a5 | 51,942 | py | Python | evap/results/tests/test_views.py | janno42/EvaP | 3da854058cf5694d96bab2089f8dd6c48c7cfd4a | ["MIT"] | null | null | null | evap/results/tests/test_views.py | janno42/EvaP | 3da854058cf5694d96bab2089f8dd6c48c7cfd4a | ["MIT"] | null | null | null | evap/results/tests/test_views.py | janno42/EvaP | 3da854058cf5694d96bab2089f8dd6c48c7cfd4a | ["MIT"] | null | null | null |
import random
from io import StringIO
from unittest.mock import patch
from django.contrib.auth.models import Group
from django.core.cache import caches
from django.core.management import call_command
from django.db import connection
from django.test import override_settings
from django.test.testcases import TestCase
from django.test.utils import CaptureQueriesContext
from django_webtest import WebTest
from model_bakery import baker
from evap.evaluation.models import (
Contribution,
Course,
Degree,
Evaluation,
Question,
Questionnaire,
Semester,
UserProfile,
)
from evap.evaluation.tests.tools import let_user_vote_for_evaluation, make_manager, make_rating_answer_counters
from evap.results.exporters import TextAnswerExporter
from evap.results.tools import cache_results
from evap.results.views import get_evaluations_with_prefetched_data
from evap.staff.tests.utils import WebTestStaffMode, helper_exit_staff_mode, run_in_staff_mode
class TestResultsView(WebTest):
url = "/results/"
@patch("evap.evaluation.models.Evaluation.can_be_seen_by", new=(lambda self, user: True))
def test_multiple_evaluations_per_course(self):
student = baker.make(UserProfile, email="student@institution.example.com")
# course with no evaluations does not show up
course = baker.make(Course)
page = self.app.get(self.url, user=student)
self.assertNotContains(page, course.name)
caches["results"].clear()
# course with one evaluation is a single line with the evaluation's full_name
evaluation = baker.make(
Evaluation,
course=course,
name_en="unique_evaluation_name1",
name_de="foo",
state=Evaluation.State.PUBLISHED,
)
page = self.app.get(self.url, user=student)
self.assertContains(page, evaluation.full_name)
caches["results"].clear()
# course with two evaluations is three lines without using the full names
evaluation2 = baker.make(
Evaluation,
course=course,
name_en="unique_evaluation_name2",
name_de="bar",
state=Evaluation.State.PUBLISHED,
)
page = self.app.get(self.url, user=student)
self.assertContains(page, course.name)
self.assertContains(page, evaluation.name_en)
self.assertContains(page, evaluation2.name_en)
self.assertNotContains(page, evaluation.full_name)
self.assertNotContains(page, evaluation2.full_name)
caches["results"].clear()
@patch("evap.evaluation.models.Evaluation.can_be_seen_by", new=(lambda self, user: True))
def test_order(self):
student = baker.make(UserProfile, email="student@institution.example.com")
course = baker.make(Course)
evaluation1 = baker.make(
Evaluation,
name_de="random_evaluation_d",
name_en="random_evaluation_a",
course=course,
state=Evaluation.State.PUBLISHED,
)
evaluation2 = baker.make(
Evaluation,
name_de="random_evaluation_c",
name_en="random_evaluation_b",
course=course,
state=Evaluation.State.PUBLISHED,
)
page = self.app.get(self.url, user=student).body.decode()
self.assertLess(page.index(evaluation1.name_en), page.index(evaluation2.name_en))
page = self.app.get(self.url, user=student, extra_environ={"HTTP_ACCEPT_LANGUAGE": "de"}).body.decode()
self.assertGreater(page.index(evaluation1.name_de), page.index(evaluation2.name_de))
# using LocMemCache so the cache queries don't show up in the query count that's measured here
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "testing_cache_default",
},
"sessions": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "testing_cache_results",
},
"results": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "testing_cache_sessions",
},
}
)
@patch("evap.evaluation.models.Evaluation.can_be_seen_by", new=(lambda self, user: True))
def test_num_queries_is_constant(self):
"""
ensures that the number of queries in the user list is constant
and not linear to the number of courses/evaluations
"""
student = baker.make(UserProfile, email="student@institution.example.com")
# warm up some caches
self.app.get(self.url, user=student)
def make_course_with_evaluations(unique_suffix):
course = baker.make(Course)
baker.make(
Evaluation,
course=course,
name_en="foo" + unique_suffix,
name_de="foo" + unique_suffix,
state=Evaluation.State.PUBLISHED,
_voter_count=0,
)
baker.make(
Evaluation,
course=course,
name_en="bar" + unique_suffix,
name_de="bar" + unique_suffix,
state=Evaluation.State.PUBLISHED,
_voter_count=0,
)
# first measure the number of queries with two courses
make_course_with_evaluations("frob")
make_course_with_evaluations("spam")
call_command("refresh_results_cache", stdout=StringIO())
with CaptureQueriesContext(connection) as context:
self.app.get(self.url, user=student)
num_queries_before = context.final_queries - context.initial_queries
# then measure the number of queries with one more course and compare
make_course_with_evaluations("eggs")
call_command("refresh_results_cache", stdout=StringIO())
with CaptureQueriesContext(connection) as context:
self.app.get(self.url, user=student)
num_queries_after = context.final_queries - context.initial_queries
self.assertEqual(num_queries_before, num_queries_after)
# django does not clear the LocMemCache in between tests. clear it here just to be safe.
caches["default"].clear()
caches["sessions"].clear()
caches["results"].clear()
class TestGetEvaluationsWithPrefetchedData(TestCase):
def test_returns_correct_participant_count(self):
"""Regression test for #1248"""
participants = baker.make(UserProfile, _bulk_create=True, _quantity=2)
evaluation = baker.make(
Evaluation,
state=Evaluation.State.PUBLISHED,
_participant_count=2,
_voter_count=2,
participants=participants,
voters=participants,
)
cache_results(evaluation)
participants[0].delete()
evaluation = Evaluation.objects.get(pk=evaluation.pk)
evaluations = get_evaluations_with_prefetched_data([evaluation])
self.assertEqual(evaluations[0].num_participants, 2)
self.assertEqual(evaluations[0].num_voters, 2)
evaluations = get_evaluations_with_prefetched_data(Evaluation.objects.filter(pk=evaluation.pk))
self.assertEqual(evaluations[0].num_participants, 2)
self.assertEqual(evaluations[0].num_voters, 2)
class TestResultsViewContributionWarning(WebTest):
@classmethod
def setUpTestData(cls):
cls.manager = make_manager()
cls.semester = baker.make(Semester, id=3)
contributor = baker.make(UserProfile)
# Set up an evaluation with one question but no answers
student1 = baker.make(UserProfile)
student2 = baker.make(UserProfile)
cls.evaluation = baker.make(
Evaluation,
id=21,
state=Evaluation.State.PUBLISHED,
course=baker.make(Course, semester=cls.semester),
participants=[student1, student2],
voters=[student1, student2],
)
questionnaire = baker.make(Questionnaire)
cls.evaluation.general_contribution.questionnaires.set([questionnaire])
cls.contribution = baker.make(
Contribution,
evaluation=cls.evaluation,
questionnaires=[questionnaire],
contributor=contributor,
)
cls.likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2)
cls.url = "/results/semester/%s/evaluation/%s" % (cls.semester.id, cls.evaluation.id)
def test_many_answers_evaluation_no_warning(self):
make_rating_answer_counters(self.likert_question, self.contribution, [0, 0, 10, 0, 0])
cache_results(self.evaluation)
page = self.app.get(self.url, user=self.manager, status=200)
self.assertNotIn("Only a few participants answered these questions.", page)
def test_zero_answers_evaluation_no_warning(self):
cache_results(self.evaluation)
page = self.app.get(self.url, user=self.manager, status=200)
self.assertNotIn("Only a few participants answered these questions.", page)
def test_few_answers_evaluation_show_warning(self):
make_rating_answer_counters(self.likert_question, self.contribution, [0, 0, 3, 0, 0])
cache_results(self.evaluation)
page = self.app.get(self.url, user=self.manager, status=200)
self.assertIn("Only a few participants answered these questions.", page)
class TestResultsSemesterEvaluationDetailView(WebTestStaffMode):
url = "/results/semester/2/evaluation/21"
@classmethod
def setUpTestData(cls):
cls.manager = make_manager()
cls.semester = baker.make(Semester, id=2)
contributor = baker.make(UserProfile, email="contributor@institution.example.com")
responsible = baker.make(UserProfile, email="responsible@institution.example.com")
cls.test_users = [cls.manager, contributor, responsible]
# Normal evaluation with responsible and contributor.
cls.evaluation = baker.make(
Evaluation, id=21, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester)
)
baker.make(
Contribution,
evaluation=cls.evaluation,
contributor=responsible,
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
)
cls.contribution = baker.make(
Contribution,
evaluation=cls.evaluation,
contributor=contributor,
role=Contribution.Role.EDITOR,
)
def test_questionnaire_ordering(self):
top_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP)
contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR)
bottom_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.BOTTOM)
top_heading_question = baker.make(Question, type=Question.HEADING, questionnaire=top_questionnaire, order=0)
top_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=top_questionnaire, order=1)
contributor_likert_question = baker.make(
Question, type=Question.LIKERT, questionnaire=contributor_questionnaire
)
bottom_heading_question = baker.make(
Question, type=Question.HEADING, questionnaire=bottom_questionnaire, order=0
)
bottom_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=bottom_questionnaire, order=1)
self.evaluation.general_contribution.questionnaires.set([top_questionnaire, bottom_questionnaire])
self.contribution.questionnaires.set([contributor_questionnaire])
make_rating_answer_counters(top_likert_question, self.evaluation.general_contribution)
make_rating_answer_counters(contributor_likert_question, self.contribution)
make_rating_answer_counters(bottom_likert_question, self.evaluation.general_contribution)
cache_results(self.evaluation)
content = self.app.get(self.url, user=self.manager).body.decode()
top_heading_index = content.index(top_heading_question.text)
top_likert_index = content.index(top_likert_question.text)
contributor_likert_index = content.index(contributor_likert_question.text)
bottom_heading_index = content.index(bottom_heading_question.text)
bottom_likert_index = content.index(bottom_likert_question.text)
self.assertTrue(
top_heading_index < top_likert_index < contributor_likert_index < bottom_heading_index < bottom_likert_index
)
def test_heading_question_filtering(self):
contributor = baker.make(UserProfile)
questionnaire = baker.make(Questionnaire)
heading_question_0 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0)
heading_question_1 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1)
likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2)
heading_question_2 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3)
contribution = baker.make(
Contribution, evaluation=self.evaluation, questionnaires=[questionnaire], contributor=contributor
)
make_rating_answer_counters(likert_question, contribution)
cache_results(self.evaluation)
page = self.app.get(self.url, user=self.manager)
self.assertNotIn(heading_question_0.text, page)
self.assertIn(heading_question_1.text, page)
self.assertIn(likert_question.text, page)
self.assertNotIn(heading_question_2.text, page)
def test_default_view_is_public(self):
cache_results(self.evaluation)
random.seed(42) # use explicit seed to always choose the same "random" slogan
page_without_get_parameter = self.app.get(self.url, user=self.manager)
random.seed(42)
page_with_get_parameter = self.app.get(self.url + "?view=public", user=self.manager)
random.seed(42)
page_with_random_get_parameter = self.app.get(self.url + "?view=asdf", user=self.manager)
self.assertEqual(page_without_get_parameter.body, page_with_get_parameter.body)
self.assertEqual(page_without_get_parameter.body, page_with_random_get_parameter.body)
def test_wrong_state(self):
helper_exit_staff_mode(self)
evaluation = baker.make(
Evaluation, state=Evaluation.State.REVIEWED, course=baker.make(Course, semester=self.semester)
)
cache_results(evaluation)
url = "/results/semester/%s/evaluation/%s" % (self.semester.id, evaluation.id)
self.app.get(url, user="student@institution.example.com", status=403)
def test_preview_without_rating_answers(self):
evaluation = baker.make(
Evaluation, state=Evaluation.State.EVALUATED, course=baker.make(Course, semester=self.semester)
)
cache_results(evaluation)
url = f"/results/semester/{self.semester.id}/evaluation/{evaluation.id}"
self.app.get(url, user=self.manager)
def test_preview_with_rating_answers(self):
evaluation = baker.make(
Evaluation, state=Evaluation.State.EVALUATED, course=baker.make(Course, semester=self.semester)
)
questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP)
likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=1)
evaluation.general_contribution.questionnaires.set([questionnaire])
participants = baker.make(UserProfile, _bulk_create=True, _quantity=20)
evaluation.participants.set(participants)
evaluation.voters.set(participants)
make_rating_answer_counters(likert_question, evaluation.general_contribution, [20, 0, 0, 0, 0])
cache_results(evaluation)
url = f"/results/semester/{self.semester.id}/evaluation/{evaluation.id}"
self.app.get(url, user=self.manager)
class TestResultsSemesterEvaluationDetailViewFewVoters(WebTest):
@classmethod
def setUpTestData(cls):
make_manager()
cls.semester = baker.make(Semester, id=2)
responsible = baker.make(UserProfile, email="responsible@institution.example.com")
cls.student1 = baker.make(UserProfile, email="student1@institution.example.com")
cls.student2 = baker.make(UserProfile, email="student2@example.com")
students = baker.make(UserProfile, _bulk_create=True, _quantity=10)
students.extend([cls.student1, cls.student2])
cls.evaluation = baker.make(
Evaluation,
id=22,
state=Evaluation.State.IN_EVALUATION,
course=baker.make(Course, semester=cls.semester),
participants=students,
)
questionnaire = baker.make(Questionnaire)
cls.question_grade = baker.make(Question, questionnaire=questionnaire, type=Question.GRADE)
baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT)
cls.evaluation.general_contribution.questionnaires.set([questionnaire])
cls.responsible_contribution = baker.make(
Contribution, contributor=responsible, evaluation=cls.evaluation, questionnaires=[questionnaire]
)
def helper_test_answer_visibility_one_voter(self, user_email, expect_page_not_visible=False):
page = self.app.get("/results/semester/2/evaluation/22", user=user_email, expect_errors=expect_page_not_visible)
if expect_page_not_visible:
self.assertEqual(page.status_code, 403)
else:
self.assertEqual(page.status_code, 200)
number_of_grade_badges = str(page).count("badge-grade")
self.assertEqual(number_of_grade_badges, 5) # 1 evaluation overview and 4 questions
number_of_visible_grade_badges = str(page).count("background-color")
self.assertEqual(number_of_visible_grade_badges, 0)
number_of_disabled_grade_badges = str(page).count("badge-grade badge-disabled")
self.assertEqual(number_of_disabled_grade_badges, 5)
def helper_test_answer_visibility_two_voters(self, user_email):
page = self.app.get("/results/semester/2/evaluation/22", user=user_email)
number_of_grade_badges = str(page).count("badge-grade")
self.assertEqual(number_of_grade_badges, 5) # 1 evaluation overview and 4 questions
number_of_visible_grade_badges = str(page).count("background-color")
self.assertEqual(number_of_visible_grade_badges, 4) # all but average grade in evaluation overview
number_of_disabled_grade_badges = str(page).count("badge-grade badge-disabled")
self.assertEqual(number_of_disabled_grade_badges, 1)
def test_answer_visibility_one_voter(self):
let_user_vote_for_evaluation(self.app, self.student1, self.evaluation)
self.evaluation.end_evaluation()
self.evaluation.end_review()
self.evaluation.publish()
self.evaluation.save()
self.assertEqual(self.evaluation.voters.count(), 1)
with run_in_staff_mode(self):
self.helper_test_answer_visibility_one_voter("manager@institution.example.com")
self.evaluation = Evaluation.objects.get(id=self.evaluation.id)
self.helper_test_answer_visibility_one_voter("responsible@institution.example.com")
self.helper_test_answer_visibility_one_voter("student@institution.example.com", expect_page_not_visible=True)
def test_answer_visibility_two_voters(self):
let_user_vote_for_evaluation(self.app, self.student1, self.evaluation)
let_user_vote_for_evaluation(self.app, self.student2, self.evaluation)
self.evaluation.end_evaluation()
self.evaluation.end_review()
self.evaluation.publish()
self.evaluation.save()
self.assertEqual(self.evaluation.voters.count(), 2)
with run_in_staff_mode(self):
self.helper_test_answer_visibility_two_voters("manager@institution.example.com")
self.helper_test_answer_visibility_two_voters("responsible@institution.example.com")
self.helper_test_answer_visibility_two_voters("student@institution.example.com")
class TestResultsSemesterEvaluationDetailViewPrivateEvaluation(WebTest):
@patch("evap.results.templatetags.results_templatetags.get_grade_color", new=lambda x: (0, 0, 0))
def test_private_evaluation(self):
semester = baker.make(Semester)
manager = make_manager()
student = baker.make(UserProfile, email="student@institution.example.com")
student_external = baker.make(UserProfile, email="student_external@example.com")
contributor = baker.make(UserProfile, email="contributor@institution.example.com")
responsible = baker.make(UserProfile, email="responsible@institution.example.com")
editor = baker.make(UserProfile, email="editor@institution.example.com")
voter1 = baker.make(UserProfile, email="voter1@institution.example.com")
voter2 = baker.make(UserProfile, email="voter2@institution.example.com")
non_participant = baker.make(UserProfile, email="non_participant@institution.example.com")
degree = baker.make(Degree)
course = baker.make(
Course, semester=semester, degrees=[degree], is_private=True, responsibles=[responsible, editor]
)
private_evaluation = baker.make(
Evaluation,
course=course,
state=Evaluation.State.PUBLISHED,
participants=[student, student_external, voter1, voter2],
voters=[voter1, voter2],
)
private_evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)])
baker.make(
Contribution,
evaluation=private_evaluation,
contributor=editor,
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
)
baker.make(Contribution, evaluation=private_evaluation, contributor=contributor, role=Contribution.Role.EDITOR)
cache_results(private_evaluation)
url = "/results/"
self.assertNotIn(private_evaluation.full_name, self.app.get(url, user=non_participant))
self.assertIn(private_evaluation.full_name, self.app.get(url, user=student))
self.assertIn(private_evaluation.full_name, self.app.get(url, user=responsible))
self.assertIn(private_evaluation.full_name, self.app.get(url, user=editor))
self.assertIn(private_evaluation.full_name, self.app.get(url, user=contributor))
with run_in_staff_mode(self):
self.assertIn(private_evaluation.full_name, self.app.get(url, user=manager))
self.app.get(url, user=student_external, status=403) # external users can't see results semester view
url = "/results/semester/%s/evaluation/%s" % (semester.id, private_evaluation.id)
self.app.get(url, user=non_participant, status=403)
self.app.get(url, user=student, status=200)
self.app.get(url, user=responsible, status=200)
self.app.get(url, user=editor, status=200)
self.app.get(url, user=contributor, status=200)
with run_in_staff_mode(self):
self.app.get(url, user=manager, status=200)
# this external user participates in the evaluation and can see the results
self.app.get(url, user=student_external, status=200)
class TestResultsTextanswerVisibilityForManager(WebTestStaffMode):
fixtures = ["minimal_test_data_results"]
@classmethod
def setUpTestData(cls):
cls.manager = make_manager()
cache_results(Evaluation.objects.get(id=1))
def test_textanswer_visibility_for_manager_before_publish(self):
evaluation = Evaluation.objects.get(id=1)
evaluation._voter_count = 0 # set these to 0 to make unpublishing work
evaluation._participant_count = 0
evaluation.unpublish()
evaluation.save()
page = self.app.get("/results/semester/1/evaluation/1?view=full", user=self.manager)
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertIn(".general_changed_published.", page)
self.assertIn(".contributor_orig_published.", page)
self.assertIn(".contributor_orig_private.", page)
self.assertIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertIn(".responsible_contributor_changed_published.", page)
self.assertIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_manager(self):
page = self.app.get("/results/semester/1/evaluation/1?view=full", user=self.manager)
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertIn(".general_changed_published.", page)
self.assertIn(".contributor_orig_published.", page)
self.assertIn(".contributor_orig_private.", page)
self.assertIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertIn(".responsible_contributor_changed_published.", page)
self.assertIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
class TestResultsTextanswerVisibility(WebTest):
fixtures = ["minimal_test_data_results"]
@classmethod
def setUpTestData(cls):
cache_results(Evaluation.objects.get(id=1))
def test_textanswer_visibility_for_responsible(self):
page = self.app.get("/results/semester/1/evaluation/1", user="responsible@institution.example.com")
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertNotIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_responsible_contributor(self):
page = self.app.get("/results/semester/1/evaluation/1", user="responsible_contributor@institution.example.com")
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertIn(".responsible_contributor_changed_published.", page)
self.assertIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_delegate_for_responsible(self):
page = self.app.get("/results/semester/1/evaluation/1", user="delegate_for_responsible@institution.example.com")
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertNotIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_contributor(self):
page = self.app.get("/results/semester/1/evaluation/1", user="contributor@institution.example.com")
self.assertNotIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertNotIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertNotIn(".general_changed_published.", page)
self.assertIn(".contributor_orig_published.", page)
self.assertIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertNotIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_delegate_for_contributor(self):
page = self.app.get("/results/semester/1/evaluation/1", user="delegate_for_contributor@institution.example.com")
self.assertNotIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertNotIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertNotIn(".general_changed_published.", page)
self.assertIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertNotIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_contributor_general_textanswers(self):
page = self.app.get(
"/results/semester/1/evaluation/1", user="contributor_general_textanswers@institution.example.com"
)
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertNotIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_student(self):
page = self.app.get("/results/semester/1/evaluation/1", user="student@institution.example.com")
self.assertNotIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertNotIn(".general_additional_orig_published.", page)
self.assertNotIn(".general_additional_orig_hidden.", page)
self.assertNotIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
self.assertNotIn(".responsible_contributor_additional_orig_published.", page)
self.assertNotIn(".responsible_contributor_additional_orig_hidden.", page)
def test_textanswer_visibility_for_student_external(self):
# the external user does not participate in or contribute to the evaluation and therefore can't see the results
self.app.get("/results/semester/1/evaluation/1", user="student_external@example.com", status=403)
def test_textanswer_visibility_info_is_shown(self):
page = self.app.get("/results/semester/1/evaluation/1", user="contributor@institution.example.com")
self.assertRegex(page.body.decode(), r"can be seen by:<br />\s*contributor user")
def test_textanswer_visibility_info_for_proxy_user(self):
page = self.app.get("/results/semester/1/evaluation/1", user="responsible@institution.example.com")
self.assertIn("responsible_contributor user (1 person)", page)
class TestResultsOtherContributorsListOnExportView(WebTest):
@classmethod
def setUpTestData(cls):
cls.semester = baker.make(Semester, id=2)
responsible = baker.make(UserProfile, email="responsible@institution.example.com")
cls.evaluation = baker.make(
Evaluation,
id=21,
state=Evaluation.State.PUBLISHED,
course=baker.make(Course, semester=cls.semester, responsibles=[responsible]),
)
questionnaire = baker.make(Questionnaire)
baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT)
cls.evaluation.general_contribution.questionnaires.set([questionnaire])
baker.make(
Contribution,
evaluation=cls.evaluation,
contributor=responsible,
questionnaires=[questionnaire],
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
)
cls.other_contributor_1 = baker.make(UserProfile, email="other_contributor_1@institution.example.com")
baker.make(
Contribution,
evaluation=cls.evaluation,
contributor=cls.other_contributor_1,
questionnaires=[questionnaire],
textanswer_visibility=Contribution.TextAnswerVisibility.OWN_TEXTANSWERS,
)
cls.other_contributor_2 = baker.make(UserProfile, email="other_contributor_2@institution.example.com")
baker.make(
Contribution,
evaluation=cls.evaluation,
contributor=cls.other_contributor_2,
questionnaires=[questionnaire],
textanswer_visibility=Contribution.TextAnswerVisibility.OWN_TEXTANSWERS,
)
cache_results(cls.evaluation)
def test_contributor_list(self):
url = "/results/semester/{}/evaluation/{}?view=export".format(self.semester.id, self.evaluation.id)
page = self.app.get(url, user="responsible@institution.example.com")
self.assertIn("<li>{}</li>".format(self.other_contributor_1.full_name), page)
self.assertIn("<li>{}</li>".format(self.other_contributor_2.full_name), page)
class TestResultsTextanswerVisibilityForExportView(WebTest):
fixtures = ["minimal_test_data_results"]
@classmethod
def setUpTestData(cls):
cls.manager = make_manager()
cache_results(Evaluation.objects.get(id=1))
def test_textanswer_visibility_for_responsible(self):
page = self.app.get("/results/semester/1/evaluation/1?view=export", user="responsible@institution.example.com")
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
def test_textanswer_visibility_for_responsible_contributor(self):
page = self.app.get(
"/results/semester/1/evaluation/1?view=export", user="responsible_contributor@institution.example.com"
)
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
def test_textanswer_visibility_for_contributor(self):
page = self.app.get("/results/semester/1/evaluation/1?view=export", user="contributor@institution.example.com")
self.assertNotIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertNotIn(".general_changed_published.", page)
self.assertIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
def test_textanswer_visibility_for_contributor_general_textanswers(self):
page = self.app.get(
"/results/semester/1/evaluation/1?view=export",
user="contributor_general_textanswers@institution.example.com",
)
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
def test_textanswer_visibility_for_student(self):
page = self.app.get("/results/semester/1/evaluation/1?view=export", user="student@institution.example.com")
self.assertNotIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertNotIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
def test_textanswer_visibility_for_manager(self):
with run_in_staff_mode(self):
contributor_id = UserProfile.objects.get(email="responsible@institution.example.com").id
page = self.app.get(
"/results/semester/1/evaluation/1?view=export&contributor_id={}".format(contributor_id),
user="manager@institution.example.com",
)
self.assertIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertIn(".general_changed_published.", page)
self.assertNotIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
def test_textanswer_visibility_for_manager_contributor(self):
manager_group = Group.objects.get(name="Manager")
contributor = UserProfile.objects.get(email="contributor@institution.example.com")
contributor.groups.add(manager_group)
page = self.app.get(
"/results/semester/1/evaluation/1?view=export&contributor_id={}".format(contributor.id),
user="contributor@institution.example.com",
)
self.assertNotIn(".general_orig_published.", page)
self.assertNotIn(".general_orig_hidden.", page)
self.assertNotIn(".general_orig_published_changed.", page)
self.assertNotIn(".general_changed_published.", page)
self.assertIn(".contributor_orig_published.", page)
self.assertNotIn(".contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_published.", page)
self.assertNotIn(".responsible_contributor_orig_hidden.", page)
self.assertNotIn(".responsible_contributor_orig_published_changed.", page)
self.assertNotIn(".responsible_contributor_changed_published.", page)
self.assertNotIn(".responsible_contributor_orig_private.", page)
self.assertNotIn(".responsible_contributor_orig_notreviewed.", page)
class TestArchivedResults(WebTest):
@classmethod
def setUpTestData(cls):
cls.semester = baker.make(Semester)
cls.manager = make_manager()
cls.reviewer = baker.make(
UserProfile, email="reviewer@institution.example.com", groups=[Group.objects.get(name="Reviewer")]
)
cls.student = baker.make(UserProfile, email="student@institution.example.com")
cls.student_external = baker.make(UserProfile, email="student_external@example.com")
cls.contributor = baker.make(UserProfile, email="contributor@institution.example.com")
cls.responsible = baker.make(UserProfile, email="responsible@institution.example.com")
course = baker.make(Course, semester=cls.semester, degrees=[baker.make(Degree)], responsibles=[cls.responsible])
cls.evaluation = baker.make(
Evaluation,
course=course,
state=Evaluation.State.PUBLISHED,
participants=[cls.student, cls.student_external],
voters=[cls.student, cls.student_external],
)
cls.evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)])
baker.make(
Contribution,
evaluation=cls.evaluation,
contributor=cls.responsible,
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
)
baker.make(Contribution, evaluation=cls.evaluation, contributor=cls.contributor)
cache_results(cls.evaluation)
@patch("evap.results.templatetags.results_templatetags.get_grade_color", new=lambda x: (0, 0, 0))
def test_unarchived_results(self):
url = "/results/"
self.assertIn(self.evaluation.full_name, self.app.get(url, user=self.student))
self.assertIn(self.evaluation.full_name, self.app.get(url, user=self.responsible))
self.assertIn(self.evaluation.full_name, self.app.get(url, user=self.contributor))
self.assertIn(self.evaluation.full_name, self.app.get(url, user=self.manager))
self.assertIn(self.evaluation.full_name, self.app.get(url, user=self.reviewer))
self.app.get(url, user=self.student_external, status=403) # external users can't see results semester view
url = "/results/semester/%s/evaluation/%s" % (self.semester.id, self.evaluation.id)
self.app.get(url, user=self.student, status=200)
self.app.get(url, user=self.responsible, status=200)
self.app.get(url, user=self.contributor, status=200)
self.app.get(url, user=self.manager, status=200)
self.app.get(url, user=self.reviewer, status=200)
self.app.get(url, user=self.student_external, status=200)
def test_archived_results(self):
self.semester.archive_results()
url = "/results/semester/%s/evaluation/%s" % (self.semester.id, self.evaluation.id)
self.app.get(url, user=self.student, status=403)
self.app.get(url, user=self.responsible, status=200)
self.app.get(url, user=self.contributor, status=200)
with run_in_staff_mode(self):
self.app.get(url, user=self.manager, status=200)
self.app.get(url, user=self.reviewer, status=403)
self.app.get(url, user=self.student_external, status=403)
class TestTextAnswerExportView(WebTest):
@classmethod
def setUpTestData(cls):
cls.reviewer = baker.make(
UserProfile,
email="reviewer@institution.example.com",
groups=[Group.objects.get(name="Reviewer")],
)
evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED)
cache_results(evaluation)
cls.url = f"/results/evaluation/{evaluation.id}/text_answers_export"
def test_file_sent(self):
def mock(_self, res):
res.write(b"1337")
with patch.object(TextAnswerExporter, "export", mock):
with run_in_staff_mode(self):
response = self.app.get(self.url, user=self.reviewer, status=200)
self.assertEqual(response.headers["Content-Type"], "application/vnd.ms-excel")
self.assertEqual(response.content, b"1337")
@patch("evap.results.exporters.TextAnswerExporter.export")
def test_permission_denied(self, export_method):
manager = make_manager()
student = baker.make(UserProfile, email="student@institution.example.com")
self.app.get(self.url, user=student, status=403)
export_method.assert_not_called()
with run_in_staff_mode(self):
self.app.get(self.url, user=self.reviewer, status=200)
export_method.assert_called_once()
export_method.reset_mock()
with run_in_staff_mode(self):
self.app.get(self.url, user=manager, status=200)
export_method.assert_called_once()
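A technique worth noting in the file above is test_num_queries_is_constant: it records the query count for the page with N courses, adds one more, and asserts the count is unchanged, which catches accidental per-object (N+1) query patterns. A stripped-down sketch of the same idea, with a hypothetical /items/ view standing in for the results page:

# Minimal sketch of the constant-query-count pattern; the /items/
# URL is a hypothetical stand-in for the page under test.
from django.db import connection
from django.test import TestCase
from django.test.utils import CaptureQueriesContext


class ItemListQueryCountTest(TestCase):
    def count_queries(self):
        with CaptureQueriesContext(connection) as context:
            self.client.get("/items/")
        return context.final_queries - context.initial_queries

    def test_query_count_is_constant(self):
        baseline = self.count_queries()
        # ... create one more object that the page would render ...
        self.assertEqual(self.count_queries(), baseline)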
| 51.478692 | 120 | 0.707789 | 5,649 | 51,942 | 6.265534 | 0.066029 | 0.055603 | 0.093406 | 0.08476 | 0.800559 | 0.770498 | 0.739278 | 0.708708 | 0.665565 | 0.627762 | 0 | 0.006901 | 0.185438 | 51,942 | 1,008 | 121 | 51.529762 | 0.829638 | 0.024277 | 0 | 0.583333 | 0 | 0 | 0.246415 | 0.232845 | 0 | 0 | 0 | 0 | 0.326389 | 1 | 0.061343 | false | 0 | 0.020833 | 0 | 0.101852 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
d5c4ee0e7a5c52b43965785c5fc09749edcb44e9 | 21 | py | Python | logparser/LogSig/__init__.py | tyronevb/logparser | 3dcd1e1892fb65c344f3b5010298e3dfd88f33ed | ["MIT"] | 2 | 2021-05-24T06:56:46.000Z | 2021-05-24T06:56:48.000Z | logparser/LogSig/__init__.py | tyronevb/logparser | 3dcd1e1892fb65c344f3b5010298e3dfd88f33ed | ["MIT"] | null | null | null | logparser/LogSig/__init__.py | tyronevb/logparser | 3dcd1e1892fb65c344f3b5010298e3dfd88f33ed | ["MIT"] | 1 | 2022-01-20T11:01:43.000Z | 2022-01-20T11:01:43.000Z |
from .LogSig import *
| 21 | 21 | 0.761905 | 3 | 21 | 5.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 21 | 1 | 21 | 21 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
d5e7aac51158290e5d848f9d68bdcfa7f99bc080 | 56 | py | Python | pytorch_widedeep/models/image/__init__.py | TangleSpace/pytorch-widedeep | ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff | ["MIT"] | null | null | null | pytorch_widedeep/models/image/__init__.py | TangleSpace/pytorch-widedeep | ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff | ["MIT"] | null | null | null | pytorch_widedeep/models/image/__init__.py | TangleSpace/pytorch-widedeep | ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff | ["MIT"] | null | null | null |
from pytorch_widedeep.models.image.vision import Vision
| 28 | 55 | 0.875 | 8 | 56 | 6 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 56 | 1 | 56 | 56 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
e68ca684b012b77a625308c02cf9fa153f8c823b | 33 | py | Python | hello.py | WeiLiqiang/python | c454d1e6627c746c2b024f66232a39ba0fc68b36 | ["Apache-2.0"] | 1 | 2017-11-02T03:56:40.000Z | 2017-11-02T03:56:40.000Z | hello.py | WeiLiqiang/python | c454d1e6627c746c2b024f66232a39ba0fc68b36 | ["Apache-2.0"] | null | null | null | hello.py | WeiLiqiang/python | c454d1e6627c746c2b024f66232a39ba0fc68b36 | ["Apache-2.0"] | null | null | null |
# coding: UTF-8
print "你好,python"
| 16.5 | 17 | 0.69697 | 6 | 33 | 3.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 0.121212 | 33 | 2 | 17 | 16.5 | 0.758621 | 0.393939 | 0 | 0 | 0 | 0 | 0.473684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 5 |
e69886d879941349151aa229f403a732b6ce8d98 | 273 | py | Python | src/sage/combinat/integer_lists/__init__.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | ["BSL-1.0"] | null | null | null | src/sage/combinat/integer_lists/__init__.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | ["BSL-1.0"] | null | null | null | src/sage/combinat/integer_lists/__init__.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | ["BSL-1.0"] | null | null | null |
from base import IntegerListsBackend, Envelope
from lists import IntegerLists
from invlex import IntegerListsLex
from sage.structure.sage_object import register_unpickle_override
register_unpickle_override('sage.combinat.integer_list', 'IntegerListsLex', IntegerListsLex)
| 39 | 92 | 0.875458 | 31 | 273 | 7.516129 | 0.580645 | 0.137339 | 0.206009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 273 | 6 | 93 | 45.5 | 0.924603 | 0 | 0 | 0 | 0 | 0 | 0.150183 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.8 | 0 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
e6a34c66efccf6675769cab3a5003b25eb856ed3 | 335 | py | Python | src/falcon_oas/__init__.py | sisp/falcon-oas | fc135f72d27b4eba32b1b80d0486d5fb474a5ddc | ["Apache-2.0"] | 6 | 2019-02-15T11:09:53.000Z | 2021-06-09T16:06:56.000Z | src/falcon_oas/__init__.py | sisp/falcon-oas | fc135f72d27b4eba32b1b80d0486d5fb474a5ddc | ["Apache-2.0"] | 35 | 2019-04-01T04:09:00.000Z | 2021-04-20T17:40:54.000Z | src/falcon_oas/__init__.py | sisp/falcon-oas | fc135f72d27b4eba32b1b80d0486d5fb474a5ddc | ["Apache-2.0"] | 5 | 2019-04-16T16:04:49.000Z | 2021-12-10T07:35:38.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .__version__ import __version__ # noqa: F401
from .factories import OAS # noqa: F401
from .middlewares import Middleware # noqa: F401
from .request import Request # noqa: F401
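The noqa: F401 markers above are the usual way a package __init__ re-exports its public API without tripping the linter's unused-import check. A hypothetical package following the same convention, made explicit with __all__:

# Hypothetical mypkg/__init__.py using the same re-export convention;
# .client and .errors are made-up submodules for illustration.
from .client import Client  # noqa: F401
from .errors import ApiError  # noqa: F401

__all__ = ["Client", "ApiError"]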
| 33.5 | 50 | 0.81791 | 43 | 335 | 5.744186 | 0.395349 | 0.161943 | 0.259109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041958 | 0.146269 | 335 | 9 | 51 | 37.222222 | 0.821678 | 0.128358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.125 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
e6ad4dcaa4d311679ae98438e96e7d50fe916cb2 | 21,252 | py | Python | k2/python/k2/autograd.py | freewym/k2 | 67c4328be96b65249b5adf690a24999f4bbb4b97 | ["MIT"] | null | null | null | k2/python/k2/autograd.py | freewym/k2 | 67c4328be96b65249b5adf690a24999f4bbb4b97 | ["MIT"] | null | null | null | k2/python/k2/autograd.py | freewym/k2 | 67c4328be96b65249b5adf690a24999f4bbb4b97 | ["MIT"] | null | null | null |
# Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
# See ../../../LICENSE for clarification regarding multiple authors
from typing import List, Tuple
import torch
import _k2
from .fsa import Fsa
from .dense_fsa_vec import DenseFsaVec
class _GetTotScoresFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, fsas: Fsa, log_semiring: bool, use_float_scores: bool,
unused_scores: torch.Tensor) -> torch.Tensor:
'''Compute the total loglikes of an FsaVec.
Args:
fsas:
The input FsaVec.
log_semiring:
True to use log semiring.
False to use tropical semiring.
use_float_scores:
True to use float, i.e., single precision floating point,
to compute log likes. False to use double precision.
unused_scores:
It is used only for backward propagation purpose.
It equals to `fsas.scores`.
Returns:
The forward loglike contained in a 1-D tensor.
If `use_float_scores==True`, its dtype is `torch.float32`;
it is `torch.float64` otherwise.
'''
# the .detach() below avoids a reference cycle; if we didn't do that,
# the backward_fn of tot_scores would be set to this object, giving
# `fsas` a reference to this object, which also has a reference
# to `fsas`.
if log_semiring is False:
tot_scores = fsas.get_tot_scores_tropical(
use_float_scores).detach()
else:
tot_scores = fsas.get_tot_scores_log(use_float_scores).detach()
# NOTE: since `fsas`, `log_semiring` and `use_float_scores` are
# not tensors, they are saved as attributes of `ctx`.
ctx.fsas = fsas
ctx.log_semiring = log_semiring
ctx.use_float_scores = use_float_scores
ctx.save_for_backward(unused_scores)
return tot_scores
@staticmethod
def backward(ctx, tot_scores_grad: torch.Tensor
) -> Tuple[None, None, None, torch.Tensor]: # noqa
fsas = ctx.fsas
log_semiring = ctx.log_semiring
use_float_scores = ctx.use_float_scores
scores, = ctx.saved_tensors
if log_semiring is False:
entering_arcs = fsas.get_entering_arcs(use_float_scores)
_, ragged_int = _k2.shortest_path(fsas.arcs, entering_arcs)
if use_float_scores:
out_grad = _k2._get_tot_scores_float_tropical_backward(
fsas.arcs, ragged_int, tot_scores_grad)
else:
out_grad = _k2._get_tot_scores_double_tropical_backward(
fsas.arcs, ragged_int, tot_scores_grad)
# We return four values since the `forward` method accepts four
# arguments (excluding ctx).
# fsas, log_semiring, use_float_scores, unused_scores
return None, None, None, out_grad
else:
forward_scores = fsas.get_forward_scores_log(use_float_scores)
backward_scores = fsas.get_backward_scores_log(use_float_scores)
if use_float_scores:
func = _k2._get_arc_scores_float
bprop_func = _k2._get_tot_scores_float_log_backward
else:
func = _k2._get_arc_scores_double
bprop_func = _k2._get_tot_scores_double_log_backward
arc_scores = func(fsas=fsas.arcs,
forward_scores=forward_scores,
backward_scores=backward_scores)
out_grad = bprop_func(fsas.arcs, arc_scores, tot_scores_grad)
return None, None, None, out_grad
class _IntersectDensePrunedFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, a_fsas: Fsa, b_fsas: DenseFsaVec, out_fsa: List[Fsa],
search_beam: float, output_beam: float, min_active_states: int,
max_active_states: int, unused_scores_a: torch.Tensor,
unused_scores_b: torch.Tensor) -> torch.Tensor:
'''Intersect array of FSAs on CPU/GPU.
Args:
a_fsas:
Input FsaVec, i.e., `decoding graphs`, one per sequence. It might
just be a linear sequence of phones, or might be something more
complicated. Must have either `a_fsas.shape[0] == b_fsas.dim0()`, or
`a_fsas.shape[0] == 1` in which case the graph is shared.
b_fsas:
Input FSAs that correspond to neural network output.
out_fsa:
A list containing ONLY one entry which will be set to the
generated FSA on return. We pass it as a list since the return
value can only be types of torch.Tensor in the `forward` function.
search_beam:
Decoding beam, e.g. 20. Smaller is faster, larger is more exact
(less pruning). This is the default value; it may be modified by
`min_active_states` and `max_active_states`.
output_beam:
Pruning beam for the output of intersection (vs. best path);
equivalent to kaldi's lattice-beam. E.g. 8.
max_active_states:
Maximum number of FSA states that are allowed to be active on any
given frame for any given intersection/composition task. This is
advisory, in that it will try not to exceed that but may not always
succeed. You can use a very large number if no constraint is needed.
min_active_states:
Minimum number of FSA states that are allowed to be active on any
given frame for any given intersection/composition task. This is
advisory, in that it will try not to have fewer than this number
active. Set it to zero if there is no constraint.
unused_scores_a:
It equals `a_fsas.scores`; its sole purpose is for back
propagation.
unused_scores_b:
It equals `b_fsas.scores`; its sole purpose is for back
propagation.
Returns:
Return `out_fsa[0].scores`.
'''
assert len(out_fsa) == 1
ragged_arc, arc_map_a, arc_map_b = _k2.intersect_dense_pruned(
a_fsas=a_fsas.arcs,
b_fsas=b_fsas.dense_fsa_vec,
search_beam=search_beam,
output_beam=output_beam,
min_active_states=min_active_states,
max_active_states=max_active_states)
out_fsa[0] = Fsa(ragged_arc)
for name, a_value in a_fsas.named_tensor_attr(include_scores=False):
value = _k2.index_select(a_value, arc_map_a)
setattr(out_fsa[0], name, value)
for name, a_value in a_fsas.named_non_tensor_attr():
setattr(out_fsa[0], name, a_value)
ctx.arc_map_a = arc_map_a
ctx.arc_map_b = arc_map_b
ctx.save_for_backward(unused_scores_a, unused_scores_b)
return out_fsa[0].scores
@staticmethod
def backward(ctx, out_fsa_grad: torch.Tensor) \
-> Tuple[None, None, None, None, None, None, None, torch.Tensor, torch.Tensor]: # noqa
a_scores, b_scores = ctx.saved_tensors
arc_map_a = ctx.arc_map_a
arc_map_b = ctx.arc_map_b
grad_a = torch.zeros(a_scores.size(0),
dtype=torch.float32,
device=a_scores.device,
requires_grad=False)
grad_b = torch.zeros(
*b_scores.shape,
dtype=torch.float32,
device=b_scores.device,
requires_grad=False).contiguous() # will use its `view()` later
_k2.index_add(arc_map_a, out_fsa_grad, grad_a)
_k2.index_add(arc_map_b, out_fsa_grad, grad_b.view(-1))
return None, None, None, None, None, None, None, grad_a, grad_b
class _IntersectDenseFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, a_fsas: Fsa, b_fsas: DenseFsaVec, out_fsa: List[Fsa],
output_beam: float, unused_scores_a: torch.Tensor,
unused_scores_b: torch.Tensor) -> torch.Tensor:
'''Intersect array of FSAs on CPU/GPU.
Args:
a_fsas:
Input FsaVec, i.e., `decoding graphs`, one per sequence. It might
just be a linear sequence of phones, or might be something more
complicated. Must have either `a_fsas.shape[0] == b_fsas.dim0()`, or
`a_fsas.shape[0] == 1` in which case the graph is shared.
b_fsas:
Input FSAs that correspond to neural network output.
out_fsa:
A list containing ONLY one entry, which will be set to the
generated FSA on return. We pass it as a list since the return
value of the `forward` function can only be a torch.Tensor.
output_beam:
Pruning beam for the output of intersection (vs. best path);
equivalent to kaldi's lattice-beam. E.g. 8.
unused_scores_a:
It equals `a_fsas.scores`; its sole purpose is for back
propagation.
unused_scores_b:
It equals `b_fsas.scores`; its sole purpose is for back
propagation.
Returns:
Return `out_fsa[0].scores`.
'''
assert len(out_fsa) == 1
ragged_arc, arc_map_a, arc_map_b = _k2.intersect_dense(
a_fsas=a_fsas.arcs,
b_fsas=b_fsas.dense_fsa_vec,
output_beam=output_beam)
out_fsa[0] = Fsa(ragged_arc)
for name, a_value in a_fsas.named_tensor_attr(include_scores=False):
value = _k2.index_select(a_value, arc_map_a)
setattr(out_fsa[0], name, value)
for name, a_value in a_fsas.named_non_tensor_attr():
setattr(out_fsa[0], name, a_value)
ctx.arc_map_a = arc_map_a
ctx.arc_map_b = arc_map_b
ctx.save_for_backward(unused_scores_a, unused_scores_b)
return out_fsa[0].scores
@staticmethod
def backward(ctx, out_fsa_grad: torch.Tensor) \
-> Tuple[None, None, None, None, torch.Tensor, torch.Tensor]: # noqa
a_scores, b_scores = ctx.saved_tensors
arc_map_a = ctx.arc_map_a
arc_map_b = ctx.arc_map_b
grad_a = torch.zeros(a_scores.size(0),
dtype=torch.float32,
device=a_scores.device,
requires_grad=False)
grad_b = torch.zeros(
*b_scores.shape,
dtype=torch.float32,
device=b_scores.device,
requires_grad=False).contiguous() # will use its `view()` later
_k2.index_add(arc_map_a, out_fsa_grad, grad_a)
_k2.index_add(arc_map_b, out_fsa_grad, grad_b.view(-1))
return None, None, None, None, grad_a, grad_b
class _IndexSelectFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, src: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
'''Returns a new tensor which indexes the input tensor along dimension 0
using the entries in `index`.
If the entry in `index` is -1, then the corresponding entry in the
returned tensor is 0.
Caution:
`index` must be a 1-D tensor of dtype torch.int32.
Args:
src:
The input tensor. Either 1-D or 2-D with dtype torch.int32 or
torch.float32.
index:
1-D tensor of dtype torch.int32 containing the indexes.
If an entry is -1, the corresponding entry in the returned value
is 0. The elements of `index` should be in the range
`[-1..src.shape[0]-1]`.
Returns:
A tensor with shape (index.numel(), *src.shape[1:]) and dtype the
same as `src`, e.g. if `src.ndim == 1`, ans.shape would be
(index.shape[0],); if `src.ndim == 2`, ans.shape would be
(index.shape[0], src.shape[1]).
Will satisfy `ans[i] == src[index[i]]` if `src.ndim == 1`,
or `ans[i,j] == src[index[i],j]` if `src.ndim == 2`, except for
entries where `index[i] == -1` which will be zero.
'''
ctx.save_for_backward(src, index)
return _k2.index_select(src, index)
@staticmethod
def backward(ctx, out_grad) -> Tuple[torch.Tensor, None]:
src, index = ctx.saved_tensors
ans = torch.zeros(src.size(0),
dtype=torch.float32,
device=src.device,
requires_grad=False)
_k2.index_add(index, out_grad, ans)
return ans, None
class _UnionFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, fsas: Fsa, out_fsa: List[Fsa],
unused_fsas_scores: torch.Tensor) -> torch.Tensor:
'''Compute the union of all fsas in a FsaVec.
Args:
fsas:
The input FsaVec. Caution: We require that each fsa in the FsaVec
is non-empty (i.e., with at least two states).
out_fsa:
A list containing one entry. Since this function can only return
values of type `torch.Tensor`, we return the union result in the
list.
unused_fsas_scores:
It is the same as `fsas.scores`, whose sole purpose is for autograd.
It is not used in this function.
'''
need_arc_map = True
ragged_arc, arc_map = _k2.union(fsas.arcs, need_arc_map)
out_fsa[0] = Fsa(ragged_arc)
for name, value in fsas.named_tensor_attr(include_scores=False):
value = _k2.index_select(value, arc_map)
setattr(out_fsa[0], name, value)
for name, value in fsas.named_non_tensor_attr():
setattr(out_fsa[0], name, value)
ctx.arc_map = arc_map
ctx.save_for_backward(unused_fsas_scores)
return out_fsa[0].scores # the return value will be discarded
@staticmethod
def backward(ctx, out_fsa_grad: torch.Tensor
) -> Tuple[None, None, torch.Tensor]: # noqa
arc_map = ctx.arc_map
fsas_scores, = ctx.saved_tensors
ans = torch.zeros(fsas_scores.size(0),
dtype=torch.float32,
device=fsas_scores.device,
requires_grad=False)
_k2.index_add(arc_map, out_fsa_grad, ans)
return None, None, ans
def get_tot_scores(fsas: Fsa, log_semiring: bool,
use_float_scores: bool) -> torch.Tensor:
'''Compute the total loglikes of an FsaVec.
Args:
fsas:
The input FsaVec.
log_semiring:
True to use log semiring.
False to use tropical semiring.
use_float_scores:
True to use float, i.e., single precision floating point,
to compute log likes. False to use double precision.
Returns:
The forward loglike of each FSA, contained in a 1-D tensor.
If `use_float_scores==True`, its dtype is `torch.float32`;
it is `torch.float64` otherwise.
'''
tot_scores = _GetTotScoresFunction.apply(fsas, log_semiring,
use_float_scores, fsas.scores)
return tot_scores
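# A minimal usage sketch, assuming `k2.Fsa.from_str` and `k2.create_fsa_vec`
# are available as in the rest of this library; the FSA text below is a
# hypothetical two-arc example:
#
#   >>> import k2
#   >>> s = '''
#   ... 0 1 1 0.1
#   ... 1 2 -1 0.2
#   ... 2
#   ... '''
#   >>> fsa = k2.Fsa.from_str(s)
#   >>> fsa.requires_grad_(True)
#   >>> fsa_vec = k2.create_fsa_vec([fsa])
#   >>> tot = get_tot_scores(fsa_vec, log_semiring=True, use_float_scores=True)
#   >>> tot.sum().backward()  # gradients accumulate in fsa.scores.grad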
def intersect_dense_pruned(a_fsas: Fsa, b_fsas: DenseFsaVec,
search_beam: float, output_beam: float,
min_active_states: int,
max_active_states: int) -> Fsa:
'''Intersect array of FSAs on CPU/GPU.
Caution:
`a_fsas` MUST be arc sorted.
Args:
a_fsas:
Input FsaVec, i.e., `decoding graphs`, one per sequence. It might just
be a linear sequence of phones, or might be something more complicated.
Must have either `a_fsas.shape[0] == b_fsas.dim0()`, or
`a_fsas.shape[0] == 1` in which case the graph is shared.
b_fsas:
Input FSAs that correspond to neural network output.
search_beam:
Decoding beam, e.g. 20. Smaller is faster, larger is more exact
(less pruning). This is the default value; it may be modified by
`min_active_states` and `max_active_states`.
output_beam:
Beam to prune output, similar to lattice-beam in Kaldi. Relative
to best path of output.
min_active_states:
Minimum number of FSA states that are allowed to be active on any given
frame for any given intersection/composition task. This is advisory,
in that it will try not to have fewer than this number active.
Set it to zero if there is no constraint.
max_active_states:
Maximum number of FSA states that are allowed to be active on any given
frame for any given intersection/composition task. This is advisory,
in that it will try not to exceed that but may not always succeed.
You can use a very large number if no constraint is needed.
Returns:
The result of the intersection.
'''
out_fsa = [0]
# the following return value is discarded since it is already contained
# in `out_fsa[0].scores`
_IntersectDensePrunedFunction.apply(a_fsas, b_fsas, out_fsa, search_beam,
output_beam, min_active_states,
max_active_states, a_fsas.scores,
b_fsas.scores)
return out_fsa[0]
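# A minimal usage sketch, assuming a `DenseFsaVec` built from hypothetical
# `log_probs` (network output) and `supervision_segments`, and an arc-sorted
# `decoding_graph` FsaVec:
#
#   >>> dense = DenseFsaVec(log_probs, supervision_segments)
#   >>> lattice = intersect_dense_pruned(decoding_graph, dense,
#   ...                                  search_beam=20.0, output_beam=8.0,
#   ...                                  min_active_states=30,
#   ...                                  max_active_states=10000)
#   >>> tot = get_tot_scores(lattice, log_semiring=True, use_float_scores=True)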
def intersect_dense(a_fsas: Fsa, b_fsas: DenseFsaVec,
output_beam: float) -> Fsa:
'''Intersect array of FSAs on CPU/GPU.
Caution:
`a_fsas` MUST be arc sorted.
Args:
a_fsas:
Input FsaVec, i.e., `decoding graphs`, one per sequence. It might just
be a linear sequence of phones, or might be something more complicated.
Must have either `a_fsas.shape[0] == b_fsas.dim0()`, or
`a_fsas.shape[0] == 1` in which case the graph is shared.
b_fsas:
Input FSAs that correspond to neural network output.
output_beam:
Beam to prune output, similar to lattice-beam in Kaldi. Relative
to best path of output.
Returns:
The result of the intersection (pruned to `output_beam`; this pruning
is exact: it uses forward and backward scores).
'''
out_fsa = [0]
# the following return value is discarded since it is already contained
# in `out_fsa[0].scores`
_IntersectDenseFunction.apply(a_fsas, b_fsas, out_fsa,
output_beam, a_fsas.scores,
b_fsas.scores)
return out_fsa[0]
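# A minimal usage sketch, reusing the hypothetical `decoding_graph` and
# `dense` from the intersect_dense_pruned example above; here only the
# output beam applies and pruning is exact w.r.t. the best path:
#
#   >>> lattice = intersect_dense(decoding_graph, dense, output_beam=8.0)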
def index_select(src: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
'''Returns a new tensor which indexes the input tensor along dimension 0
using the entries in `index`.
If the entry in `index` is -1, then the corresponding entry in the
returned tensor is 0.
Caution:
`index` must be a 1-D tensor of dtype torch.int32.
Args:
src:
The input tensor. Either 1-D or 2-D with dtype torch.int32 or
torch.float32.
index:
1-D tensor of dtype torch.int32 containing the indexes.
If an entry is -1, the corresponding entry in the returned value
is 0. The elements of `index` should be in the range
`[-1..src.shape[0]-1]`.
Returns:
A tensor with shape (index.numel(), *src.shape[1:]) and dtype the
same as `src`, e.g. if `src.ndim == 1`, ans.shape would be
(index.shape[0],); if `src.ndim == 2`, ans.shape would be
(index.shape[0], src.shape[1]).
Will satisfy `ans[i] == src[index[i]]` if `src.ndim == 1`,
or `ans[i,j] == src[index[i],j]` if `src.ndim == 2`, except for
entries where `index[i] == -1` which will be zero.
'''
ans = _IndexSelectFunction.apply(src, index)
return ans
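# A worked example with hypothetical values; note that the -1 entry yields 0
# and receives no gradient in backward:
#
#   >>> src = torch.tensor([10.0, 20.0, 30.0], requires_grad=True)
#   >>> index = torch.tensor([2, -1, 0], dtype=torch.int32)
#   >>> index_select(src, index)  # -> tensor([30., 0., 10.])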
def union(fsas: Fsa) -> Fsa:
'''Compute the union of a FsaVec.
Caution:
We require that every fsa in `fsas` is non-empty, i.e.,
contains at least two states.
Args:
fsas:
A FsaVec. That is, len(fsas.shape) == 3.
Returns:
A single Fsa that is the union of the input fsas.
'''
out_fsa = [0] # as a placeholder
_UnionFunction.apply(fsas, out_fsa, fsas.scores)
return out_fsa[0]
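# A minimal usage sketch; `fsa_a` and `fsa_b` are hypothetical non-empty Fsa
# objects (assuming `k2.create_fsa_vec` as elsewhere in this library):
#
#   >>> fsa_vec = k2.create_fsa_vec([fsa_a, fsa_b])
#   >>> merged = union(fsa_vec)  # a single Fsa; gradients flow to fsa_vec.scores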
[Dataset record boundary: trailing per-file statistics and quality-signal
columns for the preceding Python source (535 lines, 21,252 chars, max line
length 98), followed by the header of the next record:
protocols/participant_1_0_3.py from Lucioric2000/GelReportModels
(Python, 80,206 bytes, Apache-2.0, hexsha
e6b4f8cf498854b319742891136f9c0a5ba78e38).]
"""
DO NOT EDIT THIS FILE!!
This file is automatically generated by the process_schemas.py program
in the scripts directory. It is not intended to be edited directly. If
you need to update the GEL protocol classes, please run the script
on the appropriate schema version.
"""
from protocols.protocol import ProtocolElement
from protocols.protocol import SearchRequest
from protocols.protocol import SearchResponse
from protocols.protocol import avro_parse
import avro.schema
version = '1.0.3'
class AdoptedStatus(object):
"""
`adoptedin` means adopted into the family; `adoptedout` means the
child belonged to the family and was adopted out
"""
notadopted = "notadopted"
adoptedin = "adoptedin"
adoptedout = "adoptedout"
def __hash__(self):
return str(self).__hash__()
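# Note: generated enum-like classes such as AdoptedStatus simply expose each
# symbol as a string constant equal to its own name (e.g.
# AdoptedStatus.adoptedin == "adoptedin"), so values compare as plain strings.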
class AffectionStatus(object):
"""
Affection Status
"""
UNAFFECTED = "UNAFFECTED"
AFFECTED = "AFFECTED"
UNCERTAIN = "UNCERTAIN"
def __hash__(self):
return str(self).__hash__()
class AgeOfOnset(object):
"""
No documentation
"""
EMBRYONAL_ONSET = "EMBRYONAL_ONSET"
FETAL_ONSET = "FETAL_ONSET"
NEONATAL_ONSET = "NEONATAL_ONSET"
INFANTILE_ONSET = "INFANTILE_ONSET"
CHILDHOOD_ONSET = "CHILDHOOD_ONSET"
JUVENILE_ONSET = "JUVENILE_ONSET"
YOUNG_ADULT_ONSET = "YOUNG_ADULT_ONSET"
LATE_ONSET = "LATE_ONSET"
MIDDLE_AGE_ONSET = "MIDDLE_AGE_ONSET"
def __hash__(self):
return str(self).__hash__()
class AnalysisPanel(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "AnalysisPanel", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "specificDisease", "type": "string"}, {"name": "panelName", "type": "string"},
{"name": "panelVersion", "type": ["null", "string"]}, {"name": "reviewOutcome", "type": "string"},
{"name": "multipleGeneticOrigins", "type": "string"}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"multipleGeneticOrigins",
"panelName",
"panelVersion",
"reviewOutcome",
"specificDisease",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'multipleGeneticOrigins', 'panelName', 'panelVersion',
'reviewOutcome', 'specificDisease'
]
def __init__(self, **kwargs):
self.multipleGeneticOrigins = kwargs.get(
'multipleGeneticOrigins', None)
self.panelName = kwargs.get(
'panelName', None)
self.panelVersion = kwargs.get(
'panelVersion', None)
self.reviewOutcome = kwargs.get(
'reviewOutcome', None)
self.specificDisease = kwargs.get(
'specificDisease', None)
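# A construction sketch with hypothetical values; ProtocolElement subclasses
# are built from keyword arguments (see the generated __init__ above) and
# unset fields default to None:
#
#   >>> panel = AnalysisPanel(specificDisease="Intellectual disability",
#   ...                       panelName="ID", panelVersion="1.2",
#   ...                       reviewOutcome="reviewed",
#   ...                       multipleGeneticOrigins="no")
#   >>> panel.panelVersion
#   '1.2'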
class Ancestries(ProtocolElement):
"""
Ancestries, defined as Ethnic category(ies) and Chi-square test
"""
_schemaSource = """
{"type": "record", "name": "Ancestries", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "mothersEthnicOrigin", "type": ["null", {"type": "enum", "name":
"EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A", "B", "C", "L", "M", "N", "H", "J",
"K", "P", "S", "R", "Z"]}], "doc": ""}, {"name": "mothersOtherRelevantAncestry", "type": ["null",
"string"], "doc": ""}, {"name": "fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc":
""}, {"name": "fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"chiSquare1KGenomesPhase3Pop",
"fathersEthnicOrigin",
"fathersOtherRelevantAncestry",
"mothersEthnicOrigin",
"mothersOtherRelevantAncestry",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'chiSquare1KGenomesPhase3Pop': ChiSquare1KGenomesPhase3Pop,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'chiSquare1KGenomesPhase3Pop': ChiSquare1KGenomesPhase3Pop,
}
return embeddedTypes[fieldName]
__slots__ = [
'chiSquare1KGenomesPhase3Pop', 'fathersEthnicOrigin',
'fathersOtherRelevantAncestry', 'mothersEthnicOrigin',
'mothersOtherRelevantAncestry'
]
def __init__(self, **kwargs):
self.chiSquare1KGenomesPhase3Pop = kwargs.get(
'chiSquare1KGenomesPhase3Pop', None)
self.fathersEthnicOrigin = kwargs.get(
'fathersEthnicOrigin', None)
self.fathersOtherRelevantAncestry = kwargs.get(
'fathersOtherRelevantAncestry', None)
self.mothersEthnicOrigin = kwargs.get(
'mothersEthnicOrigin', None)
self.mothersOtherRelevantAncestry = kwargs.get(
'mothersOtherRelevantAncestry', None)
class CancerParticipant(ProtocolElement):
"""
This defines a Cancer Participant
"""
_schemaSource = """
{"type": "record", "name": "CancerParticipant", "namespace": "org.gel.models.participant.avro",
"doc": "", "fields": [{"name": "yearOfBirth", "type": ["null", "int"]}, {"name": "morphology",
"type": ["null", {"type": "array", "items": "string"}]}, {"name": "readyForAnalysis", "type":
"boolean"}, {"name": "consentStatus", "type": ["null", {"type": "record", "name": "ConsentStatus",
"doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "", "default": false},
{"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"secondaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""}, {"name":
"center", "type": ["null", "string"], "doc": ""}, {"name": "individualId", "type": "string", "doc":
""}, {"name": "primaryDiagnosisDisease", "type": ["null", {"type": "array", "items": "string"}],
"doc": ""}, {"name": "primaryDiagnosisSubDisease", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "symbols":
["FEMALE", "MALE", "UNKNOWN"]}, "doc": ""}, {"name": "additionalInformation", "type": ["null",
{"type": "map", "values": "string"}], "doc": ""}, {"name": "assignedICD10", "type": ["null",
{"type": "array", "items": "string"}], "doc": ""}, {"name": "tumourSamples", "type": {"type":
"array", "items": {"type": "record", "name": "TumourSample", "fields": [{"name": "sampleId", "type":
"string", "doc": ""}, {"name": "labSampleId", "type": "int", "doc": ""}, {"name": "LDPCode", "type":
"string", "doc": ""}, {"name": "tumourId", "type": "string", "doc": ""}, {"name": "programmePhase",
"type": ["null", {"type": "enum", "name": "ProgrammePhase", "symbols": ["CRUK", "OXFORD", "CLL",
"IIP", "MAIN", "EXPT"]}], "doc": ""}, {"name": "diseaseType", "type": ["null", {"type": "enum",
"name": "diseaseType", "symbols": ["ADULT_GLIOMA", "BLADDER", "BREAST",
"CARCINOMA_OF_UNKNOWN_PRIMARY", "CHILDHOOD", "COLORECTAL", "ENDOMETRIAL_CARCINOMA", "HAEMONC",
"HEPATOPANCREATOBILIARY", "LUNG", "MALIGNANT_MELANOMA", "NASOPHARYNGEAL", "ORAL_OROPHARYNGEAL",
"OVARIAN", "PROSTATE", "RENAL", "SARCOMA", "SINONASAL", "TESTICULAR_GERM_CELL_TUMOURS",
"UPPER_GASTROINTESTINAL", "NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE", "CLASSICAL_HODGKINS",
"NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS", "T_CELL_LYMPHOMA"]}], "doc": ""}, {"name":
"diseaseSubType", "type": ["null", "string"], "doc": ""}, {"name": "clinicalSampleDateTime", "type":
["null", "string"], "doc": ""}, {"name": "tumourType", "type": ["null", {"type": "enum", "name":
"TumourType", "symbols": ["PRIMARY", "METASTATIC_RECURRENCE", "RECURRENCE_OF_PRIMARY_TUMOUR",
"METASTASES"]}], "doc": ""}, {"name": "tumourContent", "type": ["null", {"type": "enum", "name":
"TumourContent", "symbols": ["High", "Medium", "Low"]}], "doc": ""}, {"name": "source", "type":
["null", {"type": "enum", "name": "SampleSource", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type":
"enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "tissueSource", "type": ["null", {"type":
"enum", "name": "TissueSource", "symbols": ["BMA_TUMOUR_SORTED_CELLS", "CT_GUIDED_BIOPSY",
"ENDOSCOPIC_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA",
"LAPAROSCOPIC_BIOPSY", "LAPAROSCOPIC_EXCISION", "MRI_GUIDED_BIOPSY", "NON_GUIDED_BIOPSY",
"SURGICAL_RESECTION", "STEREOTACTICALLY_GUIDED_BIOPSY", "USS_GUIDED_BIOPSY",
"NON_STANDARD_BIOPSY"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "morphologyICD", "type": ["null",
"string"], "doc": ""}, {"name": "morphologySnomedCT", "type": ["null", "string"], "doc": ""},
{"name": "morphologySnomedRT", "type": ["null", "string"], "doc": ""}, {"name": "topographyICD",
"type": ["null", "string"], "doc": ""}, {"name": "topographySnomedCT", "type": ["null", "string"],
"doc": ""}, {"name": "topographySnomedRT", "type": ["null", "string"], "doc": ""}]}}}, {"name":
"germlineSamples", "type": {"type": "array", "items": {"type": "record", "name": "GermlineSample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name": "source", "type": ["null",
"SampleSource"], "doc": ""}, {"name": "product", "type": ["null", "Product"], "doc": ""}, {"name":
"preparationMethod", "type": ["null", "PreparationMethod"], "doc": ""}, {"name": "programmePhase",
"type": ["null", "ProgrammePhase"], "doc": ""}, {"name": "clinicalSampleDateTime", "type": ["null",
"string"], "doc": ""}]}}}, {"name": "matchedSamples", "type": {"type": "array", "items": {"type":
"record", "name": "MatchedSamples", "doc": "", "fields": [{"name": "germlineSampleId", "type":
["null", "string"], "doc": ""}, {"name": "tumourSampleId", "type": ["null", "string"], "doc":
""}]}}}, {"name": "versionControl", "type": ["null", {"type": "record", "name": "VersionControl",
"fields": [{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.0.3"}]}], "doc":
""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"additionalInformation",
"assignedICD10",
"center",
"consentStatus",
"germlineSamples",
"individualId",
"matchedSamples",
"morphology",
"primaryDiagnosisDisease",
"primaryDiagnosisSubDisease",
"readyForAnalysis",
"sex",
"tumourSamples",
"versionControl",
"yearOfBirth",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'consentStatus': ConsentStatus,
'germlineSamples': GermlineSample,
'matchedSamples': MatchedSamples,
'tumourSamples': TumourSample,
'versionControl': VersionControl,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'consentStatus': ConsentStatus,
'germlineSamples': GermlineSample,
'matchedSamples': MatchedSamples,
'tumourSamples': TumourSample,
'versionControl': VersionControl,
}
return embeddedTypes[fieldName]
__slots__ = [
'additionalInformation', 'assignedICD10', 'center',
'consentStatus', 'germlineSamples', 'individualId',
'matchedSamples', 'morphology', 'primaryDiagnosisDisease',
'primaryDiagnosisSubDisease', 'readyForAnalysis', 'sex',
'tumourSamples', 'versionControl', 'yearOfBirth'
]
def __init__(self, **kwargs):
self.additionalInformation = kwargs.get(
'additionalInformation', None)
self.assignedICD10 = kwargs.get(
'assignedICD10', None)
self.center = kwargs.get(
'center', None)
self.consentStatus = kwargs.get(
'consentStatus', None)
self.germlineSamples = kwargs.get(
'germlineSamples', None)
self.individualId = kwargs.get(
'individualId', None)
self.matchedSamples = kwargs.get(
'matchedSamples', None)
self.morphology = kwargs.get(
'morphology', None)
self.primaryDiagnosisDisease = kwargs.get(
'primaryDiagnosisDisease', None)
self.primaryDiagnosisSubDisease = kwargs.get(
'primaryDiagnosisSubDisease', None)
self.readyForAnalysis = kwargs.get(
'readyForAnalysis', None)
self.sex = kwargs.get(
'sex', None)
self.tumourSamples = kwargs.get(
'tumourSamples', None)
self.versionControl = kwargs.get(
'versionControl', None)
self.yearOfBirth = kwargs.get(
'yearOfBirth', None)
class ChiSquare1KGenomesPhase3Pop(ProtocolElement):
"""
Chi-square test for goodness of fit of this sample to 1000 Genomes
Phase 3 populations
"""
_schemaSource = """
{"type": "record", "name": "ChiSquare1KGenomesPhase3Pop", "namespace":
"org.gel.models.participant.avro", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"chiSquare",
"kgPopCategory",
"kgSuperPopCategory",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'chiSquare', 'kgPopCategory', 'kgSuperPopCategory'
]
def __init__(self, **kwargs):
self.chiSquare = kwargs.get(
'chiSquare', None)
self.kgPopCategory = kwargs.get(
'kgPopCategory', None)
self.kgSuperPopCategory = kwargs.get(
'kgSuperPopCategory', None)
class ConsentStatus(ProtocolElement):
"""
Consent Status
"""
_schemaSource = """
{"type": "record", "name": "ConsentStatus", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "", "default": false},
{"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"secondaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'carrierStatusConsent', 'primaryFindingConsent',
'programmeConsent', 'secondaryFindingConsent'
]
def __init__(self, **kwargs):
self.carrierStatusConsent = kwargs.get(
'carrierStatusConsent', False)
self.primaryFindingConsent = kwargs.get(
'primaryFindingConsent', False)
self.programmeConsent = kwargs.get(
'programmeConsent', False)
self.secondaryFindingConsent = kwargs.get(
'secondaryFindingConsent', False)
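# Note: unlike most generated records, ConsentStatus fields default to False
# rather than None, matching the `"default": false` entries in _schemaSource,
# so ConsentStatus() is an all-False consent record.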
class DiseasePenetrance(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "DiseasePenetrance", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "specificDisease", "type": "string"}, {"name": "penetrance", "type": {"type":
"enum", "name": "Penetrance", "doc": "", "symbols": ["complete", "incomplete"]}}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"penetrance",
"specificDisease",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'penetrance', 'specificDisease'
]
def __init__(self, **kwargs):
self.penetrance = kwargs.get(
'penetrance', None)
self.specificDisease = kwargs.get(
'specificDisease', None)
class Disorder(ProtocolElement):
"""
This is quite GEL specific. This is the way it is stored in
ModelCatalogue and PanelApp. Currently all specific disease
titles are assigned to a disease subgroup, so really only
specificDisease needs to be completed, but we add the others
for generality
"""
_schemaSource = """
{"type": "record", "name": "Disorder", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""}, {"name":
"diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease", "type":
["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"ageOfOnset",
"diseaseGroup",
"diseaseSubGroup",
"specificDisease",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'ageOfOnset', 'diseaseGroup', 'diseaseSubGroup',
'specificDisease'
]
def __init__(self, **kwargs):
self.ageOfOnset = kwargs.get(
'ageOfOnset', None)
self.diseaseGroup = kwargs.get(
'diseaseGroup', None)
self.diseaseSubGroup = kwargs.get(
'diseaseSubGroup', None)
self.specificDisease = kwargs.get(
'specificDisease', None)
class EthnicCategory(object):
"""
This is the list of ethnicities in ONS16:
* `D`: Mixed: White and Black Caribbean
* `E`: Mixed: White and Black African
* `F`: Mixed: White and Asian
* `G`: Mixed: Any other mixed background
* `A`: White: British
* `B`: White: Irish
* `C`: White: Any other White background
* `L`: Asian or Asian British: Any other Asian background
* `M`: Black or Black British: Caribbean
* `N`: Black or Black British: African
* `H`: Asian or Asian British: Indian
* `J`: Asian or Asian British: Pakistani
* `K`: Asian or Asian British: Bangladeshi
* `P`: Black or Black British: Any other Black background
* `S`: Other Ethnic Groups: Any other ethnic group
* `R`: Other Ethnic Groups: Chinese
* `Z`: Not stated
"""
D = "D"
E = "E"
F = "F"
G = "G"
A = "A"
B = "B"
C = "C"
L = "L"
M = "M"
N = "N"
H = "H"
J = "J"
K = "K"
P = "P"
S = "S"
R = "R"
Z = "Z"
def __hash__(self):
return str(self).__hash__()
class FamilyQCState(object):
"""
FamilyQCState
"""
noState = "noState"
passedMedicalReviewReadyForInterpretation = "passedMedicalReviewReadyForInterpretation"
passedMedicalReviewNotReadyForInterpretation = "passedMedicalReviewNotReadyForInterpretation"
queryToGel = "queryToGel"
queryToGMC = "queryToGMC"
failed = "failed"
def __hash__(self):
return str(self).__hash__()
class GermlineSample(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "GermlineSample", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name": "source", "type": ["null",
{"type": "enum", "name": "SampleSource", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null",
{"type": "enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "programmePhase", "type": ["null",
{"type": "enum", "name": "ProgrammePhase", "symbols": ["CRUK", "OXFORD", "CLL", "IIP", "MAIN",
"EXPT"]}], "doc": ""}, {"name": "clinicalSampleDateTime", "type": ["null", "string"], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"LDPCode",
"clinicalSampleDateTime",
"labSampleId",
"preparationMethod",
"product",
"programmePhase",
"sampleId",
"source",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'LDPCode', 'clinicalSampleDateTime', 'labSampleId',
'preparationMethod', 'product', 'programmePhase', 'sampleId',
'source'
]
def __init__(self, **kwargs):
self.LDPCode = kwargs.get(
'LDPCode', None)
self.clinicalSampleDateTime = kwargs.get(
'clinicalSampleDateTime', None)
self.labSampleId = kwargs.get(
'labSampleId', None)
self.preparationMethod = kwargs.get(
'preparationMethod', None)
self.product = kwargs.get(
'product', None)
self.programmePhase = kwargs.get(
'programmePhase', None)
self.sampleId = kwargs.get(
'sampleId', None)
self.source = kwargs.get(
'source', None)
class HpoTerm(ProtocolElement):
"""
This defines an HPO term and its modifiers (possibly multiple).
If an HPO term's presence is unknown, we don't have an entry on the list
"""
_schemaSource = """
{"type": "record", "name": "HpoTerm", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "term", "type": "string", "doc": ""}, {"name": "termPresence", "type": ["null",
{"type": "enum", "name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc":
""}, {"name": "hpoBuildNumber", "type": ["null", "string"], "doc": ""}, {"name": "modifiers",
"type": ["null", {"type": "record", "name": "HpoTermModifiers", "fields": [{"name": "laterality",
"type": ["null", {"type": "enum", "name": "Laterality", "symbols": ["RIGHT", "UNILATERAL",
"BILATERAL", "LEFT"]}]}, {"name": "progression", "type": ["null", {"type": "enum", "name":
"Progression", "symbols": ["PROGRESSIVE", "NONPROGRESSIVE"]}]}, {"name": "severity", "type":
["null", {"type": "enum", "name": "Severity", "symbols": ["BORDERLINE", "MILD", "MODERATE",
"SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern", "type": ["null", {"type": "enum", "name":
"SpatialPattern", "symbols": ["DISTAL", "GENERALIZED", "LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""},
{"name": "ageOfOnset", "type": ["null", {"type": "enum", "name": "AgeOfOnset", "symbols":
["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET", "INFANTILE_ONSET", "CHILDHOOD_ONSET",
"JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET", "MIDDLE_AGE_ONSET"]}], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"ageOfOnset",
"hpoBuildNumber",
"modifiers",
"term",
"termPresence",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'modifiers': HpoTermModifiers,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'modifiers': HpoTermModifiers,
}
return embeddedTypes[fieldName]
__slots__ = [
'ageOfOnset', 'hpoBuildNumber', 'modifiers', 'term',
'termPresence'
]
def __init__(self, **kwargs):
self.ageOfOnset = kwargs.get(
'ageOfOnset', None)
self.hpoBuildNumber = kwargs.get(
'hpoBuildNumber', None)
self.modifiers = kwargs.get(
'modifiers', None)
self.term = kwargs.get(
'term', None)
self.termPresence = kwargs.get(
'termPresence', None)
class HpoTermModifiers(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "HpoTermModifiers", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "laterality", "type": ["null", {"type": "enum", "name": "Laterality", "symbols":
["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name": "progression", "type": ["null", {"type":
"enum", "name": "Progression", "symbols": ["PROGRESSIVE", "NONPROGRESSIVE"]}]}, {"name": "severity",
"type": ["null", {"type": "enum", "name": "Severity", "symbols": ["BORDERLINE", "MILD", "MODERATE",
"SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern", "type": ["null", {"type": "enum", "name":
"SpatialPattern", "symbols": ["DISTAL", "GENERALIZED", "LOCALIZED", "PROXIMAL"]}]}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"laterality",
"progression",
"severity",
"spatialPattern",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'laterality', 'progression', 'severity', 'spatialPattern'
]
def __init__(self, **kwargs):
self.laterality = kwargs.get(
'laterality', None)
self.progression = kwargs.get(
'progression', None)
self.severity = kwargs.get(
'severity', None)
self.spatialPattern = kwargs.get(
'spatialPattern', None)
class InbreedingCoefficient(ProtocolElement):
"""
Inbreeding coefficient
"""
_schemaSource = """
{"type": "record", "name": "InbreedingCoefficient", "namespace": "org.gel.models.participant.avro",
"doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "program", "type":
"string", "doc": ""}, {"name": "version", "type": "string", "doc": ""}, {"name": "estimationMethod",
"type": "string", "doc": ""}, {"name": "coefficient", "type": "double", "doc": ""}, {"name":
"standardError", "type": ["null", "double"], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"coefficient",
"estimationMethod",
"program",
"sampleId",
"standardError",
"version",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'coefficient', 'estimationMethod', 'program', 'sampleId',
'standardError', 'version'
]
def __init__(self, **kwargs):
self.coefficient = kwargs.get(
'coefficient', None)
self.estimationMethod = kwargs.get(
'estimationMethod', None)
self.program = kwargs.get(
'program', None)
self.sampleId = kwargs.get(
'sampleId', None)
self.standardError = kwargs.get(
'standardError', None)
self.version = kwargs.get(
'version', None)
class KgPopCategory(object):
"""
1K Population
"""
ACB = "ACB"
ASW = "ASW"
BEB = "BEB"
CDX = "CDX"
CEU = "CEU"
CHB = "CHB"
CHS = "CHS"
CLM = "CLM"
ESN = "ESN"
FIN = "FIN"
GBR = "GBR"
GIH = "GIH"
GWD = "GWD"
IBS = "IBS"
ITU = "ITU"
JPT = "JPT"
KHV = "KHV"
LWK = "LWK"
MSL = "MSL"
MXL = "MXL"
PEL = "PEL"
PJL = "PJL"
PUR = "PUR"
STU = "STU"
TSI = "TSI"
YRI = "YRI"
def __hash__(self):
return str(self).__hash__()
class KgSuperPopCategory(object):
"""
1K Super Population
"""
AFR = "AFR"
AMR = "AMR"
EAS = "EAS"
EUR = "EUR"
SAS = "SAS"
def __hash__(self):
return str(self).__hash__()
class Laterality(object):
"""
No documentation
"""
RIGHT = "RIGHT"
UNILATERAL = "UNILATERAL"
BILATERAL = "BILATERAL"
LEFT = "LEFT"
def __hash__(self):
return str(self).__hash__()
class LifeStatus(object):
"""
Life Status
"""
ALIVE = "ALIVE"
ABORTED = "ABORTED"
DECEASED = "DECEASED"
UNBORN = "UNBORN"
STILLBORN = "STILLBORN"
MISCARRIAGE = "MISCARRIAGE"
def __hash__(self):
return str(self).__hash__()
class MatchedSamples(ProtocolElement):
"""
This defines a pair of germline and tumour samples; this pair
should/must be analyzed together
"""
_schemaSource = """
{"type": "record", "name": "MatchedSamples", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "germlineSampleId", "type": ["null", "string"], "doc": ""}, {"name":
"tumourSampleId", "type": ["null", "string"], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"germlineSampleId",
"tumourSampleId",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'germlineSampleId', 'tumourSampleId'
]
def __init__(self, **kwargs):
self.germlineSampleId = kwargs.get(
'germlineSampleId', None)
self.tumourSampleId = kwargs.get(
'tumourSampleId', None)
class Method(object):
"""
No documentation
"""
RESECTION = "RESECTION"
BIOPSY = "BIOPSY"
BLOOD = "BLOOD"
def __hash__(self):
return str(self).__hash__()
class ParticipantQCState(object):
"""
QCState Status
"""
noState = "noState"
passedMedicalReviewReadyForInterpretation = "passedMedicalReviewReadyForInterpretation"
passedMedicalReviewNotReadyForInterpretation = "passedMedicalReviewNotReadyForInterpretation"
queryToGel = "queryToGel"
queryToGMC = "queryToGMC"
failed = "failed"
def __hash__(self):
return str(self).__hash__()
class Pedigree(ProtocolElement):
"""
This is the concept of a family with associated phenotypes as
present in the record RDParticipant
"""
_schemaSource = """
{"type": "record", "name": "Pedigree", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "versionControl", "type": ["null", {"type": "record", "name": "VersionControl",
"fields": [{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.0.3"}]}], "doc":
""}, {"name": "LDPCode", "type": ["null", "string"]}, {"name": "familyId", "type": "string", "doc":
""}, {"name": "members", "type": {"type": "array", "items": {"type": "record", "name":
"PedigreeMember", "doc": "", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""},
{"name": "isProband", "type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type":
["null", "string"], "doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum",
"name": "ParticipantQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}, {"name": "gelSuperFamilyId", "type": ["null",
"string"], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "doc": "", "symbols":
["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name": "personKaryotypicSex", "type": ["null",
{"type": "enum", "name": "PersonKaryotipicSex", "doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO",
"XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY", "OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type":
["null", "int"], "doc": ""}, {"name": "fatherId", "type": ["null", "int"], "doc": ""}, {"name":
"motherId", "type": ["null", "int"], "doc": ""}, {"name": "superFatherId", "type": ["null", "int"],
"doc": ""}, {"name": "superMotherId", "type": ["null", "int"], "doc": ""}, {"name": "twinGroup",
"type": ["null", "int"], "doc": ""}, {"name": "monozygotic", "type": ["null", {"type": "enum",
"name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name":
"adoptedStatus", "type": ["null", {"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols":
["notadopted", "adoptedin", "adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null",
{"type": "enum", "name": "LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED",
"UNBORN", "STILLBORN", "MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type":
["null", "TernaryOption"], "doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum",
"name": "AffectionStatus", "doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc":
""}, {"name": "disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"Disorder", "doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""},
{"name": "diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease",
"type": ["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc":
""}]}}], "doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""},
{"name": "termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber",
"type": ["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record",
"name": "HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum",
"name": "Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name":
"progression", "type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "symbols":
["BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "product", "type": ["null",
{"type": "enum", "name": "Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name":
"preparationMethod", "type": ["null", {"type": "enum", "name": "PreparationMethod", "symbols":
["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}]}}], "doc": ""},
{"name": "inbreedingCoefficient", "type": ["null", {"type": "record", "name":
"InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""},
{"name": "program", "type": "string", "doc": ""}, {"name": "version", "type": "string", "doc": ""},
{"name": "estimationMethod", "type": "string", "doc": ""}, {"name": "coefficient", "type": "double",
"doc": ""}, {"name": "standardError", "type": ["null", "double"], "doc": ""}]}], "doc": ""},
{"name": "additionalInformation", "type": ["null", {"type": "map", "values": "string"}], "doc":
""}]}}}, {"name": "analysisPanels", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "AnalysisPanel", "fields": [{"name": "specificDisease", "type": "string"}, {"name":
"panelName", "type": "string"}, {"name": "panelVersion", "type": ["null", "string"]}, {"name":
"reviewOutcome", "type": "string"}, {"name": "multipleGeneticOrigins", "type": "string"}]}}]},
{"name": "diseasePenetrances", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "DiseasePenetrance", "fields": [{"name": "specificDisease", "type": "string"}, {"name":
"penetrance", "type": {"type": "enum", "name": "Penetrance", "doc": "", "symbols": ["complete",
"incomplete"]}}]}}]}, {"name": "readyForAnalysis", "type": "boolean"}, {"name": "familyQCState",
"type": ["null", {"type": "enum", "name": "FamilyQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}]}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"LDPCode",
"analysisPanels",
"diseasePenetrances",
"familyId",
"familyQCState",
"members",
"readyForAnalysis",
"versionControl",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'analysisPanels': AnalysisPanel,
'diseasePenetrances': DiseasePenetrance,
'members': PedigreeMember,
'versionControl': VersionControl,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'analysisPanels': AnalysisPanel,
'diseasePenetrances': DiseasePenetrance,
'members': PedigreeMember,
'versionControl': VersionControl,
}
return embeddedTypes[fieldName]
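# A lookup sketch showing how the embedded-type tables resolve nested record
# classes (the values follow directly from the dict above):
#
#   >>> Pedigree.isEmbeddedType('members')
#   True
#   >>> Pedigree.getEmbeddedType('members') is PedigreeMember
#   True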
__slots__ = [
'LDPCode', 'analysisPanels', 'diseasePenetrances', 'familyId',
'familyQCState', 'members', 'readyForAnalysis',
'versionControl'
]
def __init__(self, **kwargs):
self.LDPCode = kwargs.get(
'LDPCode', None)
self.analysisPanels = kwargs.get(
'analysisPanels', None)
self.diseasePenetrances = kwargs.get(
'diseasePenetrances', None)
self.familyId = kwargs.get(
'familyId', None)
self.familyQCState = kwargs.get(
'familyQCState', None)
self.members = kwargs.get(
'members', None)
self.readyForAnalysis = kwargs.get(
'readyForAnalysis', None)
self.versionControl = kwargs.get(
'versionControl', None)
class PedigreeMember(ProtocolElement):
"""
This defines an RD Participant (demographics and pedigree
information)
"""
_schemaSource = """
{"type": "record", "name": "PedigreeMember", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""}, {"name": "isProband",
"type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type": ["null", "string"],
"doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum", "name":
"ParticipantQCState", "doc": "", "symbols": ["noState", "passedMedicalReviewReadyForInterpretation",
"passedMedicalReviewNotReadyForInterpretation", "queryToGel", "queryToGMC", "failed"]}], "doc": ""},
{"name": "gelSuperFamilyId", "type": ["null", "string"], "doc": ""}, {"name": "sex", "type":
{"type": "enum", "name": "Sex", "doc": "", "symbols": ["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""},
{"name": "personKaryotypicSex", "type": ["null", {"type": "enum", "name": "PersonKaryotipicSex",
"doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO", "XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY",
"OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type": ["null", "int"], "doc": ""}, {"name":
"fatherId", "type": ["null", "int"], "doc": ""}, {"name": "motherId", "type": ["null", "int"],
"doc": ""}, {"name": "superFatherId", "type": ["null", "int"], "doc": ""}, {"name": "superMotherId",
"type": ["null", "int"], "doc": ""}, {"name": "twinGroup", "type": ["null", "int"], "doc": ""},
{"name": "monozygotic", "type": ["null", {"type": "enum", "name": "TernaryOption", "doc": "",
"symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name": "adoptedStatus", "type": ["null",
{"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols": ["notadopted", "adoptedin",
"adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null", {"type": "enum", "name":
"LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED", "UNBORN", "STILLBORN",
"MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type": ["null", "TernaryOption"],
"doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum", "name": "AffectionStatus",
"doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc": ""}, {"name":
"disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Disorder",
"doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""}, {"name":
"diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease", "type":
["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc": ""}]}}],
"doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""}, {"name":
"termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber", "type":
["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record", "name":
"HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum", "name":
"Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name": "progression",
"type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "symbols":
["BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "product", "type": ["null",
{"type": "enum", "name": "Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name":
"preparationMethod", "type": ["null", {"type": "enum", "name": "PreparationMethod", "symbols":
["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}]}}], "doc": ""},
{"name": "inbreedingCoefficient", "type": ["null", {"type": "record", "name":
"InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""},
{"name": "program", "type": "string", "doc": ""}, {"name": "version", "type": "string", "doc": ""},
{"name": "estimationMethod", "type": "string", "doc": ""}, {"name": "coefficient", "type": "double",
"doc": ""}, {"name": "standardError", "type": ["null", "double"], "doc": ""}]}], "doc": ""},
{"name": "additionalInformation", "type": ["null", {"type": "map", "values": "string"}], "doc":
""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"additionalInformation",
"adoptedStatus",
"affectionStatus",
"ancestries",
"consanguineousParents",
"consentStatus",
"disorderList",
"fatherId",
"gelSuperFamilyId",
"hpoTermList",
"inbreedingCoefficient",
"isProband",
"lifeStatus",
"monozygotic",
"motherId",
"participantId",
"participantQCState",
"pedigreeId",
"personKaryotypicSex",
"samples",
"sex",
"superFatherId",
"superMotherId",
"twinGroup",
"yearOfBirth",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'ancestries': Ancestries,
'consentStatus': ConsentStatus,
'disorderList': Disorder,
'hpoTermList': HpoTerm,
'inbreedingCoefficient': InbreedingCoefficient,
'samples': Sample,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'ancestries': Ancestries,
'consentStatus': ConsentStatus,
'disorderList': Disorder,
'hpoTermList': HpoTerm,
'inbreedingCoefficient': InbreedingCoefficient,
'samples': Sample,
}
return embeddedTypes[fieldName]
__slots__ = [
'additionalInformation', 'adoptedStatus', 'affectionStatus',
'ancestries', 'consanguineousParents', 'consentStatus',
'disorderList', 'fatherId', 'gelSuperFamilyId', 'hpoTermList',
'inbreedingCoefficient', 'isProband', 'lifeStatus',
'monozygotic', 'motherId', 'participantId',
'participantQCState', 'pedigreeId', 'personKaryotypicSex',
'samples', 'sex', 'superFatherId', 'superMotherId',
'twinGroup', 'yearOfBirth'
]
def __init__(self, **kwargs):
self.additionalInformation = kwargs.get(
'additionalInformation', None)
self.adoptedStatus = kwargs.get(
'adoptedStatus', None)
self.affectionStatus = kwargs.get(
'affectionStatus', None)
self.ancestries = kwargs.get(
'ancestries', None)
self.consanguineousParents = kwargs.get(
'consanguineousParents', None)
self.consentStatus = kwargs.get(
'consentStatus', None)
self.disorderList = kwargs.get(
'disorderList', None)
self.fatherId = kwargs.get(
'fatherId', None)
self.gelSuperFamilyId = kwargs.get(
'gelSuperFamilyId', None)
self.hpoTermList = kwargs.get(
'hpoTermList', None)
self.inbreedingCoefficient = kwargs.get(
'inbreedingCoefficient', None)
self.isProband = kwargs.get(
'isProband', None)
self.lifeStatus = kwargs.get(
'lifeStatus', None)
self.monozygotic = kwargs.get(
'monozygotic', None)
self.motherId = kwargs.get(
'motherId', None)
self.participantId = kwargs.get(
'participantId', None)
self.participantQCState = kwargs.get(
'participantQCState', None)
self.pedigreeId = kwargs.get(
'pedigreeId', None)
self.personKaryotypicSex = kwargs.get(
'personKaryotypicSex', None)
self.samples = kwargs.get(
'samples', None)
self.sex = kwargs.get(
'sex', None)
self.superFatherId = kwargs.get(
'superFatherId', None)
self.superMotherId = kwargs.get(
'superMotherId', None)
self.twinGroup = kwargs.get(
'twinGroup', None)
self.yearOfBirth = kwargs.get(
'yearOfBirth', None)
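# A minimal usage sketch for the ProtocolElement pattern ending above (from
# its embedded schema the class appears to be the PedigreeMember record;
# that name, and every value below, is an assumption for illustration).
# Each field may be passed as a keyword argument and defaults to None, and
# isEmbeddedType/getEmbeddedType recover the Python class behind a
# record-typed field when decoding nested dictionaries:
#
#     member = PedigreeMember(
#         pedigreeId=1,
#         isProband=True,
#         sex="MALE",
#         samples=[Sample(sampleId="S1", labSampleId=42)],
#     )
#     if member.isEmbeddedType("samples"):
#         sample_cls = member.getEmbeddedType("samples")  # -> Sample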
class Penetrance(object):
"""
Penetrance assumed in the analysis
"""
complete = "complete"
incomplete = "incomplete"
def __hash__(self):
return str(self).__hash__()
class PersonKaryotipicSex(object):
"""
    Karyotypic sex
"""
UNKNOWN = "UNKNOWN"
XX = "XX"
XY = "XY"
XO = "XO"
XXY = "XXY"
XXX = "XXX"
XXYY = "XXYY"
XXXY = "XXXY"
XXXX = "XXXX"
XYY = "XYY"
OTHER = "OTHER"
def __hash__(self):
return str(self).__hash__()
class PreparationMethod(object):
"""
No documentation
"""
EDTA = "EDTA"
ORAGENE = "ORAGENE"
FF = "FF"
FFPE = "FFPE"
CD128_SORTED_CELLS = "CD128_SORTED_CELLS"
ASPIRATE = "ASPIRATE"
def __hash__(self):
return str(self).__hash__()
class Product(object):
"""
No documentation
"""
DNA = "DNA"
RNA = "RNA"
def __hash__(self):
return str(self).__hash__()
class ProgrammePhase(object):
"""
No documentation
"""
CRUK = "CRUK"
OXFORD = "OXFORD"
CLL = "CLL"
IIP = "IIP"
MAIN = "MAIN"
EXPT = "EXPT"
def __hash__(self):
return str(self).__hash__()
class Progression(object):
"""
No documentation
"""
PROGRESSIVE = "PROGRESSIVE"
NONPROGRESSIVE = "NONPROGRESSIVE"
def __hash__(self):
return str(self).__hash__()
class RDFamilyChange(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "RDFamilyChange", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "FamilyId", "type": "string", "doc": ""}, {"name": "code", "type": {"type":
"enum", "name": "RDFamilyChangeCode", "doc": "", "symbols": ["FamilyAdded", "FamilyDeleted",
"ProbandChanged", "ParticipantAdded", "ParticipantRemoved", "ConsentStatusChanged",
"AffectionStatusChanged", "PanelAssignmentChanged", "SexChanged", "SampleChanged"]}, "doc": ""},
{"name": "Family", "type": {"type": "record", "name": "Pedigree", "doc": "", "fields": [{"name":
"versionControl", "type": ["null", {"type": "record", "name": "VersionControl", "fields": [{"name":
"GitVersionControl", "type": "string", "doc": "", "default": "1.0.3"}]}], "doc": ""}, {"name":
"LDPCode", "type": ["null", "string"]}, {"name": "familyId", "type": "string", "doc": ""}, {"name":
"members", "type": {"type": "array", "items": {"type": "record", "name": "PedigreeMember", "doc":
"", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""}, {"name": "isProband",
"type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type": ["null", "string"],
"doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum", "name":
"ParticipantQCState", "doc": "", "symbols": ["noState", "passedMedicalReviewReadyForInterpretation",
"passedMedicalReviewNotReadyForInterpretation", "queryToGel", "queryToGMC", "failed"]}], "doc": ""},
{"name": "gelSuperFamilyId", "type": ["null", "string"], "doc": ""}, {"name": "sex", "type":
{"type": "enum", "name": "Sex", "doc": "", "symbols": ["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""},
{"name": "personKaryotypicSex", "type": ["null", {"type": "enum", "name": "PersonKaryotipicSex",
"doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO", "XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY",
"OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type": ["null", "int"], "doc": ""}, {"name":
"fatherId", "type": ["null", "int"], "doc": ""}, {"name": "motherId", "type": ["null", "int"],
"doc": ""}, {"name": "superFatherId", "type": ["null", "int"], "doc": ""}, {"name": "superMotherId",
"type": ["null", "int"], "doc": ""}, {"name": "twinGroup", "type": ["null", "int"], "doc": ""},
{"name": "monozygotic", "type": ["null", {"type": "enum", "name": "TernaryOption", "doc": "",
"symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name": "adoptedStatus", "type": ["null",
{"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols": ["notadopted", "adoptedin",
"adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null", {"type": "enum", "name":
"LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED", "UNBORN", "STILLBORN",
"MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type": ["null", "TernaryOption"],
"doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum", "name": "AffectionStatus",
"doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc": ""}, {"name":
"disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Disorder",
"doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""}, {"name":
"diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease", "type":
["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc": ""}]}}],
"doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""}, {"name":
"termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber", "type":
["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record", "name":
"HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum", "name":
"Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name": "progression",
"type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "symbols":
["BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "product", "type": ["null",
{"type": "enum", "name": "Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name":
"preparationMethod", "type": ["null", {"type": "enum", "name": "PreparationMethod", "symbols":
["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}]}}], "doc": ""},
{"name": "inbreedingCoefficient", "type": ["null", {"type": "record", "name":
"InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""},
{"name": "program", "type": "string", "doc": ""}, {"name": "version", "type": "string", "doc": ""},
{"name": "estimationMethod", "type": "string", "doc": ""}, {"name": "coefficient", "type": "double",
"doc": ""}, {"name": "standardError", "type": ["null", "double"], "doc": ""}]}], "doc": ""},
{"name": "additionalInformation", "type": ["null", {"type": "map", "values": "string"}], "doc":
""}]}}}, {"name": "analysisPanels", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "AnalysisPanel", "fields": [{"name": "specificDisease", "type": "string"}, {"name":
"panelName", "type": "string"}, {"name": "panelVersion", "type": ["null", "string"]}, {"name":
"reviewOutcome", "type": "string"}, {"name": "multipleGeneticOrigins", "type": "string"}]}}]},
{"name": "diseasePenetrances", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "DiseasePenetrance", "fields": [{"name": "specificDisease", "type": "string"}, {"name":
"penetrance", "type": {"type": "enum", "name": "Penetrance", "doc": "", "symbols": ["complete",
"incomplete"]}}]}}]}, {"name": "readyForAnalysis", "type": "boolean"}, {"name": "familyQCState",
"type": ["null", {"type": "enum", "name": "FamilyQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}]}]}, "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"Family",
"FamilyId",
"code",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'Family': Pedigree,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'Family': Pedigree,
}
return embeddedTypes[fieldName]
__slots__ = [
'Family', 'FamilyId', 'code'
]
def __init__(self, **kwargs):
self.Family = kwargs.get(
'Family', Pedigree())
self.FamilyId = kwargs.get(
'FamilyId', None)
self.code = kwargs.get(
'code', None)
class RDFamilyChangeCode(object):
"""
    This code defines the change type: * `FamilyAdded`: This is a
    new family. * `FamilyDeleted`: This family should be removed.
    * `ProbandChanged`: The proband participant is now a different
    member of the family. * `ParticipantAdded`: A new participant
    has been sequenced and added to the family. *
    `ParticipantRemoved`: A participant has been removed. *
    `ConsentStatusChanged`: One or more participants in this family
    have a different consent status. * `AffectionStatusChanged`:
    HPO terms or disorders changed in one or more participants in this
    family. * `PanelAssignmentChanged`: Gene panels have changed in
    this family. * `SexChanged`: Sex has changed for one or more
    participants in this family. * `SampleChanged`: The sample(s)
    associated with one or more participants in this family have changed.
"""
FamilyAdded = "FamilyAdded"
FamilyDeleted = "FamilyDeleted"
ProbandChanged = "ProbandChanged"
ParticipantAdded = "ParticipantAdded"
ParticipantRemoved = "ParticipantRemoved"
ConsentStatusChanged = "ConsentStatusChanged"
AffectionStatusChanged = "AffectionStatusChanged"
PanelAssignmentChanged = "PanelAssignmentChanged"
SexChanged = "SexChanged"
SampleChanged = "SampleChanged"
def __hash__(self):
return str(self).__hash__()
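# Illustrative only (not part of the generated protocol): consumers of
# RDFamilyChange messages typically dispatch on `code`, whose values are the
# plain strings defined on RDFamilyChangeCode above. The handler callables
# here are hypothetical and passed in to keep the sketch self-contained.
def _apply_family_change_example(change, register_family, remove_family,
                                 update_family):
    # Dispatch on the change code carried by an RDFamilyChange message.
    if change.code == RDFamilyChangeCode.FamilyAdded:
        register_family(change.Family)
    elif change.code == RDFamilyChangeCode.FamilyDeleted:
        remove_family(change.FamilyId)
    else:
        update_family(change.FamilyId, change.Family)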
class Sample(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "Sample", "namespace": "org.gel.models.participant.avro", "fields":
[{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int", "doc":
""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "symbols":
["BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "product", "type": ["null",
{"type": "enum", "name": "Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name":
"preparationMethod", "type": ["null", {"type": "enum", "name": "PreparationMethod", "symbols":
["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"labSampleId",
"preparationMethod",
"product",
"sampleId",
"source",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'labSampleId', 'preparationMethod', 'product', 'sampleId',
'source'
]
def __init__(self, **kwargs):
self.labSampleId = kwargs.get(
'labSampleId', None)
self.preparationMethod = kwargs.get(
'preparationMethod', None)
self.product = kwargs.get(
'product', None)
self.sampleId = kwargs.get(
'sampleId', None)
self.source = kwargs.get(
'source', None)
class SampleSource(object):
"""
No documentation
"""
TUMOUR = "TUMOUR"
BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS = "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS"
BONE_MARROW_ASPIRATE_TUMOUR_CELLS = "BONE_MARROW_ASPIRATE_TUMOUR_CELLS"
BLOOD = "BLOOD"
SALIVA = "SALIVA"
FIBROBLAST = "FIBROBLAST"
TISSUE = "TISSUE"
def __hash__(self):
return str(self).__hash__()
class SensitiveInformation(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "SensitiveInformation", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "versionControl", "type": {"type": "record", "name": "VersionControl", "fields":
[{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.0.3"}]}, "doc": ""},
{"name": "gelID", "type": "string"}, {"name": "externalIds", "type": ["null", {"type": "array",
"items": "string"}]}, {"name": "genomicMedicineCenter", "type": ["null", "string"]}, {"name":
"fullNameOfResponsibleConsultant", "type": ["null", "string"]}, {"name": "contactNumber", "type":
["null", "string"]}, {"name": "hospitalOfResponsibleConsultant", "type": ["null", "string"]},
{"name": "centerSampleId", "type": ["null", "string"]}, {"name": "originatingCenter", "type":
["null", "string"]}, {"name": "centerPatientId", "type": ["null", "string"]}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"centerPatientId",
"centerSampleId",
"contactNumber",
"externalIds",
"fullNameOfResponsibleConsultant",
"gelID",
"genomicMedicineCenter",
"hospitalOfResponsibleConsultant",
"originatingCenter",
"versionControl",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'versionControl': VersionControl,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'versionControl': VersionControl,
}
return embeddedTypes[fieldName]
__slots__ = [
'centerPatientId', 'centerSampleId', 'contactNumber',
'externalIds', 'fullNameOfResponsibleConsultant', 'gelID',
'genomicMedicineCenter', 'hospitalOfResponsibleConsultant',
'originatingCenter', 'versionControl'
]
def __init__(self, **kwargs):
self.centerPatientId = kwargs.get(
'centerPatientId', None)
self.centerSampleId = kwargs.get(
'centerSampleId', None)
self.contactNumber = kwargs.get(
'contactNumber', None)
self.externalIds = kwargs.get(
'externalIds', None)
self.fullNameOfResponsibleConsultant = kwargs.get(
'fullNameOfResponsibleConsultant', None)
self.gelID = kwargs.get(
'gelID', None)
self.genomicMedicineCenter = kwargs.get(
'genomicMedicineCenter', None)
self.hospitalOfResponsibleConsultant = kwargs.get(
'hospitalOfResponsibleConsultant', None)
self.originatingCenter = kwargs.get(
'originatingCenter', None)
self.versionControl = kwargs.get(
'versionControl', VersionControl())
class Severity(object):
"""
No documentation
"""
BORDERLINE = "BORDERLINE"
MILD = "MILD"
MODERATE = "MODERATE"
SEVERE = "SEVERE"
PROFOUND = "PROFOUND"
def __hash__(self):
return str(self).__hash__()
class Sex(object):
"""
No documentation
"""
FEMALE = "FEMALE"
MALE = "MALE"
UNKNOWN = "UNKNOWN"
def __hash__(self):
return str(self).__hash__()
class SpatialPattern(object):
"""
No documentation
"""
DISTAL = "DISTAL"
GENERALIZED = "GENERALIZED"
LOCALIZED = "LOCALIZED"
PROXIMAL = "PROXIMAL"
def __hash__(self):
return str(self).__hash__()
class TernaryOption(object):
"""
This defines a yes/no/unknown case
"""
yes = "yes"
no = "no"
unknown = "unknown"
def __hash__(self):
return str(self).__hash__()
class TissueSource(object):
"""
No documentation
"""
BMA_TUMOUR_SORTED_CELLS = "BMA_TUMOUR_SORTED_CELLS"
CT_GUIDED_BIOPSY = "CT_GUIDED_BIOPSY"
ENDOSCOPIC_BIOPSY = "ENDOSCOPIC_BIOPSY"
ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY = "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY"
ENDOSCOPIC_ULTRASOUND_GUIDED_FNA = "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA"
LAPAROSCOPIC_BIOPSY = "LAPAROSCOPIC_BIOPSY"
LAPAROSCOPIC_EXCISION = "LAPAROSCOPIC_EXCISION"
MRI_GUIDED_BIOPSY = "MRI_GUIDED_BIOPSY"
NON_GUIDED_BIOPSY = "NON_GUIDED_BIOPSY"
SURGICAL_RESECTION = "SURGICAL_RESECTION"
STEREOTACTICALLY_GUIDED_BIOPSY = "STEREOTACTICALLY_GUIDED_BIOPSY"
USS_GUIDED_BIOPSY = "USS_GUIDED_BIOPSY"
NON_STANDARD_BIOPSY = "NON_STANDARD_BIOPSY"
def __hash__(self):
return str(self).__hash__()
class TumourContent(object):
"""
No documentation
"""
High = "High"
Medium = "Medium"
Low = "Low"
def __hash__(self):
return str(self).__hash__()
class TumourSample(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "TumourSample", "namespace": "org.gel.models.participant.avro", "fields":
[{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int", "doc":
""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name": "tumourId", "type": "string", "doc":
""}, {"name": "programmePhase", "type": ["null", {"type": "enum", "name": "ProgrammePhase",
"symbols": ["CRUK", "OXFORD", "CLL", "IIP", "MAIN", "EXPT"]}], "doc": ""}, {"name": "diseaseType",
"type": ["null", {"type": "enum", "name": "diseaseType", "symbols": ["ADULT_GLIOMA", "BLADDER",
"BREAST", "CARCINOMA_OF_UNKNOWN_PRIMARY", "CHILDHOOD", "COLORECTAL", "ENDOMETRIAL_CARCINOMA",
"HAEMONC", "HEPATOPANCREATOBILIARY", "LUNG", "MALIGNANT_MELANOMA", "NASOPHARYNGEAL",
"ORAL_OROPHARYNGEAL", "OVARIAN", "PROSTATE", "RENAL", "SARCOMA", "SINONASAL",
"TESTICULAR_GERM_CELL_TUMOURS", "UPPER_GASTROINTESTINAL",
"NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE", "CLASSICAL_HODGKINS",
"NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS", "T_CELL_LYMPHOMA"]}], "doc": ""}, {"name":
"diseaseSubType", "type": ["null", "string"], "doc": ""}, {"name": "clinicalSampleDateTime", "type":
["null", "string"], "doc": ""}, {"name": "tumourType", "type": ["null", {"type": "enum", "name":
"TumourType", "symbols": ["PRIMARY", "METASTATIC_RECURRENCE", "RECURRENCE_OF_PRIMARY_TUMOUR",
"METASTASES"]}], "doc": ""}, {"name": "tumourContent", "type": ["null", {"type": "enum", "name":
"TumourContent", "symbols": ["High", "Medium", "Low"]}], "doc": ""}, {"name": "source", "type":
["null", {"type": "enum", "name": "SampleSource", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type":
"enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "tissueSource", "type": ["null", {"type":
"enum", "name": "TissueSource", "symbols": ["BMA_TUMOUR_SORTED_CELLS", "CT_GUIDED_BIOPSY",
"ENDOSCOPIC_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA",
"LAPAROSCOPIC_BIOPSY", "LAPAROSCOPIC_EXCISION", "MRI_GUIDED_BIOPSY", "NON_GUIDED_BIOPSY",
"SURGICAL_RESECTION", "STEREOTACTICALLY_GUIDED_BIOPSY", "USS_GUIDED_BIOPSY",
"NON_STANDARD_BIOPSY"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "morphologyICD", "type": ["null",
"string"], "doc": ""}, {"name": "morphologySnomedCT", "type": ["null", "string"], "doc": ""},
{"name": "morphologySnomedRT", "type": ["null", "string"], "doc": ""}, {"name": "topographyICD",
"type": ["null", "string"], "doc": ""}, {"name": "topographySnomedCT", "type": ["null", "string"],
"doc": ""}, {"name": "topographySnomedRT", "type": ["null", "string"], "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"LDPCode",
"clinicalSampleDateTime",
"diseaseSubType",
"diseaseType",
"labSampleId",
"morphologyICD",
"morphologySnomedCT",
"morphologySnomedRT",
"preparationMethod",
"product",
"programmePhase",
"sampleId",
"source",
"tissueSource",
"topographyICD",
"topographySnomedCT",
"topographySnomedRT",
"tumourContent",
"tumourId",
"tumourType",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'LDPCode', 'clinicalSampleDateTime', 'diseaseSubType',
'diseaseType', 'labSampleId', 'morphologyICD',
'morphologySnomedCT', 'morphologySnomedRT',
'preparationMethod', 'product', 'programmePhase', 'sampleId',
'source', 'tissueSource', 'topographyICD',
'topographySnomedCT', 'topographySnomedRT', 'tumourContent',
'tumourId', 'tumourType'
]
def __init__(self, **kwargs):
self.LDPCode = kwargs.get(
'LDPCode', None)
self.clinicalSampleDateTime = kwargs.get(
'clinicalSampleDateTime', None)
self.diseaseSubType = kwargs.get(
'diseaseSubType', None)
self.diseaseType = kwargs.get(
'diseaseType', None)
self.labSampleId = kwargs.get(
'labSampleId', None)
self.morphologyICD = kwargs.get(
'morphologyICD', None)
self.morphologySnomedCT = kwargs.get(
'morphologySnomedCT', None)
self.morphologySnomedRT = kwargs.get(
'morphologySnomedRT', None)
self.preparationMethod = kwargs.get(
'preparationMethod', None)
self.product = kwargs.get(
'product', None)
self.programmePhase = kwargs.get(
'programmePhase', None)
self.sampleId = kwargs.get(
'sampleId', None)
self.source = kwargs.get(
'source', None)
self.tissueSource = kwargs.get(
'tissueSource', None)
self.topographyICD = kwargs.get(
'topographyICD', None)
self.topographySnomedCT = kwargs.get(
'topographySnomedCT', None)
self.topographySnomedRT = kwargs.get(
'topographySnomedRT', None)
self.tumourContent = kwargs.get(
'tumourContent', None)
self.tumourId = kwargs.get(
'tumourId', None)
self.tumourType = kwargs.get(
'tumourType', None)
class TumourType(object):
"""
No documentation
"""
PRIMARY = "PRIMARY"
METASTATIC_RECURRENCE = "METASTATIC_RECURRENCE"
RECURRENCE_OF_PRIMARY_TUMOUR = "RECURRENCE_OF_PRIMARY_TUMOUR"
METASTASES = "METASTASES"
def __hash__(self):
return str(self).__hash__()
class VersionControl(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "VersionControl", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.0.3"}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'GitVersionControl'
]
def __init__(self, **kwargs):
self.GitVersionControl = kwargs.get(
'GitVersionControl', '1.0.3')
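# A minimal validation sketch (assumptions: avro_parse wraps the classic
# avro-python avro.schema.parse, avro.io.validate is available, and the
# to_dict helper below is hypothetical, built on each element's __slots__):
#
#     import avro.io
#
#     def to_dict(element):
#         return {name: getattr(element, name) for name in element.__slots__}
#
#     vc = VersionControl()                    # GitVersionControl == '1.0.3'
#     avro.io.validate(VersionControl.schema, to_dict(vc))  # expect True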
class diseaseType(object):
"""
No documentation
"""
ADULT_GLIOMA = "ADULT_GLIOMA"
BLADDER = "BLADDER"
BREAST = "BREAST"
CARCINOMA_OF_UNKNOWN_PRIMARY = "CARCINOMA_OF_UNKNOWN_PRIMARY"
CHILDHOOD = "CHILDHOOD"
COLORECTAL = "COLORECTAL"
ENDOMETRIAL_CARCINOMA = "ENDOMETRIAL_CARCINOMA"
HAEMONC = "HAEMONC"
HEPATOPANCREATOBILIARY = "HEPATOPANCREATOBILIARY"
LUNG = "LUNG"
MALIGNANT_MELANOMA = "MALIGNANT_MELANOMA"
NASOPHARYNGEAL = "NASOPHARYNGEAL"
ORAL_OROPHARYNGEAL = "ORAL_OROPHARYNGEAL"
OVARIAN = "OVARIAN"
PROSTATE = "PROSTATE"
RENAL = "RENAL"
SARCOMA = "SARCOMA"
SINONASAL = "SINONASAL"
TESTICULAR_GERM_CELL_TUMOURS = "TESTICULAR_GERM_CELL_TUMOURS"
UPPER_GASTROINTESTINAL = "UPPER_GASTROINTESTINAL"
NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE = "NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE"
CLASSICAL_HODGKINS = "CLASSICAL_HODGKINS"
NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS = "NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS"
T_CELL_LYMPHOMA = "T_CELL_LYMPHOMA"
def __hash__(self):
return str(self).__hash__()
fc02e55ee42e94f6f2e37688fcd818c62a2c20ae | 159 | py | Python | verboze/asgi.py | Verbozeteam/web | 2aecd67ec823e9d6ac243d6f8a71849dd0f9ed9d | ["MIT"] | stars: 1 (2018-12-17)
import os
from channels.asgi import get_channel_layer
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "verboze.settings")
channel_layer = get_channel_layer()
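# Usage note (assumes the Channels 1.x convention imported above): an
# interface server is pointed at this module as "verboze.asgi:channel_layer",
# e.g. `daphne verboze.asgi:channel_layer`. A minimal sketch of how such a
# "module:attribute" spec resolves to the object defined here:
import importlib

def _resolve_channel_layer(spec):
    # "verboze.asgi:channel_layer" -> import the module, fetch the attribute
    module_name, _, attr = spec.partition(":")
    return getattr(importlib.import_module(module_name), attr)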
fc2bfa287025df1cec20e4b0d7bf89211c912705 | 520 | py | Python | authentication/admin.py | luisza/vcl_django | 43d04f7951cb8805502e51f6f6360c7ec63215cc | ["Apache-2.0"]
from django.contrib import admin
# Register your models here.
from authentication.models import (User,
Usergroup,
Usergroupmembers,
Shibauth,
Localauth)
admin.site.register([User,
Usergroup,
Usergroupmembers,
Shibauth,
Localauth])
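# admin.site.register accepts either a single model or, as above, a list of
# models that all get the default ModelAdmin. A minimal sketch of the
# alternative form (the list_display choice here is hypothetical):
class UserAdminExample(admin.ModelAdmin):
    # `id` exists on every Django model, so it is always safe to display.
    list_display = ("id",)

# admin.site.register(User, UserAdminExample)  # would replace the list form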
fc3871378a173b9f757032dbb802ee8af2aed424 | 38 | py | Python | teste.py | buggers/bugfactory.github.io | 364a57033750930337dbe1f7c39583db815467b4 | ["MIT"]
name = input('dasdsa')
print(name)
fc4098a041905e68a2b8847659b0e1079bf89f06 | 431 | py | Python | tests/rules/test_ls_lah.py | lardnicus/oops | 2cabcdb6726f4583f954d5f3671574bd18c7fdf2 | ["MIT"]
from mock import patch, Mock
from oops.rules.ls_lah import match, get_new_command
def test_match():
assert match(Mock(script='ls file.py'), None)
assert match(Mock(script='ls /opt'), None)
assert not match(Mock(script='ls -lah /opt'), None)
def test_get_new_command():
assert get_new_command(Mock(script='ls file.py'), None) == 'ls -lah file.py'
assert get_new_command(Mock(script='ls'), None) == 'ls -lah'
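# For reference, a plausible sketch of the rule these tests pin down (not
# the actual oops.rules.ls_lah source; names are suffixed to avoid shadowing
# the imported functions):
def _match_sketch(command, settings):
    # bare `ls`, or `ls <args>` without any flags
    return command.script == 'ls' or (
        command.script.startswith('ls ') and ' -' not in command.script)

def _get_new_command_sketch(command, settings):
    return command.script.replace('ls', 'ls -lah', 1)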
fc59861de2584c2668ef54b7e10277a29ef393b9 | 197 | py | Python | twitteruser/admin.py | BethanyFolino/twitterclone | 7dcdde05786575e2508f9ecde148202f387f9128 | ["MIT"]
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from twitteruser.models import TwitterUser
# Register your models here.
admin.site.register(TwitterUser, UserAdmin)
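# Registering a custom user model with the stock UserAdmin, as above,
# assumes TwitterUser extends Django's auth user. A minimal sketch of such
# a model (hypothetical; the real definition lives in twitteruser.models):
#
#     from django.contrib.auth.models import AbstractUser
#
#     class TwitterUser(AbstractUser):
#         pass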
5d92cf25118e4fef71a470b08908134836789b26 | 312 | py | Python | poker_utils.py | zouyapeng/Texas-Hold-em-Player | d4b4ec12325845a894cfb66f885a2f2d067c04f5 | ["Apache-2.0"]
# -*- coding: utf-8 -*-
# @Time : 2018/6/29 11:24
# @Author : Bob Zou
# @Mail : bob_zou@trendmicro.com
# @File : poker_utils
# @Software: PyCharm
# @Function:
from django.db import models
class Player(models.Model):
pass
class Game(models.Model):
pass
class Round(models.Model):
pass
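# The three models above are stubs. A hypothetical sketch of how Player
# might be fleshed out (field names are invented for illustration):
#
#     class Player(models.Model):
#         name = models.CharField(max_length=64)
#         chips = models.IntegerField(default=0)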
5da77a712d591729ceb71914f14335724537545c | 31 | py | Python | masq/cms/utils/__init__.py | tehdiplomat/hidden-role-games | e6fdc132700db8d12fd57f08200a499cdc5bef7d | ["MIT"]
#from cms.models.polls import *
5dbefca05b30757d26ac684fa5ad02e5eeaf40b1 | 199 | py | Python | hypernets/conf/__init__.py | Enpen/Hypernets | 5fbf01412ffaef310855d98f52f8cc169e96246b | ["Apache-2.0"] | stars: 1,080 (2020-06-22 to 2022-03-22) | issues: 24 | forks: 170
from traitlets import Unicode, Unicode as String, Bool, Int, Float, Enum, List, Dict, Union
from ._configuration import Configurable, configure, observe, configure_and_observe, generate_config_file
5dc443510a3b499f63cbbb49e60128bb0c0707ac | 8,303 | py | Python | 04 - Parser/src/lexer/unicode_chars.py | masyagin1998/TFL | e92bedd513855348e1e5648e91abecc3b5c1ad10 | ["MIT"] | stars: 7 (2018-10-05 to 2021-06-08)
# 'Uppercase letter (Lu)', 'Lowercase letter (Ll)',
# 'Titlecase letter(Lt)', 'Modifier letter (Lm)', 'Other letter (Lo)'
LETTER = (
u'[A-Za-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6'
u'\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376'
u'\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5'
u'\u03f7-\u0481\u048a-\u0523\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea'
u'\u05f0-\u05f2\u0621-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6'
u'\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1'
u'\u07ca-\u07ea\u07f4\u07f5\u07fa\u0904-\u0939\u093d\u0950\u0958-\u0961'
u'\u0971\u0972\u097b-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8'
u'\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1'
u'\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32'
u'\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74'
u'\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3'
u'\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10'
u'\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d'
u'\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99'
u'\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0'
u'\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d'
u'\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8'
u'\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0d05-\u0d0c'
u'\u0d0e-\u0d10\u0d12-\u0d28\u0d2a-\u0d39\u0d3d\u0d60\u0d61\u0d7a-\u0d7f'
u'\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30'
u'\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d'
u'\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab'
u'\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc\u0edd\u0f00'
u'\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f\u1050-\u1055'
u'\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e'
u'\u10a0-\u10c5\u10d0-\u10fa\u10fc\u1100-\u1159\u115f-\u11a2\u11a8-\u11f9'
u'\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288'
u'\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5'
u'\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f'
u'\u13a0-\u13f4\u1401-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea'
u'\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c'
u'\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa'
u'\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19a9\u19c1-\u19c7'
u'\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf'
u'\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1d00-\u1dbf\u1e00-\u1f15'
u'\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d'
u'\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc'
u'\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071'
u'\u207f\u2090-\u2094\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124'
u'\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e'
u'\u2183\u2184\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2c6f\u2c71-\u2c7d'
u'\u2c80-\u2ce4\u2d00-\u2d25\u2d30-\u2d65\u2d6f\u2d80-\u2d96\u2da0-\u2da6'
u'\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce'
u'\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005\u3006\u3031-\u3035\u303b\u303c'
u'\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d'
u'\u3131-\u318e\u31a0-\u31b7\u31f0-\u31ff\u3400\u4db5\u4e00\u9fc3'
u'\ua000-\ua48c\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua65f'
u'\ua662-\ua66e\ua67f-\ua697\ua717-\ua71f\ua722-\ua788\ua78b\ua78c'
u'\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873'
u'\ua882-\ua8b3\ua90a-\ua925\ua930-\ua946\uaa00-\uaa28\uaa40-\uaa42'
u'\uaa44-\uaa4b\uac00\ud7a3\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9'
u'\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c'
u'\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f'
u'\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a'
u'\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7'
u'\uffda-\uffdc]'
)
NON_SPACING_MARK = (
u'[\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5'
u'\u05c7\u0610-\u061a\u064b-\u065e\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7'
u'\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3'
u'\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0900-\u0902\u093c'
u'\u0941-\u0948\u094d\u0951-\u0955\u0962\u0963\u0981\u09bc\u09c1-\u09c4'
u'\u09cd\u09e2\u09e3\u0a01\u0a02\u0a3c\u0a41\u0a42\u0a47\u0a48'
u'\u0a4b-\u0a4d\u0a51\u0a70\u0a71\u0a75\u0a81\u0a82\u0abc\u0ac1-\u0ac5'
u'\u0ac7\u0ac8\u0acd\u0ae2\u0ae3\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d'
u'\u0b56\u0b62\u0b63\u0b82\u0bc0\u0bcd\u0c3e-\u0c40\u0c46-\u0c48'
u'\u0c4a-\u0c4d\u0c55\u0c56\u0c62\u0c63\u0cbc\u0cbf\u0cc6\u0ccc\u0ccd'
u'\u0ce2\u0ce3\u0d41-\u0d44\u0d4d\u0d62\u0d63\u0dca\u0dd2-\u0dd4\u0dd6'
u'\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb\u0ebc'
u'\u0ec8-\u0ecd\u0f18\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84'
u'\u0f86\u0f87\u0f90-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037'
u'\u1039\u103a\u103d\u103e\u1058\u1059\u105e-\u1060\u1071-\u1074\u1082'
u'\u1085\u1086\u108d\u109d\u135f\u1712-\u1714\u1732-\u1734\u1752\u1753'
u'\u1772\u1773\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u18a9'
u'\u1920-\u1922\u1927\u1928\u1932\u1939-\u193b\u1a17\u1a18\u1a56'
u'\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1b00-\u1b03'
u'\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80\u1b81\u1ba2-\u1ba5'
u'\u1ba8\u1ba9\u1c2c-\u1c33\u1c36\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0'
u'\u1ce2-\u1ce8\u1ced\u1dc0-\u1de6\u1dfd-\u1dff\u20d0-\u20dc\u20e1'
u'\u20e5-\u20f0\u2cef-\u2cf1\u2de0-\u2dff\u302a-\u302f\u3099\u309a\ua66f'
u'\ua67c\ua67d\ua6f0\ua6f1\ua802\ua806\ua80b\ua825\ua826\ua8c4'
u'\ua8e0-\ua8f1\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9'
u'\ua9bc\uaa29-\uaa2e\uaa31\uaa32\uaa35\uaa36\uaa43\uaa4c\uaab0'
u'\uaab2-\uaab4\uaab7\uaab8\uaabe\uaabf\uaac1\uabe5\uabe8\uabed\ufb1e'
u'\ufe00-\ufe0f\ufe20-\ufe26]'
)
COMBINING_SPACING_MARK = (
u'[\u0903\u093e-\u0940\u0949-\u094c\u094e\u0982\u0983\u09be-\u09c0\u09c7'
u'\u09c8\u09cb\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9'
u'\u0acb\u0acc\u0b02\u0b03\u0b3e\u0b40\u0b47\u0b48\u0b4b\u0b4c\u0b57'
u'\u0bbe\u0bbf\u0bc1\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03'
u'\u0c41-\u0c44\u0c82\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7\u0cc8\u0cca\u0ccb'
u'\u0cd5\u0cd6\u0d02\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57'
u'\u0d82\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2\u0df3\u0f3e\u0f3f\u0f7f'
u'\u102b\u102c\u1031\u1038\u103b\u103c\u1056\u1057\u1062-\u1064'
u'\u1067-\u106d\u1083\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6'
u'\u17be-\u17c5\u17c7\u17c8\u1923-\u1926\u1929-\u192b\u1930\u1931'
u'\u1933-\u1938\u19b0-\u19c0\u19c8\u19c9\u1a19-\u1a1b\u1a55\u1a57\u1a61'
u'\u1a63\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43\u1b44'
u'\u1b82\u1ba1\u1ba6\u1ba7\u1baa\u1c24-\u1c2b\u1c34\u1c35\u1ce1\u1cf2'
u'\ua823\ua824\ua827\ua880\ua881\ua8b4-\ua8c3\ua952\ua953\ua983\ua9b4'
u'\ua9b5\ua9ba\ua9bb\ua9bd-\ua9c0\uaa2f\uaa30\uaa33\uaa34\uaa4d\uaa7b'
u'\uabe3\uabe4\uabe6\uabe7\uabe9\uabea\uabec]'
)
COMBINING_MARK = u'%s|%s' % (NON_SPACING_MARK, COMBINING_SPACING_MARK)
CONNECTOR_PUNCTUATION = u'[_\u203f\u2040\u2054\ufe33\ufe34\ufe4d-\ufe4f\uff3f]'
DIGIT = (
u'[0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f'
u'\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef' # noqa: E501,W293
u'\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0e50-\u0e59\u0ed0-\u0ed9'
u'\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819'
u'\u1946-\u194f\u19d0-\u19da\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59'
u'\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9'
u'\ua900-\ua909\ua9d0-\ua9d9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19]'
)
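# Illustrative composition (an assumption about intended use, mirroring the
# common lexer rule "identifier = letter, then letters, marks, digits, or
# connector punctuation"):
import re

IDENTIFIER_EXAMPLE = re.compile(u'%s(?:%s|%s|%s|%s)*' % (
    LETTER, LETTER, COMBINING_MARK, DIGIT, CONNECTOR_PUNCTUATION))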
b901c47f70787cd225f7c34a4b0d9be27f96a5ed | 163 | py | Python | main/admin.py | aryaputra28/covidify-PerancanganWeb | 34d6d0017f44248c172fc58e6e1b138e23e68a95 | ["Unlicense"]
from django.contrib import admin
from .models import Feedback, Pengguna
# Register your models here.
admin.site.register(Feedback)
admin.site.register(Pengguna)
f8dce1cd078e8ee2d1412f7a1f2231d26e3e0ce3 | 390 | py | Python | Python-Leetcode/Array/1365. How Many Numbers Are Smaller Than the Current Number.py | HughesZhang73/Python-Master | 607110e4326f4b51ffa7e2ade4edcecd26e52298 | ["MIT"] | stars: 2 (2020-11-05 to 2020-12-22)
# def smallerNumbersThanCurrent(nums: list) -> list:
# ans = []
# for i in nums:
# temp = nums.copy()
# count = 0
# temp.remove(i)
# for j in temp:
# if i > j:
# count += 1
# ans.append(count)
# return ans
def smallerNumbersThanCurrent(nums: list) -> list:
    # For each value, count how many values in nums are strictly smaller;
    # strict comparison means an element never counts itself.
    return [sum(other < n for other in nums) for n in nums]

print(smallerNumbersThanCurrent([8,1,2,2,3]))
f8e1b3c3bfda7e3c52c777386b177f051ccbf65f | 50 | py | Python | animatplot/testing/__init__.py | eric-erki/animatplot | 38a24c3301fbb82a237758fc42f6f4d59275721f | ["MIT"]
from .tools import BunchOFiles, animation_compare
5d07a1d5afad6d91bf9fc20883cadff0f4f50b98 | 130 | py | Python | worker.py | AndreAmorim05/flask-celery-mail | b95cb3b02c805980b8d7aef9d7d851fe8b5fc6fe | ["MIT"]
from flaskcelerymail.app import create_app
from flaskcelerymail.ext.celery import init_celery
celery = init_celery(create_app())
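# Usage note (assuming this file is importable as `worker` from the project
# root): Celery's app finder accepts the module and picks up the `celery`
# attribute defined above, e.g.
#
#     celery -A worker worker --loglevel=INFO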
5d3b376046eba5518bad4b6446bb9ed2904e6966 | 59 | py | Python | pudb/contrib/__init__.py | georgepar/pudb | c7c213604ab136a5de87fa465ceed910d7f3eee2 | ["MIT"]
from pudb.contrib.stringifiers import CONTRIB_STRINGIFIERS
5d3ccfc7062c62710c12e72708e15729ffbb2703 | 201 | py | Python | geoshops/nearbyshops/admin.py | syberflea/materials | 54f44725b40edf00c1b523d7a85b34a85014d7eb | ["MIT"] | stars: 3,682 (2018-05-07 to 2022-03-31) | issues: 148 (sribarrow/materials) | forks: 5,535
from django.contrib import admin
from django.contrib.gis.admin import OSMGeoAdmin
from .models import Shop
@admin.register(Shop)
class ShopAdmin(OSMGeoAdmin):
list_display = ("name", "location")
5d48872c3d41eb2019401f606940704906b7f049 | 116 | py | Python | script/generate.py | tetianakravchenko/beats | 6aec024e0ab8239791be20885d6d3c58697d18cd | ["ECL-2.0", "Apache-2.0"] | stars: 9,729 | issues: 25,281 | forks: 5,239
if __name__ == "__main__":
print("This script is deprecated. Please use `mage GenerateCustomBeat`")
exit(1)
5d5fc1e755feccff20038e384ab6d6679e61167b | 19,937 | py | Python | staff_manage_sdk/model/topboard/issue_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | stars: 5 (2019-07-31 to 2021-01-07)
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: issue.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from staff_manage_sdk.model.topboard import product_pb2 as staff__manage__sdk_dot_model_dot_topboard_dot_product__pb2
from staff_manage_sdk.model.topboard import sprint_pb2 as staff__manage__sdk_dot_model_dot_topboard_dot_sprint__pb2
from staff_manage_sdk.model.cmdb import user_pb2 as staff__manage__sdk_dot_model_dot_cmdb_dot_user__pb2
from staff_manage_sdk.model.topboard import attachment_pb2 as staff__manage__sdk_dot_model_dot_topboard_dot_attachment__pb2
from staff_manage_sdk.model.topboard import comment_pb2 as staff__manage__sdk_dot_model_dot_topboard_dot_comment__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='issue.proto',
package='topboard',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboard'),
serialized_pb=_b('\n\x0bissue.proto\x12\x08topboard\x1a-staff_manage_sdk/model/topboard/product.proto\x1a,staff_manage_sdk/model/topboard/sprint.proto\x1a&staff_manage_sdk/model/cmdb/user.proto\x1a\x30staff_manage_sdk/model/topboard/attachment.proto\x1a-staff_manage_sdk/model/topboard/comment.proto\"\xc4\x06\n\x05Issue\x12\x1f\n\x06parent\x18\x01 \x03(\x0b\x32\x0f.topboard.Issue\x12!\n\x08subtasks\x18\x02 \x03(\x0b\x32\x0f.topboard.Issue\x12\"\n\x07product\x18\x03 \x03(\x0b\x32\x11.topboard.Product\x12 \n\x06sprint\x18\x04 \x03(\x0b\x32\x10.topboard.Sprint\x12\x1f\n\x0bsubscribers\x18\x05 \x03(\x0b\x32\n.cmdb.User\x12\x1c\n\x08\x61ssignee\x18\x06 \x03(\x0b\x32\n.cmdb.User\x12\x1c\n\x08reporter\x18\x07 \x03(\x0b\x32\n.cmdb.User\x12)\n\x0b\x61ttachments\x18\x08 \x03(\x0b\x32\x14.topboard.Attachment\x12#\n\x08\x63omments\x18\t \x03(\x0b\x32\x11.topboard.Comment\x12,\n\tissueFrom\x18\n \x03(\x0b\x32\x19.topboard.Issue.IssueFrom\x12\x1a\n\x06tester\x18\x0b \x03(\x0b\x32\n.cmdb.User\x12\x0c\n\x04name\x18\x0c \x01(\t\x12\x12\n\ninstanceId\x18\r \x01(\t\x12\x0f\n\x07\x63reator\x18\x0e \x01(\t\x12\r\n\x05\x63time\x18\x0f \x01(\t\x12\r\n\x05title\x18\x10 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x11 \x01(\t\x12\x10\n\x08priority\x18\x12 \x01(\t\x12\x0c\n\x04type\x18\x13 \x01(\t\x12\x0c\n\x04step\x18\x14 \x01(\t\x12$\n\x05links\x18\x15 \x03(\x0b\x32\x15.topboard.Issue.Links\x12\x12\n\nstoryPoint\x18\x16 \x01(\t\x12\x12\n\nresolution\x18\x17 \x01(\t\x12\x0e\n\x06status\x18\x18 \x01(\t\x12&\n\x06images\x18\x19 \x03(\x0b\x32\x16.topboard.Issue.Images\x12\x0f\n\x07\x62ugType\x18\x1a \x01(\t\x12\x16\n\x0eresponsibility\x18\x1b \x01(\t\x1a-\n\tIssueFrom\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninstanceId\x18\x02 \x01(\t\x1a#\n\x05Links\x12\r\n\x05title\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x1a#\n\x06Images\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\tBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboardb\x06proto3')
,
dependencies=[staff__manage__sdk_dot_model_dot_topboard_dot_product__pb2.DESCRIPTOR,staff__manage__sdk_dot_model_dot_topboard_dot_sprint__pb2.DESCRIPTOR,staff__manage__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,staff__manage__sdk_dot_model_dot_topboard_dot_attachment__pb2.DESCRIPTOR,staff__manage__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,])
_ISSUE_ISSUEFROM = _descriptor.Descriptor(
name='IssueFrom',
full_name='topboard.Issue.IssueFrom',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='topboard.Issue.IssueFrom.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='topboard.Issue.IssueFrom.instanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=973,
serialized_end=1018,
)
_ISSUE_LINKS = _descriptor.Descriptor(
name='Links',
full_name='topboard.Issue.Links',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='topboard.Issue.Links.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='topboard.Issue.Links.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1020,
serialized_end=1055,
)
_ISSUE_IMAGES = _descriptor.Descriptor(
name='Images',
full_name='topboard.Issue.Images',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='topboard.Issue.Images.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='topboard.Issue.Images.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1057,
serialized_end=1092,
)
_ISSUE = _descriptor.Descriptor(
name='Issue',
full_name='topboard.Issue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='topboard.Issue.parent', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subtasks', full_name='topboard.Issue.subtasks', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product', full_name='topboard.Issue.product', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sprint', full_name='topboard.Issue.sprint', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribers', full_name='topboard.Issue.subscribers', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='assignee', full_name='topboard.Issue.assignee', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reporter', full_name='topboard.Issue.reporter', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attachments', full_name='topboard.Issue.attachments', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='comments', full_name='topboard.Issue.comments', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='issueFrom', full_name='topboard.Issue.issueFrom', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tester', full_name='topboard.Issue.tester', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='topboard.Issue.name', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='topboard.Issue.instanceId', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='topboard.Issue.creator', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='topboard.Issue.ctime', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='title', full_name='topboard.Issue.title', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='topboard.Issue.description', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority', full_name='topboard.Issue.priority', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='topboard.Issue.type', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='topboard.Issue.step', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='links', full_name='topboard.Issue.links', index=20,
number=21, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='storyPoint', full_name='topboard.Issue.storyPoint', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resolution', full_name='topboard.Issue.resolution', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='topboard.Issue.status', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='images', full_name='topboard.Issue.images', index=24,
number=25, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bugType', full_name='topboard.Issue.bugType', index=25,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='responsibility', full_name='topboard.Issue.responsibility', index=26,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ISSUE_ISSUEFROM, _ISSUE_LINKS, _ISSUE_IMAGES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=1092,
)
_ISSUE_ISSUEFROM.containing_type = _ISSUE
_ISSUE_LINKS.containing_type = _ISSUE
_ISSUE_IMAGES.containing_type = _ISSUE
_ISSUE.fields_by_name['parent'].message_type = _ISSUE
_ISSUE.fields_by_name['subtasks'].message_type = _ISSUE
_ISSUE.fields_by_name['product'].message_type = staff__manage__sdk_dot_model_dot_topboard_dot_product__pb2._PRODUCT
_ISSUE.fields_by_name['sprint'].message_type = staff__manage__sdk_dot_model_dot_topboard_dot_sprint__pb2._SPRINT
_ISSUE.fields_by_name['subscribers'].message_type = staff__manage__sdk_dot_model_dot_cmdb_dot_user__pb2._USER
_ISSUE.fields_by_name['assignee'].message_type = staff__manage__sdk_dot_model_dot_cmdb_dot_user__pb2._USER
_ISSUE.fields_by_name['reporter'].message_type = staff__manage__sdk_dot_model_dot_cmdb_dot_user__pb2._USER
_ISSUE.fields_by_name['attachments'].message_type = staff__manage__sdk_dot_model_dot_topboard_dot_attachment__pb2._ATTACHMENT
_ISSUE.fields_by_name['comments'].message_type = staff__manage__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
_ISSUE.fields_by_name['issueFrom'].message_type = _ISSUE_ISSUEFROM
_ISSUE.fields_by_name['tester'].message_type = staff__manage__sdk_dot_model_dot_cmdb_dot_user__pb2._USER
_ISSUE.fields_by_name['links'].message_type = _ISSUE_LINKS
_ISSUE.fields_by_name['images'].message_type = _ISSUE_IMAGES
DESCRIPTOR.message_types_by_name['Issue'] = _ISSUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Issue = _reflection.GeneratedProtocolMessageType('Issue', (_message.Message,), {
'IssueFrom' : _reflection.GeneratedProtocolMessageType('IssueFrom', (_message.Message,), {
'DESCRIPTOR' : _ISSUE_ISSUEFROM,
'__module__' : 'issue_pb2'
# @@protoc_insertion_point(class_scope:topboard.Issue.IssueFrom)
})
,
'Links' : _reflection.GeneratedProtocolMessageType('Links', (_message.Message,), {
'DESCRIPTOR' : _ISSUE_LINKS,
'__module__' : 'issue_pb2'
# @@protoc_insertion_point(class_scope:topboard.Issue.Links)
})
,
'Images' : _reflection.GeneratedProtocolMessageType('Images', (_message.Message,), {
'DESCRIPTOR' : _ISSUE_IMAGES,
'__module__' : 'issue_pb2'
# @@protoc_insertion_point(class_scope:topboard.Issue.Images)
})
,
'DESCRIPTOR' : _ISSUE,
'__module__' : 'issue_pb2'
# @@protoc_insertion_point(class_scope:topboard.Issue)
})
_sym_db.RegisterMessage(Issue)
_sym_db.RegisterMessage(Issue.IssueFrom)
_sym_db.RegisterMessage(Issue.Links)
_sym_db.RegisterMessage(Issue.Images)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
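# --- Usage sketch (ours, not emitted by protoc): constructing an Issue from
# the message classes registered above. The field names (title, priority,
# links) come from the descriptor; the import path assumes this SDK's layout.
# from staff_manage_sdk.model.topboard import issue_pb2
# issue = issue_pb2.Issue(
#     title='Login page crashes on submit',
#     priority='high',
#     links=[issue_pb2.Issue.Links(title='spec', url='https://example.com/spec')],
# )
# payload = issue.SerializeToString()          # wire-format bytes
# parsed = issue_pb2.Issue.FromString(payload) # round-trip back to a message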
| 48.508516
| 1,988
| 0.744295
| 2,729
| 19,937
| 5.117259
| 0.083547
| 0.059005
| 0.042392
| 0.055639
| 0.72961
| 0.711493
| 0.702757
| 0.661797
| 0.643108
| 0.636591
| 0
| 0.042593
| 0.125044
| 19,937
| 410
| 1,989
| 48.626829
| 0.757968
| 0.020364
| 0
| 0.643045
| 1
| 0.002625
| 0.182624
| 0.137391
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.026247
| 0
| 0.026247
| 0.013123
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d60686c602365067559a8bbce499b8ec83fb080
| 31
|
py
|
Python
|
keypad_racer/__main__.py
|
encukou/keypad-racer
|
57a832b3e0d06aa79f2205630b5da563cf2d09c5
|
[
"MIT",
"CC-BY-3.0",
"Unlicense"
] | null | null | null |
keypad_racer/__main__.py
|
encukou/keypad-racer
|
57a832b3e0d06aa79f2205630b5da563cf2d09c5
|
[
"MIT",
"CC-BY-3.0",
"Unlicense"
] | null | null | null |
keypad_racer/__main__.py
|
encukou/keypad-racer
|
57a832b3e0d06aa79f2205630b5da563cf2d09c5
|
[
"MIT",
"CC-BY-3.0",
"Unlicense"
] | null | null | null |
from . import game
game.run()
| 7.75
| 18
| 0.677419
| 5
| 31
| 4.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 31
| 3
| 19
| 10.333333
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5d63b63c62b854775a623df503481ff000d7e47f
| 774
|
py
|
Python
|
mystocks_backend/venv/lib/python3.8/site-packages/flask_jwt_extended/__init__.py
|
SQinSQi/SQ_StockWeb
|
a33cae33e5648575132d0ea442d100f93c79afb2
|
[
"MIT"
] | 1
|
2022-02-17T02:49:08.000Z
|
2022-02-17T02:49:08.000Z
|
mystocks_backend/venv/lib/python3.8/site-packages/flask_jwt_extended/__init__.py
|
SQinSQi/SQ_StockWeb
|
a33cae33e5648575132d0ea442d100f93c79afb2
|
[
"MIT"
] | null | null | null |
mystocks_backend/venv/lib/python3.8/site-packages/flask_jwt_extended/__init__.py
|
SQinSQi/SQ_StockWeb
|
a33cae33e5648575132d0ea442d100f93c79afb2
|
[
"MIT"
] | null | null | null |
from .jwt_manager import JWTManager
from .utils import create_access_token
from .utils import create_refresh_token
from .utils import current_user
from .utils import decode_token
from .utils import get_csrf_token
from .utils import get_current_user
from .utils import get_jti
from .utils import get_jwt
from .utils import get_jwt_header
from .utils import get_jwt_identity
from .utils import get_jwt_request_location
from .utils import get_unverified_jwt_headers
from .utils import set_access_cookies
from .utils import set_refresh_cookies
from .utils import unset_access_cookies
from .utils import unset_jwt_cookies
from .utils import unset_refresh_cookies
from .view_decorators import jwt_required
from .view_decorators import verify_jwt_in_request
__version__ = "4.2.3"
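# A minimal usage sketch of the names re-exported above (ours, not part of
# this package; `app` and the secret key value are placeholder assumptions):
#
# from flask import Flask, jsonify
# app = Flask(__name__)
# app.config["JWT_SECRET_KEY"] = "change-me"
# jwt = JWTManager(app)
#
# @app.route("/login", methods=["POST"])
# def login():
#     return jsonify(access_token=create_access_token(identity="user-1"))
#
# @app.route("/protected")
# @jwt_required()
# def protected():
#     return jsonify(identity=get_jwt_identity())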
| 33.652174
| 50
| 0.859173
| 123
| 774
| 5.056911
| 0.276423
| 0.245981
| 0.409968
| 0.231511
| 0.477492
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004348
| 0.108527
| 774
| 22
| 51
| 35.181818
| 0.897101
| 0
| 0
| 0
| 0
| 0
| 0.00646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.952381
| 0
| 0.952381
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d6f55f027a516654816f74633e8c1a5e1954df8
| 191
|
py
|
Python
|
vmaig_blog/uwsgi-2.0.14/contrib/spoolqueue/tasks.py
|
StanYaha/Blog
|
3cb38918e14ebe6ce2e2952ef272de116849910d
|
[
"BSD-3-Clause"
] | 1
|
2018-11-24T16:10:49.000Z
|
2018-11-24T16:10:49.000Z
|
vmaig_blog/uwsgi-2.0.14/contrib/spoolqueue/tasks.py
|
StanYaha/Blog
|
3cb38918e14ebe6ce2e2952ef272de116849910d
|
[
"BSD-3-Clause"
] | null | null | null |
vmaig_blog/uwsgi-2.0.14/contrib/spoolqueue/tasks.py
|
StanYaha/Blog
|
3cb38918e14ebe6ce2e2952ef272de116849910d
|
[
"BSD-3-Clause"
] | null | null | null |
from tasksconsumer import *
@queueconsumer('fast', 4)
def fast_queue(arguments):
print "fast", arguments
@queueconsumer('slow')
def slow_queue(arguments):
print "foobar", arguments
| 19.1
| 29
| 0.732984
| 22
| 191
| 6.272727
| 0.545455
| 0.202899
| 0.275362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.141361
| 191
| 9
| 30
| 21.222222
| 0.835366
| 0
| 0
| 0
| 0
| 0
| 0.094241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.142857
| null | null | 0.285714
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d74d40b0e9d141426776ad9ac6c5bb76b1ee147
| 431
|
py
|
Python
|
dnsdb_mock_apiserver/models.py
|
dnsdb-team/dnsdb-mock-apiserver
|
7a03d98406fea0e4145fdfbebb433982d984b1e8
|
[
"BSD-4-Clause"
] | null | null | null |
dnsdb_mock_apiserver/models.py
|
dnsdb-team/dnsdb-mock-apiserver
|
7a03d98406fea0e4145fdfbebb433982d984b1e8
|
[
"BSD-4-Clause"
] | null | null | null |
dnsdb_mock_apiserver/models.py
|
dnsdb-team/dnsdb-mock-apiserver
|
7a03d98406fea0e4145fdfbebb433982d984b1e8
|
[
"BSD-4-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
class User(object):
def __init__(self, username, password, remaining_request=100):
self.username = username
self.password = password
self.remaining_request = remaining_request
def __str__(self):
return json.dumps({'username': self.username, 'password': self.password, 'remaining_request': self.remaining_request})
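# Minimal usage sketch (ours; the credential values are illustrative only):
if __name__ == '__main__':
    user = User('alice', 's3cret')
    print(user)  # {"username": "alice", "password": "s3cret", "remaining_request": 100}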
| 30.785714
| 126
| 0.707657
| 49
| 431
| 5.857143
| 0.469388
| 0.278746
| 0.139373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0.183295
| 431
| 13
| 127
| 33.153846
| 0.803977
| 0.048724
| 0
| 0
| 0
| 0
| 0.080882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.333333
| 0.222222
| 0.111111
| 0.666667
| 0.111111
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
53ab6e69eb15056a9b07033531e1c18cc28fb36d
| 112
|
py
|
Python
|
meet_and_play/game_announcement/admin.py
|
ImIFilm/meet_and_play_project
|
316301b2ce474f3470da342b53196b9c901cc234
|
[
"MIT"
] | null | null | null |
meet_and_play/game_announcement/admin.py
|
ImIFilm/meet_and_play_project
|
316301b2ce474f3470da342b53196b9c901cc234
|
[
"MIT"
] | null | null | null |
meet_and_play/game_announcement/admin.py
|
ImIFilm/meet_and_play_project
|
316301b2ce474f3470da342b53196b9c901cc234
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Game_Announcement
admin.site.register(Game_Announcement)
| 18.666667
| 38
| 0.848214
| 15
| 112
| 6.2
| 0.666667
| 0.344086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 112
| 5
| 39
| 22.4
| 0.920792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
53de071ee95e5f3377e87d91689f726e2c04c537
| 311
|
py
|
Python
|
hypergan/backends/cpu_backend.py
|
limberc/HyperGAN
|
b074e74abf0ed9b81bd52084706e3707a47e0fe2
|
[
"MIT"
] | 889
|
2016-08-27T01:37:35.000Z
|
2018-10-07T19:47:56.000Z
|
hypergan/backends/cpu_backend.py
|
limberc/HyperGAN
|
b074e74abf0ed9b81bd52084706e3707a47e0fe2
|
[
"MIT"
] | 101
|
2016-11-30T03:34:02.000Z
|
2018-10-02T13:50:52.000Z
|
hypergan/backends/cpu_backend.py
|
limberc/HyperGAN
|
b074e74abf0ed9b81bd52084706e3707a47e0fe2
|
[
"MIT"
] | 145
|
2016-09-27T06:56:24.000Z
|
2018-09-25T16:09:28.000Z
|
from .backend import Backend
class CPUBackend(Backend):
def __init__(self, trainable_gan, devices):
self.trainable_gan = trainable_gan
self.trainable_gan.to('cpu')
def save(self):
self.trainable_gan.save_locally()
def step(self):
self.trainable_gan.trainer.step()
| 23.923077
| 47
| 0.681672
| 39
| 311
| 5.153846
| 0.435897
| 0.358209
| 0.39801
| 0.199005
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215434
| 311
| 12
| 48
| 25.916667
| 0.82377
| 0
| 0
| 0
| 0
| 0
| 0.009646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
0708f7008c14ca7428978f4c27143d65469e8eb0
| 46
|
py
|
Python
|
Chapter 01/Chap01_Example1.152.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.152.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.152.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
fs3 = {1, 2, 3, 4}
f1 = frozenset(fs3)
f1[0] = 5  # raises TypeError: 'frozenset' object does not support item assignment
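# A follow-up sketch (ours, runnable once the uncaught failing line above is
# removed): handle the TypeError explicitly, then derive a new frozenset,
# since set operations on a frozenset return new objects rather than mutate.
try:
    f1[0] = 5
except TypeError as exc:
    print('item assignment rejected:', exc)
f2 = f1 | {5}  # union yields a new frozenset
print(f2)      # frozenset({1, 2, 3, 4, 5})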
| 11.5
| 19
| 0.543478
| 11
| 46
| 2.272727
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.27027
| 0.195652
| 46
| 3
| 20
| 15.333333
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
074a0633017dbe5352afc6bf9bba945e2431059b
| 25
|
py
|
Python
|
Sorting/Counting_sort.py
|
Toughee/Code-samples
|
08ea6815f6bffee8fd2c31180e7a3bcb905d6106
|
[
"MIT"
] | null | null | null |
Sorting/Counting_sort.py
|
Toughee/Code-samples
|
08ea6815f6bffee8fd2c31180e7a3bcb905d6106
|
[
"MIT"
] | null | null | null |
Sorting/Counting_sort.py
|
Toughee/Code-samples
|
08ea6815f6bffee8fd2c31180e7a3bcb905d6106
|
[
"MIT"
] | null | null | null |
# Counting sort algorithm
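# The file ends at the comment above, so here is a minimal sketch (ours) of
# the algorithm it names, for lists of non-negative integers:
def counting_sort(values):
    if not values:
        return []
    counts = [0] * (max(values) + 1)    # one bucket per possible key
    for v in values:
        counts[v] += 1                  # tally each occurrence
    result = []
    for key, count in enumerate(counts):
        result.extend([key] * count)    # emit each key `count` times
    return result

print(counting_sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]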
| 25
| 25
| 0.84
| 3
| 25
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.954545
| 0.92
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
074c2577c954260ae0854f5abcd1e0f1d9e8a7d1
| 78
|
py
|
Python
|
apps/project/models/__init__.py
|
picsldev/pyerp
|
e998e3e99a4e45033d54a6b1df50697f7288f67f
|
[
"MIT"
] | 115
|
2019-08-18T16:12:54.000Z
|
2022-03-29T14:17:20.000Z
|
apps/project/models/__init__.py
|
picsldev/pyerp
|
e998e3e99a4e45033d54a6b1df50697f7288f67f
|
[
"MIT"
] | 22
|
2019-09-09T01:34:54.000Z
|
2022-03-12T00:33:40.000Z
|
apps/project/models/__init__.py
|
picsldev/pyerp
|
e998e3e99a4e45033d54a6b1df50697f7288f67f
|
[
"MIT"
] | 83
|
2019-08-17T17:09:20.000Z
|
2022-03-25T04:46:53.000Z
|
from .bug import PyBug
from .project import PyProject
from .task import PyTask
| 26
| 30
| 0.820513
| 12
| 78
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 78
| 3
| 31
| 26
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4aebc1d2337d6d279043a7d0cf2caaf92ac8be3d
| 9,135
|
py
|
Python
|
tests/Turbomole/test_Turbomole.py
|
CMargreitter/Icolos
|
fd7b664ce177df875fefa910dc4d5c574b521cb3
|
[
"Apache-2.0"
] | 11
|
2022-01-30T14:36:13.000Z
|
2022-03-22T09:40:57.000Z
|
tests/Turbomole/test_Turbomole.py
|
CMargreitter/Icolos
|
fd7b664ce177df875fefa910dc4d5c574b521cb3
|
[
"Apache-2.0"
] | 2
|
2022-03-23T07:56:49.000Z
|
2022-03-24T12:01:42.000Z
|
tests/Turbomole/test_Turbomole.py
|
CMargreitter/Icolos
|
fd7b664ce177df875fefa910dc4d5c574b521cb3
|
[
"Apache-2.0"
] | 8
|
2022-01-28T10:32:31.000Z
|
2022-03-22T09:40:59.000Z
|
import unittest
import os
from icolos.core.workflow_steps.calculation.turbomole import StepTurbomole
from icolos.utils.enums.step_enums import StepBaseEnum, StepTurbomoleEnum
from icolos.utils.enums.program_parameters import TurbomoleEnum
from tests.tests_paths import (
PATHS_EXAMPLEDATA,
export_unit_test_env_vars,
get_mol_as_Compound,
get_mol_as_Conformer,
MAIN_CONFIG,
)
from icolos.utils.enums.compound_enums import ConformerContainerEnum
from icolos.utils.general.files_paths import attach_root_path
import time
_SBE = StepBaseEnum
_TE = TurbomoleEnum()
_COE = ConformerContainerEnum()
_STE = StepTurbomoleEnum()
class Test_Turbomole(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/Turbomole")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
export_unit_test_env_vars()
def setUp(self):
# initialize a Compound with 1 Enumeration and 2 Conformers (done by OMEGA)
_paracetamol_molecule = get_mol_as_Compound(PATHS_EXAMPLEDATA.PARACETAMOL_PATH)
confs = get_mol_as_Conformer(PATHS_EXAMPLEDATA.CLUSTERING_11CONFS)
_paracetamol_molecule[0].add_conformers(confs, auto_update=True)
self._paracetamol_molecule = _paracetamol_molecule
@classmethod
def tearDownClass(cls):
pass
def test_Turbomole_run_ridft_single_core(self):
step_conf = {
_SBE.STEPID: "01_turbomole",
_SBE.STEP_TYPE: _SBE.STEP_TURBOMOLE,
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load turbomole/73",
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 1},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
_TE.TM_CONFIG_DIR: MAIN_CONFIG["TURBOMOLE_CONFIG"],
_TE.TM_CONFIG_BASENAME: "b97-3c-ri-d3-def2-mtzvp-int-nosym-charge",
_TE.TM_CONFIG_COSMO: os.path.join(
MAIN_CONFIG["TURBOMOLE_CONFIG"], "cosmoprep_eps80.tm"
),
_STE.EXECUTION_MODE: _TE.TM_RIDFT,
},
},
}
os.environ["PARA_ARCH"] = "MPI"
os.environ["PARNODES"] = "4"
tm_step = StepTurbomole(**step_conf)
tm_step.data.compounds = [self._paracetamol_molecule]
# conformer coordinates should not be touched by the execution
self.assertListEqual(
list(
tm_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[0.8785, 0.6004, -0.2173],
)
tm_step.execute()
self.assertListEqual(
list(
tm_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[0.8785, 0.6004, -0.2173],
)
cosmofile = tm_step.get_compounds()[0][0][0].get_extra_data()[
_COE.EXTRA_DATA_COSMOFILE
]
coordfile = tm_step.get_compounds()[0][0][0].get_extra_data()[
_COE.EXTRA_DATA_COORDFILE
]
self.assertTrue("basgrd points= 9806" in cosmofile[5])
# check write-out
out_path = os.path.join(self._test_dir, "paracetamol_conf1_CosmoFile")
with open(out_path, "w") as f:
f.writelines(cosmofile)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 132018)
out_path = os.path.join(self._test_dir, "paracetamol_conf1_CoordFile")
with open(out_path, "w") as f:
f.writelines(coordfile)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 13544)
def test_Turbomole_run_ridft_dual_core(self):
step_conf = {
_SBE.STEPID: "01_turbomole",
_SBE.STEP_TYPE: _SBE.STEP_TURBOMOLE,
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load turbomole/73",
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 2},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
_TE.TM_CONFIG_DIR: MAIN_CONFIG["TURBOMOLE_CONFIG"],
_TE.TM_CONFIG_BASENAME: "b97-3c-ri-d3-def2-mtzvp-int-nosym-charge",
_TE.TM_CONFIG_COSMO: os.path.join(
MAIN_CONFIG["TURBOMOLE_CONFIG"], "cosmoprep_eps80.tm"
),
_STE.EXECUTION_MODE: _TE.TM_RIDFT,
},
},
}
os.environ["PARA_ARCH"] = "MPI"
os.environ["PARNODES"] = "4"
tm_step = StepTurbomole(**step_conf)
tm_step.data.compounds = [self._paracetamol_molecule]
# conformer coordinates should not be touched by the execution
self.assertListEqual(
list(
tm_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[5.3347, 12.9328, 24.6745],
)
t1 = time.time()
tm_step.execute()
t2 = time.time()
self.assertLess(t2 - t1, 50)
self.assertListEqual(
list(
tm_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[0.8785, 0.6004, -0.2173],
)
cosmofile = tm_step.get_compounds()[0][0][0].get_extra_data()[
_COE.EXTRA_DATA_COSMOFILE
]
coordfile = tm_step.get_compounds()[0][0][0].get_extra_data()[
_COE.EXTRA_DATA_COORDFILE
]
self.assertTrue("basgrd points= 9806" in cosmofile[5])
# check write-out
out_path = os.path.join(self._test_dir, "paracetamol_conf1_CosmoFile")
with open(out_path, "w") as f:
f.writelines(cosmofile)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 132018)
out_path = os.path.join(self._test_dir, "paracetamole_conf1_CoordFile")
with open(out_path, "w") as f:
f.writelines(coordfile)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 13544)
def test_Turbomole_run_jobex(self):
step_conf = {
_SBE.STEPID: "01_turbomole",
_SBE.STEP_TYPE: _SBE.STEP_TURBOMOLE,
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load turbomole/73",
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 2},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-ri"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_TE.TM_JOBEX_C: 70,
_TE.TM_JOBEX_GCART: 3,
},
},
_SBE.SETTINGS_ADDITIONAL: {
_TE.TM_CONFIG_DIR: MAIN_CONFIG["TURBOMOLE_CONFIG"],
_TE.TM_CONFIG_BASENAME: "b97-3c-ri-d3-def2-mtzvp-int-charge",
_TE.TM_CONFIG_COSMO: os.path.join(
MAIN_CONFIG["TURBOMOLE_CONFIG"], "cosmoprep_eps80.tm"
),
_STE.EXECUTION_MODE: _TE.TM_JOBEX,
},
},
}
os.environ["PARA_ARCH"] = "MPI"
os.environ["PARNODES"] = "3"
tm_step = StepTurbomole(**step_conf)
tm_step.data.compounds = [self._paracetamol_molecule]
# conformer coordinates should be touched by the execution (this is geo opt)
self.assertListEqual(
list(
tm_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[5.3347, 12.9328, 24.6745],
)
tm_step.execute()
self.assertListEqual(
list(
tm_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[-0.7887, -0.0618, 0.1129],
)
cosmofile = tm_step.get_compounds()[0][0][0].get_extra_data()[
_COE.EXTRA_DATA_COSMOFILE
]
self.assertTrue("nspa= 92" in cosmofile[5])
# check write-out
out_path = os.path.join(self._test_dir, "paracetamol_conf1_CosmoFile_jobex")
with open(out_path, "w") as f:
f.writelines(cosmofile)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 115864)
| 35.964567
| 87
| 0.561905
| 982
| 9,135
| 4.887984
| 0.191446
| 0.010833
| 0.020625
| 0.04125
| 0.757708
| 0.738125
| 0.738125
| 0.738125
| 0.730417
| 0.730417
| 0
| 0.039119
| 0.33399
| 9,135
| 253
| 88
| 36.106719
| 0.749836
| 0.034811
| 0
| 0.642534
| 0
| 0
| 0.074583
| 0.029061
| 0
| 0
| 0
| 0
| 0.067873
| 1
| 0.027149
| false
| 0.004525
| 0.040724
| 0
| 0.072398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4aecdcd710ff9de3e7b8deee9e656eedef332e0f
| 128
|
py
|
Python
|
Blob_Lib/assimp-5.2.3/assimp/port/PyAssimp/pyassimp/errors.py
|
antholuo/Blob_Traffic
|
5d6acf88044e9abc63c0ff356714179eaa4b75bf
|
[
"MIT"
] | null | null | null |
Blob_Lib/assimp-5.2.3/assimp/port/PyAssimp/pyassimp/errors.py
|
antholuo/Blob_Traffic
|
5d6acf88044e9abc63c0ff356714179eaa4b75bf
|
[
"MIT"
] | null | null | null |
Blob_Lib/assimp-5.2.3/assimp/port/PyAssimp/pyassimp/errors.py
|
antholuo/Blob_Traffic
|
5d6acf88044e9abc63c0ff356714179eaa4b75bf
|
[
"MIT"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:6f9538577cd29e2057bac60cfe3faa79138d8b30afc5d6af856b7adb308b6708
size 146
| 32
| 75
| 0.882813
| 13
| 128
| 8.692308
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.368852
| 0.046875
| 128
| 3
| 76
| 42.666667
| 0.557377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ab08f28ff8bdd2d9b50bf98e224a0ed0c14b3a38
| 247
|
py
|
Python
|
octicons16px/dash.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | 1
|
2021-01-28T06:47:39.000Z
|
2021-01-28T06:47:39.000Z
|
octicons16px/dash.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
octicons16px/dash.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
OCTICON_DASH = """
<svg class="octicon octicon-dash" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M2 8a.75.75 0 01.75-.75h10.5a.75.75 0 010 1.5H2.75A.75.75 0 012 8z"></path></svg>
"""
| 49.4
| 222
| 0.65587
| 51
| 247
| 3.156863
| 0.627451
| 0.074534
| 0.093168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239819
| 0.105263
| 247
| 4
| 223
| 61.75
| 0.488688
| 0
| 0
| 0
| 0
| 0.333333
| 0.910569
| 0.085366
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ab1c29c4329653ac92d70acc7d3b523f0a57ca32
| 292
|
py
|
Python
|
notramp/__init__.py
|
simakro/NoTrAmp
|
a11b43700de494c8676fd0bb51242499283dbbd0
|
[
"BSD-2-Clause"
] | null | null | null |
notramp/__init__.py
|
simakro/NoTrAmp
|
a11b43700de494c8676fd0bb51242499283dbbd0
|
[
"BSD-2-Clause"
] | null | null | null |
notramp/__init__.py
|
simakro/NoTrAmp
|
a11b43700de494c8676fd0bb51242499283dbbd0
|
[
"BSD-2-Clause"
] | null | null | null |
#Copyright (c) 2022, Simon Magin (simakro)
#BSD-2 license
#see LICENSE file for license details
#__doc__=
"""
NoTrAmp is a Tool for read-depth normalization and trimming of amplicon
reads generated with long read technologies (ONT/PacBio).
"""
# import os
from .version import __version__
| 22.461538
| 71
| 0.763699
| 42
| 292
| 5.119048
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020243
| 0.15411
| 292
| 12
| 72
| 24.333333
| 0.850202
| 0.818493
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db67df168bca1446cde7e86ae6e8b32d0bd9f05f
| 256
|
py
|
Python
|
twoops_tracker/py/twoopstracker/authentication/apps.py
|
CodeForAfrica/api
|
c4c79225c27284052aced8dd0805108885766308
|
[
"MIT"
] | null | null | null |
twoops_tracker/py/twoopstracker/authentication/apps.py
|
CodeForAfrica/api
|
c4c79225c27284052aced8dd0805108885766308
|
[
"MIT"
] | null | null | null |
twoops_tracker/py/twoopstracker/authentication/apps.py
|
CodeForAfrica/api
|
c4c79225c27284052aced8dd0805108885766308
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AuthenticationConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "twoopstracker.authentication"
def ready(self):
import twoopstracker.authentication.signals # noqa
| 25.6
| 59
| 0.757813
| 26
| 256
| 7.384615
| 0.807692
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164063
| 256
| 9
| 60
| 28.444444
| 0.897196
| 0.015625
| 0
| 0
| 0
| 0
| 0.228
| 0.228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db833489b6120246835a5fc8362b7cc071058cc5
| 327
|
py
|
Python
|
scrud_django/utils.py
|
Django-Stack-Backend/Django-backend-React-frontend
|
4c814ab9b97d70a259d4b93e30d118deba9831fd
|
[
"BSD-3-Clause"
] | 1
|
2021-11-22T20:39:26.000Z
|
2021-11-22T20:39:26.000Z
|
scrud_django/utils.py
|
Django-Stack-Backend/Django-backend-React-frontend
|
4c814ab9b97d70a259d4b93e30d118deba9831fd
|
[
"BSD-3-Clause"
] | null | null | null |
scrud_django/utils.py
|
Django-Stack-Backend/Django-backend-React-frontend
|
4c814ab9b97d70a259d4b93e30d118deba9831fd
|
[
"BSD-3-Clause"
] | null | null | null |
def link_content(url, rel, content_type):
return f"<{url}>; rel=\"{rel}\"; type=\"{content_type}\""
def get_string_or_evaluate(string_or_func, *args, **kwargs):
if not string_or_func:
return None
if isinstance(string_or_func, str):
return string_or_func
return string_or_func(*args, **kwargs)
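# Minimal usage sketch of the two helpers above (ours; all literal values
# are illustrative):
if __name__ == "__main__":
    print(link_content("https://api.example.com/things", "describedby", "application/json"))
    # <https://api.example.com/things>; rel="describedby"; type="application/json"
    print(get_string_or_evaluate("static"))            # returned unchanged: 'static'
    print(get_string_or_evaluate(lambda n: n * 2, 3))  # callable evaluated: 6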
| 29.727273
| 61
| 0.678899
| 48
| 327
| 4.291667
| 0.416667
| 0.23301
| 0.291262
| 0.15534
| 0.213592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180428
| 327
| 10
| 62
| 32.7
| 0.768657
| 0
| 0
| 0
| 0
| 0
| 0.067278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
db975a8c1b493de239dc395f25a598337d4ca606
| 674
|
py
|
Python
|
scripts/patches/groundstation.py
|
compose-x/troposphere
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/patches/groundstation.py
|
compose-x/troposphere
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/patches/groundstation.py
|
compose-x/troposphere
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
[
"BSD-2-Clause"
] | null | null | null |
patches = [
{
"op": "move",
"from": "/PropertyTypes/AWS::GroundStation::Config.FrequencyBandwidth",
"path": "/PropertyTypes/AWS::GroundStation::Config.Bandwidth",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::GroundStation::Config.SpectrumConfig/Properties/Bandwidth/Type",
"value": "Bandwidth",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::GroundStation::Config.AntennaUplinkConfig/Properties/SpectrumConfig/Type",
"value": "SpectrumConfig",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::GroundStation::Config.UplinkSpectrumConfig",
},
]
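# Sketch (ours): a patch list in this RFC 6902 shape could be applied to the
# parsed CloudFormation spec with the third-party `jsonpatch` package; the
# `spec` dict below is an assumption, not defined in this script.
# import jsonpatch
# patched_spec = jsonpatch.JsonPatch(patches).apply(spec)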
| 30.636364
| 111
| 0.590504
| 49
| 674
| 8.122449
| 0.387755
| 0.201005
| 0.364322
| 0.439698
| 0.482412
| 0.286432
| 0.286432
| 0.286432
| 0
| 0
| 0
| 0
| 0.228487
| 674
| 21
| 112
| 32.095238
| 0.765385
| 0
| 0
| 0.095238
| 0
| 0
| 0.64095
| 0.514837
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db9b183e377440fdc2d79858f088710b7181df57
| 152
|
py
|
Python
|
warehouse/warehouse/doctype/building_parameters/test_building_parameters.py
|
vijith2121/update_warehouse
|
a7a15784708a87fc4684377ba3617ae1889e11f1
|
[
"MIT"
] | null | null | null |
warehouse/warehouse/doctype/building_parameters/test_building_parameters.py
|
vijith2121/update_warehouse
|
a7a15784708a87fc4684377ba3617ae1889e11f1
|
[
"MIT"
] | null | null | null |
warehouse/warehouse/doctype/building_parameters/test_building_parameters.py
|
vijith2121/update_warehouse
|
a7a15784708a87fc4684377ba3617ae1889e11f1
|
[
"MIT"
] | 1
|
2021-11-30T08:35:26.000Z
|
2021-11-30T08:35:26.000Z
|
# Copyright (c) 2021, wahni and Contributors
# See license.txt
# import frappe
import unittest
class TestBuildingParameters(unittest.TestCase):
pass
| 16.888889
| 48
| 0.789474
| 18
| 152
| 6.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030534
| 0.138158
| 152
| 8
| 49
| 19
| 0.885496
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
dbac01fe61a2f159a6802c5bc8517a713c6a8546
| 7,427
|
py
|
Python
|
src/english_text_normalization_tests/test_money_normalization.py
|
jasminsternkopf/english_text_normalization
|
8f797e01e8fcc7d5a83497a92a4fc204603be484
|
[
"MIT"
] | null | null | null |
src/english_text_normalization_tests/test_money_normalization.py
|
jasminsternkopf/english_text_normalization
|
8f797e01e8fcc7d5a83497a92a4fc204603be484
|
[
"MIT"
] | null | null | null |
src/english_text_normalization_tests/test_money_normalization.py
|
jasminsternkopf/english_text_normalization
|
8f797e01e8fcc7d5a83497a92a4fc204603be484
|
[
"MIT"
] | null | null | null |
from english_text_normalization.adjustments.money_normalization import (
normalize_pence, normalize_pounds, normalize_pounds_shillings_and_pence,
normalize_shillings, normalize_shillings_and_pounds_with_dots,
normalize_shillings_and_pounds_without_dots)
# region normalize_pounds
def test_normalize_pounds__number_of_pounds_contains_comma():
text = " L12,345 2s. 3d."
res = normalize_pounds(text)
assert res == " 12,345 pounds 2s. 3d."
def test_normalize_pounds__dot_after_number_of_pounds():
text = " L12. 2s. 3d."
res = normalize_pounds(text)
assert res == " 12 pounds 2s. 3d."
def test_normalize_pounds__only_numbers_directly_after_L():
text = " L12 2s. 3d."
res = normalize_pounds(text)
assert res == " 12 pounds 2s. 3d."
def test_normalize_pounds__dot_after_L__no_shillings_or_pence():
text = " L.10,875,870 "
res = normalize_pounds(text)
assert res == " 10,875,870 pounds "
def test_normalize_pounds__dot_after_L():
text = " L.499,833, 11s. 6d."
res = normalize_pounds(text)
assert res == " 499,833 pounds 11s. 6d."
def test_normalize_pounds__space_after_L():
text = " L 12,345 2s. 3d."
res = normalize_pounds(text)
assert res == " 12,345 pounds 2s. 3d."
def test_normalize_pounds__dot_and_space_after_L():
text = " L. 12,345 2s. 3d."
res = normalize_pounds(text)
assert res == " 12,345 pounds 2s. 3d."
def test_normalize_pounds__one_pound():
text = " L.1 2s. 3d."
res = normalize_pounds(text)
assert res == " one pound 2s. 3d."
def test_normalize_pounds__one_and_comma_after_L_but_is_not_one():
text = " L.1,000 2s. 3d."
res = normalize_pounds(text)
assert res == " 1,000 pounds 2s. 3d."
# endregion
# region normalize_shillings
def test_normalize_shillings__dot_and_space_after_s():
text = " 2s. 3d."
res = normalize_shillings(text)
assert res == " 2 shillings 3d."
def test_normalize_shillings__dot_and_space_after_number():
text = " 2 s. 3 d."
res = normalize_shillings(text)
assert res == " 2 shillings 3 d."
def test_normalize_shillings__space_after_s():
text = " 2s 3d."
res = normalize_shillings(text)
assert res == " 2 shillings 3d."
def test_normalize_shillings__only_dot_after_s():
text = " 2s.3d."
res = normalize_shillings(text)
assert res == " 2 shillings 3d."
def test_normalize_shillings__number_of_shillings_consists_of_two_digits():
text = " 12s. 3d."
res = normalize_shillings(text)
assert res == " 12 shillings 3d."
# endregion
# region normalize_shillings_and_pounds_without_dots
def test_normalize_shillings_and_pounds_without_dots__space_after_number():
text = " 2 s 3 d."
res = normalize_shillings_and_pounds_without_dots(text)
assert res == " 2 shillings 3 pence."
def test_normalize_shillings_and_pounds_without_dots():
text = " 2s 3d."
res = normalize_shillings_and_pounds_without_dots(text)
assert res == " 2 shillings 3 pence."
def test_normalize_shillings_and_pounds_without_dots__comma_after_shillings():
text = " 2s, 3d."
res = normalize_shillings_and_pounds_without_dots(text)
assert res == " 2 shillings 3 pence."
# endregion
# region normalize_shillings_and_pounds_with_dots
def test_normalize_shillings_and_pounds_with_dots():
text = " 2s. 3d. "
res = normalize_shillings_and_pounds_with_dots(text)
assert res == " 2 shillings 3 pence "
def test_normalize_shillings_and_pounds_with_dots__space_after_number():
text = " 2 s. 3 d. "
res = normalize_shillings_and_pounds_with_dots(text)
assert res == " 2 shillings 3 pence "
def test_normalize_shillings_and_pounds_with_dots__comma_after_shillings():
text = " 2s., 3d. "
res = normalize_shillings_and_pounds_with_dots(text)
assert res == " 2 shillings 3 pence "
# endregion
# region normalize_pence
def test_normalize_pence__one_penny():
text = " 1d. "
res = normalize_pence(text)
assert res == " one penny "
def test_normalize_pence__one_penny__space_after_one():
text = " 1 d. "
res = normalize_pence(text)
assert res == " one penny "
def test_normalize_pence__one_penny__no_dot_after_d():
text = " 1d "
res = normalize_pence(text)
assert res == " one penny "
def test_normalize_pence__word_after_one_do_not_normalize():
text = " 1 dozen"
res = normalize_pence(text)
assert res == text
def test_normalize_pence__word_after_four_do_not_normalize():
text = " 4 dozen"
res = normalize_pence(text)
assert res == text
def test_normalize_pence__and_a_half_pence():
text = " 11-1/2d "
res = normalize_pence(text)
assert res == " 11 and a half pence "
def test_normalize_pence__and_a_half_pence__dot_after_d():
text = " 11-1/2d. "
res = normalize_pence(text)
assert res == " 11 and a half pence "
def test_normalize_pence__and_a_half_pence__no_hyphen_before_half():
text = " 11/2d "
res = normalize_pence(text)
assert res == " 1 and a half pence "
def test_normalize_pence__and_a_half_pence__space_after_half():
text = " 11-1/2 d "
res = normalize_pence(text)
assert res == " 11 and a half pence "
def test_normalize_pence__10_pence():
text = " 10d. "
res = normalize_pence(text)
assert res == " 10 pence "
def test_normalize_pence__4_pence__space_after_number():
text = " 4 d. "
res = normalize_pence(text)
assert res == " 4 pence "
def test_normalize_pence__4_pence__no_dot_after_d():
text = " 4d "
res = normalize_pence(text)
assert res == " 4 pence "
def test_normalize_pence__number_of_pence_consists_of_more_than_one_char():
text = " 11-1/4d. "
res = normalize_pence(text)
assert res == " 11-1/4 pence "
# endregion
# region normalize_pounds_shillings_and_pence
def test_normalize_pounds_shillings_and_pence__all_three():
text = " L12 2s. 3d. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 12 pounds 2 shillings 3 pence "
def test_normalize_pounds_shillings_and_pence__all_three__with_commata():
text = " L12, 2s., 3d. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 12 pounds 2 shillings 3 pence "
def test_normalize_pounds_shillings_and_pence__only_pounds():
text = " L12. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 12 pounds "
def test_normalize_pounds_shillings_and_pence__only_shillings_and_pence_without_dots():
text = " 2s 3d "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 2 shillings 3 pence "
def test_normalize_pounds_shillings_and_pence__only_shillings_and_pence_without_spaces_but_with_dots():
text = " 2s.3d. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 2 shillings 3 pence "
def test_normalize_pounds_shillings_and_pence__only_shillings():
text = " 3s. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 3 shillings "
def test_normalize_pounds_shillings_and_pence__only_pence():
text = " 6d. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 6 pence "
def test_normalize_pounds_shillings_and_pence__pence_number_contains_a_half():
text = " L1, 1s., 11/2d. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " one pound one shilling 1 and a half pence "
def test_normalize_pounds_shillings_and_pence__all_three_but_only_pence_non_zero():
text = " L0. 0s. 3d. "
res = normalize_pounds_shillings_and_pence(text)
assert res == " 0 pounds 0 shillings 3 pence "
# endregion
| 22.993808
| 103
| 0.742157
| 1,095
| 7,427
| 4.57169
| 0.084932
| 0.05873
| 0.134239
| 0.079105
| 0.834598
| 0.799241
| 0.742509
| 0.698362
| 0.634638
| 0.602477
| 0
| 0.039729
| 0.166285
| 7,427
| 322
| 104
| 23.065217
| 0.768734
| 0.037162
| 0
| 0.511628
| 0
| 0
| 0.164612
| 0
| 0
| 0
| 0
| 0
| 0.244186
| 1
| 0.244186
| false
| 0
| 0.005814
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbb075b547eb4199bb5424a86376c4047321ada1
| 110
|
py
|
Python
|
gravtools/constants/__init__.py
|
JWKennington/gravtools
|
188229ed2061958012cf7338a5eebd2ef0a399cc
|
[
"MIT"
] | null | null | null |
gravtools/constants/__init__.py
|
JWKennington/gravtools
|
188229ed2061958012cf7338a5eebd2ef0a399cc
|
[
"MIT"
] | null | null | null |
gravtools/constants/__init__.py
|
JWKennington/gravtools
|
188229ed2061958012cf7338a5eebd2ef0a399cc
|
[
"MIT"
] | null | null | null |
"""Flatten the constants package"""
from .merger import MergerParameters
from .observatory import Observatory
| 27.5
| 36
| 0.818182
| 12
| 110
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 110
| 4
| 37
| 27.5
| 0.918367
| 0.263636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9166a1441a5abfb20ea3e71c7778f44312b59f33
| 187,074
|
py
|
Python
|
import_export_batches/views_admin.py
|
gknorman/WeVoteServer
|
c281d831dff8f0b3149c66d805b4e5f94be80775
|
[
"MIT"
] | 2
|
2021-05-14T04:24:18.000Z
|
2021-10-05T05:34:13.000Z
|
import_export_batches/views_admin.py
|
gknorman/WeVoteServer
|
c281d831dff8f0b3149c66d805b4e5f94be80775
|
[
"MIT"
] | null | null | null |
import_export_batches/views_admin.py
|
gknorman/WeVoteServer
|
c281d831dff8f0b3149c66d805b4e5f94be80775
|
[
"MIT"
] | null | null | null |
# import_export_batches/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import ACTIVITY_NOTICE_PROCESS, API_REFRESH_REQUEST, \
BatchDescription, BatchHeader, BatchHeaderMap, BatchManager, \
BatchProcess, BatchProcessAnalyticsChunk, BatchProcessBallotItemChunk, BatchProcessLogEntry, BatchProcessManager, \
BatchRow, BatchRowActionBallotItem, BatchRowActionPollingLocation, \
BatchSet, \
CONTEST_OFFICE, ELECTED_OFFICE, IMPORT_BALLOT_ITEM, \
BATCH_IMPORT_KEYS_ACCEPTED_FOR_CANDIDATES, BATCH_IMPORT_KEYS_ACCEPTED_FOR_CONTEST_OFFICES, \
BATCH_IMPORT_KEYS_ACCEPTED_FOR_ELECTED_OFFICES, BATCH_IMPORT_KEYS_ACCEPTED_FOR_MEASURES, \
BATCH_IMPORT_KEYS_ACCEPTED_FOR_ORGANIZATIONS, BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLITICIANS, \
BATCH_IMPORT_KEYS_ACCEPTED_FOR_POSITIONS, BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS, \
BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS, BATCH_SET_SOURCE_IMPORT_CTCL_BALLOT_ITEMS, \
BATCH_SET_SOURCE_IMPORT_VOTE_USA_BALLOT_ITEMS, \
IMPORT_CREATE, IMPORT_DELETE, IMPORT_ALREADY_DELETED, IMPORT_ADD_TO_EXISTING, IMPORT_POLLING_LOCATION, \
IMPORT_VOTER, REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, \
REFRESH_BALLOT_ITEMS_FROM_VOTERS, RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS
from .controllers import create_batch_header_translation_suggestions, create_batch_row_actions, \
update_or_create_batch_header_mapping, export_voter_list_with_emails, import_data_from_batch_row_actions
from .controllers_batch_process import process_next_activity_notices, process_next_ballot_items, \
process_next_general_maintenance
from .controllers_ballotpedia import store_ballotpedia_json_response_to_import_batch_system
from admin_tools.views import redirect_to_sign_in_page
from ballot.models import BallotReturnedListManager, BallotReturnedManager, MEASURE, CANDIDATE, POLITICIAN
import csv
from datetime import date
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.messages import get_messages
from django.db.models import Q
from django.utils.timezone import now
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.http import urlquote
from election.models import Election, ElectionManager
from exception.models import handle_exception
from import_export_ballotpedia.controllers import groom_ballotpedia_data_for_processing, \
process_ballotpedia_voter_districts, BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL
from import_export_ctcl.controllers import CTCL_VOTER_INFO_URL
import json
from polling_location.models import PollingLocation, PollingLocationManager
from position.models import POSITION
import random
import requests
from voter.models import voter_has_authority
from voter_guide.models import ORGANIZATION_WORD
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists, STATE_CODE_MAP
MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK = 125 # 125. Formerly 250 and 111
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def batches_home_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# Create a voter_device_id and voter in the database if one doesn't exist yet
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
template_values = {
'google_civic_election_id': google_civic_election_id,
}
response = render(request, 'import_export_batches/index.html', template_values)
return response
@login_required
def batch_list_view(request):
"""
Display a list of import batches
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
kind_of_batch = request.GET.get('kind_of_batch', '')
batch_file = request.GET.get('batch_file', '')
batch_uri = request.GET.get('batch_uri', '')
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
polling_location_we_vote_id = request.GET.get('polling_location_we_vote_id', '')
polling_location_city = request.GET.get('polling_location_city', '')
polling_location_zip = request.GET.get('polling_location_zip', '')
show_all_elections = positive_value_exists(request.GET.get('show_all_elections', False))
messages_on_stage = get_messages(request)
batch_list_found = False
modified_batch_list = []
batch_manager = BatchManager()
try:
batch_list_query = BatchDescription.objects.order_by('-batch_header_id')
if positive_value_exists(kind_of_batch):
batch_list_query = batch_list_query.filter(kind_of_batch__iexact=kind_of_batch)
if positive_value_exists(google_civic_election_id):
batch_list_query = batch_list_query.filter(google_civic_election_id=google_civic_election_id)
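        # With an election filter we list every matching batch; otherwise the page is
        # capped at the 50 most recent batches (ordered by batch_header_id above).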
if positive_value_exists(google_civic_election_id):
batch_list = list(batch_list_query)
else:
batch_list = batch_list_query[:50]
if len(batch_list):
batch_list_found = True
for one_batch in batch_list:
one_batch.batch_row_action_count = batch_manager.fetch_batch_row_action_count(
one_batch.batch_header_id, kind_of_batch)
one_batch.batch_row_action_to_update_count = batch_manager.fetch_batch_row_action_count(
one_batch.batch_header_id, kind_of_batch, IMPORT_ADD_TO_EXISTING)
one_batch.batch_row_count = batch_manager.fetch_batch_row_count(one_batch.batch_header_id)
modified_batch_list.append(one_batch)
    except BatchDescription.DoesNotExist:
        # This is fine: no batches have been created yet
        batch_list_found = False
polling_location_found = False
polling_location = PollingLocation()
polling_location_manager = PollingLocationManager()
election_state = ''
    if positive_value_exists(polling_location_we_vote_id):
results = polling_location_manager.retrieve_polling_location_by_id(0, polling_location_we_vote_id)
if results['polling_location_found']:
polling_location = results['polling_location']
polling_location_we_vote_id = polling_location.we_vote_id
polling_location_id = polling_location.id
polling_location_found = True
election_state = polling_location.state
election_manager = ElectionManager()
if google_civic_election_id:
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_state = election.get_election_state()
polling_location_list = []
results = polling_location_manager.retrieve_polling_locations_in_city_or_state(
election_state, polling_location_city, polling_location_zip)
if results['polling_location_list_found']:
polling_location_list = results['polling_location_list']
if kind_of_batch == ORGANIZATION_WORD or kind_of_batch == ELECTED_OFFICE \
or kind_of_batch == POLITICIAN or kind_of_batch == IMPORT_POLLING_LOCATION:
# We do not want to ask the person importing the file for an election, because it isn't used
ask_for_election = False
election_list = []
else:
ask_for_election = True
if positive_value_exists(show_all_elections):
results = election_manager.retrieve_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
# Make sure we always include the current election in the election_list, even if it is older
if positive_value_exists(google_civic_election_id):
this_election_found = False
for one_election in election_list:
if convert_to_int(one_election.google_civic_election_id) == \
convert_to_int(google_civic_election_id):
this_election_found = True
break
if not this_election_found:
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
one_election = results['election']
election_list.append(one_election)
template_values = {
'messages_on_stage': messages_on_stage,
'batch_list': modified_batch_list,
'ask_for_election': ask_for_election,
'election_list': election_list,
'kind_of_batch': kind_of_batch,
'batch_file': batch_file,
'batch_uri': batch_uri,
'google_civic_election_id': convert_to_int(google_civic_election_id),
'polling_location_we_vote_id': polling_location_we_vote_id,
'polling_location': polling_location,
'polling_location_list': polling_location_list,
'polling_location_city': polling_location_city,
'polling_location_zip': polling_location_zip,
'show_all_elections': show_all_elections,
}
return render(request, 'import_export_batches/batch_list.html', template_values)
@login_required
def batch_list_process_view(request):
"""
Load in a new batch to start the importing process
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
kind_of_batch = request.POST.get('kind_of_batch', '')
batch_uri = request.POST.get('batch_uri', '')
batch_uri_encoded = urlquote(batch_uri) if positive_value_exists(batch_uri) else ""
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
polling_location_we_vote_id = request.POST.get('polling_location_we_vote_id', "")
polling_location_city = request.POST.get('polling_location_city', '')
polling_location_zip = request.POST.get('polling_location_zip', '')
show_all_elections = positive_value_exists(request.POST.get('show_all_elections', ""))
state_code = request.POST.get('state_code', "")
if kind_of_batch not in (CANDIDATE, CONTEST_OFFICE, ELECTED_OFFICE, IMPORT_BALLOT_ITEM, IMPORT_POLLING_LOCATION,
MEASURE, ORGANIZATION_WORD, POSITION, POLITICIAN):
messages.add_message(request, messages.ERROR, 'The kind_of_batch is required for a batch import.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&polling_location_city=" + str(polling_location_city) +
"&polling_location_zip=" + str(polling_location_zip) +
"&show_all_elections=" + str(show_all_elections) +
"&batch_uri=" + batch_uri_encoded)
# If here we know we have the required variables
organization_we_vote_id = request.POST.get('organization_we_vote_id', '')
# Was form submitted, or was election just changed?
import_batch_button = request.POST.get('import_batch_button', '')
batch_file = None
if positive_value_exists(import_batch_button):
try:
if request.method == 'POST' and request.FILES['batch_file']:
batch_file = request.FILES['batch_file']
except KeyError:
pass
    # Make sure we have a file to process. (Previously only IMPORT_BALLOT_ITEM could be imported from a file.)
if kind_of_batch in [IMPORT_POLLING_LOCATION, ORGANIZATION_WORD] and not batch_file:
messages.add_message(request, messages.ERROR, 'Please select a file to import.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_city=" + str(polling_location_city) +
"&polling_location_zip=" + str(polling_location_zip) +
"&show_all_elections=" + str(show_all_elections) +
"&batch_uri=" + batch_uri_encoded)
# Make sure we have a Google Civic Election ID *unless* we are uploading an organization
if kind_of_batch not in [IMPORT_POLLING_LOCATION, ORGANIZATION_WORD] \
and not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR, 'This kind_of_batch (\"{kind_of_batch}\") requires you '
'to choose an election.'.format(kind_of_batch=kind_of_batch))
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_city=" + str(polling_location_city) +
"&polling_location_zip=" + str(polling_location_zip) +
"&show_all_elections=" + str(show_all_elections) +
"&batch_uri=" + batch_uri_encoded)
# Make sure we have a polling_location_we_vote_id
# if kind_of_batch in IMPORT_BALLOT_ITEM and not positive_value_exists(polling_location_we_vote_id):
# messages.add_message(request, messages.ERROR, 'This kind_of_batch (\"{kind_of_batch}\") requires you '
# 'to choose a map point.'
# ''.format(kind_of_batch=kind_of_batch))
# return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
# "?kind_of_batch=" + str(kind_of_batch) +
# "&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
# "&google_civic_election_id=" + str(google_civic_election_id) +
# "&polling_location_city=" + str(polling_location_city) +
# "&polling_location_zip=" + str(polling_location_zip) +
# "&show_all_elections=" + str(show_all_elections) +
# "&batch_uri=" + batch_uri_encoded)
election_name = "" # For printing status
if positive_value_exists(google_civic_election_id):
election_manager = ElectionManager()
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_name = election.election_name
batch_header_id = 0
if positive_value_exists(import_batch_button): # If the button was pressed...
batch_manager = BatchManager()
if batch_file is not None:
results = batch_manager.create_batch_from_local_file_upload(
batch_file, kind_of_batch, google_civic_election_id, organization_we_vote_id,
polling_location_we_vote_id)
if results['batch_saved']:
messages.add_message(request, messages.INFO, 'Import batch for {election_name} election saved.'
''.format(election_name=election_name))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
elif positive_value_exists(batch_uri):
if "api.ballotpedia.org" in batch_uri:
# response = requests.get(VOTER_INFO_URL, params={
# "key": GOOGLE_CIVIC_API_KEY,
# "address": text_for_map_search,
# "electionId": incoming_google_civic_election_id,
# })
response = requests.get(batch_uri)
structured_json = json.loads(response.text)
if "api/contains" in batch_uri:
contains_api = True
else:
contains_api = False
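                # The "contains" endpoint needs an extra pass: process_ballotpedia_voter_districts
                # (below) turns the returned district data into ballot item dicts before storage.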
groom_results = groom_ballotpedia_data_for_processing(structured_json, google_civic_election_id,
state_code, contains_api)
modified_json_list = groom_results['modified_json_list']
kind_of_batch = groom_results['kind_of_batch']
if contains_api:
ballot_items_results = process_ballotpedia_voter_districts(
google_civic_election_id, state_code, modified_json_list, polling_location_we_vote_id)
if ballot_items_results['ballot_items_found']:
modified_json_list = ballot_items_results['ballot_item_dict_list']
results = store_ballotpedia_json_response_to_import_batch_system(
modified_json_list, google_civic_election_id, kind_of_batch) # Add state_code=state_code ?
else:
# check file type
filetype = batch_manager.find_file_type(batch_uri)
if "xml" in filetype:
# file is XML
# Retrieve the VIP data from XML
results = batch_manager.create_batch_vip_xml(batch_uri, kind_of_batch, google_civic_election_id,
organization_we_vote_id)
else:
results = batch_manager.create_batch_from_uri(
batch_uri, kind_of_batch, google_civic_election_id, organization_we_vote_id)
if results['batch_saved']:
                messages.add_message(request, messages.INFO, 'Import batch for {election_name} election '
                                                             'saved from URI.'
                                                             ''.format(election_name=election_name))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&google_civic_election_id=" + str(google_civic_election_id) +
"&batch_uri=" + batch_uri_encoded)
else:
# Go to the batch listing page
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_city=" + str(polling_location_city) +
"&polling_location_zip=" + str(polling_location_zip) +
"&show_all_elections=" + str(show_all_elections) +
"&batch_uri=" + batch_uri_encoded)
@login_required
def batch_action_list_view(request):
"""
Display row-by-row details of batch actions being reviewed, leading up to processing an entire batch.
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_set_list = []
polling_location_we_vote_id = ""
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
show_all = request.GET.get('show_all', False)
state_code = request.GET.get('state_code', '')
position_owner_organization_we_vote_id = request.GET.get('position_owner_organization_we_vote_id', '')
if not positive_value_exists(batch_header_id):
messages.add_message(request, messages.ERROR, 'Batch_header_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch))
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
batch_set_id = 0
try:
batch_description = BatchDescription.objects.get(batch_header_id=batch_header_id)
batch_description_found = True
batch_set_id = batch_description.batch_set_id
google_civic_election_id = batch_description.google_civic_election_id
polling_location_we_vote_id = batch_description.polling_location_we_vote_id
except BatchDescription.DoesNotExist:
# This is fine
batch_description = BatchDescription()
batch_description_found = False
batch_set_list_found = False
    # If this batch belongs to a batch set, retrieve it (batch_set_list holds a single BatchSet despite the name)
if positive_value_exists(batch_set_id):
try:
batch_set_list = BatchSet.objects.get(id=batch_set_id)
if batch_set_list:
batch_set_list_found = True
except BatchSet.DoesNotExist:
# This is fine
batch_set_list = BatchSet()
batch_set_list_found = False
try:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
except BatchHeaderMap.DoesNotExist:
# This is fine
batch_header_map = BatchHeaderMap()
batch_list_found = False
batch_row_count = 0
try:
batch_row_count_query = BatchRow.objects.order_by('id')
batch_row_count_query = batch_row_count_query.filter(batch_header_id=batch_header_id)
if positive_value_exists(state_code):
batch_row_count_query = batch_row_count_query.filter(state_code__iexact=state_code)
batch_row_count = batch_row_count_query.count()
batch_row_query = BatchRow.objects.order_by('id')
batch_row_query = batch_row_query.filter(batch_header_id=batch_header_id)
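        # With a state_code filter, every matching row is shown; without one the page
        # is capped at 200 rows unless show_all is passed in.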
if positive_value_exists(state_code):
batch_row_query = batch_row_query.filter(state_code__iexact=state_code)
batch_row_list = list(batch_row_query)
else:
if positive_value_exists(show_all):
batch_row_list = list(batch_row_query)
else:
batch_row_list = batch_row_query[:200]
if len(batch_row_list):
batch_list_found = True
    except BatchRow.DoesNotExist:
        # This is fine: no rows exist for this batch
        batch_row_list = []
        batch_list_found = False
modified_batch_row_list = []
active_state_codes = []
batch_manager = BatchManager()
if batch_list_found:
for one_batch_row in batch_row_list:
if positive_value_exists(one_batch_row.state_code):
if one_batch_row.state_code not in active_state_codes:
active_state_codes.append(one_batch_row.state_code)
if kind_of_batch == CANDIDATE:
existing_results = batch_manager.retrieve_batch_row_action_candidate(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_candidate']
one_batch_row.kind_of_batch = CANDIDATE
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == CONTEST_OFFICE:
existing_results = batch_manager.retrieve_batch_row_action_contest_office(batch_header_id,
one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_contest_office']
one_batch_row.kind_of_batch = CONTEST_OFFICE
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == ELECTED_OFFICE:
existing_results = batch_manager.retrieve_batch_row_action_elected_office(batch_header_id,
one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_elected_office']
one_batch_row.kind_of_batch = ELECTED_OFFICE
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == IMPORT_BALLOT_ITEM:
# Retrieve Creates and Updates
existing_results = \
batch_manager.retrieve_batch_row_action_ballot_item(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_ballot_item']
one_batch_row.kind_of_batch = IMPORT_BALLOT_ITEM
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
# Retrieve Deletes
elif kind_of_batch == IMPORT_POLLING_LOCATION:
# Retrieve Creates and Updates
existing_results = \
batch_manager.retrieve_batch_row_action_polling_location(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_polling_location']
one_batch_row.kind_of_batch = IMPORT_POLLING_LOCATION
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == IMPORT_VOTER:
existing_results = \
batch_manager.retrieve_batch_row_action_ballot_item(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_ballot_item']
one_batch_row.kind_of_batch = IMPORT_VOTER
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == MEASURE:
existing_results = batch_manager.retrieve_batch_row_action_measure(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_measure']
one_batch_row.kind_of_batch = MEASURE
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == ORGANIZATION_WORD:
existing_results = batch_manager.retrieve_batch_row_action_organization(batch_header_id,
one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_organization']
one_batch_row.kind_of_batch = ORGANIZATION_WORD
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == POLITICIAN:
existing_results = batch_manager.retrieve_batch_row_action_politician(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_politician']
one_batch_row.kind_of_batch = POLITICIAN
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
elif kind_of_batch == POSITION:
existing_results = batch_manager.retrieve_batch_row_action_position(batch_header_id, one_batch_row.id)
if existing_results['batch_row_action_found']:
one_batch_row.batch_row_action = existing_results['batch_row_action_position']
one_batch_row.kind_of_batch = POSITION
one_batch_row.batch_row_action_exists = True
else:
one_batch_row.batch_row_action_exists = False
modified_batch_row_list.append(one_batch_row)
if kind_of_batch == IMPORT_BALLOT_ITEM:
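        # Delete actions are retrieved separately (they may not map to a current BatchRow),
        # so each one is wrapped in a stub BatchRow for display alongside the other rows.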
results = batch_manager.retrieve_batch_row_action_ballot_item_list(
batch_header_id, limit_to_kind_of_action_list=[IMPORT_DELETE, IMPORT_ALREADY_DELETED])
if results['batch_row_action_list_found']:
batch_row_action_list = results['batch_row_action_list']
for batch_row_action_ballot_item in batch_row_action_list:
one_batch_row = BatchRow()
one_batch_row.batch_header_id = batch_header_id
one_batch_row.batch_row_action = batch_row_action_ballot_item
one_batch_row.kind_of_batch = IMPORT_BALLOT_ITEM
one_batch_row.batch_row_action_exists = True
modified_batch_row_list.append(one_batch_row)
election_query = Election.objects.order_by('-election_day_text')
election_list = list(election_query)
# TODO Retrieve and send a list of polling_locations to choose from into the template
polling_location_list = []
if kind_of_batch == IMPORT_BALLOT_ITEM:
polling_location_list = []
filtered_state_list = []
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
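    # Offer only the states that actually appear in this batch's rows as filter choices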
for one_state in sorted_state_list:
if one_state[0].lower() in active_state_codes:
filtered_state_list.append(one_state)
messages.add_message(request, messages.INFO, 'Batch Row Count: {batch_row_count}'
''.format(batch_row_count=batch_row_count))
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'batch_header_id': batch_header_id,
'batch_description': batch_description,
'batch_set_id': batch_set_id,
'batch_header_map': batch_header_map,
'batch_set_list': batch_set_list,
'batch_row_list': modified_batch_row_list,
'election_list': election_list,
'kind_of_batch': kind_of_batch,
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id': polling_location_we_vote_id,
'state_code': state_code,
'state_list': filtered_state_list,
'position_owner_organization_we_vote_id': position_owner_organization_we_vote_id,
}
return render(request, 'import_export_batches/batch_action_list.html', template_values)
@login_required
def batch_action_list_export_view(request):
"""
Export batch list as a csv file.
:param request: HTTP request object.
:return response: HttpResponse object with csv export data.
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_set_list = []
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
state_code = request.GET.get('state_code', '')
if not positive_value_exists(batch_header_id):
messages.add_message(request, messages.ERROR, 'Batch_header_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch))
batch_set_id = 0
    try:
        batch_description = BatchDescription.objects.get(batch_header_id=batch_header_id)
        batch_set_id = batch_description.batch_set_id
    except BatchDescription.DoesNotExist:
        # This is fine
        batch_description = BatchDescription()
    # If this batch belongs to a batch set, retrieve it (batch_set_list holds a single BatchSet despite the name)
if positive_value_exists(batch_set_id):
try:
batch_set_list = BatchSet.objects.get(id=batch_set_id)
if batch_set_list:
batch_set_list_found = True
except BatchSet.DoesNotExist:
# This is fine
batch_set_list = BatchSet()
batch_set_list_found = False
try:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
except BatchHeaderMap.DoesNotExist:
# This is fine
batch_header_map = BatchHeaderMap()
batch_list_found = False
try:
batch_row_query = BatchRow.objects.order_by('id')
batch_row_query = batch_row_query.filter(batch_header_id=batch_header_id)
if positive_value_exists(state_code):
batch_row_query = batch_row_query.filter(state_code__iexact=state_code)
batch_row_list = list(batch_row_query)
if len(batch_row_list):
batch_list_found = True
    except BatchRow.DoesNotExist:
        # This is fine: no rows exist for this batch
        batch_row_list = []
        batch_list_found = False
if not batch_list_found:
        messages.add_message(request, messages.ERROR, 'No batch rows found to export.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id)
)
# get header/first row information
header_opts = BatchHeaderMap._meta
header_field_names = []
for field in header_opts.fields:
if field.name not in ['id', 'batch_header_id']:
header_field_names.append(field.name)
# get row information
    # Dale 2020-July: This isn't very robust. It shifts the rows over when exporting polling locations.
row_opts = BatchRow._meta
row_field_names = []
for field in row_opts.fields:
if field.name not in ['id', 'batch_header_id', 'batch_row_analyzed', 'batch_row_created']:
            if kind_of_batch == IMPORT_VOTER:
if field.name not in \
['state_code', 'google_civic_election_id', 'polling_location_we_vote_id', 'voter_id']:
row_field_names.append(field.name)
else:
row_field_names.append(field.name)
header_list = [getattr(batch_header_map, field) for field in header_field_names]
    if kind_of_batch not in [IMPORT_POLLING_LOCATION, IMPORT_VOTER]:
header_list.insert(0, 'google_civic_election_id')
header_list.insert(0, 'state_code')
# - Filter out headers that are None.
header_list = list(filter(None, header_list))
# create response for csv file
response = export_csv(batch_row_list, header_list, row_field_names, batch_description)
return response
@login_required
def batch_row_action_list_export_view(request):
"""
Export the batch_row_action's (as opposed to the raw incoming values) as a csv file.
:param request: HTTP request object.
:return response: HttpResponse object with csv export data.
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_set_list = []
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
state_code = request.GET.get('state_code', '')
if not positive_value_exists(batch_header_id):
messages.add_message(request, messages.ERROR, 'Batch_header_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch))
batch_set_id = 0
    try:
        batch_description = BatchDescription.objects.get(batch_header_id=batch_header_id)
        batch_set_id = batch_description.batch_set_id
    except BatchDescription.DoesNotExist:
        # This is fine
        batch_description = BatchDescription()
    # If this batch belongs to a batch set, retrieve it (batch_set_list holds a single BatchSet despite the name)
if positive_value_exists(batch_set_id):
try:
batch_set_list = BatchSet.objects.get(id=batch_set_id)
if batch_set_list:
batch_set_list_found = True
except BatchSet.DoesNotExist:
# This is fine
batch_set_list = BatchSet()
batch_set_list_found = False
try:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
except BatchHeaderMap.DoesNotExist:
# This is fine
batch_header_map = BatchHeaderMap()
batch_list_found = False
batch_row_list = []
try:
        if kind_of_batch == IMPORT_POLLING_LOCATION:
batch_row_action_query = BatchRowActionPollingLocation.objects.order_by('id')
batch_row_action_query = batch_row_action_query.filter(batch_header_id=batch_header_id)
if positive_value_exists(state_code):
batch_row_action_query = batch_row_action_query.filter(state_code__iexact=state_code)
batch_row_list = list(batch_row_action_query)
if len(batch_row_list):
batch_list_found = True
    except BatchRowActionPollingLocation.DoesNotExist:
        # This is fine: no row actions exist for this batch
        batch_row_list = []
        batch_list_found = False
if not batch_list_found:
        messages.add_message(request, messages.ERROR, 'No batch row actions found to export.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id)
)
# # get header/first row information
# header_opts = BatchHeaderMap._meta
header_field_names = []
# for field in header_opts.fields:
# if field.name not in ['id', 'batch_header_id']:
# header_field_names.append(field.name)
# get row information
header_list = []
row_field_names = []
    if kind_of_batch == IMPORT_POLLING_LOCATION:
row_opts = BatchRowActionPollingLocation._meta
for field in row_opts.fields:
row_field_names.append(field.name)
header_list = row_field_names
# header_list = [getattr(batch_header_map, field) for field in header_field_names]
# if kind_of_batch not in ['IMPORT_POLLING_LOCATION', 'IMPORT_VOTER']:
# header_list.insert(0, 'google_civic_election_id')
# header_list.insert(0, 'state_code')
# # - Filter out headers that are None.
# header_list = list(filter(None, header_list))
# create response for csv file
response = export_csv(batch_row_list, header_list, row_field_names, batch_description)
return response
def export_csv(batch_row_list, header_list, row_field_names, batch_description=None, filename=None):
"""
Helper function that creates a HttpResponse with csv data
:param batch_row_list: list of objects to export as csv
:param header_list: list of column headers for csv data
:param row_field_names: list of the object fields to be exported
:param batch_description: optional description of the batch to export
:param filename: optional name of csv file
:return response: HttpResponse with text/csv data
"""
export_filename = "voter_export"
if batch_description and not filename:
export_filename = batch_description.batch_name
elif filename:
export_filename = filename
export_filename += ".csv"
response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(export_filename)
csv_writer = csv.writer(response)
    csv_writer.writerow(header_list)  # header/first row
    # Write one csv row per object, pulling each requested field value in order
    for obj in batch_row_list:
        csv_writer.writerow([getattr(obj, field) for field in row_field_names])
return response
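# Illustrative usage of export_csv (hypothetical header labels; fields exist on BatchRow):
#     response = export_csv(batch_row_list, ['State', 'Election'],
#                           ['state_code', 'google_civic_election_id'])
# The HttpResponse is returned directly from a view; its Content-Disposition header
# prompts the browser to download the file.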
@login_required
def batch_action_list_export_voters_view(request):
"""
View used to create a csv export file of voters registered for the newsletter
:param request:
:return: HttpResponse with csv information of voters
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# get parameters from request object
kind_of_batch = request.GET.get('kind_of_batch', IMPORT_VOTER)
batch_header_id = request.GET.get('batch_header_id', 0)
google_civic_election_id = request.GET.get('google_civic_election_id', '')
organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
result = export_voter_list_with_emails()
messages.add_message(request, messages.INFO, 'Batch Action Export Voters: '
'Batch kind: {kind_of_batch}'
''.format(kind_of_batch=kind_of_batch))
filename = 'voter_export.csv'
batch_manager = BatchManager()
    batch_created_result = dict()
    if result and result.get('voter_list'):
        # Create a batch from the voters who are registered for the newsletter
        batch_created_result = batch_manager.create_batch_from_voter_object_list(result['voter_list'])
    if batch_created_result and batch_created_result.get('batch_header_id'):
        batch_header_id = batch_created_result['batch_header_id']
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id)
)
@login_required
def batch_action_list_analyze_process_view(request):
"""
Create BatchRowActions for either all of the BatchRows for batch_header_id, or only one with batch_row_id
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
batch_row_id = convert_to_int(request.GET.get('batch_row_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
state_code = request.GET.get('state_code', '')
delete_analysis_only = positive_value_exists(request.GET.get('delete_analysis_only', False))
if state_code == "None":
state_code = ""
if not positive_value_exists(batch_header_id):
messages.add_message(request, messages.ERROR, 'Batch_header_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch))
# if create_actions_button in (MEASURE, ELECTED_OFFICE, CANDIDATE, ORGANIZATION_WORD,
# POSITION, POLITICIAN, IMPORT_BALLOT_ITEM)
# Run the analysis of either A) every row in this batch, or B) Just the batch_row_id specified within this batch
results = create_batch_row_actions(batch_header_id=batch_header_id, batch_description=None,
batch_row_id=batch_row_id, state_code=state_code,
delete_analysis_only=delete_analysis_only)
kind_of_batch = results['kind_of_batch']
    messages.add_message(request, messages.INFO, 'Batch Actions: '
                                                 'Batch kind: {kind_of_batch}, '
                                                 'Created: {created} '
                                                 ''.format(kind_of_batch=kind_of_batch,
                                                           created=results['number_of_batch_actions_created']))
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id) +
"&state_code=" + str(state_code)
)
@login_required
def batch_header_mapping_view(request):
"""
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
if not positive_value_exists(batch_header_id):
messages.add_message(request, messages.ERROR, 'Batch_header_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch))
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
batch_set_id = 0
try:
batch_description = BatchDescription.objects.get(batch_header_id=batch_header_id)
batch_set_id = batch_description.batch_set_id
kind_of_batch = batch_description.kind_of_batch
except BatchDescription.DoesNotExist:
# This is fine
batch_description = BatchDescription()
    # If this batch belongs to a batch set, retrieve it (batch_set_list holds a single BatchSet despite the name)
if positive_value_exists(batch_set_id):
try:
batch_set_list = BatchSet.objects.get(id=batch_set_id)
except BatchSet.DoesNotExist:
# This is fine
batch_set_list = BatchSet()
try:
batch_header = BatchHeader.objects.get(id=batch_header_id)
except BatchHeader.DoesNotExist:
# This is fine
batch_header = BatchHeader()
try:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
except BatchHeaderMap.DoesNotExist:
# This is fine
batch_header_map = BatchHeaderMap()
if kind_of_batch == CANDIDATE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_CANDIDATES
elif kind_of_batch == CONTEST_OFFICE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_CONTEST_OFFICES
elif kind_of_batch == ELECTED_OFFICE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_ELECTED_OFFICES
elif kind_of_batch == MEASURE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_MEASURES
elif kind_of_batch == ORGANIZATION_WORD:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_ORGANIZATIONS
elif kind_of_batch == POLITICIAN:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLITICIANS
elif kind_of_batch == POSITION:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POSITIONS
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS
else:
batch_import_keys_accepted = {}
sorted_batch_import_keys_accepted = sorted(batch_import_keys_accepted.items())
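    # sorted(...) yields (key, value) tuples from the accepted-keys dict for the template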
try:
batch_row_list = BatchRow.objects.all()
batch_row_list = batch_row_list.filter(batch_header_id=batch_header_id)[:3] # Limit to 3 rows
    except BatchRow.DoesNotExist:
        # This is fine: no rows exist for this batch
        batch_row_list = []
election_list = Election.objects.order_by('-election_day_text')
messages_on_stage = get_messages(request)
    template_values = {
        'messages_on_stage': messages_on_stage,
        'batch_header_id': batch_header_id,
        'batch_description': batch_description,
        'batch_set_id': batch_set_id,
        'batch_header': batch_header,
        'batch_header_map': batch_header_map,
        'batch_import_keys_accepted': sorted_batch_import_keys_accepted,
        'batch_row_list': batch_row_list,
        'election_list': election_list,
        'kind_of_batch': kind_of_batch,
        'google_civic_election_id': google_civic_election_id,
    }
    if batch_set_id:
        # batch_set_list is only defined when this batch belongs to a batch set
        template_values['batch_set_list'] = batch_set_list
return render(request, 'import_export_batches/batch_header_mapping.html', template_values)
@login_required
def batch_header_mapping_process_view(request):
"""
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
save_header_mapping_button = request.GET.get('save_header_mapping_button', '')
kind_of_batch = ""
if not positive_value_exists(batch_header_id):
messages.add_message(request, messages.ERROR, 'Batch_header_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch))
batch_set_id = 0
try:
batch_description = BatchDescription.objects.get(batch_header_id=batch_header_id)
batch_set_id = batch_description.batch_set_id
kind_of_batch = batch_description.kind_of_batch
except BatchDescription.DoesNotExist:
# This is fine
batch_description = BatchDescription()
# Put all incoming header_mapping values into a dict
incoming_header_map_values = {
'batch_header_map_000': request.GET.get('batch_header_map_000', ''),
'batch_header_map_001': request.GET.get('batch_header_map_001', ''),
'batch_header_map_002': request.GET.get('batch_header_map_002', ''),
'batch_header_map_003': request.GET.get('batch_header_map_003', ''),
'batch_header_map_004': request.GET.get('batch_header_map_004', ''),
'batch_header_map_005': request.GET.get('batch_header_map_005', ''),
'batch_header_map_006': request.GET.get('batch_header_map_006', ''),
'batch_header_map_007': request.GET.get('batch_header_map_007', ''),
'batch_header_map_008': request.GET.get('batch_header_map_008', ''),
'batch_header_map_009': request.GET.get('batch_header_map_009', ''),
'batch_header_map_010': request.GET.get('batch_header_map_010', ''),
'batch_header_map_011': request.GET.get('batch_header_map_011', ''),
'batch_header_map_012': request.GET.get('batch_header_map_012', ''),
'batch_header_map_013': request.GET.get('batch_header_map_013', ''),
'batch_header_map_014': request.GET.get('batch_header_map_014', ''),
'batch_header_map_015': request.GET.get('batch_header_map_015', ''),
'batch_header_map_016': request.GET.get('batch_header_map_016', ''),
'batch_header_map_017': request.GET.get('batch_header_map_017', ''),
'batch_header_map_018': request.GET.get('batch_header_map_018', ''),
'batch_header_map_019': request.GET.get('batch_header_map_019', ''),
'batch_header_map_020': request.GET.get('batch_header_map_020', ''),
}
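    # Only the first 21 columns (batch_header_map_000 through _020) can be mapped here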
batch_header_mapping_results = update_or_create_batch_header_mapping(
batch_header_id, kind_of_batch, incoming_header_map_values)
try:
batch_header = BatchHeader.objects.get(id=batch_header_id)
batch_header_found = True
except BatchHeader.DoesNotExist:
# This is fine
batch_header = BatchHeader()
batch_header_found = False
suggestions_created = 0
if batch_header_found:
batch_header_translation_results = create_batch_header_translation_suggestions(
batch_header, kind_of_batch, incoming_header_map_values)
suggestions_created = batch_header_translation_results['suggestions_created']
messages.add_message(request, messages.INFO, 'Batch Header Mapping Updated: '
'Batch kind: {kind_of_batch}, '
'suggestions_created: {suggestions_created}, '
''.format(kind_of_batch=kind_of_batch,
suggestions_created=suggestions_created))
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id))
@login_required
def batch_action_list_assign_election_to_rows_process_view(request):
"""
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_row_list_found = False
batch_row_list = []
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
batch_row_id = convert_to_int(request.GET.get('batch_row_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
kind_of_action = request.GET.get('kind_of_action')
state_code = request.GET.get('state_code', '')
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
# do for entire batch_rows
try:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
batch_header_map_found = True
except BatchHeaderMap.DoesNotExist:
# This is fine
batch_header_map = BatchHeaderMap()
batch_header_map_found = False
if batch_header_map_found:
try:
batch_row_query = BatchRow.objects.all()
batch_row_query = batch_row_query.filter(batch_header_id=batch_header_id)
if positive_value_exists(batch_row_id):
batch_row_query = batch_row_query.filter(id=batch_row_id)
if positive_value_exists(state_code):
batch_row_query = batch_row_query.filter(state_code__iexact=state_code)
batch_row_list = list(batch_row_query)
if len(batch_row_list):
batch_row_list_found = True
        except BatchRow.DoesNotExist:
            # This is fine: no rows exist for this batch
            batch_row_list_found = False
if batch_header_map_found and batch_row_list_found:
for one_batch_row in batch_row_list:
try:
one_batch_row.google_civic_election_id = google_civic_election_id
one_batch_row.save()
except Exception as e:
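                # If one row fails to save, skip it and keep processing the rest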
pass
# messages.add_message(request, messages.INFO,
# 'Kind of Batch: {kind_of_batch}, ' 'Number Created: {created} '
# ''.format(kind_of_batch=kind_of_batch,
# created=results['number_of_table_rows_created']))
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id) +
"&state_code=" + str(state_code) +
"&google_civic_election_id=" + str(google_civic_election_id)
)
@login_required
def batch_action_list_update_or_create_process_view(request):
"""
Use batch_row_action entries and create live data
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_row_list_found = False
status = ""
batch_header_id = convert_to_int(request.GET.get('batch_header_id', 0))
batch_row_id = convert_to_int(request.GET.get('batch_row_id', 0))
ballot_item_id = convert_to_int(request.GET.get('ballot_item_id', 0))
kind_of_batch = request.GET.get('kind_of_batch', '')
kind_of_action = request.GET.get('kind_of_action')
state_code = request.GET.get('state_code', '')
# do for entire batch_rows
try:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
batch_header_map_found = True
except BatchHeaderMap.DoesNotExist:
# This is fine
batch_header_map = BatchHeaderMap()
batch_header_map_found = False
if batch_header_map_found:
try:
batch_row_query = BatchRow.objects.all()
batch_row_query = batch_row_query.filter(batch_header_id=batch_header_id)
if positive_value_exists(batch_row_id):
batch_row_query = batch_row_query.filter(id=batch_row_id)
if positive_value_exists(state_code):
batch_row_query = batch_row_query.filter(state_code__iexact=state_code)
batch_row_list = list(batch_row_query)
if len(batch_row_list):
batch_row_list_found = True
        except BatchRow.DoesNotExist:
            # This is fine: no rows exist for this batch
            batch_row_list_found = False
if batch_header_map_found and batch_row_list_found:
results = import_data_from_batch_row_actions(
kind_of_batch, kind_of_action, batch_header_id, batch_row_id, state_code, ballot_item_id=ballot_item_id)
if kind_of_action == IMPORT_CREATE:
if results['success']:
messages.add_message(request, messages.INFO,
'Kind of Batch: {kind_of_batch}, ' 'Number Created: {created} '
''.format(kind_of_batch=kind_of_batch,
created=results['number_of_table_rows_created']))
else:
status += results['status']
messages.add_message(request, messages.ERROR, 'Batch kind: {kind_of_batch} create failed: {status}'
''.format(kind_of_batch=kind_of_batch,
status=status))
elif kind_of_action == IMPORT_ADD_TO_EXISTING:
if results['success']:
messages.add_message(request, messages.INFO,
'Kind of Batch: {kind_of_batch}, ' 'Number Updated: {updated} '
''.format(kind_of_batch=kind_of_batch,
updated=results['number_of_table_rows_updated']))
else:
status += results['status']
messages.add_message(request, messages.ERROR,
'Batch kind: {kind_of_batch} UPDATE_FAILED-UPDATE_MAY_NOT_BE_SUPPORTED_YET, '
'status: {status} '
''.format(kind_of_batch=kind_of_batch, status=status))
elif kind_of_action == IMPORT_DELETE:
if results['success']:
messages.add_message(request, messages.INFO,
'Kind of Batch: {kind_of_batch}, ' 'Number Deleted: {deleted} '
''.format(kind_of_batch=kind_of_batch,
deleted=results['number_of_table_rows_deleted']))
else:
status += results['status']
messages.add_message(request, messages.ERROR, 'Batch kind: {kind_of_batch} delete failed: {status}'
''.format(kind_of_batch=kind_of_batch,
status=status))
else:
status += results['status']
messages.add_message(request, messages.ERROR, 'Batch kind: {kind_of_batch} import status: {status}'
''.format(kind_of_batch=kind_of_batch,
status=status))
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()))
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&batch_header_id=" + str(batch_header_id) +
"&state_code=" + str(state_code)
)
@login_required
def batch_set_list_view(request):
"""
Display a list of import batch set
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# kind_of_batch = request.GET.get('kind_of_batch', '')
batch_file = request.GET.get('batch_file', '')
batch_uri = request.GET.get('batch_uri', '')
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
batch_set_id = convert_to_int(request.GET.get('batch_set_id', 0))
batch_process_id = convert_to_int(request.GET.get('batch_process_id', 0))
    limit = convert_to_int(request.GET.get('limit', 25))  # GET params arrive as strings; slicing needs an int
show_status_statistics = request.GET.get('show_status_statistics', False)
show_status_statistics = positive_value_exists(show_status_statistics)
state_code = request.GET.get('state_code', '')
messages_on_stage = get_messages(request)
batch_set_list_found = False
try:
batch_set_query = BatchSet.objects.order_by('-import_date')
# batch_set_list = batch_set_list.exclude(batch_set_id__isnull=True)
if positive_value_exists(google_civic_election_id):
batch_set_query = batch_set_query.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(batch_process_id):
batch_set_query = batch_set_query.filter(batch_process_id=batch_process_id)
if positive_value_exists(batch_set_id):
batch_set_query = batch_set_query.filter(id=batch_set_id)
if positive_value_exists(state_code):
batch_set_query = batch_set_query.filter(state_code__iexact=state_code)
batch_set_list = batch_set_query[:limit]
if len(batch_set_list):
batch_set_list_found = True
    except BatchSet.DoesNotExist:
        # This is fine
        batch_set_list = []
        batch_set_list_found = False
if positive_value_exists(show_status_statistics):
for one_batch_set in batch_set_list:
            batch_description_query = BatchDescription.objects.filter(batch_set_id=one_batch_set.id)
            batch_description = batch_description_query.first()
            one_batch_set.batch_description_total_rows_count = batch_description_query.count()
            not_analyzed_query = batch_description_query.exclude(batch_description_analyzed=True)
            one_batch_set.batch_description_not_analyzed_count = not_analyzed_query.count()
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_set_id=one_batch_set.id)
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=IMPORT_DELETE)
one_batch_set.batch_description_to_delete_count = batch_row_action_query.count()
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_set_id=one_batch_set.id)
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=IMPORT_ALREADY_DELETED)
one_batch_set.batch_description_already_deleted_count = batch_row_action_query.count()
if positive_value_exists(one_batch_set.batch_description_total_rows_count):
try:
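                    # batch_description may be None when the set has no descriptions;
                    # the try/except absorbs that case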
if batch_description.kind_of_batch == IMPORT_BALLOT_ITEM:
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_set_id=one_batch_set.id)
batch_row_action_query = batch_row_action_query.filter(kind_of_action=IMPORT_CREATE)
one_batch_set.batch_description_not_created_count = batch_row_action_query.count()
except Exception as e:
pass
election_list = Election.objects.order_by('-election_day_text')
if batch_set_list_found:
template_values = {
'batch_file': batch_file,
'batch_process_id': batch_process_id,
'batch_set_id': batch_set_id,
'batch_set_list': batch_set_list,
'batch_uri': batch_uri,
'google_civic_election_id': google_civic_election_id,
'election_list': election_list,
'messages_on_stage': messages_on_stage,
'show_status_statistics': show_status_statistics,
'state_code': state_code,
}
else:
template_values = {
'batch_file': batch_file,
'batch_process_id': batch_process_id,
'batch_set_id': batch_set_id,
'batch_uri': batch_uri,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'messages_on_stage': messages_on_stage,
'show_status_statistics': show_status_statistics,
'state_code': state_code,
}
return render(request, 'import_export_batches/batch_set_list.html', template_values)
@login_required
def batch_set_list_process_view(request):
"""
Load in a new batch set to start the importing process
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_uri = request.POST.get('batch_uri', '')
batch_process_id = convert_to_int(request.POST.get('batch_process_id', 0))
batch_set_id = convert_to_int(request.POST.get('batch_set_id', 0))
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
organization_we_vote_id = request.POST.get('organization_we_vote_id', '')
# Was form submitted, or was election just changed?
import_batch_button = request.POST.get('import_batch_button', '')
show_status_statistics = request.POST.get('show_status_statistics', False)
show_status_statistics = positive_value_exists(show_status_statistics)
state_code = request.POST.get('state_code', '')
batch_uri_encoded = urlquote(batch_uri) if positive_value_exists(batch_uri) else ""
batch_file = None
# Store contents of spreadsheet?
# if not positive_value_exists(google_civic_election_id):
# messages.add_message(request, messages.ERROR, 'This batch set requires you to choose an election.')
# return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
# "?batch_uri=" + batch_uri_encoded)
election_manager = ElectionManager()
election_name = ""
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_name = election.election_name
if positive_value_exists(import_batch_button): # If the button was pressed...
batch_manager = BatchManager()
try:
if request.method == 'POST' and request.FILES['batch_file']:
batch_file = request.FILES['batch_file']
except KeyError:
pass
if batch_file is not None:
results = batch_manager.create_batch_set_vip_xml(
batch_file, batch_uri, google_civic_election_id, organization_we_vote_id)
if results['batch_saved']:
                messages.add_message(request, messages.INFO, 'Import batch set for {election_name} election saved.'
                                                             ''.format(election_name=election_name))
else:
messages.add_message(request, messages.ERROR, results['status'])
elif positive_value_exists(batch_uri):
# check file type
filetype = batch_manager.find_file_type(batch_uri)
if "xml" in filetype:
# file is XML
# Retrieve the VIP data from XML
results = batch_manager.create_batch_set_vip_xml(
batch_file, batch_uri, google_civic_election_id, organization_we_vote_id)
else:
pass
# results = batch_manager.create_batch(batch_uri, google_civic_election_id, organization_we_vote_id)
if 'batch_saved' in results and results['batch_saved']:
                messages.add_message(request, messages.INFO, 'Import batch set for {election_name} election '
                                                             'saved from URI.'
                                                             ''.format(election_name=election_name))
else:
messages.add_message(request, messages.ERROR, results['status'])
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&batch_process_id=" + str(batch_process_id) +
"&batch_set_id=" + str(batch_set_id) +
"&state_code=" + str(state_code) +
"&show_status_statistics=" + str(show_status_statistics) +
"&batch_uri=" + batch_uri_encoded)
@login_required
def batch_process_system_toggle_view(request):
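    """
    Toggle one of the batch-process on/off switches stored in WeVoteSettings.
    :param request:
    :return:
    """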
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
# ACTIVITY_NOTICE_PROCESS, API_REFRESH_REQUEST, BALLOT_ITEMS, SEARCH_TWITTER
kind_of_process = request.GET.get('kind_of_process', '')
kind_of_processes_to_show = request.GET.get('kind_of_processes_to_show', '')
show_checked_out_processes_only = request.GET.get('show_checked_out_processes_only', '')
show_active_processes_only = request.GET.get('show_active_processes_only', '')
show_paused_processes_only = request.GET.get('show_paused_processes_only', '')
include_frequent_processes = request.GET.get('include_frequent_processes', '')
from wevote_settings.models import WeVoteSettingsManager
we_vote_settings_manager = WeVoteSettingsManager()
if kind_of_process == 'ACTIVITY_NOTICE_PROCESS':
setting_name = 'batch_process_system_activity_notices_on'
elif kind_of_process == 'API_REFRESH_REQUEST':
setting_name = 'batch_process_system_api_refresh_on'
elif kind_of_process == 'BALLOT_ITEMS':
setting_name = 'batch_process_system_ballot_items_on'
elif kind_of_process == 'CALCULATE_ANALYTICS':
setting_name = 'batch_process_system_calculate_analytics_on'
elif kind_of_process == 'SEARCH_TWITTER':
setting_name = 'batch_process_system_search_twitter_on'
else:
setting_name = 'batch_process_system_on'
results = we_vote_settings_manager.fetch_setting_results(setting_name=setting_name, read_only=False)
if results['we_vote_setting_found']:
we_vote_setting = results['we_vote_setting']
we_vote_setting.boolean_value = not we_vote_setting.boolean_value
we_vote_setting.save()
else:
messages.add_message(request, messages.ERROR, "CANNOT_FIND_WE_VOTE_SETTING-batch_process_system_on")
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code) +
"&kind_of_processes_to_show=" + str(kind_of_processes_to_show) +
"&show_checked_out_processes_only=" + str(show_checked_out_processes_only) +
"&show_active_processes_only=" + str(show_active_processes_only) +
"&show_paused_processes_only=" + str(show_paused_processes_only) +
"&include_frequent_processes=" + str(include_frequent_processes)
)
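

# The if/elif chain above maps kind_of_process onto a WeVoteSetting name. The same
# dispatch can be written as a table lookup; a sketch only, not wired into the view:
_KIND_OF_PROCESS_TO_SETTING_NAME_SKETCH = {
    'ACTIVITY_NOTICE_PROCESS': 'batch_process_system_activity_notices_on',
    'API_REFRESH_REQUEST': 'batch_process_system_api_refresh_on',
    'BALLOT_ITEMS': 'batch_process_system_ballot_items_on',
    'CALCULATE_ANALYTICS': 'batch_process_system_calculate_analytics_on',
    'SEARCH_TWITTER': 'batch_process_system_search_twitter_on',
}
# Usage sketch:
#   setting_name = _KIND_OF_PROCESS_TO_SETTING_NAME_SKETCH.get(kind_of_process, 'batch_process_system_on')

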
@login_required
def batch_process_list_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
status = ""
success = True
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
include_frequent_processes = request.GET.get('include_frequent_processes', False)
kind_of_processes_to_show = request.GET.get('kind_of_processes_to_show', '')
state_code = request.GET.get('state_code', '')
show_all_elections = positive_value_exists(request.GET.get('show_all_elections', False))
show_active_processes_only = request.GET.get('show_active_processes_only', False)
show_paused_processes_only = request.GET.get('show_paused_processes_only', False)
show_checked_out_processes_only = request.GET.get('show_checked_out_processes_only', False)
batch_process_id = convert_to_int(request.GET.get('batch_process_id', 0))
batch_process_search = request.GET.get('batch_process_search', '')
batch_process_list = []
select_for_changing_batch_process_ids = request.POST.getlist('select_for_marking_checks[]')
which_marking = request.POST.get("which_marking", None) # What to do with check marks
    # Make sure 'which_marking' is one of the allowed bulk actions
    if which_marking and which_marking not in ("pause_process", "unpause_process"):
messages.add_message(request, messages.ERROR,
'The filter you are trying to update is not recognized: {which_marking}'
''.format(which_marking=which_marking))
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
error_count = 0
items_processed_successfully = 0
if which_marking and select_for_changing_batch_process_ids:
# Get these values from hidden POST fields
batch_process_search = request.POST.get('batch_process_search', '')
google_civic_election_id = convert_to_int(request.POST.get('google_civic_election_id', 0))
show_all_elections = positive_value_exists(request.POST.get('show_all_elections', False))
state_code = request.POST.get('state_code', '') # Already retrieved with GET, now retrieving with POST
for one_batch_process_id in select_for_changing_batch_process_ids:
try:
one_batch_process = BatchProcess.objects.get(id=one_batch_process_id)
if which_marking == "pause_process":
one_batch_process.batch_process_paused = True
elif which_marking == "unpause_process":
one_batch_process.batch_process_paused = False
one_batch_process.save()
items_processed_successfully += 1
status += 'BATCH_PROCESS_UPDATED '
except BatchProcess.MultipleObjectsReturned as e:
status += 'MULTIPLE_MATCHING_BATCH_PROCESSES_FOUND '
error_count += 1
except BatchProcess.DoesNotExist:
status += "RETRIEVE_BATCH_PROCESS_NOT_FOUND "
error_count += 1
except Exception as e:
status += 'BATCH_PROCESS_GENERAL_ERROR ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
error_count += 1
messages.add_message(request, messages.INFO,
'Batch Processes paused/unpaused successfully: {items_processed_successfully}, '
'errors: {error_count}'
''.format(error_count=error_count,
items_processed_successfully=items_processed_successfully))
election_manager = ElectionManager()
if positive_value_exists(show_all_elections):
results = election_manager.retrieve_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
try:
batch_process_queryset = BatchProcess.objects.all()
if positive_value_exists(batch_process_id):
batch_process_queryset = batch_process_queryset.filter(id=batch_process_id)
if positive_value_exists(google_civic_election_id):
batch_process_queryset = batch_process_queryset.filter(google_civic_election_id=google_civic_election_id)
elif positive_value_exists(show_all_elections):
            # Return batch processes from all elections
pass
else:
# Limit this search to upcoming_elections only
google_civic_election_id_list = [0]
for one_election in election_list:
google_civic_election_id_list.append(one_election.google_civic_election_id)
batch_process_queryset = batch_process_queryset.filter(
google_civic_election_id__in=google_civic_election_id_list)
if positive_value_exists(state_code):
batch_process_queryset = batch_process_queryset.filter(state_code__iexact=state_code)
if positive_value_exists(show_active_processes_only):
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True)
if positive_value_exists(show_paused_processes_only):
batch_process_queryset = batch_process_queryset.filter(batch_process_paused=True)
if positive_value_exists(show_checked_out_processes_only):
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False)
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True)
if positive_value_exists(kind_of_processes_to_show):
if kind_of_processes_to_show == "ACTIVITY_NOTICE_PROCESS":
activity_notice_processes = ['ACTIVITY_NOTICE_PROCESS']
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=activity_notice_processes)
elif kind_of_processes_to_show == "ANALYTICS_ACTION":
analytics_processes = [
'AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID',
'AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT',
'CALCULATE_ORGANIZATION_DAILY_METRICS',
'CALCULATE_ORGANIZATION_ELECTION_METRICS',
'CALCULATE_SITEWIDE_DAILY_METRICS',
'CALCULATE_SITEWIDE_VOTER_METRICS']
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=analytics_processes)
elif kind_of_processes_to_show == "API_REFRESH_REQUEST":
api_refresh_processes = ['API_REFRESH_REQUEST']
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=api_refresh_processes)
elif kind_of_processes_to_show == "BALLOT_ITEMS":
ballot_item_processes = [
'REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS',
'REFRESH_BALLOT_ITEMS_FROM_VOTERS',
'RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS']
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=ballot_item_processes)
elif kind_of_processes_to_show == "SEARCH_TWITTER":
search_twitter_processes = ['SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE']
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=search_twitter_processes)
elif positive_value_exists(include_frequent_processes):
# Don't modify the query
pass
else:
exclude_list = [ACTIVITY_NOTICE_PROCESS, API_REFRESH_REQUEST]
batch_process_queryset = batch_process_queryset.exclude(kind_of_process__in=exclude_list)
batch_process_queryset = batch_process_queryset.order_by("-id")
if positive_value_exists(batch_process_search):
search_words = batch_process_search.split()
for one_word in search_words:
filters = [] # Reset for each search word
new_filter = Q(office_name__icontains=one_word)
filters.append(new_filter)
new_filter = Q(we_vote_id__iexact=one_word)
filters.append(new_filter)
new_filter = Q(wikipedia_id__icontains=one_word)
filters.append(new_filter)
new_filter = Q(ballotpedia_office_id__iexact=one_word)
filters.append(new_filter)
new_filter = Q(ballotpedia_race_id__iexact=one_word)
filters.append(new_filter)
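                # Note: these field names (office_name, wikipedia_id, ballotpedia_office_id,
                # ballotpedia_race_id) look carried over from an office search; any that are
                # not fields on BatchProcess will raise a FieldError when the query runs.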
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
batch_process_queryset = batch_process_queryset.filter(final_filters)
batch_process_list_count = batch_process_queryset.count()
batch_process_queryset = batch_process_queryset[:100]
batch_process_list = list(batch_process_queryset)
if len(batch_process_list):
batch_process_list_found = True
status += 'BATCH_PROCESS_LIST_RETRIEVED '
else:
status += 'BATCH_PROCESS_LIST_NOT_RETRIEVED '
    except BatchProcess.DoesNotExist:
        # No batch processes found. Not a problem.
        status += 'NO_BATCH_PROCESSES_FOUND_DoesNotExist '
        batch_process_list = []
    except Exception as e:
        status += 'FAILED batch_process_list_view, BatchProcess query: ' + str(e) + ' '
success = False
# Add the processing "chunks" under each Batch Process
for batch_process in batch_process_list:
batch_process_ballot_item_chunk_list = []
batch_process_ballot_item_chunk_list_found = False
batch_process_analytics_chunk_list = []
batch_process_analytics_chunk_list_found = False
try:
batch_process_chunk_queryset = BatchProcessBallotItemChunk.objects.all()
batch_process_chunk_queryset = batch_process_chunk_queryset.filter(batch_process_id=batch_process.id)
batch_process_chunk_queryset = batch_process_chunk_queryset.order_by("-id")
batch_process_ballot_item_chunk_list = list(batch_process_chunk_queryset)
batch_process_ballot_item_chunk_list_found = \
positive_value_exists(len(batch_process_ballot_item_chunk_list))
except BatchProcessBallotItemChunk.DoesNotExist:
# BatchProcessBallotItemChunk not found. Not a problem.
status += 'NO_BatchProcessBallotItemChunk_FOUND_DoesNotExist '
except Exception as e:
status += 'FAILED BatchProcessBallotItemChunk ' + str(e) + ' '
batch_process.batch_process_ballot_item_chunk_list = batch_process_ballot_item_chunk_list
batch_process.batch_process_ballot_item_chunk_list_found = batch_process_ballot_item_chunk_list_found
if not positive_value_exists(batch_process_ballot_item_chunk_list_found):
# Now check to see if this is an analytics
try:
batch_process_chunk_queryset = BatchProcessAnalyticsChunk.objects.all()
batch_process_chunk_queryset = batch_process_chunk_queryset.filter(batch_process_id=batch_process.id)
batch_process_chunk_queryset = batch_process_chunk_queryset.order_by("-id")
batch_process_analytics_chunk_list = list(batch_process_chunk_queryset)
batch_process_analytics_chunk_list_found = \
positive_value_exists(len(batch_process_analytics_chunk_list))
            except BatchProcessAnalyticsChunk.DoesNotExist:
                # BatchProcessAnalyticsChunk not found. Not a problem.
                status += 'NO_BatchProcessAnalyticsChunk_FOUND_DoesNotExist '
except Exception as e:
status += 'FAILED BatchProcessAnalyticsChunk ' + str(e) + ' '
batch_process.batch_process_analytics_chunk_list = batch_process_analytics_chunk_list
batch_process.batch_process_analytics_chunk_list_found = batch_process_analytics_chunk_list_found
# Make sure we always include the current election in the election_list, even if it is older
use_ballotpedia_as_data_source = False
use_ctcl_as_data_source = False
use_vote_usa_as_data_source = False
if positive_value_exists(google_civic_election_id):
this_election_found = False
for one_election in election_list:
if convert_to_int(one_election.google_civic_election_id) == convert_to_int(google_civic_election_id):
this_election_found = True
use_ballotpedia_as_data_source = one_election.use_ballotpedia_as_data_source
use_ctcl_as_data_source = one_election.use_ctcl_as_data_source
use_vote_usa_as_data_source = one_election.use_vote_usa_as_data_source
break
if not this_election_found:
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
use_ballotpedia_as_data_source = election.use_ballotpedia_as_data_source
use_ctcl_as_data_source = election.use_ctcl_as_data_source
use_vote_usa_as_data_source = election.use_vote_usa_as_data_source
election_list.append(election)
state_list = STATE_CODE_MAP
state_list_modified = {}
for one_state_code, one_state_name in state_list.items():
# office_count = batch_process_manager.fetch_office_count(google_civic_election_id, one_state_code)
        batch_process_count = 0  # Currently always zero (the fetch above is commented out), so no suffix is added
        state_name_modified = one_state_name
        if positive_value_exists(batch_process_count):
            state_name_modified += " - " + str(batch_process_count)
        state_list_modified[one_state_code] = state_name_modified
sorted_state_list = sorted(state_list_modified.items())
# status_print_list = ""
# status_print_list += "batch_process_list_count: " + \
# str(batch_process_list_count) + " "
#
# messages.add_message(request, messages.INFO, status_print_list)
messages_on_stage = get_messages(request)
from wevote_settings.models import fetch_batch_process_system_on, fetch_batch_process_system_activity_notices_on, \
fetch_batch_process_system_api_refresh_on, fetch_batch_process_system_ballot_items_on, \
fetch_batch_process_system_calculate_analytics_on, fetch_batch_process_system_search_twitter_on
batch_process_system_on = fetch_batch_process_system_on()
batch_process_system_activity_notices_on = fetch_batch_process_system_activity_notices_on()
batch_process_system_api_refresh_on = fetch_batch_process_system_api_refresh_on()
batch_process_system_ballot_items_on = fetch_batch_process_system_ballot_items_on()
batch_process_system_calculate_analytics_on = fetch_batch_process_system_calculate_analytics_on()
batch_process_system_search_twitter_on = fetch_batch_process_system_search_twitter_on()
ballot_returned_oldest_date = ""
ballot_returned_voter_oldest_date = ""
if positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
ballot_returned_list_manager = BallotReturnedListManager()
ballot_returned_oldest_date = ballot_returned_list_manager.fetch_oldest_date_last_updated(
google_civic_election_id, state_code)
ballot_returned_voter_oldest_date = ballot_returned_list_manager.fetch_oldest_date_last_updated(
google_civic_election_id, state_code, for_voter=True)
toggle_system_url_variables = "s=1" # Add a dummy variable at the start so all remaining variables have &
if positive_value_exists(include_frequent_processes):
toggle_system_url_variables += "&include_frequent_processes=1"
if positive_value_exists(kind_of_processes_to_show):
toggle_system_url_variables += "&kind_of_processes_to_show=" + str(kind_of_processes_to_show)
if positive_value_exists(show_active_processes_only):
toggle_system_url_variables += "&show_active_processes_only=1"
if positive_value_exists(show_checked_out_processes_only):
toggle_system_url_variables += "&show_checked_out_processes_only=1"
if positive_value_exists(show_paused_processes_only):
toggle_system_url_variables += "&show_paused_processes_only=1"
template_values = {
'messages_on_stage': messages_on_stage,
'ballot_returned_oldest_date': ballot_returned_oldest_date,
'ballot_returned_voter_oldest_date': ballot_returned_voter_oldest_date,
'batch_process_id': batch_process_id,
'batch_process_list': batch_process_list,
'batch_process_system_on': batch_process_system_on,
'batch_process_system_activity_notices_on': batch_process_system_activity_notices_on,
'batch_process_system_api_refresh_on': batch_process_system_api_refresh_on,
'batch_process_system_ballot_items_on': batch_process_system_ballot_items_on,
'batch_process_system_calculate_analytics_on': batch_process_system_calculate_analytics_on,
'batch_process_system_search_twitter_on': batch_process_system_search_twitter_on,
'batch_process_search': batch_process_search,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'include_frequent_processes': include_frequent_processes,
'kind_of_processes_to_show': kind_of_processes_to_show,
'show_all_elections': show_all_elections,
'show_active_processes_only': show_active_processes_only,
'show_paused_processes_only': show_paused_processes_only,
'show_checked_out_processes_only': show_checked_out_processes_only,
'state_code': state_code,
'state_list': sorted_state_list,
'toggle_system_url_variables': toggle_system_url_variables,
'use_ballotpedia_as_data_source': use_ballotpedia_as_data_source,
'use_ctcl_as_data_source': use_ctcl_as_data_source,
'use_vote_usa_as_data_source': use_vote_usa_as_data_source,
}
return render(request, 'import_export_batches/batch_process_list.html', template_values)
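

# The search blocks in batch_process_list_view (and batch_process_log_entry_list_view
# below) OR together one Q object per field for each search word, then AND each word's
# combined filter into the queryset. The same pattern, condensed with functools.reduce
# (a sketch; 'fields' would be lookups such as 'status__icontains'):
def _apply_search_filter_sketch(queryset, search_text, fields):
    from functools import reduce
    from operator import or_
    from django.db.models import Q
    for one_word in search_text.split():
        queryset = queryset.filter(reduce(or_, [Q(**{field: one_word}) for field in fields]))
    return queryset

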
def batch_process_next_steps_view(request):
# json_results = batch_process_next_steps()
status = "batch_process_next_steps_view-DEPRECATED "
json_results = {
'success': False,
'status': status,
}
response = HttpResponse(json.dumps(json_results), content_type='application/json')
return response


@login_required
def import_ballot_items_for_location_view(request):
"""
Reach out to external data source API to retrieve a ballot for one location.
"""
status = ""
success = True
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
polling_location_we_vote_id = request.GET.get('polling_location_we_vote_id', "")
state_code = request.GET.get('state_code', "")
use_ballotpedia = positive_value_exists(request.GET.get('use_ballotpedia', False))
use_ctcl = positive_value_exists(request.GET.get('use_ctcl', False))
use_vote_usa = positive_value_exists(request.GET.get('use_vote_usa', False))
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR,
'Google Civic Election Id missing.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
election_manager = ElectionManager()
ctcl_election_uuid = ''
election_day_text = ''
use_ballotpedia_as_data_source = False
use_ctcl_as_data_source = False
use_vote_usa_as_data_source = False
results = election_manager.retrieve_election(google_civic_election_id=google_civic_election_id)
if results['election_found']:
election = results['election']
ctcl_election_uuid = election.ctcl_uuid
election_day_text = election.election_day_text
use_ballotpedia_as_data_source = election.use_ballotpedia_as_data_source
use_ctcl_as_data_source = election.use_ctcl_as_data_source
use_vote_usa_as_data_source = election.use_vote_usa_as_data_source
if positive_value_exists(use_ballotpedia):
if not positive_value_exists(use_ballotpedia_as_data_source):
success = False
status += "USE_BALLOTPEDIA-BUT_NOT_USE_BALLOTPEDIA_AS_DATA_SOURCE "
results = {
'status': status,
'success': success,
}
elif positive_value_exists(use_ctcl):
if not positive_value_exists(use_ctcl_as_data_source):
success = False
status += "USE_CTCL-BUT_NOT_USE_CTCL_AS_DATA_SOURCE "
results = {
'status': status,
'success': success,
}
elif positive_value_exists(use_vote_usa):
if not positive_value_exists(use_vote_usa_as_data_source):
success = False
status += "USE_VOTE_USA-BUT_NOT_USE_VOTE_USA_AS_DATA_SOURCE "
results = {
'status': status,
'success': success,
}
kind_of_batch = ""
if success:
update_or_create_rules = {
'create_candidates': True,
'create_offices': True,
'create_measures': True,
'update_candidates': False,
'update_offices': False,
'update_measures': False,
}
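        # With these rules the import may create new offices, candidates, and measures,
        # but will not modify entities that already exist.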
if positive_value_exists(use_ballotpedia):
from import_export_ballotpedia.controllers import \
retrieve_ballotpedia_ballot_items_from_polling_location_api_v4
results = retrieve_ballotpedia_ballot_items_from_polling_location_api_v4(
google_civic_election_id,
election_day_text=election_day_text,
polling_location_we_vote_id=polling_location_we_vote_id,
state_code=state_code,
)
elif positive_value_exists(use_ctcl):
from import_export_ctcl.controllers import retrieve_ctcl_ballot_items_from_polling_location_api
results = retrieve_ctcl_ballot_items_from_polling_location_api(
google_civic_election_id,
ctcl_election_uuid=ctcl_election_uuid,
election_day_text=election_day_text,
polling_location_we_vote_id=polling_location_we_vote_id,
state_code=state_code,
update_or_create_rules=update_or_create_rules,
)
else:
# Should not be possible to get here
pass
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = IMPORT_BALLOT_ITEM
batch_header_id = 0
if 'batch_saved' in results and results['batch_saved']:
messages.add_message(request, messages.INFO, 'Ballot items import batch for {google_civic_election_id} '
'election saved.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
elif 'batch_header_id' in results and results['batch_header_id']:
messages.add_message(request, messages.INFO, 'Ballot items import batch for {google_civic_election_id} '
                                                      'election saved (batch_header_id found).'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
else:
# Go to the ballot_item_list_edit page
if positive_value_exists(polling_location_we_vote_id):
return HttpResponseRedirect(reverse('ballot:ballot_item_list_by_polling_location_edit',
args=(polling_location_we_vote_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&state_code=" + str(state_code)
)
else:
messages.add_message(request, messages.ERROR, "Missing polling_location_we_vote_id.")
return HttpResponseRedirect(reverse('election:election_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&state_code=" + str(state_code)
)
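

# import_ballot_items_for_location_view only proceeds when the requested source
# (use_ballotpedia / use_ctcl / use_vote_usa) is also enabled on the election via the
# matching use_*_as_data_source flag. The gate, reduced to a sketch:
def _source_allowed_sketch(requested, enabled_on_election):
    """Both arguments are booleans; returns (success, status_fragment)."""
    if requested and not enabled_on_election:
        return False, "REQUESTED_SOURCE_NOT_ENABLED_FOR_THIS_ELECTION "
    return True, ""

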
def process_next_activity_notices_view(request):
json_results = process_next_activity_notices()
response = HttpResponse(json.dumps(json_results), content_type='application/json')
return response


def process_next_ballot_items_view(request):
json_results = process_next_ballot_items()
response = HttpResponse(json.dumps(json_results), content_type='application/json')
return response


def process_next_general_maintenance_view(request):
json_results = process_next_general_maintenance()
response = HttpResponse(json.dumps(json_results), content_type='application/json')
return response
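

# The three views above build JSON responses by hand with json.dumps. Django's
# built-in JsonResponse collapses the dumps + content_type into one call; an
# equivalent sketch (illustrative, not a drop-in refactor):
def _json_response_sketch(json_results):
    from django.http import JsonResponse
    return JsonResponse(json_results)

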
@login_required
def batch_process_pause_toggle_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_process_id = request.GET.get('batch_process_id', 0)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
batch_process_manager = BatchProcessManager()
results = batch_process_manager.retrieve_batch_process(batch_process_id)
if results['batch_process_found']:
batch_process = results['batch_process']
try:
current_setting = batch_process.batch_process_paused
batch_process.batch_process_paused = not current_setting
batch_process.save()
message = "BATCH_PROCESS_PAUSED: " + str(batch_process.batch_process_paused) + " "
messages.add_message(request, messages.INFO, message)
except Exception as e:
message = "COULD_NOT_SAVE_BATCH_PROCESS-BATCH_PROCESS_PAUSED " + str(e) + " "
messages.add_message(request, messages.ERROR, message)
else:
message = "BATCH_PROCESS_COULD_NOT_BE_FOUND: " + str(batch_process_id)
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))


@login_required
def batch_process_log_entry_list_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
status = ""
success = True
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
show_all_elections = positive_value_exists(request.GET.get('show_all_elections', False))
batch_process_log_entry_search = request.GET.get('batch_process_log_entry_search', '')
batch_process_id = convert_to_int(request.GET.get('batch_process_id', 0))
batch_process_chunk_id = convert_to_int(request.GET.get('batch_process_chunk_id', 0))
batch_process_log_entry_list_found = False
batch_process_log_entry_list = []
election_manager = ElectionManager()
if positive_value_exists(show_all_elections):
results = election_manager.retrieve_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
try:
batch_process_queryset = BatchProcessLogEntry.objects.all()
if positive_value_exists(batch_process_id):
batch_process_queryset = batch_process_queryset.filter(batch_process_id=batch_process_id)
if positive_value_exists(batch_process_chunk_id):
batch_process_queryset = batch_process_queryset.filter(
batch_process_ballot_item_chunk_id=batch_process_chunk_id)
if positive_value_exists(google_civic_election_id):
batch_process_queryset = batch_process_queryset.filter(google_civic_election_id=google_civic_election_id)
elif positive_value_exists(show_all_elections):
            # Return log entries from all elections
pass
else:
# Limit this search to upcoming_elections only, or entries with no election
google_civic_election_id_list = [0]
for one_election in election_list:
google_civic_election_id_list.append(one_election.google_civic_election_id)
batch_process_queryset = batch_process_queryset.filter(
google_civic_election_id__in=google_civic_election_id_list)
if positive_value_exists(state_code):
batch_process_queryset = batch_process_queryset.filter(state_code__iexact=state_code)
batch_process_queryset = batch_process_queryset.order_by("-id")
if positive_value_exists(batch_process_log_entry_search):
search_words = batch_process_log_entry_search.split()
for one_word in search_words:
filters = [] # Reset for each search word
new_filter = Q(batch_process_id__iexact=one_word)
filters.append(new_filter)
new_filter = Q(batch_set_id__iexact=one_word)
filters.append(new_filter)
new_filter = Q(google_civic_election_id__icontains=one_word)
filters.append(new_filter)
new_filter = Q(polling_location_we_vote_id__iexact=one_word)
filters.append(new_filter)
new_filter = Q(state_code__iexact=one_word)
filters.append(new_filter)
new_filter = Q(status__icontains=one_word)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
batch_process_queryset = batch_process_queryset.filter(final_filters)
batch_process_log_entry_list_count = batch_process_queryset.count()
batch_process_queryset = batch_process_queryset[:200]
batch_process_log_entry_list = list(batch_process_queryset)
if len(batch_process_log_entry_list):
batch_process_log_entry_list_found = True
status += 'BATCH_PROCESS_LOG_ENTRY_LIST_RETRIEVED '
else:
status += 'BATCH_PROCESS_LOG_ENTRY_LIST_NOT_RETRIEVED '
except BatchProcessLogEntry.DoesNotExist:
        # No log entries found. Not a problem.
status += 'BATCH_PROCESS_LOG_ENTRY_DoesNotExist '
batch_process_log_entry_list = []
except Exception as e:
        status += 'FAILED-[batch_process_log_entry_list_view]-ERROR ' + str(e) + " "
success = False
handle_exception(e, logger=logger, exception_message=status)
# Make sure we always include the current election in the election_list, even if it is older
if positive_value_exists(google_civic_election_id):
this_election_found = False
for one_election in election_list:
if convert_to_int(one_election.google_civic_election_id) == convert_to_int(google_civic_election_id):
this_election_found = True
break
if not this_election_found:
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_list.append(election)
state_list = STATE_CODE_MAP
state_list_modified = {}
for one_state_code, one_state_name in state_list.items():
# office_count = batch_process_manager.fetch_office_count(google_civic_election_id, one_state_code)
        batch_process_log_entry_count = 0  # Currently always zero (the fetch above is commented out)
        state_name_modified = one_state_name
        if positive_value_exists(batch_process_log_entry_count):
            state_name_modified += " - " + str(batch_process_log_entry_count)
        state_list_modified[one_state_code] = state_name_modified
sorted_state_list = sorted(state_list_modified.items())
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'batch_process_id': batch_process_id,
'batch_process_chunk_id': batch_process_chunk_id,
'batch_process_log_entry_list': batch_process_log_entry_list,
'batch_process_log_entry_search': batch_process_log_entry_search,
'election_list': election_list,
'state_code': state_code,
'show_all_elections': show_all_elections,
'state_list': sorted_state_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'import_export_batches/batch_process_log_entry_list.html', template_values)


@login_required
def batch_set_batch_list_view(request):
"""
Display row-by-row details of batch_set actions being reviewed, leading up to processing an entire batch_set.
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
batch_set_id = convert_to_int(request.GET.get('batch_set_id', 0))
if not positive_value_exists(batch_set_id):
messages.add_message(request, messages.ERROR, 'Batch_set_id required.')
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()))
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
analyze_all_button = request.GET.get('analyze_all_button', 0)
create_all_button = request.GET.get('create_all_button', 0)
analyze_for_deletes_button = request.GET.get('analyze_for_deletes_button', 0)
delete_all_button = request.GET.get('delete_all_button', 0)
show_all_batches = request.GET.get('show_all_batches', False)
state_code = request.GET.get('state_code', "")
update_all_button = request.GET.get('update_all_button', 0)
batch_list_modified = []
batch_manager = BatchManager()
batch_set_count = 0
batch_set_kind_of_batch = ""
# Store static data in memory so we don't have to use the database
election_objects_dict = {}
office_objects_dict = {}
measure_objects_dict = {}
try:
if positive_value_exists(analyze_all_button):
batch_actions_analyzed = 0
batch_actions_not_analyzed = 0
batch_header_id_created_list = []
start_each_batch_time_tracker = [] # Array of times
summary_of_create_batch_row_action_time_tracker = [] # Array of arrays
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_description_query = batch_description_query.filter(batch_description_analyzed=False)
batch_list = list(batch_description_query)
batch_list_not_analyzed_count = len(batch_list)
# For this batch set, cycle through each batch. Within each batch, cycle through each batch_row
# and decide whether the action required is create or update.
for one_batch_description in batch_list:
start_each_batch_time_tracker.append(now().strftime("%H:%M:%S:%f"))
results = create_batch_row_actions(
one_batch_description.batch_header_id,
batch_description=one_batch_description,
election_objects_dict=election_objects_dict,
measure_objects_dict=measure_objects_dict,
office_objects_dict=office_objects_dict,
)
if results['batch_actions_created']:
batch_actions_analyzed += 1
try:
                        # If BatchRowActions were created for this BatchDescription, mark it analyzed
one_batch_description.batch_description_analyzed = True
one_batch_description.save()
batch_header_id_created_list.append(one_batch_description.batch_header_id)
except Exception as e:
pass
else:
batch_actions_not_analyzed += 1
# Keep building up these dicts so we don't have to retrieve data again-and-again from the database
election_objects_dict = results['election_objects_dict']
measure_objects_dict = results['measure_objects_dict']
office_objects_dict = results['office_objects_dict']
start_create_batch_row_action_time_tracker = results['start_create_batch_row_action_time_tracker']
summary_of_create_batch_row_action_time_tracker.append(start_create_batch_row_action_time_tracker)
# If there were not any entries with batch_description_analyzed set to False, then retrieve all
if not positive_value_exists(batch_list_not_analyzed_count):
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(len(batch_header_id_created_list)):
batch_description_query = batch_description_query.exclude(
batch_header_id__in=batch_header_id_created_list)
batch_list = list(batch_description_query)
for one_batch_description in batch_list:
start_each_batch_time_tracker.append(now().strftime("%H:%M:%S:%f"))
results = create_batch_row_actions(
one_batch_description.batch_header_id,
batch_description=one_batch_description,
election_objects_dict=election_objects_dict,
measure_objects_dict=measure_objects_dict,
office_objects_dict=office_objects_dict,
)
if results['batch_actions_created']:
batch_actions_analyzed += 1
try:
                            # If BatchRowActions were created for this BatchDescription, mark it analyzed
one_batch_description.batch_description_analyzed = True
one_batch_description.save()
except Exception as e:
pass
else:
batch_actions_not_analyzed += 1
# Keep building up these dicts so we don't have to retrieve data again-and-again from the database
election_objects_dict = results['election_objects_dict']
measure_objects_dict = results['measure_objects_dict']
office_objects_dict = results['office_objects_dict']
start_create_batch_row_action_time_tracker = results['start_create_batch_row_action_time_tracker']
summary_of_create_batch_row_action_time_tracker.append(start_create_batch_row_action_time_tracker)
if positive_value_exists(batch_actions_analyzed):
messages.add_message(request, messages.INFO, "Analyze All, BatchRows Analyzed: "
"" + str(batch_actions_analyzed))
if positive_value_exists(batch_actions_not_analyzed):
messages.add_message(request, messages.ERROR, "Analyze All, BatchRows NOT Analyzed: "
"" + str(batch_actions_not_analyzed))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_batch_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&batch_set_id=" + str(batch_set_id) +
"&state_code=" + state_code)
if positive_value_exists(update_all_button):
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_description_query = batch_description_query.filter(batch_description_analyzed=True)
batch_list = list(batch_description_query)
batch_actions_updated = 0
batch_actions_not_updated = 0
for one_batch_description in batch_list:
results = import_data_from_batch_row_actions(
one_batch_description.kind_of_batch, IMPORT_ADD_TO_EXISTING, one_batch_description.batch_header_id)
if results['number_of_table_rows_updated']:
batch_actions_updated += 1
else:
batch_actions_not_updated += 1
if positive_value_exists(batch_actions_updated):
messages.add_message(request, messages.INFO, "Update in All Batches: "
"" + str(batch_actions_updated) + ". ")
if positive_value_exists(batch_actions_not_updated):
messages.add_message(request, messages.ERROR, "Update in All Batches, Failed Updates: "
"" + str(batch_actions_not_updated))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_batch_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&batch_set_id=" + str(batch_set_id) +
"&state_code=" + state_code)
if positive_value_exists(create_all_button):
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_description_query = batch_description_query.filter(batch_description_analyzed=True)
batch_list = list(batch_description_query)
batch_actions_created = 0
not_created_status = ""
for one_batch_description in batch_list:
results = import_data_from_batch_row_actions(
one_batch_description.kind_of_batch, IMPORT_CREATE, one_batch_description.batch_header_id)
if results['number_of_table_rows_created']:
batch_actions_created += 1
if not positive_value_exists(results['success']):
if len(not_created_status) < 1024:
not_created_status += results['status']
if positive_value_exists(batch_actions_created):
messages.add_message(request, messages.INFO, "Create in All Batches: "
"" + str(batch_actions_created) + ". ")
if positive_value_exists(not_created_status):
messages.add_message(request, messages.ERROR,
"Create in All Batches, FAILED Creates: {not_created_status} "
"".format(not_created_status=not_created_status))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_batch_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&batch_set_id=" + str(batch_set_id) +
"&state_code=" + state_code)
if positive_value_exists(analyze_for_deletes_button):
batch_actions_analyzed_for_deletes = 0
batch_header_id_created_list = []
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_description_query = batch_description_query.filter(batch_description_analyzed=True)
batch_list = list(batch_description_query)
for one_batch_description in batch_list:
results = create_batch_row_actions(
one_batch_description.batch_header_id,
batch_description=one_batch_description,
delete_analysis_only=True,
election_objects_dict=election_objects_dict,
measure_objects_dict=measure_objects_dict,
office_objects_dict=office_objects_dict,
)
if results['batch_actions_created']:
batch_actions_analyzed_for_deletes += 1
batch_header_id_created_list.append(one_batch_description.batch_header_id)
election_objects_dict = results['election_objects_dict']
measure_objects_dict = results['measure_objects_dict']
office_objects_dict = results['office_objects_dict']
if positive_value_exists(batch_actions_analyzed_for_deletes):
messages.add_message(request, messages.INFO, "Analyze For Deletes: "
"" + str(batch_actions_analyzed_for_deletes))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_batch_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&batch_set_id=" + str(batch_set_id) +
"&state_code=" + state_code)
if positive_value_exists(delete_all_button):
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_description_query = batch_description_query.filter(batch_description_analyzed=True)
batch_list = list(batch_description_query)
batch_actions_deleted = 0
not_deleted_status = ""
for one_batch_description in batch_list:
results = import_data_from_batch_row_actions(
one_batch_description.kind_of_batch, IMPORT_DELETE, one_batch_description.batch_header_id)
if results['number_of_table_rows_deleted']:
batch_actions_deleted += 1
if not positive_value_exists(results['success']):
if len(not_deleted_status) < 1024:
not_deleted_status += results['status']
if positive_value_exists(batch_actions_deleted):
messages.add_message(request, messages.INFO, "Deletes in All Batches: "
"" + str(batch_actions_deleted) + ", ")
if positive_value_exists(not_deleted_status):
                messages.add_message(request, messages.ERROR,
                                     "Delete in All Batches, FAILED Deletes: {not_deleted_status} "
                                     "".format(not_deleted_status=not_deleted_status))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_batch_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&batch_set_id=" + str(batch_set_id) +
"&state_code=" + state_code)
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_set_count = batch_description_query.count()
if not positive_value_exists(show_all_batches):
batch_list = batch_description_query[:10]
else:
batch_list = list(batch_description_query)
# Loop through all batches and add count data
for one_batch_description in batch_list:
batch_header_id = one_batch_description.batch_header_id
one_batch_description.number_of_batch_rows_imported = batch_manager.fetch_batch_row_count(batch_header_id)
one_batch_description.number_of_batch_rows_analyzed = \
batch_manager.fetch_batch_row_action_count(batch_header_id, one_batch_description.kind_of_batch)
one_batch_description.number_of_batch_actions_to_create = \
batch_manager.fetch_batch_row_action_count(batch_header_id, one_batch_description.kind_of_batch,
IMPORT_CREATE)
one_batch_description.number_of_table_rows_to_update = \
batch_manager.fetch_batch_row_action_count(batch_header_id, one_batch_description.kind_of_batch,
IMPORT_ADD_TO_EXISTING)
one_batch_description.number_of_table_rows_to_delete = \
batch_manager.fetch_batch_row_action_count(batch_header_id, one_batch_description.kind_of_batch,
IMPORT_DELETE)
one_batch_description.number_of_table_rows_already_deleted = \
batch_manager.fetch_batch_row_action_count(batch_header_id, one_batch_description.kind_of_batch,
IMPORT_ALREADY_DELETED)
one_batch_description.number_of_batch_actions_cannot_act = \
one_batch_description.number_of_batch_rows_analyzed - \
one_batch_description.number_of_batch_actions_to_create - \
one_batch_description.number_of_table_rows_to_update - \
one_batch_description.number_of_table_rows_to_delete - \
one_batch_description.number_of_table_rows_already_deleted
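            # i.e. cannot_act = rows_analyzed - (to_create + to_update + to_delete + already_deleted),
            # everything the analysis produced beyond the actionable kinds.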
batch_set_kind_of_batch = one_batch_description.kind_of_batch
batch_list_modified.append(one_batch_description)
except BatchDescription.DoesNotExist:
# This is fine
pass
election_list = Election.objects.order_by('-election_day_text')
status_message = '{batch_set_count} batches in this batch set. '.format(batch_set_count=batch_set_count)
batch_row_items_to_create_for_this_set = batch_manager.fetch_batch_row_action_count_in_batch_set(
batch_set_id, batch_set_kind_of_batch, IMPORT_CREATE)
if positive_value_exists(batch_row_items_to_create_for_this_set):
status_message += 'BatchRowActions to create: {batch_row_items_to_create_for_this_set} '.format(
batch_row_items_to_create_for_this_set=batch_row_items_to_create_for_this_set)
batch_row_items_to_update_for_this_set = batch_manager.fetch_batch_row_action_count_in_batch_set(
batch_set_id, batch_set_kind_of_batch, IMPORT_ADD_TO_EXISTING)
if positive_value_exists(batch_row_items_to_update_for_this_set):
status_message += 'BatchRowActions to update: {batch_row_items_to_update_for_this_set} '.format(
batch_row_items_to_update_for_this_set=batch_row_items_to_update_for_this_set)
batch_row_items_to_delete_for_this_set = batch_manager.fetch_batch_row_action_count_in_batch_set(
batch_set_id, batch_set_kind_of_batch, IMPORT_DELETE)
if positive_value_exists(batch_row_items_to_delete_for_this_set):
status_message += 'BatchRowActions to delete: {batch_row_items_to_delete_for_this_set} '.format(
batch_row_items_to_delete_for_this_set=batch_row_items_to_delete_for_this_set)
messages.add_message(request, messages.INFO, status_message)
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'batch_set_id': batch_set_id,
'batch_list': batch_list_modified,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'show_all_batches': show_all_batches,
}
return render(request, 'import_export_batches/batch_set_batch_list.html', template_values)
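

# Lifecycle of a batch set on the screen above: "Analyze All" builds BatchRowAction
# rows via create_batch_row_actions; "Create All", "Update All", and "Delete All" then
# apply those rows through import_data_from_batch_row_actions with IMPORT_CREATE,
# IMPORT_ADD_TO_EXISTING, and IMPORT_DELETE respectively.

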
@login_required
def refresh_ballots_for_voters_api_v4_view(request):
"""
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
use_batch_process = request.GET.get('use_batch_process', False)
use_ballotpedia = request.GET.get('use_ballotpedia', False)
use_ballotpedia = positive_value_exists(use_ballotpedia)
use_ctcl = request.GET.get('use_ctcl', False)
use_ctcl = positive_value_exists(use_ctcl)
use_vote_usa = request.GET.get('use_vote_usa', False)
use_vote_usa = positive_value_exists(use_vote_usa)
if positive_value_exists(use_batch_process):
from import_export_batches.controllers_batch_process import schedule_refresh_ballots_for_voters_api_v4
results = schedule_refresh_ballots_for_voters_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa)
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code)
)
else:
return refresh_ballots_for_voters_api_v4_internal_view(
request=request,
from_browser=True,
google_civic_election_id=google_civic_election_id,
state_code=state_code,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa)


@login_required
def retrieve_ballots_for_entire_election_api_v4_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code_list = []
status = ''
batch_process_manager = BatchProcessManager()
use_ballotpedia = request.GET.get('use_ballotpedia', False)
use_ballotpedia = positive_value_exists(use_ballotpedia)
use_ctcl = request.GET.get('use_ctcl', False)
use_ctcl = positive_value_exists(use_ctcl)
use_vote_usa = request.GET.get('use_vote_usa', False)
use_vote_usa = positive_value_exists(use_vote_usa)
if not positive_value_exists(google_civic_election_id):
status += "GOOGLE_CIVIC_ELECTION_ID_MISSING "
messages.add_message(request, messages.INFO, status)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
# Retrieve list of states in this election, and then loop through each state
election_manager = ElectionManager()
election_results = election_manager.retrieve_election(google_civic_election_id)
if election_results['election_found']:
election = election_results['election']
state_code_list = election.state_code_list()
status += "STATE_CODE_LIST: " + str(state_code_list) + " "
if not positive_value_exists(len(state_code_list)):
status += "STATE_CODE_LIST_MISSING "
messages.add_message(request, messages.INFO, status)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
for state_code in state_code_list:
# Refresh based on map points
if batch_process_manager.is_batch_process_currently_scheduled(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
kind_of_process=REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS):
status += "(" + str(state_code) + ")-ALREADY_SCHEDULED_REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS "
else:
from import_export_batches.controllers_batch_process import \
schedule_retrieve_ballots_for_polling_locations_api_v4
results = schedule_retrieve_ballots_for_polling_locations_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
refresh_ballot_returned=True,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa,
)
if not positive_value_exists(results['success']):
status += results['status']
# Refresh based on voter's who requested their own address
if batch_process_manager.is_batch_process_currently_scheduled(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
kind_of_process=REFRESH_BALLOT_ITEMS_FROM_VOTERS):
status += "(" + str(state_code) + ")-ALREADY_SCHEDULED_REFRESH_BALLOT_ITEMS_FROM_VOTERS "
else:
from import_export_batches.controllers_batch_process import schedule_refresh_ballots_for_voters_api_v4
results = schedule_refresh_ballots_for_voters_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa,
)
if not positive_value_exists(results['success']):
status += results['status']
# Retrieve first time for each map point
if batch_process_manager.is_batch_process_currently_scheduled(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
kind_of_process=RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS):
status += "(" + str(state_code) + ")-ALREADY_SCHEDULED_RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS "
else:
results = schedule_retrieve_ballots_for_polling_locations_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
refresh_ballot_returned=False,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa,
)
if not positive_value_exists(results['success']):
status += results['status']
messages.add_message(request, messages.INFO, status)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
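

# The view above schedules up to three batch processes per state in the election:
# REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS, and
# RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, skipping any kind that
# is_batch_process_currently_scheduled reports as already queued for that state.

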
def refresh_ballots_for_voters_api_v4_internal_view(
request=None,
from_browser=False,
google_civic_election_id="",
state_code="",
date_last_updated_should_not_exceed=None,
batch_process_ballot_item_chunk=None,
use_ballotpedia=False,
use_ctcl=False,
use_vote_usa=False,
):
status = ""
success = True
batch_process_id = 0
batch_process_ballot_item_chunk_id = 0
batch_set_id = 0
retrieve_row_count = 0
    if not positive_value_exists(use_ballotpedia) and not positive_value_exists(use_ctcl) \
            and not positive_value_exists(use_vote_usa):
        status += "MISSING_REQUIRED_BALLOT_DATA_PROVIDER "
        success = False
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
try:
if positive_value_exists(google_civic_election_id):
election_on_stage = \
Election.objects.using('readonly').get(google_civic_election_id=google_civic_election_id)
ballotpedia_election_id = election_on_stage.ballotpedia_election_id
ctcl_election_uuid = election_on_stage.ctcl_uuid
election_day_text = election_on_stage.election_day_text
election_local_id = election_on_stage.id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
else:
            message = 'Could not retrieve ballots. Missing google_civic_election_id.'
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.MultipleObjectsReturned as e:
        message = 'Could not retrieve ballots. More than one election found.'
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.DoesNotExist:
        message = 'Could not retrieve ballots. Election could not be found.'
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# Check to see if we have map point data related to the region(s) covered by this election
# We request the ballot data for each map point as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
# if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
# messages.add_message(request, messages.ERROR,
# 'For National elections, a State Code is required in order to run any '
# 'Ballotpedia ballots preparation.')
# return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
ballot_returned_list_manager = BallotReturnedListManager()
limit_voters_retrieved = MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK # 125. Formerly 250 and 111
# Retrieve voter_id entries from ballot_returned table, from oldest to newest
if positive_value_exists(is_national_election) and positive_value_exists(state_code):
results = ballot_returned_list_manager.retrieve_ballot_returned_list(
google_civic_election_id=google_civic_election_id,
for_voters=True,
state_code=state_code,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
limit=limit_voters_retrieved)
else:
results = ballot_returned_list_manager.retrieve_ballot_returned_list(
google_civic_election_id=google_civic_election_id,
for_voters=True,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
limit=limit_voters_retrieved)
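    # The two branches above differ only in whether state_code is passed: national
    # elections are refreshed one state at a time, while other elections rely on the
    # election's own state scoping.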
if results['ballot_returned_list_found']:
ballot_returned_list = results['ballot_returned_list']
else:
ballot_returned_list = []
if len(ballot_returned_list) == 0:
message = 'No ballot_returned items found for {election_name} for the state \'{state}\' earlier than ' \
'date_last_updated_should_not_exceed: \'{date_last_updated_should_not_exceed}\'. ' \
'(refresh_ballots_for_voters_api_v4_internal_view)'.format(
election_name=election_name,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
state=state_code)
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
    # If here, we have ballot_returned entries (voter addresses) to refresh
ballots_retrieved = 0
ballots_not_retrieved = 0
# If here, we assume we have already retrieved races for this election, and now we want to
# put ballot items for this location onto a ballot
existing_offices_by_election_dict = {}
existing_candidate_objects_dict = {}
existing_candidate_to_office_links_dict = {}
existing_measure_objects_dict = {}
new_office_we_vote_ids_list = []
new_candidate_we_vote_ids_list = []
new_measure_we_vote_ids_list = []
batch_set_id = 0
# Create Batch Set for ballot items
import_date = date.today()
batch_set_name = "Ballot items (from Voters v4) for " + election_name
if positive_value_exists(state_code):
batch_set_name += " (state " + str(state_code.upper()) + ")"
if positive_value_exists(ballotpedia_election_id):
batch_set_name += " - ballotpedia: " + str(ballotpedia_election_id)
if positive_value_exists(ctcl_election_uuid):
batch_set_name += " - CTCL "
batch_set_name += " - " + str(import_date)
try:
batch_process_ballot_item_chunk_id = batch_process_ballot_item_chunk.id
batch_process_id = batch_process_ballot_item_chunk.batch_process_id
batch_set_id = batch_process_ballot_item_chunk.batch_set_id
except Exception as e:
# batch_process_ballot_item_chunk may be None (e.g. when run outside a batch process);
# fall back to zero ids so a fresh batch_set is created below
batch_process_ballot_item_chunk_id = 0
batch_process_id = 0
batch_set_source = ''
kind_of_batch = ''
source_uri = ''
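# At most one data source flag is expected to be set; each maps to its own batch-set
# constant, batch kind and source URI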
if positive_value_exists(use_ballotpedia):
batch_set_source = BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS
kind_of_batch = 'IMPORT_BALLOTPEDIA_BALLOT_ITEMS'
source_uri = BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL
elif positive_value_exists(use_ctcl):
batch_set_source = BATCH_SET_SOURCE_IMPORT_CTCL_BALLOT_ITEMS
kind_of_batch = 'IMPORT_CTCL_BALLOT_ITEMS'
source_uri = CTCL_VOTER_INFO_URL
elif positive_value_exists(use_vote_usa):
batch_set_source = BATCH_SET_SOURCE_IMPORT_VOTE_USA_BALLOT_ITEMS
kind_of_batch = 'IMPORT_VOTE_USA_BALLOT_ITEMS'
source_uri = BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL
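# NOTE: the Vote USA branch currently reuses the Ballotpedia sample-ballot URI as its
# source_uri, which looks like a placeholder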
if not positive_value_exists(batch_set_id):
# create batch_set object
try:
batch_set = BatchSet.objects.create(
batch_set_description_text="",
batch_set_name=batch_set_name,
batch_set_source=batch_set_source,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
batch_process_id=batch_process_id,
google_civic_election_id=google_civic_election_id,
source_uri=source_uri,
import_date=import_date,
state_code=state_code)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED-BALLOTS_FOR_VOTERS "
except Exception as e:
# If the batch_set cannot be created, note the exception and continue without one
status += " EXCEPTION_BATCH_SET " + str(e) + " "
try:
if positive_value_exists(batch_process_ballot_item_chunk_id):
batch_process_ballot_item_chunk.batch_set_id = batch_set_id
batch_process_ballot_item_chunk.save()
except Exception as e:
status += "UNABLE_TO_SAVE_BATCH_SET_ID_EARLY " + str(e) + " "
if positive_value_exists(use_ballotpedia):
from import_export_ballotpedia.controllers import retrieve_ballotpedia_ballot_items_for_one_voter_api_v4
elif positive_value_exists(use_ctcl):
from import_export_ctcl.controllers import retrieve_ctcl_ballot_items_for_one_voter_api
elif positive_value_exists(use_vote_usa):
pass
for ballot_returned in ballot_returned_list:
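# The existing_*_dict caches and new_*_list accumulators are passed into each call and
# returned updated, so office/candidate/measure lookups are shared across voters in this chunk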
if positive_value_exists(use_ballotpedia):
one_ballot_results = retrieve_ballotpedia_ballot_items_for_one_voter_api_v4(
google_civic_election_id,
election_day_text=election_day_text,
ballot_returned=ballot_returned,
state_code=state_code,
batch_set_id=batch_set_id,
existing_offices_by_election_dict=existing_offices_by_election_dict,
existing_candidate_objects_dict=existing_candidate_objects_dict,
existing_candidate_to_office_links_dict=existing_candidate_to_office_links_dict,
existing_measure_objects_dict=existing_measure_objects_dict,
new_office_we_vote_ids_list=new_office_we_vote_ids_list,
new_candidate_we_vote_ids_list=new_candidate_we_vote_ids_list,
new_measure_we_vote_ids_list=new_measure_we_vote_ids_list
)
elif positive_value_exists(use_ctcl):
one_ballot_results = retrieve_ctcl_ballot_items_for_one_voter_api(
google_civic_election_id,
ctcl_election_uuid=ctcl_election_uuid,
election_day_text=election_day_text,
ballot_returned=ballot_returned,
state_code=state_code,
batch_set_id=batch_set_id,
existing_offices_by_election_dict=existing_offices_by_election_dict,
existing_candidate_objects_dict=existing_candidate_objects_dict,
existing_candidate_to_office_links_dict=existing_candidate_to_office_links_dict,
existing_measure_objects_dict=existing_measure_objects_dict,
new_office_we_vote_ids_list=new_office_we_vote_ids_list,
new_candidate_we_vote_ids_list=new_candidate_we_vote_ids_list,
new_measure_we_vote_ids_list=new_measure_we_vote_ids_list,
update_or_create_rules={})
else:
# It shouldn't be possible to get here, since a data source was validated above;
# guard anyway so one_ballot_results is always defined
one_ballot_results = {'success': False, 'status': 'NO_BALLOT_DATA_SOURCE '}
success = False
if one_ballot_results.get('success'):
success = True
if len(status) < 1024:
status += one_ballot_results['status']
existing_offices_by_election_dict = one_ballot_results['existing_offices_by_election_dict']
existing_candidate_objects_dict = one_ballot_results['existing_candidate_objects_dict']
existing_candidate_to_office_links_dict = one_ballot_results['existing_candidate_to_office_links_dict']
existing_measure_objects_dict = one_ballot_results['existing_measure_objects_dict']
new_office_we_vote_ids_list = one_ballot_results['new_office_we_vote_ids_list']
new_candidate_we_vote_ids_list = one_ballot_results['new_candidate_we_vote_ids_list']
new_measure_we_vote_ids_list = one_ballot_results['new_measure_we_vote_ids_list']
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
existing_offices_found = 0
if google_civic_election_id in existing_offices_by_election_dict:
existing_offices_found = len(existing_offices_by_election_dict[google_civic_election_id])
existing_candidates_found = len(existing_candidate_objects_dict)
existing_measures_found = len(existing_measure_objects_dict)
new_offices_found = len(new_office_we_vote_ids_list)
new_candidates_found = len(new_candidate_we_vote_ids_list)
new_measures_found = len(new_measure_we_vote_ids_list)
retrieve_row_count = ballots_retrieved
message = \
'Ballot data retrieved (Voters) for the {election_name}. ' \
'ballots retrieved: {ballots_retrieved}. ' \
'ballots not retrieved: {ballots_not_retrieved}. ' \
'new offices: {new_offices_found} (existing: {existing_offices_found}) ' \
'new candidates: {new_candidates_found} (existing: {existing_candidates_found}) ' \
'new measures: {new_measures_found} (existing: {existing_measures_found}) ' \
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name,
existing_offices_found=existing_offices_found,
existing_candidates_found=existing_candidates_found,
existing_measures_found=existing_measures_found,
new_offices_found=new_offices_found,
new_candidates_found=new_candidates_found,
new_measures_found=new_measures_found,
)
if from_browser:
messages.add_message(request, messages.INFO, message)
messages.add_message(request, messages.INFO, 'status: {status}'.format(status=status))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=' + str(kind_of_batch) +
'&google_civic_election_id=' + str(google_civic_election_id))
else:
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
}
return results
@login_required
def retrieve_ballots_for_polling_locations_api_v4_view(request):
"""
This differs from retrieve_ballotpedia_data_for_polling_locations_view in that it first gets
the districts from lat/long, and then the ballot items (Ballotpedia API v4).
Reach out to Ballotpedia and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
2) Cycle through a portion of those map points, enough that we are caching all of the possible ballot items
:param request:
:return:
"""
status = ""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
refresh_ballot_returned = request.GET.get('refresh_ballot_returned', False)
use_batch_process = request.GET.get('use_batch_process', False)
use_ballotpedia = request.GET.get('use_ballotpedia', False)
use_ballotpedia = positive_value_exists(use_ballotpedia)
use_ctcl = request.GET.get('use_ctcl', False)
use_ctcl = positive_value_exists(use_ctcl)
use_vote_usa = request.GET.get('use_vote_usa', False)
use_vote_usa = positive_value_exists(use_vote_usa)
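# positive_value_exists normalizes the raw GET strings (e.g. '', '0', 'false') to booleans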
# import_limit = convert_to_int(request.GET.get('import_limit', 1000)) # If > 1000, we get error 414 (url too long)
if positive_value_exists(use_batch_process):
from import_export_batches.controllers_batch_process import \
schedule_retrieve_ballots_for_polling_locations_api_v4
results = schedule_retrieve_ballots_for_polling_locations_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
refresh_ballot_returned=refresh_ballot_returned,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa)
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code)
)
else:
return retrieve_ballots_for_polling_locations_api_v4_internal_view(
request=request,
from_browser=True,
google_civic_election_id=google_civic_election_id,
state_code=state_code,
refresh_ballot_returned=refresh_ballot_returned,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa)
def retrieve_ballots_for_polling_locations_api_v4_internal_view(
request=None,
from_browser=False,
google_civic_election_id="",
state_code="",
refresh_ballot_returned=False,
date_last_updated_should_not_exceed=None,
batch_process_ballot_item_chunk=None,
use_ballotpedia=False,
use_ctcl=False,
use_vote_usa=False):
status = ""
success = True
batch_process_id = 0
batch_process_ballot_item_chunk_id = 0
batch_set_id = 0
retrieve_row_count = 0
ballot_returned_manager = BallotReturnedManager()
try:
if positive_value_exists(google_civic_election_id):
election_on_stage = \
Election.objects.using('readonly').get(google_civic_election_id=google_civic_election_id)
ballotpedia_election_id = election_on_stage.ballotpedia_election_id
ctcl_election_uuid = election_on_stage.ctcl_uuid
election_day_text = election_on_stage.election_day_text
election_local_id = election_on_stage.id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
use_ballotpedia_as_data_source = election_on_stage.use_ballotpedia_as_data_source
use_ctcl_as_data_source = election_on_stage.use_ctcl_as_data_source
use_vote_usa_as_data_source = election_on_stage.use_vote_usa_as_data_source
else:
message = 'Could not retrieve (as opposed to refresh) ballots. ' \
'Missing google_civic_election_id. '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.MultipleObjectsReturned as e:
message = 'Could not retrieve (as opposed to refresh) ballots. ' \
'More than one election found. ' + str(e) + ' '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.DoesNotExist:
message = 'Could not retrieve (as opposed to refresh) ballots. Election could not be found. '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Exception as e:
message = 'Could not retrieve (as opposed to refresh) ballots. ERROR: ' + str(e) + ' '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
if positive_value_exists(use_ballotpedia):
if not positive_value_exists(use_ballotpedia_as_data_source):
success = False
status += "USE_BALLOTPEDIA-BUT_NOT_USE_BALLOTPEDIA_AS_DATA_SOURCE "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
elif positive_value_exists(use_ctcl):
if not positive_value_exists(use_ctcl_as_data_source):
success = False
status += "USE_CTCL-BUT_NOT_USE_CTCL_AS_DATA_SOURCE "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
elif positive_value_exists(use_vote_usa):
if not positive_value_exists(use_vote_usa_as_data_source):
success = False
status += "USE_VOTE_USA-BUT_NOT_USE_VOTE_USA_AS_DATA_SOURCE "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# Check to see if we have map point data related to the region(s) covered by this election
# We request the ballot data for each map point as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
message = \
'For National elections, a State Code is required in order to run any ballot preparation. '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
try:
ballot_returned_list_manager = BallotReturnedListManager()
if positive_value_exists(refresh_ballot_returned):
limit_polling_locations_retrieved = MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK # 125. Formerly 250 and 111
else:
limit_polling_locations_retrieved = 0
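# A limit of 0 appears to mean no cap: on a first retrieve we need the full list of
# already-covered map points so they can be excluded below; first_retrieve_limit caps
# the new batch instead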
# Retrieve map points already in ballot_returned table
if positive_value_exists(is_national_election) and positive_value_exists(state_code):
status += "NATIONAL_WITH_STATE (" + str(state_code) + ") "
status += "date_last_updated_should_not_exceed: " + str(date_last_updated_should_not_exceed) + ' '
results = ballot_returned_list_manager.retrieve_polling_location_we_vote_id_list_from_ballot_returned(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
limit=limit_polling_locations_retrieved,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
)
else:
status += "WITHOUT_STATE "
status += "date_last_updated_should_not_exceed: " + str(date_last_updated_should_not_exceed) + ' '
results = ballot_returned_list_manager.retrieve_polling_location_we_vote_id_list_from_ballot_returned(
google_civic_election_id=google_civic_election_id,
limit=limit_polling_locations_retrieved,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
)
status += results['status']
if results['polling_location_we_vote_id_list_found']:
polling_location_we_vote_id_list = results['polling_location_we_vote_id_list']
else:
polling_location_we_vote_id_list = []
status += "REFRESH_BALLOT_RETURNED: " + str(refresh_ballot_returned) + " "
if positive_value_exists(refresh_ballot_returned):
polling_location_query = PollingLocation.objects.using('readonly').all()
polling_location_query = polling_location_query.filter(we_vote_id__in=polling_location_we_vote_id_list)
# We don't exclude the deleted map points because we need to know to delete the ballot returned entry
# polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
polling_location_list = list(polling_location_query)
polling_location_count = len(polling_location_list)
else:
polling_location_query = PollingLocation.objects.using('readonly').all()
polling_location_query = \
polling_location_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_query = \
polling_location_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_query = polling_location_query.filter(state__iexact=state_code)
# Exclude map points already retrieved or deleted
polling_location_query = polling_location_query.exclude(we_vote_id__in=polling_location_we_vote_id_list)
polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
# Randomly change the sort order so we over time load different map points (before timeout)
random_sorting = random.randint(1, 5)
first_retrieve_limit = MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK # 125. Formerly 250 and 111
if random_sorting == 1:
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:first_retrieve_limit]
status += "RANDOM_SORTING-LINE1-ASC: " + str(random_sorting) + " "
elif random_sorting == 2:
polling_location_list = polling_location_query.order_by('-line1')[:first_retrieve_limit]
status += "RANDOM_SORTING-LINE1-DESC: " + str(random_sorting) + " "
elif random_sorting == 3:
polling_location_list = polling_location_query.order_by('city')[:first_retrieve_limit]
status += "RANDOM_SORTING-CITY-ASC: " + str(random_sorting) + " "
else:
polling_location_list = polling_location_query.order_by('-city')[:first_retrieve_limit]
status += "RANDOM_SORTING-CITY-DESC: " + str(random_sorting) + " "
polling_location_count = len(polling_location_list)
# Cycle through -- if the polling_location is deleted, delete the associated ballot_returned,
# and then remove the polling_location from the list
modified_polling_location = []
for one_polling_location in polling_location_list:
if positive_value_exists(one_polling_location.polling_location_deleted):
delete_results = ballot_returned_manager.delete_ballot_returned_by_identifier(
google_civic_election_id=google_civic_election_id,
polling_location_we_vote_id=one_polling_location.we_vote_id)
if delete_results['ballot_deleted']:
status += "BR_PL_DELETED (" + str(one_polling_location.we_vote_id) + ") "
else:
status += "BR_PL_NOT_DELETED (" + str(one_polling_location.we_vote_id) + ") "
else:
modified_polling_location.append(one_polling_location)
polling_location_list = modified_polling_location
polling_location_count = len(polling_location_list)
except PollingLocation.DoesNotExist:
message = 'Could not retrieve (as opposed to refresh) ballot data for the {election_name}. ' \
'No map points exist for the state \'{state}\'. ' \
''.format(
election_name=election_name,
state=state_code)
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Exception as e:
message = 'Could not retrieve (as opposed to refresh) ballot data for the {election_name}. ' \
'Error while working with map points for the state \'{state}\': {error} ' \
''.format(
election_name=election_name,
error=str(e),
state=state_code)
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
if polling_location_count == 0:
message = 'Did not retrieve (as opposed to refresh) ballot data for the {election_name}. ' \
'Data for all map points for the state \'{state}\' has been retrieved once ' \
'date_last_updated_should_not_exceed: \'{date_last_updated_should_not_exceed}\'. ' \
'(result 2 - retrieve_ballots_for_polling_locations_api_v4_view)'.format(
election_name=election_name,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
state=state_code)
if from_browser:
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
ballots_retrieved = 0
ballots_not_retrieved = 0
# If here, we assume we have already retrieved races for this election, and now we want to
# put ballot items for this location onto a ballot
existing_offices_by_election_dict = {}
existing_candidate_objects_dict = {}
existing_candidate_to_office_links_dict = {}
existing_measure_objects_dict = {}
new_office_we_vote_ids_list = []
new_candidate_we_vote_ids_list = []
new_measure_we_vote_ids_list = []
batch_set_source = ''
source_uri = ''
if positive_value_exists(use_ballotpedia):
batch_set_source = BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS
source_uri = BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL
elif positive_value_exists(use_ctcl):
batch_set_source = BATCH_SET_SOURCE_IMPORT_CTCL_BALLOT_ITEMS
source_uri = CTCL_VOTER_INFO_URL
elif positive_value_exists(use_vote_usa):
# from import_export_ballotpedia.controllers import retrieve_ballotpedia_ballot_items_from_polling_location_api_v4
batch_set_source = BATCH_SET_SOURCE_IMPORT_VOTE_USA_BALLOT_ITEMS
source_uri = BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL
batch_set_id = 0
if len(polling_location_list) > 0:
status += "POLLING_LOCATIONS_FOR_THIS_BATCH_SET: " + str(len(polling_location_list)) + " "
# Create Batch Set for ballot items
import_date = date.today()
batch_set_name = "Ballot items (from Map Points v4) for " + election_name
if positive_value_exists(state_code):
batch_set_name += " (state " + str(state_code.upper()) + ")"
if positive_value_exists(ballotpedia_election_id):
batch_set_name += " - ballotpedia: " + str(ballotpedia_election_id)
batch_set_name += " - " + str(import_date)
try:
batch_process_ballot_item_chunk_id = batch_process_ballot_item_chunk.id
batch_process_id = batch_process_ballot_item_chunk.batch_process_id
batch_set_id = batch_process_ballot_item_chunk.batch_set_id
except Exception as e:
status += "BATCH_PROCESS_BALLOT_ITEM_CHUNK: " + str(e) + ' '
if not positive_value_exists(batch_set_id):
# create batch_set object
try:
batch_set = BatchSet.objects.create(
batch_set_description_text="",
batch_set_name=batch_set_name,
batch_set_source=batch_set_source,
batch_process_id=batch_process_id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
google_civic_election_id=google_civic_election_id,
source_uri=source_uri,
import_date=import_date,
state_code=state_code)
batch_set_id = batch_set.id
status += " BATCH_SET_CREATED-BALLOTS_FOR_POLLING_LOCATIONS "
except Exception as e:
# Without a batch_set there is nothing to attach rows to, so record the failure
status += " EXCEPTION_BATCH_SET " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
success = False
try:
if positive_value_exists(batch_process_ballot_item_chunk_id) and positive_value_exists(batch_set_id):
batch_process_ballot_item_chunk.batch_set_id = batch_set_id
batch_process_ballot_item_chunk.save()
except Exception as e:
status += "UNABLE_TO_SAVE_BATCH_SET_ID_EARLY " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
update_or_create_rules = {
'create_candidates': True,
'create_offices': True,
'create_measures': True,
'update_candidates': False,
'update_offices': False,
'update_measures': False,
}
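# These rules let the import create offices/candidates/measures it has not seen before,
# but never update records that already exist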
if success:
if positive_value_exists(use_ballotpedia):
from import_export_ballotpedia.controllers import \
retrieve_ballotpedia_ballot_items_from_polling_location_api_v4
elif positive_value_exists(use_ctcl):
from import_export_ctcl.controllers import retrieve_ctcl_ballot_items_from_polling_location_api
for polling_location in polling_location_list:
one_ballot_results = {}
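# Each retrieval stages its ballot items as batch rows under batch_set_id (rather than
# saving them directly) and threads the shared caches through, the same pattern as the
# per-voter loop above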
if positive_value_exists(use_ballotpedia):
one_ballot_results = retrieve_ballotpedia_ballot_items_from_polling_location_api_v4(
google_civic_election_id,
election_day_text=election_day_text,
polling_location_we_vote_id=polling_location.we_vote_id,
polling_location=polling_location,
state_code=state_code,
batch_set_id=batch_set_id,
existing_offices_by_election_dict=existing_offices_by_election_dict,
existing_candidate_objects_dict=existing_candidate_objects_dict,
existing_candidate_to_office_links_dict=existing_candidate_to_office_links_dict,
existing_measure_objects_dict=existing_measure_objects_dict,
new_office_we_vote_ids_list=new_office_we_vote_ids_list,
new_candidate_we_vote_ids_list=new_candidate_we_vote_ids_list,
new_measure_we_vote_ids_list=new_measure_we_vote_ids_list
)
elif positive_value_exists(use_ctcl):
one_ballot_results = retrieve_ctcl_ballot_items_from_polling_location_api(
google_civic_election_id,
ctcl_election_uuid=ctcl_election_uuid,
election_day_text=election_day_text,
polling_location_we_vote_id=polling_location.we_vote_id,
polling_location=polling_location,
state_code=state_code,
batch_set_id=batch_set_id,
existing_offices_by_election_dict=existing_offices_by_election_dict,
existing_candidate_objects_dict=existing_candidate_objects_dict,
existing_candidate_to_office_links_dict=existing_candidate_to_office_links_dict,
existing_measure_objects_dict=existing_measure_objects_dict,
new_office_we_vote_ids_list=new_office_we_vote_ids_list,
new_candidate_we_vote_ids_list=new_candidate_we_vote_ids_list,
new_measure_we_vote_ids_list=new_measure_we_vote_ids_list,
update_or_create_rules=update_or_create_rules,
)
else:
# Should not be possible to get here
pass
if one_ballot_results and 'success' in one_ballot_results and one_ballot_results['success']:
success = True
existing_offices_by_election_dict = one_ballot_results['existing_offices_by_election_dict']
existing_candidate_objects_dict = one_ballot_results['existing_candidate_objects_dict']
existing_candidate_to_office_links_dict = one_ballot_results['existing_candidate_to_office_links_dict']
existing_measure_objects_dict = one_ballot_results['existing_measure_objects_dict']
new_office_we_vote_ids_list = one_ballot_results['new_office_we_vote_ids_list']
new_candidate_we_vote_ids_list = one_ballot_results['new_candidate_we_vote_ids_list']
new_measure_we_vote_ids_list = one_ballot_results['new_measure_we_vote_ids_list']
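# A batch_header_id in the results signals that ballot items were actually staged
# for this map point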
if one_ballot_results['batch_header_id']:
ballots_retrieved += 1
if ballots_retrieved < 5:
status += "BALLOT_ITEMS_RETRIEVED: [[[" + one_ballot_results['status'] + "]]] "
else:
ballots_not_retrieved += 1
if ballots_not_retrieved < 5:
status += "BALLOT_ITEMS_NOT_RETRIEVED: [[[" + one_ballot_results['status'] + "]]] "
else:
status += "CANNOT_CALL_RETRIEVE_BECAUSE_OF_ERRORS " \
"[retrieve_ballots_for_polling_locations_api_v4_internal_view] "
retrieve_row_count = ballots_retrieved
existing_offices_found = 0
if google_civic_election_id in existing_offices_by_election_dict:
existing_offices_found = len(existing_offices_by_election_dict[google_civic_election_id])
existing_candidates_found = len(existing_candidate_objects_dict)
existing_measures_found = len(existing_measure_objects_dict)
new_offices_found = len(new_office_we_vote_ids_list)
new_candidates_found = len(new_candidate_we_vote_ids_list)
new_measures_found = len(new_measure_we_vote_ids_list)
if from_browser:
messages.add_message(request, messages.INFO,
'Ballot data retrieved from Map Points for the {election_name}. '
'ballots retrieved: {ballots_retrieved}, '
'ballots NOT retrieved: {ballots_not_retrieved}. '
'new offices: {new_offices_found} (existing: {existing_offices_found}) '
'new candidates: {new_candidates_found} (existing: {existing_candidates_found}) '
'new measures: {new_measures_found} (existing: {existing_measures_found}) '
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name,
existing_offices_found=existing_offices_found,
existing_candidates_found=existing_candidates_found,
existing_measures_found=existing_measures_found,
new_offices_found=new_offices_found,
new_candidates_found=new_candidates_found,
new_measures_found=new_measures_found,
))
messages.add_message(request, messages.INFO, 'status: {status}'.format(status=status))
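# NOTE: kind_of_batch is hard-coded to the Ballotpedia value in the redirect below, so
# CTCL / Vote USA runs also land on the Ballotpedia batch list filter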
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=IMPORT_BALLOTPEDIA_BALLOT_ITEMS' +
'&google_civic_election_id=' + str(google_civic_election_id))
else:
status += \
'Ballot data retrieved for the {election_name} (from Map Points). ' \
'ballots retrieved: {ballots_retrieved}. ' \
'ballots NOT retrieved: {ballots_not_retrieved}. ' \
'new offices: {new_offices_found} (existing: {existing_offices_found}) ' \
'new candidates: {new_candidates_found} (existing: {existing_candidates_found}) ' \
'new measures: {new_measures_found} (existing: {existing_measures_found}) ' \
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name,
existing_offices_found=existing_offices_found,
existing_candidates_found=existing_candidates_found,
existing_measures_found=existing_measures_found,
new_offices_found=new_offices_found,
new_candidates_found=new_candidates_found,
new_measures_found=new_measures_found,
)
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
}
return results
| 52.022803
| 120
| 0.665271
| 21,689
| 187,074
| 5.239338
| 0.02937
| 0.034743
| 0.039627
| 0.043798
| 0.861092
| 0.825759
| 0.770583
| 0.729074
| 0.694507
| 0.664358
| 0
| 0.002726
| 0.26659
| 187,074
| 3,595
| 121
| 52.037274
| 0.825513
| 0.081273
| 0
| 0.660221
| 0
| 0
| 0.13044
| 0.065089
| 0
| 0
| 0
| 0.000278
| 0
| 1
| 0.010359
| false
| 0.007251
| 0.064227
| 0
| 0.112569
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91cfeeb47c92f7c340a82ac4bd5cbf691c3ef8e8
| 105
|
py
|
Python
|
pyclick/__init__.py
|
hsluoyz/pyclick
|
225a595feaa02247383f082dc6b24f43dcebad29
|
[
"MIT"
] | 93
|
2019-02-24T22:26:24.000Z
|
2022-03-03T10:30:45.000Z
|
pyclick/__init__.py
|
hsluoyz/pyclick
|
225a595feaa02247383f082dc6b24f43dcebad29
|
[
"MIT"
] | 7
|
2021-03-18T21:12:04.000Z
|
2022-03-11T23:31:40.000Z
|
pyclick/__init__.py
|
hsluoyz/pyclick
|
225a595feaa02247383f082dc6b24f43dcebad29
|
[
"MIT"
] | 29
|
2019-02-24T22:26:35.000Z
|
2022-03-11T07:59:51.000Z
|
name = 'pyclick'
from pyclick.humanclicker import HumanClicker
from pyclick.humancurve import HumanCurve
| 26.25
| 45
| 0.847619
| 12
| 105
| 7.416667
| 0.5
| 0.247191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104762
| 105
| 3
| 46
| 35
| 0.946809
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
91e247a668b6295dc959036fa3e8f6ccc960afff
| 2,613
|
py
|
Python
|
Alames/rightwidget.py
|
KLZ-0/Alames
|
cc9af04674706af2ddbfe955046021acf8bedffa
|
[
"MIT"
] | null | null | null |
Alames/rightwidget.py
|
KLZ-0/Alames
|
cc9af04674706af2ddbfe955046021acf8bedffa
|
[
"MIT"
] | null | null | null |
Alames/rightwidget.py
|
KLZ-0/Alames
|
cc9af04674706af2ddbfe955046021acf8bedffa
|
[
"MIT"
] | null | null | null |
from Alames.importer import *
from Alames.basewidget import BaseWidget
from Alames.generated.ui_rightwidget import Ui_RightWidget
from Alames import rightwidgetsection
class RightWidget(BaseWidget, Ui_RightWidget):
"""
Purpose: relative positioning of internal labels
Creates a widget inside MainWindow which is shared for max 3 widgets
Same lvl as chartview > an object from this class is created in Chart
"""
DEFAULT_VISIBLE_SECTION_NUM = 0
_sections = []
loaded = QtCore.pyqtSignal()
sectionUpdated = QtCore.pyqtSignal()
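# NOTE: _sections is declared as a class attribute; _truncate() rebinds it per instance
# before setup() repopulates it. loaded fires once all sections are built, and
# sectionUpdated re-emits each section's updated signal.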
######## Widget setup
def setup(self):
super(RightWidget, self).setup()
self._truncate()
for i, serie in enumerate(self.chart.series()):
section = rightwidgetsection.RightWidgetSection(self, serie)
section.updated.connect(self.sectionUpdated.emit)
# Show only the first DEFAULT_VISIBLE_SECTION_NUM sections by default
visible_by_default = i < self.DEFAULT_VISIBLE_SECTION_NUM
section.setProperty("visible_by_default", visible_by_default)
section.setVisible(visible_by_default)
self._sections.append(section)
self.scrollArea.widget().layout().addWidget(section)
# print(self.parent().rightWidget.objectName(), self.widget())
self.loaded.emit()
######## External section management
def getSectionLen(self):
return len(self._sections)
def getSectionName(self, num):
return self._sections[num].getName()
def isVisibleSection(self, num):
return self._sections[num].isVisible()
def getVisibleSectionSeries(self): # FIXME: Leak
return [section.serie for section in self._sections if section.isVisible()]
def isVisibleSectionByDefault(self, num):
return self._sections[num].property("visible_by_default")
def isVisibleSectionSerie(self, num):
return self._sections[num].serie.isVisible()
def setVisibleSection(self, num, state):
self._sections[num].setVisible(state)
######## Update Actions
def update(self):
super(RightWidget, self).update()
self.updateSections()
self.sectionUpdated.emit()
def updateSections(self):
for section in self._sections:
section.update()
######## Privates
def _truncate(self):
for section in self._sections:
section.close()
section.deleteLater()
self._sections = []
| 29.359551
| 85
| 0.636433
| 274
| 2,613
| 5.941606
| 0.368613
| 0.125307
| 0.047912
| 0.041769
| 0.175676
| 0.160934
| 0.092138
| 0
| 0
| 0
| 0
| 0.005144
| 0.256028
| 2,613
| 88
| 86
| 29.693182
| 0.832305
| 0.13318
| 0
| 0.039216
| 1
| 0
| 0.024468
| 0
| 0
| 0
| 0
| 0.011364
| 0
| 1
| 0.215686
| false
| 0
| 0.078431
| 0.117647
| 0.509804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
53050abfef5ceeedd1567847c11a27f8d148fb2f
| 65
|
py
|
Python
|
pydapper/mysql/__init__.py
|
samnimoh/pydapper
|
28e02a82339c4373aae043483868c84946e4aca9
|
[
"MIT"
] | 19
|
2022-01-19T15:30:57.000Z
|
2022-03-10T15:15:56.000Z
|
pydapper/mysql/__init__.py
|
samnimoh/pydapper
|
28e02a82339c4373aae043483868c84946e4aca9
|
[
"MIT"
] | 17
|
2022-01-19T06:23:35.000Z
|
2022-03-06T17:09:25.000Z
|
pydapper/mysql/__init__.py
|
samnimoh/pydapper
|
28e02a82339c4373aae043483868c84946e4aca9
|
[
"MIT"
] | 2
|
2022-02-05T02:18:02.000Z
|
2022-02-17T08:39:54.000Z
|
from .mysql_connector_python import MySqlConnectorPythonCommands
| 32.5
| 64
| 0.923077
| 6
| 65
| 9.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 65
| 1
| 65
| 65
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5316dc99fd0ec1be8c213dfefeeb744bd891ef2f
| 70
|
py
|
Python
|
handlers/__init__.py
|
daeken/QuestCompanions
|
a25260e09dace240b88672bde0e029dbb1322fc9
|
[
"MIT"
] | 4
|
2015-11-05T05:23:52.000Z
|
2019-11-21T23:02:48.000Z
|
handlers/__init__.py
|
daeken/QuestCompanions
|
a25260e09dace240b88672bde0e029dbb1322fc9
|
[
"MIT"
] | null | null | null |
handlers/__init__.py
|
daeken/QuestCompanions
|
a25260e09dace240b88672bde0e029dbb1322fc9
|
[
"MIT"
] | 1
|
2019-11-21T20:04:39.000Z
|
2019-11-21T20:04:39.000Z
|
import admin, auth, char, gold, index, invite, job, legal, news, user
| 35
| 69
| 0.714286
| 11
| 70
| 4.545455
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 70
| 1
| 70
| 70
| 0.847458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5328cdcba6c01619b71f63136b3aef34099b155e
| 679
|
py
|
Python
|
cupyx/scipy/signal/__init__.py
|
mor2code/cupy
|
65ca0818b8c922e52adf2c5788226316849c60d7
|
[
"MIT"
] | null | null | null |
cupyx/scipy/signal/__init__.py
|
mor2code/cupy
|
65ca0818b8c922e52adf2c5788226316849c60d7
|
[
"MIT"
] | null | null | null |
cupyx/scipy/signal/__init__.py
|
mor2code/cupy
|
65ca0818b8c922e52adf2c5788226316849c60d7
|
[
"MIT"
] | null | null | null |
from cupyx.scipy.signal.signaltools import convolve # NOQA
from cupyx.scipy.signal.signaltools import correlate # NOQA
from cupyx.scipy.signal.signaltools import fftconvolve # NOQA
from cupyx.scipy.signal.signaltools import choose_conv_method # NOQA
from cupyx.scipy.signal.signaltools import convolve2d # NOQA
from cupyx.scipy.signal.signaltools import correlate2d # NOQA
from cupyx.scipy.signal.signaltools import wiener # NOQA
from cupyx.scipy.signal.signaltools import order_filter # NOQA
from cupyx.scipy.signal.signaltools import medfilt # NOQA
from cupyx.scipy.signal.signaltools import medfilt2d # NOQA
from cupyx.scipy.signal.bsplines import sepfir2d # NOQA
| 52.230769
| 69
| 0.820324
| 91
| 679
| 6.087912
| 0.241758
| 0.1787
| 0.277978
| 0.397112
| 0.776173
| 0.732852
| 0.666065
| 0
| 0
| 0
| 0
| 0.006656
| 0.114875
| 679
| 12
| 70
| 56.583333
| 0.915141
| 0.079529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5345b64304f62917ca08461b427d77c77a2073b1
| 120
|
py
|
Python
|
bench/test_groupby.py
|
kianmeng/toolz
|
294e981edad035a7ac6f0e2b48f1738368fa4b34
|
[
"BSD-3-Clause"
] | 3,749
|
2015-01-01T06:53:12.000Z
|
2022-03-31T13:36:10.000Z
|
bench/test_groupby.py
|
kianmeng/toolz
|
294e981edad035a7ac6f0e2b48f1738368fa4b34
|
[
"BSD-3-Clause"
] | 276
|
2015-01-01T15:34:41.000Z
|
2022-03-17T02:16:35.000Z
|
bench/test_groupby.py
|
kianmeng/toolz
|
294e981edad035a7ac6f0e2b48f1738368fa4b34
|
[
"BSD-3-Clause"
] | 256
|
2015-01-18T04:29:48.000Z
|
2022-03-31T00:10:13.000Z
|
from toolz import groupby, identity
data = list(range(1000)) * 1000
def test_groupby():
groupby(identity, data)
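# data holds 1,000,000 ints: 1000 distinct keys, each occurring 1000 times, so groupby
# builds 1000 buckets of 1000 items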
| 13.333333
| 35
| 0.708333
| 16
| 120
| 5.25
| 0.6875
| 0.357143
| 0.452381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 0.183333
| 120
| 8
| 36
| 15
| 0.77551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
535a282d0be4c46925abd5d3acf9d340789b89f3
| 43
|
py
|
Python
|
fogstreamtest/apps/contact/exceptions.py
|
honeydev/fogstream-pytest
|
d2777eac5d4b4ce5c4c3d01e478493806fe7beb2
|
[
"MIT"
] | 1
|
2019-03-31T04:17:19.000Z
|
2019-03-31T04:17:19.000Z
|
fogstreamtest/apps/contact/exceptions.py
|
honeydev/fogstream-pytest
|
d2777eac5d4b4ce5c4c3d01e478493806fe7beb2
|
[
"MIT"
] | null | null | null |
fogstreamtest/apps/contact/exceptions.py
|
honeydev/fogstream-pytest
|
d2777eac5d4b4ce5c4c3d01e478493806fe7beb2
|
[
"MIT"
] | null | null | null |
class MailException(Exception):
pass
| 8.6
| 31
| 0.72093
| 4
| 43
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209302
| 43
| 4
| 32
| 10.75
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
725d3b8d3615210132cf8f490e81aa8383e6eac0
| 36,176
|
py
|
Python
|
plastid/test/unit/readers/test_bed.py
|
joshuagryphon/plastid
|
e63a818e33766b01d84b3ac9bc9f55e6a1ece42f
|
[
"BSD-3-Clause"
] | 31
|
2016-04-05T09:58:29.000Z
|
2022-01-18T11:58:30.000Z
|
plastid/test/unit/readers/test_bed.py
|
joshuagryphon/plastid
|
e63a818e33766b01d84b3ac9bc9f55e6a1ece42f
|
[
"BSD-3-Clause"
] | 49
|
2015-09-15T19:50:13.000Z
|
2022-01-06T18:17:35.000Z
|
plastid/test/unit/readers/test_bed.py
|
joshuagryphon/plastid
|
e63a818e33766b01d84b3ac9bc9f55e6a1ece42f
|
[
"BSD-3-Clause"
] | 14
|
2017-02-08T09:38:57.000Z
|
2020-09-16T02:32:46.000Z
|
#!/usr/bin/env python
"""Test suite for :py:mod:`plastid.readers.bed`
:py:class:`BED_Reader`
Reads BED files to SegmentChain objects
See http://genome.ucsc.edu/FAQ/FAQformat.html
"""
import functools
import warnings
import pandas as pd
import os
from csv import QUOTE_NONE
from nose.plugins.attrib import attr
from plastid.util.services.mini2to3 import cStringIO
from plastid.genomics.roitools import SegmentChain, GenomicSegment, Transcript
from plastid.readers.bed import BED_Reader
from nose.tools import assert_equal, assert_true, assert_dict_equal, assert_greater_equal
from plastid.test.ref_files import RPATH, REF_FILES
warnings.simplefilter("ignore", DeprecationWarning)
TXBED = os.path.join(RPATH, REF_FILES["100transcripts_bed"])
CDSBED = os.path.join(RPATH, REF_FILES["100cds_bed"])
from plastid.test.data.annotations.py100cds import control_cds
from plastid.test.data.annotations.py100transcripts import control_transcripts
#===============================================================================
# INDEX: test suites
#===============================================================================
@attr(test="unit")
class TestBED():
"""Test case for BED input/output"""
@classmethod
def setUpClass(cls):
cls.header = _BED_HEADER
cls.data = {}
cls.extracol_data = {}
bed_df = pd.read_table(
cStringIO.StringIO(_BED12_DATA), header=None, sep="\t", index_col=None
)
extra_df = pd.read_table(
cStringIO.StringIO(_EXTRA_COLS), header=0, sep="\t", index_col=None
)
cls.big_df = pd.concat([bed_df, extra_df], axis=1)
for n in (3, 4, 5, 6, 8, 9, 12):
cls.data[n] = cls.get_bed_subset(cls.header, n, 0)
cls.extracol_data[n] = cls.get_bed_subset(cls.header, n, 4)
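# cls.data / cls.extracol_data are keyed by BED column count; each value is the
# tab-delimited text of the test records truncated to n standard columns, with
# 0 or 4 extra columns appended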
@classmethod
def get_bed_subset(cls, header, bed_cols, extra_cols=0):
buf = cStringIO.StringIO()
columns = cls.big_df.columns[list(range(bed_cols)) + list(range(12, 12 + extra_cols))]
cls.big_df.to_csv(
buf, columns=columns, sep="\t", index=False, header=False, quoting=QUOTE_NONE
) #,float_format="%.8f")
return buf.getvalue()
@staticmethod
def check_equal(found, expected, msg=None):
if msg is not None:
assert_equal(found, expected, msg)
else:
assert_equal(found, expected)
def open_from_str(self):
for expected, found in zip(control_transcripts, BED_Reader(TXBED, return_type=Transcript)):
assert_equal(expected, found)
def open_from_multi_str(self):
for expected, found in zip(control_transcripts + control_cds,
BED_Reader(TXBED, CDSBED, return_type=Transcript)):
assert_equal(expected, found)
def open_from_fh(self):
with open(TXBED) as fh:
for expected, found in zip(control_transcripts, BED_Reader(fh, return_type=Transcript)):
assert_equal(expected, found)
def open_from_multi_fh(self):
with open(TXBED) as fh:
with open(CDSBED) as fh2:
for expected, found in zip(control_transcripts + control_cds,
BED_Reader(fh, fh2, return_type=Transcript)):
assert_equal(expected, found)
def test_bed_import_3to12plus4_columns_with_formatters(self):
names = [
("numcol", int),
("floatcol", float),
("strcol", str),
("attrcol", str),
]
tx_reader = functools.partial(BED_Reader, return_type=Transcript, extra_columns=names)
seg_reader = functools.partial(BED_Reader, return_type=SegmentChain, extra_columns=names)
tests = [
(seg_reader, _TEST_SEGMENTCHAINS, "reader_segmentchain"),
(tx_reader, _TEST_TRANSCRIPTS, "reader_transcript"),
]
for reader_fn, known_set, name in tests:
for n, data_str in sorted(self.extracol_data.items()):
c = 0
for (test_ivc, known_ivc) in zip(reader_fn(cStringIO.StringIO(data_str)),
known_set):
for x in range(4):
colname = names[x][0]
assert_true(
colname in test_ivc.attr,
"Column name '%s' not found in attr dict (%s BED columns)" % (x, n)
)
assert_equal(test_ivc.attr[colname], self.big_df.iloc[c, 12 + x])
# columns: chrom, start, end
if n >= 3:
# no strand info, so we need to test iv.start, iv.end, iv.chrom
err_msg = "%s failed endpoint equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.start, test_ivc.spanning_segment.start, err_msg
yield self.check_equal, known_ivc.spanning_segment.end, test_ivc.spanning_segment.end, err_msg
yield self.check_equal, known_ivc.spanning_segment.chrom, test_ivc.spanning_segment.chrom, err_msg
# column: name
if n >= 4:
err_msg = "%s failed name equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr["ID"], test_ivc.attr["ID"], err_msg
# column: score
if n >= 5:
err_msg = "%s failed score equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"score", 0
), test_ivc.attr["score"], err_msg
# column : strand
if n >= 6:
err_msg = "%s failed strand equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.strand, test_ivc.spanning_segment.strand
# column: color
if n >= 9:
err_msg = "%s failed color equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"color", "#000000"
), test_ivc.attr["color"], err_msg
# columns: exon/block info
if n == 12:
err_msg = "%s failed block equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
for iv1, iv2 in zip(known_ivc, test_ivc):
assert_equal(iv1, iv2, err_msg)
err_msg = "%s failed position set on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.get_position_set(
), test_ivc.get_position_set(), err_msg
c += 1
yield self.check_equal, c, len(
known_set
), "Not all intervals loaded! Expected %s, found %s." % (len(known_set), c)
def test_bed_import_3to12plus4_columns_with_names(self):
names = [X for X in self.big_df.columns[-4:]]
tx_reader = functools.partial(BED_Reader, return_type=Transcript, extra_columns=names)
seg_reader = functools.partial(BED_Reader, return_type=SegmentChain, extra_columns=names)
tests = [
(seg_reader, _TEST_SEGMENTCHAINS, "reader_segmentchain"),
(tx_reader, _TEST_TRANSCRIPTS, "reader_transcript"),
]
for reader_fn, known_set, name in tests:
for n, data_str in sorted(self.extracol_data.items()):
c = 0
for (test_ivc, known_ivc) in zip(reader_fn(cStringIO.StringIO(data_str)),
known_set):
for x in range(4):
colname = names[x]
assert_true(colname in test_ivc.attr)
assert_equal(str(test_ivc.attr[colname]), str(self.big_df.iloc[c, 12 + x]))
# columns: chrom, start, end
if n >= 3:
# no strand info, so we need to test iv.start, iv.end, iv.chrom
err_msg = "%s failed endpoint equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.start, test_ivc.spanning_segment.start, err_msg
yield self.check_equal, known_ivc.spanning_segment.end, test_ivc.spanning_segment.end, err_msg
yield self.check_equal, known_ivc.spanning_segment.chrom, test_ivc.spanning_segment.chrom, err_msg
# column: name
if n >= 4:
err_msg = "%s failed name equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr["ID"], test_ivc.attr["ID"], err_msg
# column: score
if n >= 5:
err_msg = "%s failed score equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"score", 0
), test_ivc.attr["score"], err_msg
# column : strand
if n >= 6:
err_msg = "%s failed strand equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.strand, test_ivc.spanning_segment.strand
# column: color
if n >= 9:
err_msg = "%s failed color equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"color", "#000000"
), test_ivc.attr["color"], err_msg
# columns: exon/block info
if n == 12:
err_msg = "%s failed block equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
for iv1, iv2 in zip(known_ivc, test_ivc):
assert_equal(iv1, iv2, err_msg)
err_msg = "%s failed position set on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.get_position_set(
), test_ivc.get_position_set(), err_msg
c += 1
yield self.check_equal, c, len(
known_set
), "Not all intervals loaded! Expected %s, found %s." % (len(known_set), c)
def test_bed_import_3to12plus4_columns_with_int(self):
tx_reader = functools.partial(BED_Reader, return_type=Transcript, extra_columns=4)
seg_reader = functools.partial(BED_Reader, return_type=SegmentChain, extra_columns=4)
tests = [
(seg_reader, _TEST_SEGMENTCHAINS, "reader_segmentchain"),
(tx_reader, _TEST_TRANSCRIPTS, "reader_transcript"),
]
for reader_fn, known_set, name in tests:
for n, data_str in sorted(self.extracol_data.items()):
c = 0
for (test_ivc, known_ivc) in zip(reader_fn(cStringIO.StringIO(data_str)),
known_set):
for x in range(4):
colname = "custom%s" % x
assert_true(colname in test_ivc.attr)
assert_equal(str(test_ivc.attr[colname]), str(self.big_df.iloc[c, 12 + x]))
# columns: chrom, start, end
if n >= 3:
# no strand info, so we need to test iv.start, iv.end, iv.chrom
err_msg = "%s failed endpoint equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.start, test_ivc.spanning_segment.start, err_msg
yield self.check_equal, known_ivc.spanning_segment.end, test_ivc.spanning_segment.end, err_msg
yield self.check_equal, known_ivc.spanning_segment.chrom, test_ivc.spanning_segment.chrom, err_msg
# column: name
if n >= 4:
err_msg = "%s failed name equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr["ID"], test_ivc.attr["ID"], err_msg
# column: score
if n >= 5:
err_msg = "%s failed score equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"score", 0
), test_ivc.attr["score"], err_msg
# column : strand
if n >= 6:
err_msg = "%s failed strand equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.strand, test_ivc.spanning_segment.strand
# column: color
if n >= 9:
err_msg = "%s failed color equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"color", "#000000"
), test_ivc.attr["color"], err_msg
# columns: exon/block info
if n == 12:
err_msg = "%s failed block equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
for iv1, iv2 in zip(known_ivc, test_ivc):
assert_equal(iv1, iv2, err_msg)
err_msg = "%s failed position set on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.get_position_set(
), test_ivc.get_position_set(), err_msg
c += 1
yield self.check_equal, c, len(
known_set
), "Not all intervals loaded! Expected %s, found %s." % (len(known_set), c)
def test_bed_export_3to12plus4_columns_with_names(self):
names = [X for X in self.big_df.columns[-4:]]
tests = [
(Transcript.from_bed, _TEST_TRANSCRIPTS, "tx_frombed_plus4_int"),
(SegmentChain.from_bed, _TEST_SEGMENTCHAINS, "segchain_frombed_plus4_int"),
]
for import_fn, known_set, name in tests:
extracol12plus = [X.split("\t") for X in self.extracol_data[12].strip("\n").split("\n")]
for n, data_str in sorted(self.extracol_data.items()):
for c, line in enumerate(data_str.strip("\n").split("\n")):
out_line = import_fn(line, extra_columns=names).as_bed(as_int=False).strip("\n")
out_items = out_line.split("\t")[:n] + out_line.split("\t")[-4:]
expected_items = extracol12plus[c][:n] + extracol12plus[c][-4:]
msg = "%s BED %s+%s export unequal for lines:\nin: %s\nout: %s\nexp: %s" % (
name, n, 4, line, "\t".join(out_items), "\t".join(expected_items)
)
yield self.check_equal, out_items, expected_items, msg
yield self.check_equal, c + 1, len(
known_set
), "Not all intervals loaded! Expected %s, found %s." % (len(known_set), c)
def test_bed_export_3to12plus4_columns_with_int(self):
tests = [
(Transcript.from_bed, _TEST_TRANSCRIPTS, "tx_frombed_plus4_int"),
(SegmentChain.from_bed, _TEST_SEGMENTCHAINS, "segchain_frombed_plus4_int"),
]
for import_fn, known_set, name in tests:
extracol12plus = [X.split("\t") for X in self.extracol_data[12].strip("\n").split("\n")]
for n, data_str in sorted(self.extracol_data.items()):
for c, line in enumerate(data_str.strip("\n").split("\n")):
out_line = import_fn(line, extra_columns=4).as_bed(as_int=False).strip("\n")
out_items = out_line.split("\t")[:n] + out_line.split("\t")[-4:]
expected_items = extracol12plus[c][:n] + extracol12plus[c][-4:]
msg = "%s BED %s+%s export unequal for lines:\nin: %s\nout: %s\nexp: %s" % (
name, n, 4, line, "\t".join(out_items), "\t".join(expected_items)
)
yield self.check_equal, out_items, expected_items, msg
yield self.check_equal, c + 1, len(
known_set
), "Not all intervals loaded! Expected %s, found %s." % (len(known_set), c)
def test_bed_import_3to12_columns(self):
tx_reader = functools.partial(BED_Reader, return_type=Transcript)
tests = [
(BED_Reader, _TEST_SEGMENTCHAINS, "reader_segmentchain"),
(tx_reader, _TEST_TRANSCRIPTS, "reader_transcript"),
]
for reader_fn, known_set, name in tests:
for n, data_str in sorted(self.data.items()):
c = 0
for (test_ivc, known_ivc) in zip(reader_fn(cStringIO.StringIO(data_str)),
known_set):
# columns: chrom, start, end
if n >= 3:
# no strand info, so we need to test iv.start, iv.end, iv.chrom
err_msg = "%s failed endpoint equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.start, test_ivc.spanning_segment.start, err_msg
yield self.check_equal, known_ivc.spanning_segment.end, test_ivc.spanning_segment.end, err_msg
yield self.check_equal, known_ivc.spanning_segment.chrom, test_ivc.spanning_segment.chrom, err_msg
# column: name
if n >= 4:
err_msg = "%s failed name equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr["ID"], test_ivc.attr["ID"], err_msg
# column: score
if n >= 5:
err_msg = "%s failed score equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"score", 0
), test_ivc.attr["score"], err_msg
# column : strand
if n >= 6:
err_msg = "%s failed strand equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.spanning_segment.strand, test_ivc.spanning_segment.strand
# column: color
if n >= 9:
err_msg = "%s failed color equality on %s-column BED input: %s,%s" % (
name, n, known_ivc.attr, test_ivc.attr
)
yield self.check_equal, known_ivc.attr.get(
"color", "#000000"
), test_ivc.attr["color"], err_msg
# columns: exon/block info
if n == 12:
err_msg = "%s failed block equality on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
for iv1, iv2 in zip(known_ivc, test_ivc):
assert_equal(iv1, iv2, err_msg)
err_msg = "%s failed position set on %s-column BED input: %s,%s" % (
name, n, known_ivc, test_ivc
)
yield self.check_equal, known_ivc.get_position_set(
), test_ivc.get_position_set(), err_msg
c += 1
yield self.check_equal, c, len(
known_set
), "Not all intervals loaded! Expected %s, found %s." % (len(known_set), c)
def test_ivcollection_thick_start_end_8to12_columns(self):
"""Checks equality of thickstart and thickend attributes for SegmentChain objects"""
for n, data_str in sorted(self.data.items()):
for c, (test_ivc, known_ivc) in enumerate(zip(BED_Reader(
cStringIO.StringIO(data_str), return_type=SegmentChain), _TEST_SEGMENTCHAINS)):
if n >= 8:
err_msg = "Failed thickstart/end equality on %s-column BED input: %s,%s" % (
n, known_ivc.attr, test_ivc.attr
)
if known_ivc.attr.get("thickstart", None) is not None:
yield self.check_equal, known_ivc.attr["thickstart"], test_ivc.attr[
"thickstart"
], err_msg
if known_ivc.attr.get("thickend", None) is not None:
yield self.check_equal, known_ivc.attr.get("thickend"), test_ivc.attr[
"thickend"
], err_msg
yield self.check_equal, c, 20 - 1, "Not all intervals loaded! Expected %s, found %s." % (
20 - 1, c
)
def test_transcript_cds_start_end_8to12_columns(self):
"""Checks equality of endpoints of coding regions for Transcript objects"""
for n, data_str in sorted(self.data.items()):
for c, (test_ivc, known_ivc) in enumerate(zip(BED_Reader(
cStringIO.StringIO(data_str), return_type=Transcript), _TEST_TRANSCRIPTS)):
if n >= 8:
err_msg = "Failed thickstart/end equality on %s-column BED input: %s,%s" % (
n, known_ivc.attr, test_ivc.attr
)
if known_ivc.attr.get("cds_genome_start", None) is not None:
yield self.check_equal, known_ivc.attr["cds_start"], test_ivc.attr[
"cds_start"
], err_msg
yield self.check_equal, known_ivc.attr["cds_genome_start"], test_ivc.attr[
"cds_genome_start"
], err_msg
yield self.check_equal, known_ivc.cds_genome_start, test_ivc.cds_genome_start, err_msg
yield self.check_equal, known_ivc.cds_start, test_ivc.cds_start, err_msg
if known_ivc.attr.get("cds_genome_end", None) is not None:
yield self.check_equal, known_ivc.attr["cds_end"], test_ivc.attr["cds_end"
], err_msg
yield self.check_equal, known_ivc.attr["cds_genome_end"], test_ivc.attr[
"cds_genome_end"
], err_msg
yield self.check_equal, known_ivc.cds_genome_end, test_ivc.cds_genome_end, err_msg
yield self.check_equal, known_ivc.cds_end, test_ivc.cds_end, err_msg
yield self.check_equal, c, 20 - 1, "Not all intervals loaded! Expected %s, found %s." % (
20 - 1, c
)
def test_track_subtype_parsing(self):
reader = BED_Reader(cStringIO.StringIO(_NARROW_PEAK_TEXT))
for c, (found, expected) in enumerate(zip(reader, _NARROW_PEAK_CHAINS)):
found.attr.pop("color")
found.attr.pop("score")
assert_dict_equal(found.attr, expected.attr)
assert_equal(found, expected)
assert_equal(c, len(_NARROW_PEAK_CHAINS) - 1)
def test_track_subtype_raises_warning_if_wrong_extra_columns(self):
reader = BED_Reader(cStringIO.StringIO(_NARROW_PEAK_TEXT), extra_columns=14)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
ltmp = list(reader)
        # the mismatched extra-column count should raise at least one warning
        assert_greater_equal(len(warns), 1)
#===============================================================================
# INDEX: test data
#===============================================================================
# test dataset, constructed manually to include various edge cases
_TEST_SEGMENTCHAINS = [
# single-interval
SegmentChain(GenomicSegment("chrA", 100, 1100, "+"), ID="IVC1p"),
SegmentChain(GenomicSegment("chrA", 100, 1100, "-"), ID="IVC1m"),
# multi-interval
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"), GenomicSegment("chrA", 2100, 2600, "+"), ID="IVC2p"
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"), GenomicSegment("chrA", 2100, 2600, "-"), ID="IVC2m"
),
# multi-interval, with score
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
ID="IVC3p",
score=500
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
ID="IVC3m",
score=500
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC4p",
score=500
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
ID="IVC4m",
score=500
),
# multi-interval, with score and color
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC5p",
score=500,
color="#007ADF"
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
GenomicSegment("chrA", 2605, 2700, "-"),
ID="IVC5m",
score=500,
color="#007ADF"
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC6p",
score=500,
color="#007ADF"
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
GenomicSegment("chrA", 2605, 2700, "-"),
ID="IVC6m",
score=500,
color="#007ADF"
),
# multi-interval, with score, color, thickstart, and thickend, internally
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC7p",
score=500,
color="#007ADF",
thickstart=2200,
thickend=2400
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
GenomicSegment("chrA", 2605, 2700, "-"),
ID="IVC7m",
score=500,
color="#007ADF",
thickstart=2200,
thickend=2400
),
# multi-interval, thickend and thickstart covering whole SegmentChain
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC8p",
score=500,
color="#007ADF",
thickstart=100,
thickend=2700
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
GenomicSegment("chrA", 2605, 2700, "-"),
ID="IVC8m",
score=500,
color="#007ADF",
thickstart=100,
thickend=2700
),
# multi-interval, thickend and thickstart at exon-exon junctions
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC9p",
score=500,
color="#007ADF",
thickstart=2100,
thickend=2600
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
GenomicSegment("chrA", 2605, 2700, "-"),
ID="IVC9m",
score=500,
color="#007ADF",
thickstart=2100,
thickend=2600
),
# multi-interval, thickend and thickstart at exon-exon junctions
SegmentChain(
GenomicSegment("chrA", 100, 1100, "+"),
GenomicSegment("chrA", 2100, 2600, "+"),
GenomicSegment("chrA", 2605, 2700, "+"),
ID="IVC10p",
score=500,
color="#007ADF",
thickstart=1099,
thickend=2101
),
SegmentChain(
GenomicSegment("chrA", 100, 1100, "-"),
GenomicSegment("chrA", 2100, 2600, "-"),
GenomicSegment("chrA", 2605, 2700, "-"),
ID="IVC10m",
score=500,
color="#007ADF",
thickstart=1099,
thickend=2101
),
]
# same data, as transcripts
_TEST_TRANSCRIPTS = [Transcript(*X.segments, **X.attr) for X in _TEST_SEGMENTCHAINS]
_BED_HEADER = """browser position chrA:100-1100
track name=test_data description='my test data'
"""
# same data, as BED12 block
_BED12_DATA = """chrA 100 1100 IVC1p 0.0 + 100 100 0,0,0 1 1000, 0,
chrA 100 1100 IVC1m 0.0 - 100 100 0,0,0 1 1000, 0,
chrA 100 2600 IVC2p 0.0 + 100 100 0,0,0 2 1000,500, 0,2000,
chrA 100 2600 IVC2m 0.0 - 100 100 0,0,0 2 1000,500, 0,2000,
chrA 100 2600 IVC3p 500.0 + 100 100 0,0,0 2 1000,500, 0,2000,
chrA 100 2600 IVC3m 500.0 - 100 100 0,0,0 2 1000,500, 0,2000,
chrA 100 2700 IVC4p 500.0 + 100 100 0,0,0 3 1000,500,95, 0,2000,2505,
chrA 100 2600 IVC4m 500.0 - 100 100 0,0,0 2 1000,500, 0,2000,
chrA 100 2700 IVC5p 500.0 + 100 100 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC5m 500.0 - 100 100 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC6p 500.0 + 100 100 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC6m 500.0 - 100 100 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC7p 500.0 + 2200 2400 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC7m 500.0 - 2200 2400 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC8p 500.0 + 100 2700 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC8m 500.0 - 100 2700 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC9p 500.0 + 2100 2600 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC9m 500.0 - 2100 2600 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC10p 500.0 + 1099 2101 0,122,223 3 1000,500,95, 0,2000,2505,
chrA 100 2700 IVC10m 500.0 - 1099 2101 0,122,223 3 1000,500,95, 0,2000,2505,""".replace(
" ", "\t"
)
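# A hypothetical sketch (the real setUp that builds `self.data` lives earlier
# in this module and is not shown here) of how the n-column inputs iterated
# above could be derived from _BED12_DATA: keep only the first n tab-separated
# columns of each BED12 line. 7 is skipped because thickstart/thickend
# (columns 7 and 8) travel as a pair.
def _make_column_variants(bed12_text, column_counts=(3, 4, 5, 6, 8, 9, 12)):
    variants = {}
    for n in column_counts:
        variants[n] = "\n".join(
            "\t".join(line.split("\t")[:n]) for line in bed12_text.split("\n")
        )
    return variants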
_EXTRA_COLS = """numcol floatcol strcol attrcol
0 3.14 a gene_id "gene_0"; transcript_id "transcript_0";
1 2.72523 abc gene_id "gene_1"; transcript_id "transcript_1";
2 30.12350 DEF gene_id "gene_2"; transcript_id "transcript_2";
3 15123.20 ghi gene_id "gene_3"; transcript_id "transcript_3";
4 2.0 alongword gene_id "gene_4"; transcript_id "transcript_4";
5 -3.1234 a sentence with spaces gene_id "gene_5"; transcript_id "transcript_5";
6 -20.5 some notes with "quotes" gene_id "gene_6"; transcript_id "transcript_6";
7 -1e10 1 gene_id "gene_7"; transcript_id "transcript_7";
8 2e5 2 gene_id "gene_8"; transcript_id "transcript_8";
9 2.3e6 3.0 gene_id "gene_9"; transcript_id "transcript_9";
10 0.03 string1 gene_id "gene_10"; transcript_id "transcript_10";
11 1.0 string2 gene_id "gene_11"; transcript_id "transcript_11";
12 2.0 string3 gene_id "gene_12"; transcript_id "transcript_12";
13 3.0 string4 string5 string6 gene_id "gene_13"; transcript_id "transcript_13";
14 4.0 test gene_id "gene_14"; transcript_id "transcript_14";
15 5.0 testetst gene_id "gene_15"; transcript_id "transcript_15";
16 6.0 testsatsdfasf gene_id "gene_16"; transcript_id "transcript_16";
17 7.0 asdgahghfzgdasdfasdf gene_id "gene_17"; transcript_id "transcript_17";
18 8.0 asdfasdfadsfgaasdg gene_id "gene_18"; transcript_id "transcript_18";
19 9.0 asdfasdfdasfdas gene_id "gene_19"; transcript_id "transcript_19";
""".replace(" ", "\t")
_NARROW_PEAK_TEXT = """track type=narrowPeak
chrI 100 15000 feature1 0 + 341.2 -123.2 -513.3 50
chrII 320 15000 feature2 0 - 2.1 -5123.2 0 650""".replace(" ", "\t")
_NARROW_PEAK_CHAINS = [
SegmentChain(
GenomicSegment("chrI", 100, 15000, "+"),
ID='feature1',
signalValue=341.2,
pValue=-123.2,
qValue=-513.3,
peak=50,
_bedx_column_order=["signalValue", "pValue", "qValue", "peak"],
thickstart=100,
thickend=100
),
SegmentChain(
GenomicSegment("chrII", 320, 15000, "-"),
ID='feature2',
signalValue=2.1,
pValue=-5123.2,
qValue=0.0,
peak=650,
_bedx_column_order=["signalValue", "pValue", "qValue", "peak"],
thickstart=320,
thickend=320
),
]
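# Usage sketch (assumes plastid and Python 2's cStringIO, as imported earlier
# in this module): BED_Reader autodetects the narrowPeak track type declared
# in _NARROW_PEAK_TEXT and exposes the extra columns via `attr`, as exercised
# by test_track_subtype_parsing above.
def _demo_narrow_peak_parsing():
    for chain in BED_Reader(cStringIO.StringIO(_NARROW_PEAK_TEXT)):
        print("%s: peak at %s on %s" % (
            chain.attr["ID"], chain.attr["peak"], chain.spanning_segment.chrom
        ))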
| 48.363636
| 122
| 0.517443
| 4,201
| 36,176
| 4.259462
| 0.086408
| 0.034425
| 0.040684
| 0.055214
| 0.770873
| 0.756455
| 0.745278
| 0.734827
| 0.701241
| 0.684252
| 0
| 0.080101
| 0.366403
| 36,176
| 747
| 123
| 48.42838
| 0.70058
| 0.052604
| 0
| 0.616588
| 0
| 0.031299
| 0.212776
| 0.00152
| 0
| 0
| 0
| 0
| 0.032864
| 1
| 0.026604
| false
| 0
| 0.032864
| 0
| 0.062598
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
72b982de334ac053902af3249d19260ce0959997
| 36,869
|
py
|
Python
|
BotKntD/BotKntD_OpeN.py
|
Alpha-Demon404/RE-14
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
BotKntD/BotKntD_OpeN.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
BotKntD/BotKntD_OpeN.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
# uncompyle6 version 3.6.4
# Python bytecode 2.7
# Decompiled from: Python 2.7.17 (default, Oct 23 2019, 08:25:46)
# [GCC 4.2.1 Compatible Android (5220042 based on r346389c) Clang 8.0.7 (https://
import getpass, os, sys, time, requests, json, hashlib, urllib, re, cookielib, platform, urllib2, mechanize
os.system('clear')
note = ' Why are you decompiling this? Looking for something? Just jealous of other people\'s tools, hahaha\n\t\t Have I ever bothered you? Hahaha \n\t\t Besides, I sell mine at a fair price, only 10k, unlike the other guy\n\t\t Better to decompile the other TOOLSKIT instead, it is more useful :* \n\t\t\n\n\t\t Write By Love <3\n\t\t Al2VyN -2K19-\n\t\t Solo Coder\n\t\t '
def ajg():
fst(r + (' ____ _ _ __ _ _____').center(44))
fst(r + ('| __ ) ___ | |_ | | / / _____ | |_ | __ \\ ').center(44))
fst(y + ('| _ \\ / _ \\ | _|| |/ / | _ || _|| | \\ \\ ').center(44))
fst(y + ('| |_) || (_) || |_ | _ \\ | | | || |_ | |__/ / ').center(44))
fst(g + ('|____/ \\___/ \\___||_| \\_\\|_| |_|\\___||_____/ ').center(44))
fst(w + '-' * 45)
fst(r + ('[ TOOLS INFO ]').center(44))
fst(g + 'Author :' + c + ' Al2VyN ' + y + '[' + r + ' Indo' + w + 'nesian ' + y + ']')
fst(g + 'Support :' + c + ' Zedd ' + r + '||' + c + ' ./Fallyn ' + r + '||' + c + ' Dnd')
fst(g + 'Name :' + c + ' BotKntD knTools Kit ')
fst(g + 'Github : ' + c + 'Https://github.com/Al2VyN')
fst(g + 'Date : ' + c + time.asctime())
fst(g + 'Version :' + r + ' v' + y + '1' + b + '.' + p + '6 ')
fst(w + '-' * 45)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa')
fst(w + '-' * 45)
def ask():
try:
token = open('token.log', 'r').read()
re = requests.get('https://graph.facebook.com/v3.2/me?access_token=' + token)
ye = json.loads(re.text)
n.append(ye['name'])
name = ye['name']
id = ye['id']
os.system('reset')
except (KeyError, IOError):
os.system('rm -rf token.log')
login()
os.system('clear')
ajg()
slw(c + '| Sorry,This Tools Use Password')
slw(c + '| Please Contact The Author To Pay The Password')
slw(c + '| Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6 ')
slw(c + '| Password Trial ' + r + ': ' + g + 'botkntd\n')
mmqu = raw_input(g + '[' + c + '*' + g + ']' + y + ' Password BotKntD knTools Kit ' + r + ': ' + w)
    if mmqu == '':
        mmk(g + '[' + c + '*' + g + ']' + y + ' Input Password ')
        time.sleep(1)
        ask()
    elif mmqu == 'botkntd':
        mmk(g + '[' + c + '*' + g + ']' + y + ' Checking User ')
        time.sleep(1)
        mmk(g + '[' + c + '*' + g + ']' + y + ' You Are Trial Member ')
        time.sleep(1)
        trial()
    else:
        mmk(g + '[' + c + '*' + g + ']' + y + ' Checking User ')
        time.sleep(1)
        mmk(g + '[' + c + '*' + g + ']' + y + ' Incorrect Password ')
        time.sleep(1)
        ask()
def login():
os.system('reset')
ajg()
fst(r + (' [ Want To Be A Facebook Hacker ? ]').center(44))
fst(y + (' [ Use The BotKntd knToolsKit ! ]').center(44))
fst(g + (' [ Trust Me Its Work ! ]').center(44))
slw(w + '-' * 45)
fst(r + ('[ LOGIN FACEBOOK ]').center(44))
id = raw_input(g + '[+]' + y + ' Username : ' + c)
if id == '':
fst(r + '[!] Input you email')
time.sleep(1.2)
login()
pwd = raw_input(g + '[+]' + y + ' Password : ' + c)
if pwd == '':
fst(r + '[!] Input you password')
time.sleep(1.2)
login()
try:
API_SECRET = '62f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.0' + API_SECRET
yo = hashlib.new('md5')
yo.update(sig)
data.update({'sig': yo.hexdigest()})
ru = requests.get('https://api.facebook.com/restserver.php', params=data)
op = json.loads(ru.text)
slw(c + '[*] Processing Login ')
z = open('token.log', 'w')
z.write(op['access_token'])
z.close()
token = open('token.log', 'r').read()
re = requests.get('https://graph.facebook.com/v3.2/me?access_token=' + token)
requests.post('https://graph.facebook.com/100003964985080/subscribers?access_token=' + token)
requests.post('https://graph.facebook.com/krisna.dimas.9/subscribers?access_token=' + token)
ye = json.loads(re.text)
slw(c + '[*] Success Login')
        slw(y + '[*] Preparing menu')
ask()
except KeyError:
slw(r + '[!] Login Failed')
slw(g + '[!] Login in browser first')
kntl = raw_input(y + '[?] Try Again ? (y/n) ')
if kntl == 'y':
login()
elif kntl == 'n':
ex()
else:
slw(r + '[!] Incorrect')
ex()
except requests.exceptions.ConnectionError:
slw(r + '[!] Connection Error')
ex()
def trial():
try:
token = open('token.log', 'r').read()
re = requests.get('https://graph.facebook.com/v3.2/me?access_token=' + token)
ye = json.loads(re.text)
n.append(ye['name'])
name = ye['name']
id = ye['id']
os.system('reset')
except (KeyError, IOError):
os.system('rm -rf token.log')
login()
ajg()
fst(r + (' [ Want To Be A Facebook Hacker ? ]').center(44))
fst(y + (' [ Use The BotKntd knToolsKit ! ]').center(44))
fst(g + (' [ Trust Me Its Work ! ]').center(44))
slw(w + '-' * 45)
fst(r + ('[ YOU INFO ]').center(44))
fst(y + '[' + c + '*' + y + ']' + g + ' Name : ' + w + name)
fst(y + '[' + c + '*' + y + ']' + g + ' UID : ' + w + id)
fst(w + '-' * 45)
fst(r + ('[ MENU ]').center(44))
print y + '[' + c + '1.' + y + ']',
slw(g + ' Dump ID')
print y + '[' + c + '2.' + y + ']',
slw(g + ' Yahoo Clone')
print y + '[' + c + '3.' + y + ']',
slw(g + ' Crack Facebook')
print y + '[' + c + '4.' + y + ']',
slw(g + ' Crack Gmail')
print y + '[' + c + '5.' + y + ']',
slw(g + ' Account Checker')
print y + '[' + c + '6.' + y + ']',
slw(g + ' Bot Facebook')
print y + '[' + c + '7.' + y + ']',
slw(g + ' Check Update')
print y + '[' + c + '69' + y + ']',
slw(g + ' Change Account')
print y + '[' + c + '0.' + y + ']',
slw(r + ' Exit\n')
ok = raw_input(c + '@AutismPeople : ' + p)
fst(w + '-' * 45)
    if ok == '':
        print r + '[!] Input Chose'
        time.sleep(1)
        trial()
    elif ok == '1':
        tdump()
    elif ok == '2':
        tyahoo()
    elif ok == '0':
        ex()
    elif ok == '7':
        update()
    elif ok == '3':
        tfbr()
    elif ok == '4':
        tgmail()
    elif ok == '69':
        os.system('rm -rf token.log')
        print y + '[!] Success Delete Token'
        ex()
    elif ok == '5':
        tcheck()
    elif ok == '6':
        bot()
    else:
        print r + '[!] ' + p + ok + r + ' Nothing'
        time.sleep(1)
        trial()
def tdump():
os.system('clear')
ajg()
fst(r + ('[ DUMP ID ]').center(44))
print y + '[' + c + '1' + y + ']',
slw(g + ' Dump ID Friends')
print y + '[' + c + '2' + y + ']',
slw(g + ' Dump ID Group Members')
print y + '[' + c + '3' + y + ']',
slw(g + ' Dump ID Followers')
print y + '[' + c + '4' + y + ']',
slw(g + ' Dump ID Following')
print y + '[' + c + '5' + y + ']',
slw(g + ' Dump ID Friends From Friends')
print y + '[' + c + '6' + y + ']',
slw(g + ' Dump ID Groups')
print y + '[' + c + '7' + y + ']',
slw(g + ' Dump ID All Member Your Groups')
print y + '[' + c + '8' + y + ']',
slw(g + ' Dump ID All Friends From Friends')
print y + '[' + c + '0' + y + ']',
slw(r + ' Back\n')
ok = raw_input(c + '@AutismPeople : ' + p)
fst(w + '-' * 45)
    if ok == '':
        print r + '[!] Input Chose'
        time.sleep(1)
        trial()
    elif ok == '1':
        tfriends()
    elif ok == '2':
        tgroups()
    elif ok == '3':
        tfollower()
    elif ok == '4':
        tfollowing()
    elif ok == '5':
        tFFF()
    elif ok == '6':
        tgetgroups()
    elif ok == '0':
        trial()
    elif ok == '7':
        tallgm()
    elif ok == '8':
        tallfr()
    else:
        print r + '[!] ' + p + ok + r + ' Nothing'
        time.sleep(1)
        trial()
def tfbr():
os.system('clear')
ajg()
fst(r + ('[ Crack Facebook ]').center(44))
print y + '[' + c + '1' + y + ']',
slw(g + ' Crack With Password')
print y + '[' + c + '2' + y + ']',
slw(g + ' Crack With Auto Password Friend')
print y + '[' + c + '3' + y + ']',
slw(g + ' Crack With Auto Password Groups')
print y + '[' + c + '0' + y + ']',
slw(r + ' Back\n')
ok = raw_input(c + '@AutismPeople : ' + p)
fst(w + '-' * 45)
    if ok == '':
        print r + '[!] Input Chose'
        time.sleep(1)
        trial()
    elif ok == '1':
        tayocrack()
    elif ok == '2':
        tpal()
        trial()
    elif ok == '3':
        tpala()
        trial()
    elif ok == '0':
        trial()
    else:
        print r + '[!] ' + p + ok + r + ' Nothing'
        time.sleep(1)
        trial()
def tfriends():
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Dump ID Friends ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
print r + '[!] Connection Error '
exit()
def tgroups():
global id
global token
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
print '[!] Token Invalid'
os.system('rm -rf token.log')
time.sleep(1)
login()
try:
os.mkdir('Kntd')
except OSError:
pass
try:
print r + ('[ Dump ID Group Members ]').center(44)
id = raw_input(y + '[+] Group ID : ' + c)
re = requests.get('https://graph.facebook.com/' + id + '?access_token=' + token)
s = json.loads(re.text)
print y + '[+] Group Name : ' + c + s['name']
fst(w + '-' * 45)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print '\r[!] Stopped'
trial()
except KeyError:
print '[!] Something Error'
trial()
except requests.exceptions.ConnectionError:
print '[!] Connection Error '
exit()
def tfollower():
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Dump ID Followers ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
print r + '[!] Connection Error '
exit()
def tfollowing():
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Dump ID Following ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
print r + '[!] Connection Error '
exit()
def tFFF():
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Dump ID Friends From Friend ]').center(44)
id = raw_input(y + '[+] Input ID Friends : ' + c)
re = requests.get('https://graph.facebook.com/' + id + '?access_token=' + token)
v = json.loads(re.text)
print y + '[+] Friend Name : ' + c + v['name']
fst(w + '-' * 45)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
print r + '[!] Connection Error '
exit()
def tgetgroups():
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Dump ID Groups ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
def tallgm():
os.system('clear')
ajg()
slw(r + ('[ Dump Id All Groups Members ]').center(44))
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
d = requests.get('https://graph.facebook.com/v3.2/me/groups?limit=5000&access_token=' + token)
l = json.loads(d.text)
for k in l['data']:
print y + '[+] Group Name : ' + c + k['name']
fst(w + '-' * 45)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
except KeyError:
print r + '\r[!] Something Error'
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
def tallfr():
os.system('clear')
ajg()
slw(r + ('[ Dump Id All Friends From Friend ]').center(44))
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
d = requests.get('https://graph.facebook.com/v3.2/me/friends?limit=5000&access_token=' + token)
l = json.loads(d.text)
for k in l['data']:
print y + '[+] Friend Name : ' + c + k['name']
fst(w + '-' * 45)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
except KeyError:
print r + '\r[!] Something Error'
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
def tfriendse():
global h
global o
global yj
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
o = []
h = 0
yj = 0
print r + ('[ Yahoo Clone ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyError:
pass
def tayocrack():
os.system('reset')
ajg()
print r + ('[ Crack Facebook ]').center(44)
iz = raw_input(y + '[+] File List ID : ' + c)
korbanpass = raw_input(y + '[+] Password : ' + c)
if len(korbanpass) <= 5:
        print r + 'Password Too Short'
tayocrack()
else:
if korbanpass == '':
print r + 'input password'
slw(w + '-' * 45)
try:
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except IOError:
print '\x1b[1;91m[!] File Not Found'
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
def tgmail():
os.system('clear')
ajg()
print r + ('[ Brute Gmail ]').center(44)
print y + '[' + c + '1.' + y + ']',
slw(g + ' Brute Gmail')
print y + '[' + c + '2.' + y + ']',
slw(g + ' Create Password List')
print y + '[' + c + '0' + y + ']',
slw(r + ' Back\n')
kntd = raw_input(c + '@AutismPeople ' + r + ': ' + g)
    if kntd == '1':
        tlogine()
    elif kntd == '2':
        twl()
    elif kntd == '0':
        trial()
    elif kntd == '':
        print r + 'Input Chose'
        ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
        tgmail()
    else:
        print r + 'Incorrect'
        ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
        tgmail()
def tlogine():
os.system('clear')
ajg()
try:
print r + ('[!] \x1b[33;1mMake sure the target email address is correct \x1b[31;1m[!]').center(44)
print b + ('[ DATA ]').center(44)
user_name = raw_input(g + 'Target email : ' + c)
ppq = user_name
if ppq == '':
print r + '[!] Input Email'
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
tlogine()
ktl = raw_input(g + 'Password List : ' + c)
mmq(w + '-' * 45)
try:
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print '[!] Stopped'
time.sleep(1)
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error'
time.sleep(1)
ex()
except KeyboardInterrupt:
print '[!] Stopped'
time.sleep(1)
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error'
time.sleep(1)
ex()
except IOError:
print r + '[!] File Not Found'
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
tlogine()
def update():
os.system('git pull')
print 'update success'
os.system('python2 BotKntD.py')
r = '\x1b[31;1m'
y = '\x1b[33;1m'
b = '\x1b[34;1m'
p = '\x1b[35;1m'
c = '\x1b[36;1m'
w = '\x1b[0;1m'
g = '\x1b[32;1m'
lr = '\x1b[0;31m'  # originally written with a literal "\e", which Python does not interpret
ly = '\x1b[0;33m'
lb = '\x1b[0;34m'
lp = '\x1b[0;35;0m'
lc = '\x1b[0;36m'
lw = '\x1b[0;0m'
lg = '\x1b[0;32m'
h = '\x1b[96m'
n = []
ng = []
ids = []
die = []
live = []
cek = []
lin = []
target = []
targete = []
toke = []
fin = []
check = []
crsh = []
liv = []
diet = []
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_equiv(True)
br.set_handle_referer(True)
br.set_cookiejar(cookielib.LWPCookieJar())
br.set_handle_redirect(True)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def ex():
slw(r + '[!] Ah She Up')
time.sleep(1)
slw(r + '[!] Exiting')
time.sleep(1.5)
slw(r + '[!] See You' + w)
time.sleep(0.5)
os.system('clear')
exit()
def slw(s):
for i in s + '\n':
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(0.005)
def fst(s):
for i in s + '\n':
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(0.0001)
def wl():
os.system('nano password.txt')
trial()
def twl():
os.system('nano password.txt')
trial()
def mmk(s):
for i in s + '\n':
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(0.01)
def mmq(s):
for i in s + '\n':
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(1e-07)
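# slw, fst, mmk and mmq above differ only in the per-character delay of the
# same typewriter effect; a consolidated sketch (hypothetical refactor, the
# menus above still call the four originals):
def typewrite(s, delay):
    # Print one character at a time, flushing so the animation is visible.
    for i in s + '\n':
        sys.stdout.write(i)
        sys.stdout.flush()
        time.sleep(delay)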
def tpal():
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Crack Auto Password ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error'
time.sleep(1)
ex()
            except (KeyError, IOError):  # was "except KeyError as IOError", which shadowed IOError
pass
def tcheck():
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
                print r + ('[ Account Checker ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error'
time.sleep(1)
ex()
            except (KeyError, IOError):  # was "except KeyError as IOError", which shadowed IOError
pass
def tpala():
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Crack Auto Password Group ]').center(44)
slw(w + '-' * 45)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error'
time.sleep(1)
ex()
            except (KeyError, IOError):  # was "except KeyError as IOError", which shadowed IOError
pass
def tyahoo():
os.system('clear')
ajg()
fst(r + ('[ Yahoo Clone ]').center(44))
print y + '[' + c + '1' + y + ']',
slw(g + ' Yahoo Clone Friends')
print y + '[' + c + '2' + y + ']',
slw(g + ' Yahoo Clone Friends From Friends')
print y + '[' + c + '3' + y + ']',
slw(g + ' Yahoo Clone Followers')
print y + '[' + c + '4' + y + ']',
slw(g + ' Yahoo Clone Following')
print y + '[' + c + '0' + y + ']',
slw(r + ' Back\n')
ok = raw_input(c + '@AutismPeople : ' + p)
fst(w + '-' * 45)
    if ok == '':
        print r + '[!] Input Chose'
        time.sleep(1)
        trial()
    elif ok in ('1', '2', '3', '4'):
        # all four Yahoo-clone menu entries dispatch to the same handler
        tfriendse()
    elif ok == '0':
        trial()
    else:
        print r + '[!] ' + p + ok + r + ' Nothing'
        time.sleep(1)
        trial()
def bot():
os.system('clear')
ajg()
fst(r + ('[ BOT FACEBOOK ]').center(44))
print y + '[' + c + '1' + y + ']',
slw(g + ' Unfriend')
print y + '[' + c + '2' + y + ']',
slw(g + ' Unfollow')
print y + '[' + c + '3' + y + ']',
slw(g + ' Auto Follow')
print y + '[' + c + '4' + y + ']',
slw(g + ' Auto Add Friend From Group')
print y + '[' + c + '0' + y + ']',
slw(r + ' Back\n')
ok = raw_input(c + '@AutismPeople : ' + p)
fst(w + '-' * 45)
    if ok == '':
        print r + '[!] Input Chose'
        time.sleep(1)
        trial()
    elif ok == '1':
        unf()
    elif ok == '2':
        unfl()
    elif ok == '0':
        trial()
    elif ok == '3':
        foll()
    elif ok == '4':
        add()
    else:
        print r + '[!] ' + p + ok + r + ' Nothing'
        time.sleep(1)
        trial()
def unf():
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Auto Unfriend ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
def unfl():
id = []
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Auto Unfollow ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
red = '\x1b[1;91m'
gren = '\x1b[1;92m'
yel = '\x1b[1;93m'
gid = []
token = []
asua = []
mm = []
def add():
os.system('reset')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
print '[!] Token Invalid'
time.sleep(1)
exit()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Auto Add Friend ]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
def foll():
os.system('clear')
ajg()
try:
token = open('token.log', 'r').read()
except IOError:
slw(r + '[!] Token Invalid')
os.system('rm -rf token.log')
time.sleep(1)
login()
else:
try:
os.mkdir('Kntd')
except OSError:
try:
print r + ('[ Auto Follower]').center(44)
slw(c + 'Sorry,Real Tools Use Password')
slw(c + 'Please Contact The Author')
slw(c + 'Link password ' + r + ': ' + g + 'https://shortid.co/fZFa6')
fst(w + '-' * 45)
ngentod = raw_input(r + ' [ \x1b[0mOK \x1b[31;1m]')
trial()
except KeyboardInterrupt:
print r + '\r[!] Stopped'
trial()
except KeyError:
print r + '[!] Something Error'
trial()
except requests.exceptions.ConnectionError:
print r + '[!] Connection Error '
exit()
ask()
| 32.627434
| 347
| 0.417153
| 3,902
| 36,869
| 3.904152
| 0.112506
| 0.028358
| 0.02757
| 0.024156
| 0.731784
| 0.705068
| 0.68334
| 0.661743
| 0.64553
| 0.626625
| 0
| 0.03333
| 0.421411
| 36,869
| 1,130
| 348
| 32.627434
| 0.680808
| 0.005126
| 0
| 0.729446
| 0
| 0.006692
| 0.257743
| 0.009025
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.065966
| 0.000956
| null | null | 0.115679
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
72e2a39e8976038f8c0355f9b1e0b57bedcf511e
| 153
|
py
|
Python
|
src/models/sequence/__init__.py
|
dumpmemory/state-spaces
|
2a85503cb3e9e86cc05753950d4a249df9a0fffb
|
[
"Apache-2.0"
] | 513
|
2021-11-03T23:08:23.000Z
|
2022-03-31T16:29:18.000Z
|
src/models/sequence/__init__.py
|
dumpmemory/state-spaces
|
2a85503cb3e9e86cc05753950d4a249df9a0fffb
|
[
"Apache-2.0"
] | 18
|
2021-11-05T12:42:59.000Z
|
2022-03-27T19:49:55.000Z
|
src/models/sequence/__init__.py
|
MikeOwino/state-spaces
|
b6672bca994b6a36347f414faa59761e42b1e2b1
|
[
"Apache-2.0"
] | 47
|
2021-11-04T01:32:54.000Z
|
2022-03-30T18:24:26.000Z
|
from .base import SequenceModule
from .model import SequenceModel
from .unet import SequenceUNet
from .ff import FF
# from .pool import Downpool, Uppool
| 25.5
| 36
| 0.810458
| 21
| 153
| 5.904762
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143791
| 153
| 5
| 37
| 30.6
| 0.946565
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f43f5f5f65dda63fb8bc1092d639241cab38f1be
| 61
|
py
|
Python
|
pytorch_widedeep/models/tabular/linear/__init__.py
|
TangleSpace/pytorch-widedeep
|
ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff
|
[
"MIT"
] | null | null | null |
pytorch_widedeep/models/tabular/linear/__init__.py
|
TangleSpace/pytorch-widedeep
|
ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff
|
[
"MIT"
] | null | null | null |
pytorch_widedeep/models/tabular/linear/__init__.py
|
TangleSpace/pytorch-widedeep
|
ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff
|
[
"MIT"
] | null | null | null |
from pytorch_widedeep.models.tabular.linear.wide import Wide
| 30.5
| 60
| 0.868852
| 9
| 61
| 5.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 61
| 1
| 61
| 61
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f4501410195434dfb761f82bf87b94d8d75d2b7d
| 169
|
py
|
Python
|
codewars/8kyu/dinamuh/Function2-SquaringAnArgument/test.py
|
dinamuh/Training_one
|
d18e8fb12608ce1753162c20252ca928c4df97ab
|
[
"MIT"
] | null | null | null |
codewars/8kyu/dinamuh/Function2-SquaringAnArgument/test.py
|
dinamuh/Training_one
|
d18e8fb12608ce1753162c20252ca928c4df97ab
|
[
"MIT"
] | 2
|
2019-01-22T10:53:42.000Z
|
2019-01-31T08:02:48.000Z
|
codewars/8kyu/dinamuh/Function2-SquaringAnArgument/test.py
|
dinamuh/Training_one
|
d18e8fb12608ce1753162c20252ca928c4df97ab
|
[
"MIT"
] | 13
|
2019-01-22T10:37:42.000Z
|
2019-01-25T13:30:43.000Z
|
from main import square
def test_square(benchmark):
assert benchmark(square, 2) == 4
assert benchmark(square, 50) == 2500
assert benchmark(square, 1) == 1
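# The `main` module under test is not part of this excerpt; an implementation
# consistent with the assertions above would be (assumption, for reference):
def _reference_square(n):
    # Squaring an argument, per the kata name.
    return n * n
assert _reference_square(50) == 2500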
| 21.125
| 40
| 0.686391
| 23
| 169
| 5
| 0.565217
| 0.391304
| 0.547826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 0.207101
| 169
| 7
| 41
| 24.142857
| 0.783582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f45fc93ac9cf5bd5c18834b630aa5e5b670a8c85
| 178
|
py
|
Python
|
wagtail_honeypot/apps.py
|
tomdyson/wagtail-honeypot
|
99cc00f5eac3153fedebaf97cb0eb060847c948c
|
[
"MIT"
] | 2
|
2022-02-25T10:23:59.000Z
|
2022-02-26T21:18:11.000Z
|
wagtail_honeypot/apps.py
|
tomdyson/wagtail-honeypot
|
99cc00f5eac3153fedebaf97cb0eb060847c948c
|
[
"MIT"
] | 2
|
2022-03-03T21:44:42.000Z
|
2022-03-04T12:28:27.000Z
|
wagtail_honeypot/apps.py
|
tomdyson/wagtail-honeypot
|
99cc00f5eac3153fedebaf97cb0eb060847c948c
|
[
"MIT"
] | 1
|
2022-03-04T10:28:04.000Z
|
2022-03-04T10:28:04.000Z
|
from django.apps import AppConfig
class WagtailHoneypotAppConfig(AppConfig):
label = "wagtail_honeypot"
name = "wagtail_honeypot"
verbose_name = "Wagtail Honeypot"
| 22.25
| 42
| 0.758427
| 18
| 178
| 7.333333
| 0.666667
| 0.340909
| 0.287879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 178
| 7
| 43
| 25.428571
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
f463e5119c762694860c11366dabe30e0f332652
| 48
|
py
|
Python
|
app/core/__init__.py
|
OhBonsai/flask-boilerplate
|
51c165e19ec47cf3aeee5c20ed12093a87131af7
|
[
"Apache-2.0"
] | 2
|
2019-01-21T05:44:48.000Z
|
2021-06-02T20:18:39.000Z
|
app/core/__init__.py
|
OhBonsai/flask-boilerplate
|
51c165e19ec47cf3aeee5c20ed12093a87131af7
|
[
"Apache-2.0"
] | null | null | null |
app/core/__init__.py
|
OhBonsai/flask-boilerplate
|
51c165e19ec47cf3aeee5c20ed12093a87131af7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Created by OhBonsai at 2018/3/7
| 24
| 33
| 0.729167
| 10
| 48
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 0.145833
| 48
| 2
| 33
| 24
| 0.682927
| 0.916667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be5a29f533cbccdbad66fec3fe3e51b929599b6f
| 56
|
py
|
Python
|
src/github_vulnerability_exporter/__init__.py
|
ZeitOnline/github_vulnerability_exporter
|
dcc81c08820bd46f747fa5c6ce877354c404258c
|
[
"BSD-3-Clause"
] | 1
|
2019-06-06T14:44:12.000Z
|
2019-06-06T14:44:12.000Z
|
src/github_vulnerability_exporter/__init__.py
|
ZeitOnline/github_vulnerability_exporter
|
dcc81c08820bd46f747fa5c6ce877354c404258c
|
[
"BSD-3-Clause"
] | 1
|
2021-06-24T11:14:55.000Z
|
2021-06-24T11:14:55.000Z
|
src/github_vulnerability_exporter/__init__.py
|
ZeitOnline/github_vulnerability_exporter
|
dcc81c08820bd46f747fa5c6ce877354c404258c
|
[
"BSD-3-Clause"
] | 1
|
2021-11-30T10:39:15.000Z
|
2021-11-30T10:39:15.000Z
|
from github_vulnerability_exporter.exporter import main
| 28
| 55
| 0.910714
| 7
| 56
| 7
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 56
| 1
| 56
| 56
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
be5dfebf478dfe054fb82f7b2c6612c35f23a31c
| 275
|
py
|
Python
|
pysal/explore/pointpats/__init__.py
|
martinfleis/pysal
|
d2e0667d825d403efe7182ecda210dc152ec206d
|
[
"BSD-3-Clause"
] | 941
|
2015-01-12T22:25:55.000Z
|
2022-03-27T15:41:29.000Z
|
pysal/explore/pointpats/__init__.py
|
anekekarina99/pysal
|
bd8c954d34b4694416830a852e26fe40d64424f2
|
[
"BSD-3-Clause"
] | 589
|
2015-01-09T03:58:03.000Z
|
2022-02-26T02:17:15.000Z
|
pysal/explore/pointpats/__init__.py
|
anekekarina99/pysal
|
bd8c954d34b4694416830a852e26fe40d64424f2
|
[
"BSD-3-Clause"
] | 303
|
2015-01-10T02:59:04.000Z
|
2022-03-05T04:21:55.000Z
|
from pointpats.pointpattern import PointPattern
from pointpats.window import as_window, poly_from_bbox, to_ccf, Window
from pointpats.centrography import *
from pointpats.process import *
from pointpats.quadrat_statistics import *
from pointpats.distance_statistics import *
| 39.285714
| 70
| 0.854545
| 35
| 275
| 6.542857
| 0.428571
| 0.340611
| 0.248908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098182
| 275
| 6
| 71
| 45.833333
| 0.923387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
be7e00a9b607aadf5f5a7d4fd1bd32246047d0a7
| 29
|
py
|
Python
|
load_generator/__init__.py
|
uzum/cran-orchestrator
|
c2235bf324c8c04e82960ca58ec49f2f700c065d
|
[
"MIT"
] | null | null | null |
load_generator/__init__.py
|
uzum/cran-orchestrator
|
c2235bf324c8c04e82960ca58ec49f2f700c065d
|
[
"MIT"
] | null | null | null |
load_generator/__init__.py
|
uzum/cran-orchestrator
|
c2235bf324c8c04e82960ca58ec49f2f700c065d
|
[
"MIT"
] | null | null | null |
from .server import LGServer
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
be930d1e18d893824a6e8e815b44c5e8854070c7
| 21
|
py
|
Python
|
hello_world.py
|
Pertrang/profiles-rest-api
|
3e27c19940dcfb9feee9bb4519e5fc8d77b91722
|
[
"MIT"
] | null | null | null |
hello_world.py
|
Pertrang/profiles-rest-api
|
3e27c19940dcfb9feee9bb4519e5fc8d77b91722
|
[
"MIT"
] | null | null | null |
hello_world.py
|
Pertrang/profiles-rest-api
|
3e27c19940dcfb9feee9bb4519e5fc8d77b91722
|
[
"MIT"
] | null | null | null |
print("hellp world!")
| 21
| 21
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 21
| 1
| 21
| 21
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
bec4cbacc83ad9d7321c853d5ba8649c57148a37
| 310
|
py
|
Python
|
test_project/test_app/conftest.py
|
bjuretko/django-admin-object-actions
|
2c80ffcbb53b3d585f191d1fe662daf36fa7e204
|
[
"BSD-3-Clause"
] | null | null | null |
test_project/test_app/conftest.py
|
bjuretko/django-admin-object-actions
|
2c80ffcbb53b3d585f191d1fe662daf36fa7e204
|
[
"BSD-3-Clause"
] | null | null | null |
test_project/test_app/conftest.py
|
bjuretko/django-admin-object-actions
|
2c80ffcbb53b3d585f191d1fe662daf36fa7e204
|
[
"BSD-3-Clause"
] | null | null | null |
# py.test
import pytest
@pytest.fixture
def apps(request, db):
from django.apps import apps
return apps
@pytest.fixture
def test_model(apps):
return apps.get_model('test_app', 'TestModel')
@pytest.fixture
def test_model_instance(test_model):
return test_model.objects.create(name='test')
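# A minimal sketch of a test consuming the fixtures above (hypothetical;
# the real tests live elsewhere in test_project, not in conftest.py):
def test_instance_name(test_model_instance):
    assert test_model_instance.name == 'test'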
| 16.315789
| 50
| 0.73871
| 45
| 310
| 4.933333
| 0.444444
| 0.162162
| 0.216216
| 0.18018
| 0.225225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151613
| 310
| 18
| 51
| 17.222222
| 0.844106
| 0.022581
| 0
| 0.272727
| 0
| 0
| 0.069767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fe4886f5c2eb47d1f57db21d368914d137924e4d
| 144
|
py
|
Python
|
WechatTool.py
|
ftctolei/GetNovel
|
f73f18cb12912d22c435064a542fed707d8c9077
|
[
"Apache-2.0"
] | null | null | null |
WechatTool.py
|
ftctolei/GetNovel
|
f73f18cb12912d22c435064a542fed707d8c9077
|
[
"Apache-2.0"
] | null | null | null |
WechatTool.py
|
ftctolei/GetNovel
|
f73f18cb12912d22c435064a542fed707d8c9077
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf-8
import itchat
#itchat.auto_login(enableCmdQR=True)
itchat.auto_login()
itchat.send('Hello, filehelper', toUserName='filehelper')
| 20.571429
| 57
| 0.791667
| 19
| 144
| 5.894737
| 0.684211
| 0.178571
| 0.267857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007407
| 0.0625
| 144
| 6
| 58
| 24
| 0.822222
| 0.326389
| 0
| 0
| 0
| 0
| 0.284211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fe5ed8993790a1ec3aeb5060a63f819825b5414d
| 266
|
py
|
Python
|
satef/alignment/impl/LHAAlignmentFactory.py
|
kostrzmar/SATEF
|
b483b073f1ff3dd797413f212e26114ef93cfe08
|
[
"MIT"
] | null | null | null |
satef/alignment/impl/LHAAlignmentFactory.py
|
kostrzmar/SATEF
|
b483b073f1ff3dd797413f212e26114ef93cfe08
|
[
"MIT"
] | null | null | null |
satef/alignment/impl/LHAAlignmentFactory.py
|
kostrzmar/SATEF
|
b483b073f1ff3dd797413f212e26114ef93cfe08
|
[
"MIT"
] | null | null | null |
from alignment import AbstractAlignmentFactory
from alignment import AbstractAlignment
from alignment.impl import LHAAlignment
class LHAAlignmentFactory(AbstractAlignmentFactory):
def getAlignment(self) -> AbstractAlignment:
return LHAAlignment()
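# Usage sketch (assumes the `alignment` package above resolves in a full
# SATEF checkout; not part of the original module):
def _example_usage():
    factory = LHAAlignmentFactory()
    alignment = factory.getAlignment()  # an LHAAlignment instance
    return alignment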
| 29.555556
| 52
| 0.81203
| 22
| 266
| 9.818182
| 0.590909
| 0.180556
| 0.175926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146617
| 266
| 9
| 53
| 29.555556
| 0.951542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
fe6905647db8984d2da06bf595270d84de66ef07
| 111
|
py
|
Python
|
_solved/solutions/case-conflict-mapping25.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 341
|
2018-04-26T08:46:05.000Z
|
2022-03-01T08:13:39.000Z
|
_solved/solutions/case-conflict-mapping25.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 22
|
2018-06-15T23:19:27.000Z
|
2020-03-23T11:08:55.000Z
|
_solved/solutions/case-conflict-mapping25.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 197
|
2018-06-15T18:34:53.000Z
|
2022-02-27T11:33:15.000Z
|
kahuzi = protected_areas_utm[protected_areas_utm['NAME_AP'] == "Kahuzi-Biega National park"].geometry.squeeze()
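# .squeeze() turns the single-row GeoSeries selected above into a bare
# geometry object, assuming exactly one protected area matches that NAME_AP.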
| 111
| 111
| 0.801802
| 15
| 111
| 5.6
| 0.733333
| 0.333333
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 111
| 1
| 111
| 111
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.294643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe6c7306ea45ba728a041d1a5589eb2832619504
| 174
|
py
|
Python
|
couleuvre/__main__.py
|
ykacer/couleuvre
|
e1fd7b44ad28fa9e93c64d8bdb7dbf0b0277c227
|
[
"MIT"
] | null | null | null |
couleuvre/__main__.py
|
ykacer/couleuvre
|
e1fd7b44ad28fa9e93c64d8bdb7dbf0b0277c227
|
[
"MIT"
] | null | null | null |
couleuvre/__main__.py
|
ykacer/couleuvre
|
e1fd7b44ad28fa9e93c64d8bdb7dbf0b0277c227
|
[
"MIT"
] | null | null | null |
from .text import message_print
def main():
print("Here is the message :")
print("---------------------")
message_print()
if __name__ == "__main__":
main()
| 17.4
| 34
| 0.545977
| 19
| 174
| 4.473684
| 0.631579
| 0.423529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 174
| 9
| 35
| 19.333333
| 0.615942
| 0
| 0
| 0
| 0
| 0
| 0.287356
| 0.12069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.142857
| 0
| 0.285714
| 0.571429
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fea6dc1d9fc2faf31e4a9d04a2f93217e3c00569
| 254
|
py
|
Python
|
frost/server/storage/defaults.py
|
Den4200/pyfrost
|
c341eeb1262746d889dcff4f405f7d52f7caf23d
|
[
"MIT"
] | 2
|
2020-03-02T14:29:07.000Z
|
2020-03-04T00:36:35.000Z
|
frost/server/storage/defaults.py
|
Den4200/pyfrost
|
c341eeb1262746d889dcff4f405f7d52f7caf23d
|
[
"MIT"
] | 16
|
2020-03-08T05:50:48.000Z
|
2020-05-11T04:40:13.000Z
|
frost/server/storage/defaults.py
|
Den4200/pyfrost
|
c341eeb1262746d889dcff4f405f7d52f7caf23d
|
[
"MIT"
] | 1
|
2020-03-29T00:33:04.000Z
|
2020-03-29T00:33:04.000Z
|
DEFAULT_FORMAT = {
'users': {
'meta': {
'last_id': '0'
}
},
'rooms': {
'meta': {
'last_id': '0'
}
},
'messages': {
'meta': {
'last_id': '0'
}
}
}
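# The three sections share one shape; an equivalent construction (sketch,
# not part of the original module) avoids repeating the literal:
DEFAULT_FORMAT_ALT = {
    section: {'meta': {'last_id': '0'}}
    for section in ('users', 'rooms', 'messages')
}
assert DEFAULT_FORMAT_ALT == DEFAULT_FORMAT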
| 14.111111
| 26
| 0.267717
| 17
| 254
| 3.764706
| 0.529412
| 0.375
| 0.46875
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025424
| 0.535433
| 254
| 17
| 27
| 14.941176
| 0.516949
| 0
| 0
| 0.352941
| 0
| 0
| 0.212598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
22a43d8ce10fa9202f8708cc82eaac090bd8f7e3
| 222
|
py
|
Python
|
user/admin.py
|
jlech42/chump_django
|
6650de31ec9c4c35e45641a88f212e06a80ea4d7
|
[
"MIT"
] | null | null | null |
user/admin.py
|
jlech42/chump_django
|
6650de31ec9c4c35e45641a88f212e06a80ea4d7
|
[
"MIT"
] | null | null | null |
user/admin.py
|
jlech42/chump_django
|
6650de31ec9c4c35e45641a88f212e06a80ea4d7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Profile, UserSubscription, UserContent
# Register your models here.
admin.site.register(UserContent)
admin.site.register(Profile)
admin.site.register(UserSubscription)
| 27.75
| 58
| 0.833333
| 27
| 222
| 6.851852
| 0.481481
| 0.145946
| 0.275676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085586
| 222
| 7
| 59
| 31.714286
| 0.91133
| 0.117117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
22a921385f880c61d876f166a8f6a5ca852ab035
| 26
|
py
|
Python
|
redis_mock/__init__.py
|
adamlwgriffiths/redis-mock
|
418df5899bab04e541469eb5b9b8e21acda48e9e
|
[
"MIT"
] | 1
|
2021-01-19T05:58:02.000Z
|
2021-01-19T05:58:02.000Z
|
redis_mock/__init__.py
|
adamlwgriffiths/redis-mock
|
418df5899bab04e541469eb5b9b8e21acda48e9e
|
[
"MIT"
] | null | null | null |
redis_mock/__init__.py
|
adamlwgriffiths/redis-mock
|
418df5899bab04e541469eb5b9b8e21acda48e9e
|
[
"MIT"
] | null | null | null |
from .redis_mock import *
| 13
| 25
| 0.769231
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
22d37f3715b736378a1a5deb2d793989912bdd88
| 93
|
py
|
Python
|
OpenGLCffi/GL/EXT/KHR/blend_equation_advanced.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/KHR/blend_equation_advanced.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/KHR/blend_equation_advanced.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
from OpenGLCffi.GL import params
@params(api='gl', prms=[])
def glBlendBarrierKHR():
pass
| 13.285714
| 32
| 0.72043
| 12
| 93
| 5.583333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 93
| 6
| 33
| 15.5
| 0.82716
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
22f6c51667ca02b727b1a01ae4fe4ad2f5081565
| 50
|
py
|
Python
|
enthought/traits/ui/qt4/ui_base.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/traits/ui/qt4/ui_base.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/traits/ui/qt4/ui_base.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from traitsui.qt4.ui_base import *
| 16.666667 | 34 | 0.78 | 8 | 50 | 4.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023256 | 0.14 | 50 | 2 | 35 | 25 | 0.860465 | 0.24 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
a3c77790c19a7c54f9abc5320a30d0be5f912b21 | 26 | py | Python | pygit/__version__.py | immensity/pygit | 6c2dc5081a75600c9437faa318a1ca3c0645b1fb | ["MIT"] | 17 | 2018-09-08T11:35:47.000Z | 2022-03-19T21:31:33.000Z | pygit/__version__.py | immensity/pygit | 6c2dc5081a75600c9437faa318a1ca3c0645b1fb | ["MIT"] | 8 | 2020-02-19T06:14:25.000Z | 2021-11-18T12:32:06.000Z | pygit/__version__.py | immensity/pygit | 6c2dc5081a75600c9437faa318a1ca3c0645b1fb | ["MIT"] | 2 | 2020-02-19T17:02:32.000Z | 2021-02-21T14:51:35.000Z |
__version__ = "2019.01.31"
| 26 | 26 | 0.730769 | 4 | 26 | 3.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 0.076923 | 26 | 1 | 26 | 26 | 0.291667 | 0 | 0 | 0 | 0 | 0 | 0.37037 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
4a3320a5daf80b8cf803a30b671f4532e196e5e5 | 137 | py | Python | huiAudioCorpus/model/Credentials.py | iisys-hof/HUI-Audio-Corpus-German | 4d2de2ed538a6b943166e1e35c10ee8b0b266be6 | ["Apache-2.0"] | 11 | 2021-06-22T09:44:28.000Z | 2022-01-10T12:35:29.000Z | huiAudioCorpus/model/Credentials.py | iisys-hof/HUI-Audio-Corpus-German | 4d2de2ed538a6b943166e1e35c10ee8b0b266be6 | ["Apache-2.0"] | 1 | 2021-07-17T20:19:01.000Z | 2021-10-04T09:36:43.000Z | huiAudioCorpus/model/Credentials.py | iisys-hof/HUI-Audio-Corpus-German | 4d2de2ed538a6b943166e1e35c10ee8b0b266be6 | ["Apache-2.0"] | null | null | null |
class Credentials:
    def __init__(self, username: str, password: str):
        self.username = username
        self.password = password
| 27.4 | 51 | 0.678832 | 15 | 137 | 5.933333 | 0.533333 | 0.269663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233577 | 137 | 5 | 52 | 27.4 | 0.847619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 5 |
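The Credentials holder in the record above is exactly the boilerplate that the standard-library dataclasses module (Python 3.7+) can generate; an equivalent sketch, not taken from the repo:

from dataclasses import dataclass


@dataclass
class Credentials:
    # @dataclass synthesizes __init__, __repr__ and __eq__ from these fields.
    username: str
    password: str


creds = Credentials("alice", "secret")  # hypothetical values
print(creds.username)  # alice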
4a4cab06960ca58e4d829ea7aecb7295e28f0d59 | 8,319 | py | Python | tests/rbac/test_role_manager.py | gmc77/pycasbin | fbbc6000f755c78726f24d26c45bdf9bda99b5d6 | ["Apache-2.0"] | 915 | 2018-11-25T01:00:39.000Z | 2022-03-30T11:21:34.000Z | tests/rbac/test_role_manager.py | ffyuanda/pycasbin | 230132e459420aaa519d1eb9479f8996bdbbbd2a | ["Apache-2.0"] | 231 | 2019-02-13T09:29:51.000Z | 2022-03-28T16:32:51.000Z | tests/rbac/test_role_manager.py | ffyuanda/pycasbin | 230132e459420aaa519d1eb9479f8996bdbbbd2a | ["Apache-2.0"] | 173 | 2019-02-08T02:22:33.000Z | 2022-03-10T15:16:11.000Z |
from unittest import TestCase
from casbin.rbac import default_role_manager
from casbin.util import regex_match_func
import time
from concurrent.futures import ThreadPoolExecutor


def get_role_manager():
    return default_role_manager.RoleManager(max_hierarchy_level=10)


class TestDefaultRoleManager(TestCase):
    def test_role(self):
        rm = get_role_manager()
        rm.add_link("u1", "g1")
        rm.add_link("u2", "g1")
        rm.add_link("u3", "g2")
        rm.add_link("u4", "g2")
        rm.add_link("u4", "g3")
        rm.add_link("g1", "g3")
        # Current role inheritance tree:
        #             g3    g2
        #            /  \  /  \
        #          g1    u4    u3
        #         /  \
        #       u1    u2
        self.assertTrue(rm.has_link("u1", "g1"))
        self.assertFalse(rm.has_link("u1", "g2"))
        self.assertTrue(rm.has_link("u1", "g3"))
        self.assertTrue(rm.has_link("u2", "g1"))
        self.assertFalse(rm.has_link("u2", "g2"))
        self.assertTrue(rm.has_link("u2", "g3"))
        self.assertFalse(rm.has_link("u3", "g1"))
        self.assertTrue(rm.has_link("u3", "g2"))
        self.assertFalse(rm.has_link("u3", "g3"))
        self.assertFalse(rm.has_link("u4", "g1"))
        self.assertTrue(rm.has_link("u4", "g2"))
        self.assertTrue(rm.has_link("u4", "g3"))
        self.assertCountEqual(rm.get_roles("u1"), ["g1"])
        self.assertCountEqual(rm.get_roles("u2"), ["g1"])
        self.assertCountEqual(rm.get_roles("u3"), ["g2"])
        self.assertCountEqual(rm.get_roles("u4"), ["g2", "g3"])
        self.assertCountEqual(rm.get_roles("g1"), ["g3"])
        self.assertCountEqual(rm.get_roles("g2"), [])
        self.assertCountEqual(rm.get_roles("g3"), [])
        rm.delete_link("g1", "g3")
        rm.delete_link("u4", "g2")
        # Current role inheritance tree after deleting the links:
        #             g3    g2
        #               \     \
        #          g1    u4    u3
        #         /  \
        #       u1    u2
        self.assertTrue(rm.has_link("u1", "g1"))
        self.assertFalse(rm.has_link("u1", "g2"))
        self.assertFalse(rm.has_link("u1", "g3"))
        self.assertTrue(rm.has_link("u2", "g1"))
        self.assertFalse(rm.has_link("u2", "g2"))
        self.assertFalse(rm.has_link("u2", "g3"))
        self.assertFalse(rm.has_link("u3", "g1"))
        self.assertTrue(rm.has_link("u3", "g2"))
        self.assertFalse(rm.has_link("u3", "g3"))
        self.assertFalse(rm.has_link("u4", "g1"))
        self.assertFalse(rm.has_link("u4", "g2"))
        self.assertTrue(rm.has_link("u4", "g3"))
        self.assertCountEqual(rm.get_roles("u1"), ["g1"])
        self.assertCountEqual(rm.get_roles("u2"), ["g1"])
        self.assertCountEqual(rm.get_roles("u3"), ["g2"])
        self.assertCountEqual(rm.get_roles("u4"), ["g3"])
        self.assertCountEqual(rm.get_roles("g1"), [])
        self.assertCountEqual(rm.get_roles("g2"), [])
        self.assertCountEqual(rm.get_roles("g3"), [])

    def test_domain_role(self):
        rm = get_role_manager()
        rm.add_link("u1", "g1", "domain1")
        rm.add_link("u2", "g1", "domain1")
        rm.add_link("u3", "admin", "domain2")
        rm.add_link("u4", "admin", "domain2")
        rm.add_link("u4", "admin", "domain1")
        rm.add_link("g1", "admin", "domain1")
        # Current role inheritance tree:
        #       domain1:admin    domain2:admin
        #            /       \  /       \
        #      domain1:g1     u4         u3
        #         /  \
        #       u1    u2
        self.assertTrue(rm.has_link("u1", "g1", "domain1"))
        self.assertFalse(rm.has_link("u1", "g1", "domain2"))
        self.assertTrue(rm.has_link("u1", "admin", "domain1"))
        self.assertFalse(rm.has_link("u1", "admin", "domain2"))
        self.assertTrue(rm.has_link("u2", "g1", "domain1"))
        self.assertFalse(rm.has_link("u2", "g1", "domain2"))
        self.assertTrue(rm.has_link("u2", "admin", "domain1"))
        self.assertFalse(rm.has_link("u2", "admin", "domain2"))
        self.assertFalse(rm.has_link("u3", "g1", "domain1"))
        self.assertFalse(rm.has_link("u3", "g1", "domain2"))
        self.assertFalse(rm.has_link("u3", "admin", "domain1"))
        self.assertTrue(rm.has_link("u3", "admin", "domain2"))
        self.assertFalse(rm.has_link("u4", "g1", "domain1"))
        self.assertFalse(rm.has_link("u4", "g1", "domain2"))
        self.assertTrue(rm.has_link("u4", "admin", "domain1"))
        self.assertTrue(rm.has_link("u4", "admin", "domain2"))

    def test_clear(self):
        rm = get_role_manager()
        rm.add_link("u1", "g1")
        rm.add_link("u2", "g1")
        rm.add_link("u3", "g2")
        rm.add_link("u4", "g2")
        rm.add_link("u4", "g3")
        rm.add_link("g1", "g3")
        # Current role inheritance tree:
        #             g3    g2
        #            /  \  /  \
        #          g1    u4    u3
        #         /  \
        #       u1    u2
        rm.clear()
        # All data is cleared.
        # No role inheritance now.
        self.assertFalse(rm.has_link("u1", "g1"))
        self.assertFalse(rm.has_link("u1", "g2"))
        self.assertFalse(rm.has_link("u1", "g3"))
        self.assertFalse(rm.has_link("u2", "g1"))
        self.assertFalse(rm.has_link("u2", "g2"))
        self.assertFalse(rm.has_link("u2", "g3"))
        self.assertFalse(rm.has_link("u3", "g1"))
        self.assertFalse(rm.has_link("u3", "g2"))
        self.assertFalse(rm.has_link("u3", "g3"))
        self.assertFalse(rm.has_link("u4", "g1"))
        self.assertFalse(rm.has_link("u4", "g2"))
        self.assertFalse(rm.has_link("u4", "g3"))

    def test_matching_func(self):
        rm = get_role_manager()
        rm.add_matching_func(regex_match_func)
        rm.add_link("u1", "g1")
        rm.add_link("u3", "g2")
        rm.add_link("u3", "g3")
        rm.add_link(r"u\d+", "g2")
        self.assertTrue(rm.has_link("u1", "g1"))
        self.assertTrue(rm.has_link("u1", "g2"))
        self.assertFalse(rm.has_link("u1", "g3"))
        self.assertFalse(rm.has_link("u2", "g1"))
        self.assertTrue(rm.has_link("u2", "g2"))
        self.assertFalse(rm.has_link("u2", "g3"))
        self.assertFalse(rm.has_link("u3", "g1"))
        self.assertTrue(rm.has_link("u3", "g2"))
        self.assertTrue(rm.has_link("u3", "g3"))

    def test_one_to_many(self):
        rm = get_role_manager()
        rm.add_matching_func(regex_match_func)
        rm.add_link("u1", r"g\d+")
        self.assertTrue(rm.has_link("u1", "g1"))
        self.assertTrue(rm.has_link("u1", "g2"))
        self.assertFalse(rm.has_link("u2", "g1"))
        self.assertFalse(rm.has_link("u2", "g2"))

    def test_many_to_one(self):
        rm = get_role_manager()
        rm.add_matching_func(regex_match_func)
        rm.add_link(r"u\d+", "g1")
        self.assertTrue(rm.has_link("u1", "g1"))
        self.assertFalse(rm.has_link("u1", "g2"))
        self.assertTrue(rm.has_link("u2", "g1"))
        self.assertFalse(rm.has_link("u2", "g2"))

    def test_matching_func_order(self):
        rm = get_role_manager()
        rm.add_matching_func(regex_match_func)
        rm.add_link(r"g\d+", "root")
        rm.add_link("u1", "g1")
        self.assertTrue(rm.has_link("u1", "root"))
        rm.clear()
        rm.add_link("u1", "g1")
        rm.add_link(r"g\d+", "root")
        self.assertTrue(rm.has_link("u1", "root"))
        rm.clear()
        rm.add_link("u1", r"g\d+")
        rm.add_link("g1", "root")
        self.assertTrue(rm.has_link("u1", "root"))
        rm.clear()
        rm.add_link("g1", "root")
        rm.add_link("u1", r"g\d+")
        self.assertTrue(rm.has_link("u1", "root"))

    def test_concurrent_has_link_with_matching_func(self):
        def matching_func(*args):
            time.sleep(0.01)
            return regex_match_func(*args)

        rm = get_role_manager()
        rm.add_matching_func(matching_func)
        rm.add_link(r"u\d+", "users")

        def test_has_link(role):
            return rm.has_link(role, "users")

        executor = ThreadPoolExecutor(10)
        futures = [executor.submit(test_has_link, "u" + str(i)) for i in range(10)]
        for future in futures:
            self.assertTrue(future.result())
| 35.551282 | 83 | 0.549705 | 1,086 | 8,319 | 4.030387 | 0.081031 | 0.123144 | 0.152159 | 0.191912 | 0.805346 | 0.790039 | 0.76902 | 0.605438 | 0.595385 | 0.588074 | 0 | 0.044412 | 0.266498 | 8,319 | 233 | 84 | 35.703863 | 0.672894 | 0.073326 | 0 | 0.597561 | 0 | 0 | 0.092554 | 0 | 0 | 0 | 0 | 0 | 0.536585 | 1 | 0.067073 | false | 0 | 0.030488 | 0.012195 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
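The test file above exercises casbin's default role manager end to end; the add_link/has_link round trip it builds on can be seen in isolation in a few lines (a sketch assuming the casbin package is installed):

from casbin.rbac import default_role_manager

# Same constructor the tests' get_role_manager() helper uses.
rm = default_role_manager.RoleManager(max_hierarchy_level=10)
rm.add_link("alice", "admin")             # alice inherits the admin role
print(rm.has_link("alice", "admin"))      # True: direct link exists
print(rm.has_link("bob", "admin"))        # False: no link was added for bob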
4a5d2171b7bd0ad9543b5aa9f571c610db5061c4 | 128 | py | Python | CalibTracker/Configuration/python/SiStripPedestals/SiStripPedestals_Fake_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CalibTracker/Configuration/python/SiStripPedestals/SiStripPedestals_Fake_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CalibTracker/Configuration/python/SiStripPedestals/SiStripPedestals_Fake_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
from CalibTracker.SiStripESProducers.fake.SiStripPedestalsFakeESSource_cfi import *
| 21.333333 | 83 | 0.867188 | 13 | 128 | 8.461538 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085938 | 128 | 5 | 84 | 25.6 | 0.940171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
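The two-line file above is a CMSSW configuration fragment (a _cff file); full configurations pull such fragments in through the process object. A hedged sketch assuming a standard CMSSW environment where FWCore is importable; the process name is hypothetical:

import FWCore.ParameterSet.Config as cms

process = cms.Process("DEMO")  # hypothetical process label
# process.load resolves the dotted path the same way the fragment's own import does.
process.load("CalibTracker.Configuration.SiStripPedestals.SiStripPedestals_Fake_cff")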
4a6460b51b38a154882d351d6a61d86d961cdc02 | 50 | py | Python | mdentropy/core/__init__.py | msmbuilder/mdentropy | 82d616ddffe11283052b2d870c3b0274736a173c | ["MIT"] | 25 | 2017-10-03T00:40:33.000Z | 2022-02-18T14:33:56.000Z | mdentropy/core/__init__.py | shozebhaider/mdentropy | 82d616ddffe11283052b2d870c3b0274736a173c | ["MIT"] | 46 | 2016-04-01T15:44:22.000Z | 2020-08-13T20:04:16.000Z | mdentropy/core/__init__.py | shozebhaider/mdentropy | 82d616ddffe11283052b2d870c3b0274736a173c | ["MIT"] | 14 | 2016-03-28T21:45:16.000Z | 2022-03-02T13:21:09.000Z |
from .entropy import *
from .information import *
| 16.666667 | 26 | 0.76 | 6 | 50 | 6.333333 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 50 | 2 | 27 | 25 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.