hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbdf0b893a5327d5e9558455c8c727b7c552b9b9
| 225
|
py
|
Python
|
custom_addons/books_log/models/quotations.py
|
MonwarAdeeb/Bista_Solutions
|
d261e31f21ff03b2cc82b0c26d680036dca6d799
|
[
"MIT"
] | null | null | null |
custom_addons/books_log/models/quotations.py
|
MonwarAdeeb/Bista_Solutions
|
d261e31f21ff03b2cc82b0c26d680036dca6d799
|
[
"MIT"
] | null | null | null |
custom_addons/books_log/models/quotations.py
|
MonwarAdeeb/Bista_Solutions
|
d261e31f21ff03b2cc82b0c26d680036dca6d799
|
[
"MIT"
] | null | null | null |
from odoo import _, api, fields, models
class Quotations(models.Model):
_inherit = "sale.order"
note_on_customer = fields.Text("Note on Customer",
help="Add Notes on Customers!")
| 25
| 66
| 0.604444
| 26
| 225
| 5.076923
| 0.769231
| 0.090909
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297778
| 225
| 8
| 67
| 28.125
| 0.835443
| 0
| 0
| 0
| 0
| 0
| 0.217778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
cbe01b859fdec5b75c6ca5d80bdb0090e7fffe18
| 129
|
py
|
Python
|
Curso de Cisco/Actividades/Usando una variable contador para salir de un ciclo.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
Curso de Cisco/Actividades/Usando una variable contador para salir de un ciclo.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
Curso de Cisco/Actividades/Usando una variable contador para salir de un ciclo.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
contador = 5
while contador != 0:
print("Dentro del ciclo: ", contador)
contador -= 1
print("Fuera del ciclo", contador)
| 25.8
| 41
| 0.658915
| 18
| 129
| 4.777778
| 0.611111
| 0.186047
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029126
| 0.20155
| 129
| 5
| 42
| 25.8
| 0.796117
| 0
| 0
| 0
| 0
| 0
| 0.255814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.4
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1db845d595321edefc8d7c07f7a1547eb2a47cda
| 295
|
py
|
Python
|
pyimgaug3d/augmenters/src/base_augmenter.py
|
SiyuLiu0329/pyimgaug3d
|
cc99cd3ef12fab665df4f1d4ad08ed5e20c6da4a
|
[
"BSD-2-Clause"
] | 1
|
2021-10-05T19:52:46.000Z
|
2021-10-05T19:52:46.000Z
|
pyimgaug3d/augmenters/src/base_augmenter.py
|
SiyuLiu0329/pyimgaug3d
|
cc99cd3ef12fab665df4f1d4ad08ed5e20c6da4a
|
[
"BSD-2-Clause"
] | null | null | null |
pyimgaug3d/augmenters/src/base_augmenter.py
|
SiyuLiu0329/pyimgaug3d
|
cc99cd3ef12fab665df4f1d4ad08ed5e20c6da4a
|
[
"BSD-2-Clause"
] | null | null | null |
import random
class BaseAugmenter:
def __init__(self):
self.augmentation = []
def add_augmentation(self, augmentation):
self.augmentation.append(augmentation)
def __call__(self, images):
aug = random.choice(self.augmentation)
return aug(images)
| 24.583333
| 46
| 0.667797
| 30
| 295
| 6.266667
| 0.5
| 0.340426
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240678
| 295
| 12
| 47
| 24.583333
| 0.839286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.111111
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
1de8a7f7a0dfcf9747c0d6f8afb235a1b4a3377a
| 184
|
py
|
Python
|
FNetEncDec.py
|
Dmitriuso/FNet-pytorch
|
bc744b4947b693604371b586e263ce72e90ff1df
|
[
"MIT"
] | null | null | null |
FNetEncDec.py
|
Dmitriuso/FNet-pytorch
|
bc744b4947b693604371b586e263ce72e90ff1df
|
[
"MIT"
] | null | null | null |
FNetEncDec.py
|
Dmitriuso/FNet-pytorch
|
bc744b4947b693604371b586e263ce72e90ff1df
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from fnet import FNet
class FNetEncoderDecoder(nn.Module):
def __init__(self):
super().__init__()
self.fnet_encoder = FNet(self)
| 20.444444
| 38
| 0.701087
| 24
| 184
| 5
| 0.541667
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211957
| 184
| 9
| 38
| 20.444444
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1df5f4fde009b7e0a5d7a811c122d66d8996be06
| 257
|
py
|
Python
|
src/seveno_pyutil/logging_utilities/__init__.py
|
tadams42/seveno_pyutil
|
9e3b4157408b0b54a4c609ff1a8c704be958543b
|
[
"MIT"
] | null | null | null |
src/seveno_pyutil/logging_utilities/__init__.py
|
tadams42/seveno_pyutil
|
9e3b4157408b0b54a4c609ff1a8c704be958543b
|
[
"MIT"
] | null | null | null |
src/seveno_pyutil/logging_utilities/__init__.py
|
tadams42/seveno_pyutil
|
9e3b4157408b0b54a4c609ff1a8c704be958543b
|
[
"MIT"
] | null | null | null |
from .single_line_formatter import SingleLineColoredFormatter, SingleLineFormatter
from .sql_filter import SQLFilter
from .standard_metadata_filter import StandardMetadataFilter
from .utilities import log_to_console_for, log_to_tmp_file_for, silence_logger
| 51.4
| 82
| 0.898833
| 32
| 257
| 6.8125
| 0.6875
| 0.110092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07393
| 257
| 4
| 83
| 64.25
| 0.915966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
69994ebdbc60714972b7b6734243517a9ccda0b2
| 104
|
py
|
Python
|
url`s_and_templates/django101_admin/apps.py
|
EmilianStoyanov/python-web
|
60ddb1f0cc4c5bb1615317967c4da33c4171b27b
|
[
"MIT"
] | 3
|
2021-01-19T18:54:38.000Z
|
2022-01-05T17:28:41.000Z
|
url`s_and_templates/django101_admin/apps.py
|
EmilianStoyanov/python-web
|
60ddb1f0cc4c5bb1615317967c4da33c4171b27b
|
[
"MIT"
] | null | null | null |
url`s_and_templates/django101_admin/apps.py
|
EmilianStoyanov/python-web
|
60ddb1f0cc4c5bb1615317967c4da33c4171b27b
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class Django101AdminConfig(AppConfig):
name = 'django101_admin'
| 17.333333
| 38
| 0.788462
| 11
| 104
| 7.363636
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 0.144231
| 104
| 5
| 39
| 20.8
| 0.842697
| 0
| 0
| 0
| 0
| 0
| 0.144231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
699ca3d21076959cbca46f1c771fb501bd6c1baa
| 310
|
py
|
Python
|
problem017.py
|
samidarko/euler
|
f5d2c0fe41c2cb5517d2dd7f7db075add0dbedb1
|
[
"MIT"
] | null | null | null |
problem017.py
|
samidarko/euler
|
f5d2c0fe41c2cb5517d2dd7f7db075add0dbedb1
|
[
"MIT"
] | null | null | null |
problem017.py
|
samidarko/euler
|
f5d2c0fe41c2cb5517d2dd7f7db075add0dbedb1
|
[
"MIT"
] | null | null | null |
from num2words import num2words
from functools import reduce
def number_letters(n):
return len(num2words(n).replace(' ', '').replace('-', ''))
def main():
def fn(acc, n):
return acc + number_letters(n)
return reduce(fn, range(1, 1001), 0)
if __name__ == "__main__":
print(main())
| 18.235294
| 62
| 0.632258
| 41
| 310
| 4.536585
| 0.536585
| 0.112903
| 0.150538
| 0.215054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036585
| 0.206452
| 310
| 16
| 63
| 19.375
| 0.719512
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.2
| 0.2
| 0.8
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
69a5aa26a0f1e3229931aef186a19e148af8aad7
| 185
|
py
|
Python
|
location/apps.py
|
ohahlev/ahlev-django-location
|
7d6060ab7b21509f53790f5863b596f2b95c286a
|
[
"BSD-3-Clause"
] | null | null | null |
location/apps.py
|
ohahlev/ahlev-django-location
|
7d6060ab7b21509f53790f5863b596f2b95c286a
|
[
"BSD-3-Clause"
] | null | null | null |
location/apps.py
|
ohahlev/ahlev-django-location
|
7d6060ab7b21509f53790f5863b596f2b95c286a
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
from . import __version__ as VERSION
class LocationConfig(AppConfig):
name = "location"
verbose_name = "Location Management %s" % VERSION
| 26.428571
| 53
| 0.740541
| 21
| 185
| 6.285714
| 0.666667
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 185
| 7
| 54
| 26.428571
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
69a76c0a17eeaf86bff6be79b56692f86f13fcba
| 180
|
py
|
Python
|
src/core/managers/__init__.py
|
ablil/meistertask-cli
|
6c90802ac5dc7e5ac016e5c61c0e68db043e5784
|
[
"MIT"
] | 3
|
2020-11-03T22:27:18.000Z
|
2021-12-11T23:13:55.000Z
|
src/core/managers/__init__.py
|
ablil/meistertask-cli
|
6c90802ac5dc7e5ac016e5c61c0e68db043e5784
|
[
"MIT"
] | 1
|
2021-09-12T13:28:13.000Z
|
2021-09-12T13:28:13.000Z
|
src/core/managers/__init__.py
|
ablil/meistertask-cli
|
6c90802ac5dc7e5ac016e5c61c0e68db043e5784
|
[
"MIT"
] | null | null | null |
from .projectmanager import ProjectManager
from .sectionmanager import SectionManager
from .taskmanager import TaskManager
__all__ = [ProjectManager, SectionManager, TaskManager]
| 30
| 55
| 0.855556
| 16
| 180
| 9.375
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 180
| 5
| 56
| 36
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
69bb14fdb85056a9374cc9391656a6f438066089
| 159
|
py
|
Python
|
rbp_eclip/custom_keras_objects.py
|
Luma-1994/lama
|
60d802e2e4cce789f03eea11b038212ba5f7fd1b
|
[
"MIT"
] | 137
|
2018-03-13T17:44:46.000Z
|
2022-02-18T06:07:45.000Z
|
rbp_eclip/custom_keras_objects.py
|
Luma-1994/lama
|
60d802e2e4cce789f03eea11b038212ba5f7fd1b
|
[
"MIT"
] | 111
|
2018-03-14T08:16:35.000Z
|
2022-03-04T18:26:41.000Z
|
rbp_eclip/custom_keras_objects.py
|
Luma-1994/lama
|
60d802e2e4cce789f03eea11b038212ba5f7fd1b
|
[
"MIT"
] | 57
|
2018-03-14T08:39:24.000Z
|
2022-02-01T15:56:04.000Z
|
import concise
# all the custom objects are already loaded through importing concise
OBJECTS = None
# new concise version
# OBJECTS = concise.custom_objects
| 19.875
| 69
| 0.798742
| 21
| 159
| 6
| 0.666667
| 0.206349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163522
| 159
| 7
| 70
| 22.714286
| 0.947368
| 0.754717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
69dff319ca7dbf29be8e3384b7b90359f52f9b2b
| 84
|
py
|
Python
|
more practice/date_time.py
|
shinigami423/Election_Analysis
|
dee8c7b08ea3b5d3d8d7fa618fed25ecd56f0318
|
[
"MIT"
] | null | null | null |
more practice/date_time.py
|
shinigami423/Election_Analysis
|
dee8c7b08ea3b5d3d8d7fa618fed25ecd56f0318
|
[
"MIT"
] | null | null | null |
more practice/date_time.py
|
shinigami423/Election_Analysis
|
dee8c7b08ea3b5d3d8d7fa618fed25ecd56f0318
|
[
"MIT"
] | null | null | null |
import datetime
now = datetime.datetime.now()
print(f"The time right now is {now}")
| 21
| 37
| 0.738095
| 14
| 84
| 4.428571
| 0.642857
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 84
| 4
| 37
| 21
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
69f328d7748034d4d4f469bc503c8527c26edd6a
| 115
|
py
|
Python
|
datacaptureapp/templatetags/tags.py
|
steftaz/PinPoint
|
c38c19e25a2f4ab6688c48d0c84f3b046be86059
|
[
"MIT"
] | 1
|
2020-11-05T21:54:49.000Z
|
2020-11-05T21:54:49.000Z
|
datacaptureapp/templatetags/tags.py
|
steftaz/PinPoint
|
c38c19e25a2f4ab6688c48d0c84f3b046be86059
|
[
"MIT"
] | null | null | null |
datacaptureapp/templatetags/tags.py
|
steftaz/PinPoint
|
c38c19e25a2f4ab6688c48d0c84f3b046be86059
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
@register.filter()
def get(h, key):
return h[key]
| 12.777778
| 29
| 0.704348
| 16
| 115
| 5.0625
| 0.75
| 0.098765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 115
| 8
| 30
| 14.375
| 0.852632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
69f8299e2688600dbe459e04364c8cbf6393451f
| 1,206
|
py
|
Python
|
app/migrations/0018_auto_20200918_0832.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | 1
|
2021-10-15T14:37:33.000Z
|
2021-10-15T14:37:33.000Z
|
app/migrations/0018_auto_20200918_0832.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | null | null | null |
app/migrations/0018_auto_20200918_0832.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-09-18 08:32
import app.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0017_auto_20200918_0831'),
]
operations = [
migrations.AddField(
model_name='sshtunnel_configs',
name='DNS_NAME_SERVER',
field=models.CharField(default='', max_length=220),
),
migrations.AddField(
model_name='sshtunnel_configs',
name='FULL_PATH_SSH_KEY',
field=models.CharField(default='', max_length=220),
),
migrations.AddField(
model_name='sshtunnel_configs',
name='LOCAL_PORT',
field=models.IntegerField(default=0, validators=[app.models.validate_flag]),
),
migrations.AddField(
model_name='sshtunnel_configs',
name='REMOTE_PORT',
field=models.IntegerField(default=0, validators=[app.models.validate_flag]),
),
migrations.AddField(
model_name='sshtunnel_configs',
name='USER_SERVER',
field=models.CharField(default='', max_length=64),
),
]
| 30.15
| 88
| 0.596186
| 121
| 1,206
| 5.727273
| 0.438017
| 0.12987
| 0.165945
| 0.194805
| 0.7114
| 0.7114
| 0.7114
| 0.574315
| 0.574315
| 0.574315
| 0
| 0.048837
| 0.286899
| 1,206
| 39
| 89
| 30.923077
| 0.756977
| 0.038143
| 0
| 0.575758
| 1
| 0
| 0.151123
| 0.019862
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.060606
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3870844e7bd2480b2ad479af86334d7d26e0b9f7
| 2,987
|
py
|
Python
|
tests/codelets/test_frame_matcher.py
|
juliakzn/construction_finder
|
92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8
|
[
"MIT"
] | null | null | null |
tests/codelets/test_frame_matcher.py
|
juliakzn/construction_finder
|
92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8
|
[
"MIT"
] | null | null | null |
tests/codelets/test_frame_matcher.py
|
juliakzn/construction_finder
|
92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8
|
[
"MIT"
] | null | null | null |
import spacy
from construction_finder import codelets, frame
class TestFrameMatcher:
def test_from_frame_and_sentence(
self,
dative_frame_matcher,
codelet_info,
dont_give_me_that_sentence_doc,
dative_frame,
):
assert dative_frame_matcher.urgency_level == 1
assert dative_frame_matcher.codelet_probability == 1
assert dative_frame_matcher.sentence_doc == dont_give_me_that_sentence_doc
assert dative_frame_matcher.not_bonded_slot_ids is None
assert str(dative_frame_matcher.frame) == str(dative_frame)
def test_run(self, dative_frame_matcher):
frame_matcher_result = dative_frame_matcher.run()
for i, codelet in enumerate(frame_matcher_result.new_codelets):
assert isinstance(codelet, codelets.SlotMatcher)
assert codelet.urgency_level == 2
assert codelet.codelet_probability == 1
assert codelet.temp_modifier == 5.25
assert codelet.slot_id == i
assert frame_matcher_result.temp_modifier == 21
def test_set_bond(self, dative_frame_matcher, codelet):
new_codelets = dative_frame_matcher.set_bond(0, [2], codelet)
assert dative_frame_matcher.frame.slots[0].bond == [2]
assert dative_frame_matcher.frame.slots[0].form == "give"
assert dative_frame_matcher.frame.all_required_slots_found == False
assert dative_frame_matcher.frame.required_slots_to_find == 3
assert len(new_codelets) == 0
_ = dative_frame_matcher.set_bond(1, ["PRODROP"], codelet)
assert dative_frame_matcher.frame.all_required_slots_found == False
assert dative_frame_matcher.frame.required_slots_to_find == 2
_ = dative_frame_matcher.set_bond(2, [4], codelet)
assert dative_frame_matcher.frame.all_required_slots_found == False
assert dative_frame_matcher.frame.required_slots_to_find == 1
new_codelets = dative_frame_matcher.set_bond(3, [3], codelet)
assert dative_frame_matcher.frame.all_required_slots_found == True
assert dative_frame_matcher.frame.required_slots_to_find == 0
assert len(new_codelets) == 1
def test_get_form(self, dative_frame_matcher):
output = dative_frame_matcher.get_form([2])[0]
assert isinstance(output, spacy.tokens.token.Token)
assert output.text == "give"
def test_create_frame_finalizer(self, dative_frame_matcher):
output = dative_frame_matcher.create_frame_finalizer(
urgency_level=1, temp_modifier=42
)
assert isinstance(output, codelets.FrameFinalizer)
assert output.frame_matcher == dative_frame_matcher
assert output.urgency_level == 2
assert output.temp_modifier == 42
def test_assign_noun_phrases(self, dative_frame_matcher):
dative_frame_matcher.assign_noun_phrases("TEST_NOUN_PHRASES")
assert dative_frame_matcher.noun_phrases == "TEST_NOUN_PHRASES"
| 43.289855
| 82
| 0.719451
| 383
| 2,987
| 5.198433
| 0.206266
| 0.210949
| 0.280261
| 0.180814
| 0.496735
| 0.361125
| 0.332496
| 0.261175
| 0.214967
| 0.190859
| 0
| 0.013942
| 0.207566
| 2,987
| 68
| 83
| 43.926471
| 0.827207
| 0
| 0
| 0.053571
| 0
| 0
| 0.016404
| 0
| 0
| 0
| 0
| 0
| 0.535714
| 1
| 0.107143
| false
| 0
| 0.035714
| 0
| 0.160714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3873cccbe695a32cc61d6a7fa4bc58362dae9c36
| 810
|
py
|
Python
|
src/simpleAero.py
|
CB1204/LapSimulation
|
7d7f7c43a6bc3db3dbf02050d939da3f17647c2c
|
[
"MIT"
] | 7
|
2018-02-22T16:58:26.000Z
|
2022-02-05T18:17:56.000Z
|
src/simpleAero.py
|
CB1204/LapSimulation
|
7d7f7c43a6bc3db3dbf02050d939da3f17647c2c
|
[
"MIT"
] | null | null | null |
src/simpleAero.py
|
CB1204/LapSimulation
|
7d7f7c43a6bc3db3dbf02050d939da3f17647c2c
|
[
"MIT"
] | 2
|
2019-04-15T21:07:03.000Z
|
2021-05-11T07:41:49.000Z
|
import numpy as np
class Aero:
def __init__(self,reference_down_force = np.array([ 1000, 1000 ]), reference_drag = 500, reference_speed = 20):
self.reference_down_force = reference_down_force
self.reference_drag = reference_drag
self.reference_speed = reference_speed
self.Cdft = np.sum(self.reference_down_force) / (self.reference_speed**2)
self.Cdf = self.reference_down_force / (self.reference_speed**2)
self.Cdr = self.reference_down_force / (self.reference_speed**2)
self.Cd = self.reference_drag / (self.reference_speed**2)
def down_force(self,state):
return self.reference_down_force * (state.speed/self.reference_speed)**2
def drage(self,state):
return self.reference_drag * (state.speed/self.reference_speed)**2
| 45
| 116
| 0.708642
| 111
| 810
| 4.882883
| 0.252252
| 0.383764
| 0.232472
| 0.243542
| 0.643911
| 0.356089
| 0.249077
| 0.249077
| 0.249077
| 0
| 0
| 0.028701
| 0.182716
| 810
| 18
| 117
| 45
| 0.78852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.071429
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
389c1346e26e21c539673f6e79c223d5a7e2315c
| 325
|
py
|
Python
|
test/torch/funcs/base.py
|
opendilab/DI-treetensor
|
fe5f681123c3d6e8d7507fba38586d2edf12e693
|
[
"Apache-2.0"
] | 45
|
2021-09-04T15:57:44.000Z
|
2022-03-11T19:28:56.000Z
|
test/torch/funcs/base.py
|
opendilab/DI-treetensor
|
fe5f681123c3d6e8d7507fba38586d2edf12e693
|
[
"Apache-2.0"
] | 7
|
2021-09-06T13:06:12.000Z
|
2022-03-03T13:38:05.000Z
|
test/torch/funcs/base.py
|
opendilab/DI-treetensor
|
fe5f681123c3d6e8d7507fba38586d2edf12e693
|
[
"Apache-2.0"
] | 1
|
2021-09-30T15:18:06.000Z
|
2021-09-30T15:18:06.000Z
|
import treetensor.torch as ttorch
from treetensor.utils import replaceable_partial
from ...tests import choose_mark_with_existence_check, get_mark_with_existence_check
get_mark = replaceable_partial(get_mark_with_existence_check, base=ttorch)
choose_mark = replaceable_partial(choose_mark_with_existence_check, base=ttorch)
| 46.428571
| 84
| 0.883077
| 46
| 325
| 5.782609
| 0.369565
| 0.120301
| 0.255639
| 0.330827
| 0.5
| 0.443609
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067692
| 325
| 6
| 85
| 54.166667
| 0.877888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
38ce571b51c703429bee58b9177005c964b1cc7b
| 5,125
|
py
|
Python
|
active_subspaces/utils/simrunners.py
|
carlosal1015/active_subspaces
|
caaf108fcb89548a374fea7704b0d92d38b4539a
|
[
"MIT"
] | 1
|
2020-03-16T18:05:05.000Z
|
2020-03-16T18:05:05.000Z
|
active_subspaces/utils/simrunners.py
|
carlosal1015/active_subspaces
|
caaf108fcb89548a374fea7704b0d92d38b4539a
|
[
"MIT"
] | null | null | null |
active_subspaces/utils/simrunners.py
|
carlosal1015/active_subspaces
|
caaf108fcb89548a374fea7704b0d92d38b4539a
|
[
"MIT"
] | 1
|
2020-03-16T18:05:09.000Z
|
2020-03-16T18:05:09.000Z
|
"""Utilities for running several simulations at different inputs."""
import numpy as np
import logging
import time
from misc import process_inputs
class SimulationRunner():
"""
A class for running several simulations at different input values.
:cvar function fun: Runs the simulation for a fixed value of the input
parameters, given as an ndarray.
**See Also**
utils.simrunners.SimulationGradientRunner
**Notes**
The function fun should take an ndarray of size 1-by-m and return a float.
This float is the quantity of interest from the simulation. Often, the
function is a wrapper to a larger simulation code.
"""
fun = None
def __init__(self, fun):
"""
Initialize a SimulationRunner.
:param function fun: A function that runs the simulation for a fixed
value of the input parameters, given as an ndarray. This function
returns the quantity of interest from the model. Often, this
function is a wrapper to a larger simulation code.
"""
if not hasattr(fun, '__call__'):
raise TypeError('fun should be a callable function.')
self.fun = fun
def run(self, X):
"""
Run the simulation at several input values.
:param ndarray X: Contains all input points where one wishes to run the
simulation. If the simulation takes m inputs, then `X` must have
shape M-by-m, where M is the number of simulations to run.
:return: F, Contains the simulation output at each given input point.
The shape of `F` is M-by-1.
:rtype: ndarray
**Notes**
In principle, the simulation calls can be executed independently and in
parallel. Right now this function uses a sequential for-loop. Future
development will take advantage of multicore architectures to
parallelize this for-loop.
"""
# right now this just wraps a sequential for-loop.
# should be parallelized
X, M, m = process_inputs(X)
F = np.zeros((M, 1))
logger = logging.getLogger(__name__)
start = time.time()
for i in range(M):
F[i] = self.fun(X[i,:].reshape((1,m)))
if ((i+1) % 10) == 0:
logger.debug('\t{:d} of {:d}'.format(i+1, M))
end = time.time() - start
logger.info('Completed {:d} function evaluations in {:4.2f} seconds.'.format(M, end))
return F
class SimulationGradientRunner():
    """A class for running several simulations at different input values that
    return the gradients of the quantity of interest.

    :cvar function dfun: A function that runs the simulation for a fixed value
        of the input parameters, given as an ndarray. It returns the gradient
        of the quantity of interest at the given input.

    **See Also**

    utils.simrunners.SimulationRunner

    **Notes**

    The function dfun should take an ndarray of size 1-by-m and return an
    ndarray of shape 1-by-m. This ndarray is the gradient of the quantity of
    interest from the simulation. Often, the function is a wrapper to a larger
    simulation code.
    """
    # Callable that evaluates the gradient at a single 1-by-m input point.
    dfun = None

    def __init__(self, dfun):
        """Initialize a SimulationGradientRunner.

        :param function dfun: A function that runs the simulation for a fixed
            value of the input parameters, given as an ndarray. It returns the
            gradient of the quantity of interest at the given input.

        :raises TypeError: if `dfun` is not callable.
        """
        if not hasattr(dfun, '__call__'):
            raise TypeError('fun should be a callable function.')
        self.dfun = dfun

    def run(self, X):
        """Run the simulation at several input values and return the gradients
        of the quantity of interest.

        :param ndarray X: Contains all input points where one wishes to run the
            simulation. If the simulation takes m inputs, then `X` must have
            shape M-by-m, where M is the number of simulations to run.

        :return: dF, Contains the gradient of the quantity of interest at each
            given input point. The shape of `dF` is M-by-m.
        :rtype: ndarray

        **Notes**

        In principle, the simulation calls can be executed independently and in
        parallel. Right now this function uses a sequential for-loop. Future
        development will take advantage of multicore architectures to
        parallelize this for-loop.
        """
        # right now this just wraps a sequential for-loop.
        # should be parallelized
        X, M, m = process_inputs(X)
        dF = np.zeros((M, m))

        logger = logging.getLogger(__name__)
        start = time.time()
        for i in range(M):
            # Each call sees a single input point shaped 1-by-m; the returned
            # gradient is reshaped the same way before being stored.
            df = self.dfun(X[i,:].reshape((1,m)))
            dF[i,:] = df.reshape((1,m))
            logger.debug('Completed {:d} of {:d} gradient evaluations.'.format(i+1, M))
        end = time.time() - start
        logger.info('Completed {:d} gradient evaluations in {:4.2f} seconds.'.format(M, end))

        return dF
| 33.717105
| 93
| 0.630244
| 705
| 5,125
| 4.543262
| 0.204255
| 0.06088
| 0.03247
| 0.052451
| 0.77365
| 0.766781
| 0.745863
| 0.741805
| 0.686232
| 0.649079
| 0
| 0.004961
| 0.292098
| 5,125
| 151
| 94
| 33.940397
| 0.877894
| 0.603707
| 0
| 0.35
| 0
| 0
| 0.159091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a19fb6964ed389691cceb1c6e1ce0aa7472b369
| 2,042
|
py
|
Python
|
keycloak/urls_patterns.py
|
c0mpiler/py-keycloak
|
e2fee4ab0d8b2fc0f3b98291d907ddf45290cbb4
|
[
"Apache-2.0"
] | 1
|
2018-08-06T00:50:30.000Z
|
2018-08-06T00:50:30.000Z
|
keycloak/urls_patterns.py
|
c0mpiler/py-keycloak
|
e2fee4ab0d8b2fc0f3b98291d907ddf45290cbb4
|
[
"Apache-2.0"
] | null | null | null |
keycloak/urls_patterns.py
|
c0mpiler/py-keycloak
|
e2fee4ab0d8b2fc0f3b98291d907ddf45290cbb4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# URL path templates for the Keycloak REST API. Brace placeholders such as
# {realm-name}, {id} and {client-id} are substituted by the caller before
# the request is issued.

# OPENID URLS
URL_WELL_KNOWN = "realms/{realm-name}/.well-known/openid-configuration"
URL_TOKEN = "realms/{realm-name}/protocol/openid-connect/token"
URL_USERINFO = "realms/{realm-name}/protocol/openid-connect/userinfo"
URL_LOGOUT = "realms/{realm-name}/protocol/openid-connect/logout"
URL_CERTS = "realms/{realm-name}/protocol/openid-connect/certs"
URL_INTROSPECT = "realms/{realm-name}/protocol/openid-connect/token/introspect"
URL_ENTITLEMENT = "realms/{realm-name}/authz/entitlement/{resource-server-id}"

# ADMIN URLS
URL_ADMIN_USERS = "admin/realms/{realm-name}/users"
URL_ADMIN_USERS_COUNT = "admin/realms/{realm-name}/users/count"
URL_ADMIN_USER = "admin/realms/{realm-name}/users/{id}"
URL_ADMIN_USER_CONSENTS = "admin/realms/{realm-name}/users/{id}/consents"
URL_ADMIN_SEND_UPDATE_ACCOUNT = "admin/realms/{realm-name}/users/{id}/execute-actions-email"
URL_ADMIN_SEND_VERIFY_EMAIL = "admin/realms/{realm-name}/users/{id}/send-verify-email"
URL_ADMIN_RESET_PASSWORD = "admin/realms/{realm-name}/users/{id}/reset-password"
URL_ADMIN_GET_SESSIONS = "admin/realms/{realm-name}/users/{id}/sessions"
URL_ADMIN_USER_CLIENT_ROLES = "admin/realms/{realm-name}/users/{id}/role-mappings/clients/{client-id}"
URL_ADMIN_USER_GROUP = "admin/realms/{realm-name}/users/{id}/groups/{group-id}"
URL_ADMIN_SERVER_INFO = "admin/serverinfo"
URL_ADMIN_GROUPS = "admin/realms/{realm-name}/groups"
URL_ADMIN_GROUP = "admin/realms/{realm-name}/groups/{id}"
URL_ADMIN_GROUP_CHILD = "admin/realms/{realm-name}/groups/{id}/children"
URL_ADMIN_GROUP_PERMISSIONS = "admin/realms/{realm-name}/groups/{id}/management/permissions"
URL_ADMIN_CLIENTS = "admin/realms/{realm-name}/clients"
URL_ADMIN_CLIENT = "admin/realms/{realm-name}/clients/{id}"
URL_ADMIN_CLIENT_ROLES = "admin/realms/{realm-name}/clients/{id}/roles"
URL_ADMIN_CLIENT_ROLE = "admin/realms/{realm-name}/clients/{id}/roles/{role-name}"
URL_ADMIN_REALM_ROLES = "admin/realms/{realm-name}/roles"
URL_ADMIN_USER_STORAGE = "admin/realms/{realm-name}/user-storage/{id}/sync"
| 52.358974
| 102
| 0.77571
| 299
| 2,042
| 5.070234
| 0.183946
| 0.19591
| 0.26715
| 0.263852
| 0.492744
| 0.401715
| 0.098945
| 0
| 0
| 0
| 0
| 0.000515
| 0.049951
| 2,042
| 38
| 103
| 53.736842
| 0.780928
| 0.021548
| 0
| 0
| 0
| 0.035714
| 0.647944
| 0.63992
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.035714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a23555ca32bcafd6bc0fec5a02872d41b8e45a4
| 1,771
|
py
|
Python
|
tests/changes/api/test_jenkins_master_blacklist.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 443
|
2015-01-03T16:28:39.000Z
|
2021-04-26T16:39:46.000Z
|
tests/changes/api/test_jenkins_master_blacklist.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 12
|
2015-07-30T19:07:16.000Z
|
2016-11-07T23:11:21.000Z
|
tests/changes/api/test_jenkins_master_blacklist.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 47
|
2015-01-09T10:04:00.000Z
|
2020-11-18T17:58:19.000Z
|
from changes.testutils import APITestCase
class JenkinsMasterBlacklist(APITestCase):
    """API tests for adding/removing Jenkins masters on the blacklist."""

    def test_add_remove_blacklist(self):
        """Adding two masters, listing them, then removing one."""
        path = '/api/0/jenkins_master_blacklist/'

        # Add to blacklist
        data = dict(master_url='https://jenkins-master-a')
        resp = self.client.post(path, data=data)
        assert resp.status_code == 200

        data = dict(master_url='https://jenkins-master-b')
        resp = self.client.post(path, data=data)
        assert resp.status_code == 200

        resp = self.client.get(path)
        # Bug fix: this comparison previously lacked `assert`, so a bad
        # status code was silently discarded.
        assert resp.status_code == 200
        result = self.unserialize(resp)
        assert 'https://jenkins-master-a' in result['blacklist']
        assert 'https://jenkins-master-b' in result['blacklist']

        # Delete from blacklist
        data = dict(master_url='https://jenkins-master-a', remove=1)
        resp = self.client.post(path, data=data)
        # Bug fix: same missing `assert` as above.
        assert resp.status_code == 200
        assert ['https://jenkins-master-b'] == self.unserialize(resp)['blacklist']

    def test_re_add(self):
        """Re-adding an already-blacklisted master returns a warning."""
        path = '/api/0/jenkins_master_blacklist/'

        data = dict(master_url='https://jenkins-master-a')
        resp = self.client.post(path, data=data)
        assert resp.status_code == 200

        data = dict(master_url='https://jenkins-master-a')
        resp = self.client.post(path, data=data)
        assert resp.status_code == 200
        result = self.unserialize(resp)
        assert 'warning' in result

    def test_remove_missing(self):
        """Removing a master that is not blacklisted returns a warning."""
        path = '/api/0/jenkins_master_blacklist/'

        data = dict(master_url='https://jenkins-master-a', remove=1)
        resp = self.client.post(path, data=data)
        assert resp.status_code == 200
        result = self.unserialize(resp)
        assert 'warning' in result
| 38.5
| 82
| 0.636364
| 225
| 1,771
| 4.893333
| 0.182222
| 0.141689
| 0.147139
| 0.108084
| 0.757493
| 0.71208
| 0.71208
| 0.681199
| 0.681199
| 0.637602
| 0
| 0.01916
| 0.233766
| 1,771
| 45
| 83
| 39.355556
| 0.792189
| 0.021457
| 0
| 0.722222
| 0
| 0
| 0.204046
| 0.055491
| 0
| 0
| 0
| 0
| 0.277778
| 1
| 0.083333
| false
| 0
| 0.027778
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a3209bea5bb8001caa34c536b20c6629be15a1a
| 338
|
py
|
Python
|
bhabana/metrics/confusion_matrix.py
|
dashayushman/bhabana
|
7438505e20be53a4c524324abf9cf8985d0fc684
|
[
"Apache-2.0"
] | null | null | null |
bhabana/metrics/confusion_matrix.py
|
dashayushman/bhabana
|
7438505e20be53a4c524324abf9cf8985d0fc684
|
[
"Apache-2.0"
] | null | null | null |
bhabana/metrics/confusion_matrix.py
|
dashayushman/bhabana
|
7438505e20be53a4c524324abf9cf8985d0fc684
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from bhabana.metrics import Metric
from sklearn.metrics import classification_report
class ClassificationReport(Metric):
    """Metric wrapper around sklearn's text classification report.

    Predictions and ground truths are given as score/one-hot matrices;
    both are reduced to label indices via argmax over axis 1.
    """

    def __call__(self, pred, gt):
        # Allow the metric instance to be used as a plain callable.
        return self.calculate(pred, gt)

    def calculate(self, pred, gt):
        true_labels = np.argmax(gt, axis=1)
        pred_labels = np.argmax(pred, axis=1)
        return classification_report(true_labels, pred_labels)
| 26
| 84
| 0.733728
| 46
| 338
| 5.26087
| 0.5
| 0.07438
| 0.082645
| 0.132231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0.171598
| 338
| 13
| 84
| 26
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
2a42a748492a4ae4fd9c040967785cfc2ee17759
| 108
|
py
|
Python
|
tests/qrcode/svg.py
|
heuer/segno-mimos
|
0b1b220c63fcda9fcaa0e42725ea719651a1d53e
|
[
"BSD-3-Clause"
] | 1
|
2017-02-08T21:24:37.000Z
|
2017-02-08T21:24:37.000Z
|
tests/qrcode/svg.py
|
heuer/segno-mimos
|
0b1b220c63fcda9fcaa0e42725ea719651a1d53e
|
[
"BSD-3-Clause"
] | 2
|
2016-09-01T18:36:06.000Z
|
2018-02-16T11:17:23.000Z
|
tests/qrcode/svg.py
|
heuer/segno-mimos
|
0b1b220c63fcda9fcaa0e42725ea719651a1d53e
|
[
"BSD-3-Clause"
] | null | null | null |
from segno_mimos.qrcode.image.svg import SvgImage
class SvgImageWhite(SvgImage):
    """SvgImage variant configured with a white background."""
    # NOTE(review): presumably read by SvgImage when rendering — confirm
    # against the base class.
    background = 'white'
| 18
| 49
| 0.777778
| 13
| 108
| 6.384615
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 108
| 5
| 50
| 21.6
| 0.892473
| 0
| 0
| 0
| 0
| 0
| 0.046296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2a478d767ca9f4fd848a7b06f22f49ab57c05f0a
| 59
|
py
|
Python
|
config.py
|
FAUSheppy/open-web-leaderboard
|
defcf5671d71bfa170c3c9267488b18a926b02d7
|
[
"MIT"
] | 2
|
2020-12-27T01:55:32.000Z
|
2021-07-26T15:40:03.000Z
|
config.py
|
FAUSheppy/open-web-leaderboard
|
defcf5671d71bfa170c3c9267488b18a926b02d7
|
[
"MIT"
] | 2
|
2020-12-22T15:39:06.000Z
|
2021-05-22T23:53:21.000Z
|
config.py
|
FAUSheppy/open-web-leaderboard
|
defcf5671d71bfa170c3c9267488b18a926b02d7
|
[
"MIT"
] | null | null | null |
# Directory containing the leaderboard's database files.
# NOTE(review): host-specific hard-coded path — consider making this
# configurable via environment variable or CLI flag.
DB_PATH="/home/sheppy-gaming/insurgency-skillbird/python/"
| 29.5
| 58
| 0.813559
| 8
| 59
| 5.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 59
| 1
| 59
| 59
| 0.810345
| 0
| 0
| 0
| 0
| 0
| 0.813559
| 0.813559
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a59054beae94715a7a2f7f2d4ab876c9dc1bc2c
| 110
|
py
|
Python
|
lap/conventions.py
|
kenkeiras/lxc-application-packages
|
29f11b3185e5078e41cb433c647bd87d1537490f
|
[
"MIT"
] | 1
|
2020-02-16T15:02:18.000Z
|
2020-02-16T15:02:18.000Z
|
lap/conventions.py
|
kenkeiras/lxc-application-packages
|
29f11b3185e5078e41cb433c647bd87d1537490f
|
[
"MIT"
] | 6
|
2016-11-15T22:07:19.000Z
|
2016-11-20T22:54:08.000Z
|
lap/conventions.py
|
kenkeiras/lxc-application-packages
|
29f11b3185e5078e41cb433c647bd87d1537490f
|
[
"MIT"
] | null | null | null |
import os
from os.path import expanduser

# Per-user data directory for lap (~/.local/share/lap), matching the
# XDG default data location.
LOCAL_PATH = os.path.join(expanduser("~"), '.local', 'share', 'lap')
| 27.5
| 68
| 0.7
| 16
| 110
| 4.75
| 0.5625
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 110
| 3
| 69
| 36.666667
| 0.77551
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aab5d452c4d3b43731838736b61623cb29d14b52
| 89
|
py
|
Python
|
rcfg/reditor/apps.py
|
tony-mikhailov/Kalachakra
|
7a46be7e75bad0500914e5a7c44662c6740ebaa2
|
[
"MIT"
] | null | null | null |
rcfg/reditor/apps.py
|
tony-mikhailov/Kalachakra
|
7a46be7e75bad0500914e5a7c44662c6740ebaa2
|
[
"MIT"
] | 3
|
2021-03-19T01:19:04.000Z
|
2021-06-04T22:44:35.000Z
|
rcfg/reditor/apps.py
|
tony-mikhailov/Kalachakra
|
7a46be7e75bad0500914e5a7c44662c6740ebaa2
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class ReditorConfig(AppConfig):
    """Django application configuration for the `reditor` app."""
    # App label used by Django's app registry.
    name = 'reditor'
| 14.833333
| 33
| 0.752809
| 10
| 89
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 5
| 34
| 17.8
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aab7eee288de33d4bfea121f908e81a3e0d27e03
| 264
|
py
|
Python
|
Winston/lesson-06/range.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | 2
|
2021-03-20T02:07:19.000Z
|
2021-03-20T02:07:26.000Z
|
Winston/lesson-06/range.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | null | null | null |
Winston/lesson-06/range.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | 8
|
2021-02-20T03:10:50.000Z
|
2021-03-20T02:42:45.000Z
|
# Demonstrates Python's range(): range(5) is equivalent to range(0, 5),
# and a non-zero start such as range(5, 9) is also supported.
indexes = range(5)
same_indexes = range(0, 5)

print("indexes are:")
for value in indexes:
    print(value)

print("same_indexes are:")
for value in same_indexes:
    print(value)

special_indexes = range(5, 9)
print("special_indexes are:")
for value in special_indexes:
    print(value)
| 16.5
| 29
| 0.689394
| 44
| 264
| 4
| 0.272727
| 0.204545
| 0.221591
| 0.238636
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023041
| 0.17803
| 264
| 16
| 30
| 16.5
| 0.788018
| 0
| 0
| 0.25
| 0
| 0
| 0.185606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
aacd18d855883862825fbe188a40121058d8b604
| 406
|
py
|
Python
|
S9/model/utils/callbacks.py
|
abishek-raju/EVA4B2
|
189f4062c85d91f43c1381087a9c89ff794e5428
|
[
"Apache-2.0"
] | null | null | null |
S9/model/utils/callbacks.py
|
abishek-raju/EVA4B2
|
189f4062c85d91f43c1381087a9c89ff794e5428
|
[
"Apache-2.0"
] | null | null | null |
S9/model/utils/callbacks.py
|
abishek-raju/EVA4B2
|
189f4062c85d91f43c1381087a9c89ff794e5428
|
[
"Apache-2.0"
] | null | null | null |
from torch.optim.lr_scheduler import StepLR
def lr_scheduler(optimizer, step_size, gamma):
    """Build a step-decay learning-rate scheduler.

    Args:
        optimizer: Model optimizer.
        step_size: Frequency for changing learning rate.
        gamma: Factor for changing learning rate.

    Returns:
        StepLR: Learning rate scheduler.
    """
    scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)
    return scheduler
| 23.882353
| 62
| 0.684729
| 48
| 406
| 5.666667
| 0.479167
| 0.117647
| 0.1875
| 0.169118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 406
| 16
| 63
| 25.375
| 0.883117
| 0.504926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
aadb48b79c4dd455f0de4cbf65ebf3eae241c297
| 180
|
py
|
Python
|
venv/lib/python3.8/site-packages/restructuredtext_lint/__init__.py
|
trkohler/biopython
|
e7b7d010c71b19de439aa25de736748de4d6ca32
|
[
"BSD-3-Clause"
] | 142
|
2015-01-27T13:37:45.000Z
|
2022-01-29T06:57:23.000Z
|
venv/lib/python3.8/site-packages/restructuredtext_lint/__init__.py
|
trkohler/biopython
|
e7b7d010c71b19de439aa25de736748de4d6ca32
|
[
"BSD-3-Clause"
] | 50
|
2015-03-04T18:36:08.000Z
|
2022-02-26T20:34:08.000Z
|
venv/lib/python3.8/site-packages/restructuredtext_lint/__init__.py
|
trkohler/biopython
|
e7b7d010c71b19de439aa25de736748de4d6ca32
|
[
"BSD-3-Clause"
] | 28
|
2015-04-09T16:52:08.000Z
|
2020-11-22T20:37:14.000Z
|
# Load in our dependencies
from __future__ import absolute_import
from restructuredtext_lint.lint import lint, lint_file

# Export lint functions
# The self-assignments re-bind the imported names at module level so that
# linters treat them as intentional public re-exports rather than unused
# imports; runtime behavior is unchanged.
lint = lint
lint_file = lint_file
| 22.5
| 54
| 0.827778
| 26
| 180
| 5.384615
| 0.5
| 0.228571
| 0.171429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 180
| 7
| 55
| 25.714286
| 0.903226
| 0.255556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2aa412e48e8a6ff1301dae4f3f1796fd768131e1
| 227
|
py
|
Python
|
pytorch_learning/02.py
|
Howardhuang98/leet_code
|
b9985cfc163ca4de92dfeacd8fc3a167d0731d0b
|
[
"MIT"
] | 1
|
2021-12-16T14:47:45.000Z
|
2021-12-16T14:47:45.000Z
|
pytorch_learning/02.py
|
Howardhuang98/leet_code
|
b9985cfc163ca4de92dfeacd8fc3a167d0731d0b
|
[
"MIT"
] | null | null | null |
pytorch_learning/02.py
|
Howardhuang98/leet_code
|
b9985cfc163ca4de92dfeacd8fc3a167d0731d0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 02.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2021/9/23 10:34
------------
"""
# Fix: the script uses torch.cuda, but previously imported only the
# torch.version submodule, which exposed `torch` merely as a side effect.
# Import the package it actually uses.
import torch

# Report whether a CUDA-capable device is usable by this PyTorch build.
print(torch.cuda.is_available())
| 17.461538
| 36
| 0.563877
| 29
| 227
| 4.37931
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078212
| 0.211454
| 227
| 12
| 37
| 18.916667
| 0.631285
| 0.700441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
2dc2649ba578b740eb86d3b4d95eb6685fe0d885
| 501
|
py
|
Python
|
test/test.py
|
SarithT/xapitrader
|
0018bc37d9756a10c328def90d042ef39857cfb5
|
[
"MIT"
] | null | null | null |
test/test.py
|
SarithT/xapitrader
|
0018bc37d9756a10c328def90d042ef39857cfb5
|
[
"MIT"
] | null | null | null |
test/test.py
|
SarithT/xapitrader
|
0018bc37d9756a10c328def90d042ef39857cfb5
|
[
"MIT"
] | null | null | null |
import unittest
# def discover_and_run(start_dir: str = '.', pattern: str = 'test_*.py'):
# """Discover and run tests cases, returning the result."""
# tests = unittest.defaultTestLoader(start_dir, pattern=pattern)
# # We'll use the standard text runner which prints to stdout
# runner = unittest.TextTestRunner()
# result = runner.run(tests) # Returns a TestResult
# print(result.errors, result.failures) # And more useful properties
# return result
# discover_and_run()
| 41.75
| 73
| 0.702595
| 63
| 501
| 5.47619
| 0.619048
| 0.095652
| 0.121739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183633
| 501
| 12
| 74
| 41.75
| 0.843521
| 0.922156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2dca63f9de7d6399cab3fe9b430dfbe457f199b3
| 171
|
py
|
Python
|
tests/web_platform/css_flexbox_1/test_flexbox_align_items_flexend.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_align_items_flexend.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_align_items_flexend.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase


class TestFlexbox_AlignItemsFlexend(W3CTestCase):
    """W3C platform tests for CSS Flexbox `align-items: flex-end`."""
    # find_tests discovers the matching test cases and vars().update injects
    # them into the class namespace as individual test methods.
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_align-items-flexend'))
| 28.5
| 82
| 0.818713
| 19
| 171
| 7
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019108
| 0.081871
| 171
| 5
| 83
| 34.2
| 0.828025
| 0
| 0
| 0
| 0
| 0
| 0.158824
| 0.158824
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2df1023bf51a171ab5b57afaec5813a339bce0d3
| 154
|
py
|
Python
|
dfdata/source/collect/futures/futures.py
|
Eric2827/DFdata
|
4db142232fc7127da3faae7c608772c72005cd25
|
[
"MIT"
] | null | null | null |
dfdata/source/collect/futures/futures.py
|
Eric2827/DFdata
|
4db142232fc7127da3faae7c608772c72005cd25
|
[
"MIT"
] | null | null | null |
dfdata/source/collect/futures/futures.py
|
Eric2827/DFdata
|
4db142232fc7127da3faae7c608772c72005cd25
|
[
"MIT"
] | null | null | null |
import pandas as pd


def get_futures_contract():
    """Return a placeholder futures-contract table.

    The frame has 6 rows and the 4 columns 'A'..'D', every cell holding
    the integer 1.
    """
    rows = [[1] * 4 for _ in range(6)]
    return pd.DataFrame(rows, columns=list('ABCD'))
| 25.666667
| 86
| 0.649351
| 27
| 154
| 3.62963
| 0.814815
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 0.214286
| 154
| 6
| 87
| 25.666667
| 0.785124
| 0
| 0
| 0
| 0
| 0
| 0.025806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
2df37b0510f436db1261c060169d35b212d03ce2
| 127
|
py
|
Python
|
courses/python/cursoemvideo/exercicios/ex005.py
|
bdpcampos/public
|
dda57c265718f3e1cc0d6bce73f149051f5647ef
|
[
"MIT"
] | 3
|
2020-04-28T01:42:09.000Z
|
2020-05-03T12:05:23.000Z
|
courses/python/cursoemvideo/exercicios/ex005.py
|
bdpcampos/public
|
dda57c265718f3e1cc0d6bce73f149051f5647ef
|
[
"MIT"
] | null | null | null |
courses/python/cursoemvideo/exercicios/ex005.py
|
bdpcampos/public
|
dda57c265718f3e1cc0d6bce73f149051f5647ef
|
[
"MIT"
] | null | null | null |
# Read an integer, then report it together with its successor and
# predecessor (prompt and message intentionally in Portuguese).
n = int(input('Digite um número: '))
successor = n + 1
predecessor = n - 1
print('Seu número é o {}, seu sucessor é o {} e seu antecessor o {}.'.format(n, successor, predecessor))
| 31.75
| 88
| 0.614173
| 25
| 127
| 3.12
| 0.6
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 0.173228
| 127
| 3
| 89
| 42.333333
| 0.72381
| 0
| 0
| 0
| 0
| 0
| 0.622047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
9330076d7b9419e855d9d5ee3329b3ea3619b5f3
| 85
|
py
|
Python
|
rosalind/ini3.py
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
[
"MIT"
] | 1
|
2021-05-13T13:10:42.000Z
|
2021-05-13T13:10:42.000Z
|
rosalind/ini3.py
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
[
"MIT"
] | null | null | null |
rosalind/ini3.py
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
[
"MIT"
] | null | null | null |
# NOTE: Python 2 syntax (raw_input, print statement) — will not run on Python 3.
# Reads a string s and four indices a b c d, then prints the two inclusive
# slices s[a..b] and s[c..d] separated by a space.
s = raw_input()
[a, b, c, d] = map(int, raw_input().split())
print s[a:b+1], s[c:d+1]
| 28.333333
| 44
| 0.552941
| 21
| 85
| 2.142857
| 0.571429
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 0.141176
| 85
| 3
| 45
| 28.333333
| 0.589041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
933f7a5214934f481124da6f21936a740c310aaa
| 22
|
py
|
Python
|
project/__init__.py
|
Apriorhythm/PythonOpenSourceProjectTemplate
|
b7221ada02ec3a667e1c5b0e749b4c303fc29143
|
[
"MIT"
] | 1
|
2018-01-15T10:32:23.000Z
|
2018-01-15T10:32:23.000Z
|
project/__init__.py
|
Apriorhythm/PythonOpenSourceProjectTemplate
|
b7221ada02ec3a667e1c5b0e749b4c303fc29143
|
[
"MIT"
] | null | null | null |
project/__init__.py
|
Apriorhythm/PythonOpenSourceProjectTemplate
|
b7221ada02ec3a667e1c5b0e749b4c303fc29143
|
[
"MIT"
] | null | null | null |
"""
I do not know
"""
| 5.5
| 13
| 0.454545
| 4
| 22
| 2.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 22
| 3
| 14
| 7.333333
| 0.625
| 0.590909
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
fa8d2e179ded5d0ff287eb7219e7091c40b25555
| 195
|
py
|
Python
|
libraries/Arduino-LUFA/install.py
|
nullstalgia/Arduino-Lufa
|
44be19a68bea1e049f32e531bacc9c78e20d53b6
|
[
"Unlicense",
"MIT"
] | 78
|
2015-06-19T06:52:40.000Z
|
2022-03-26T18:54:14.000Z
|
libraries/Arduino-LUFA/install.py
|
nullstalgia/Arduino-Lufa
|
44be19a68bea1e049f32e531bacc9c78e20d53b6
|
[
"Unlicense",
"MIT"
] | 21
|
2016-12-05T14:28:17.000Z
|
2022-02-26T03:32:33.000Z
|
libraries/Arduino-LUFA/install.py
|
nullstalgia/Arduino-Lufa
|
44be19a68bea1e049f32e531bacc9c78e20d53b6
|
[
"Unlicense",
"MIT"
] | 22
|
2015-08-11T08:53:31.000Z
|
2021-12-10T11:30:29.000Z
|
#!/usr/bin/env python3
"""
Script to install LUFA boards for Arduino.
More info can be found in the activate.py script.
"""
# Fix: the docstring previously appeared *after* the import, so it was a
# stray string expression and module.__doc__ stayed None. Placing it first
# makes it the real module docstring (PEP 257).
from activate import install

if __name__ == '__main__':
    install()
| 19.5
| 49
| 0.717949
| 29
| 195
| 4.551724
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006211
| 0.174359
| 195
| 9
| 50
| 21.666667
| 0.813665
| 0.107692
| 0
| 0
| 0
| 0
| 0.109589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fab023ee92eb288e52061c7cf9add0ffdb87f42b
| 219
|
py
|
Python
|
src/ensae_teaching_cs/pandas_helper.py
|
sdpython/ensae_teaching_cs
|
ac978c4031afe6a5b846402a28628791e547a841
|
[
"MIT"
] | 73
|
2015-05-12T13:12:11.000Z
|
2021-12-21T11:44:29.000Z
|
src/ensae_teaching_cs/pandas_helper.py
|
Pandinosaurus/ensae_teaching_cs
|
3bc80f29d93c30de812e34c314bc96e6a4f0d025
|
[
"MIT"
] | 90
|
2015-06-23T11:11:35.000Z
|
2021-03-31T22:09:15.000Z
|
src/ensae_teaching_cs/pandas_helper.py
|
Pandinosaurus/ensae_teaching_cs
|
3bc80f29d93c30de812e34c314bc96e6a4f0d025
|
[
"MIT"
] | 65
|
2015-01-13T08:23:55.000Z
|
2022-02-11T22:42:07.000Z
|
# -*- coding: utf-8 -*-
"""
@file
@brief Collection of function to help with pandas
"""
from .td_2a.serialization import dfs2excel, df2list
from .faq.faq_pandas import read_csv, df_to_clipboard, groupby_topn, df_equal
| 24.333333
| 77
| 0.753425
| 33
| 219
| 4.787879
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 0.13242
| 219
| 8
| 78
| 27.375
| 0.810526
| 0.356164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
fac15805ad411f491085c716d5d6dcd3b2469969
| 1,169
|
py
|
Python
|
cognito/identity/exceptions.py
|
Rome84/AWS
|
32f5b6a83e37e62b0e33658bdab03ea493c905cb
|
[
"MIT"
] | null | null | null |
cognito/identity/exceptions.py
|
Rome84/AWS
|
32f5b6a83e37e62b0e33658bdab03ea493c905cb
|
[
"MIT"
] | null | null | null |
cognito/identity/exceptions.py
|
Rome84/AWS
|
32f5b6a83e37e62b0e33658bdab03ea493c905cb
|
[
"MIT"
] | null | null | null |
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError


# Each class below is a thin marker subclass of BotoServerError, allowing
# callers to catch a specific service fault by exception type.
class LimitExceededException(BotoServerError):
    pass


class ResourceConflictException(BotoServerError):
    pass


class DeveloperUserAlreadyRegisteredException(BotoServerError):
    pass


class TooManyRequestsException(BotoServerError):
    pass


class InvalidParameterException(BotoServerError):
    pass


class ResourceNotFoundException(BotoServerError):
    pass


class InternalErrorException(BotoServerError):
    pass


class NotAuthorizedException(BotoServerError):
    pass
| 25.977778
| 75
| 0.763901
| 131
| 1,169
| 6.816794
| 0.580153
| 0.170213
| 0.18813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192472
| 1,169
| 44
| 76
| 26.568182
| 0.945975
| 0.48503
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.470588
| 0.058824
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
facc79a1a37a78d576d1f836f1065401065c6c7d
| 194
|
py
|
Python
|
project/payments/payment_methods/payments_stripe/urls.py
|
steetstyle/Django-Ecommerce-API
|
89c2c973e560346a5be74019709dc9a9f8ab7b2a
|
[
"MIT"
] | null | null | null |
project/payments/payment_methods/payments_stripe/urls.py
|
steetstyle/Django-Ecommerce-API
|
89c2c973e560346a5be74019709dc9a9f8ab7b2a
|
[
"MIT"
] | null | null | null |
project/payments/payment_methods/payments_stripe/urls.py
|
steetstyle/Django-Ecommerce-API
|
89c2c973e560346a5be74019709dc9a9f8ab7b2a
|
[
"MIT"
] | null | null | null |
from django.urls import path, include

from .views import custom_webhook

# Mount dj-stripe's bundled URLs at the root of this include, plus a
# project-specific webhook endpoint.
urlpatterns = [
    path("", include("djstripe.urls", namespace="djstripe")),
    path("custom_webhook", custom_webhook)
]
| 24.25
| 60
| 0.737113
| 23
| 194
| 6.086957
| 0.521739
| 0.278571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128866
| 194
| 8
| 61
| 24.25
| 0.828402
| 0
| 0
| 0
| 0
| 0
| 0.180412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fae8ed0591ca237e03f8e6e9cf8d2caa9320ff62
| 155
|
py
|
Python
|
source/models/interpolation_bilenear.py
|
1pkg/neura
|
b5ac79d2141a556f9b488b6ae07cc89f8b0cbccd
|
[
"MIT"
] | null | null | null |
source/models/interpolation_bilenear.py
|
1pkg/neura
|
b5ac79d2141a556f9b488b6ae07cc89f8b0cbccd
|
[
"MIT"
] | null | null | null |
source/models/interpolation_bilenear.py
|
1pkg/neura
|
b5ac79d2141a556f9b488b6ae07cc89f8b0cbccd
|
[
"MIT"
] | null | null | null |
from PIL import Image

from .base_interpolation import BaseInterpolation


class InterpolationBilenear(BaseInterpolation):
    """Interpolation strategy selecting PIL's bilinear resampling filter.

    NOTE(review): the class name misspells "bilinear"; kept as-is for
    API compatibility with existing callers.
    """
    # Presumably consumed by BaseInterpolation when scaling — confirm in
    # the base class.
    _scale_type = Image.BILINEAR
| 22.142857
| 49
| 0.83871
| 16
| 155
| 7.9375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122581
| 155
| 7
| 50
| 22.142857
| 0.933824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
faf1f6b335867f37bfd1e359ef4799129ac27fa1
| 578
|
py
|
Python
|
pycritty/__init__.py
|
T1erno/pycritty
|
bf785c5aa464358f5b692832562fd983bd407b0f
|
[
"MIT"
] | null | null | null |
pycritty/__init__.py
|
T1erno/pycritty
|
bf785c5aa464358f5b692832562fd983bd407b0f
|
[
"MIT"
] | null | null | null |
pycritty/__init__.py
|
T1erno/pycritty
|
bf785c5aa464358f5b692832562fd983bd407b0f
|
[
"MIT"
] | null | null | null |
"""Automated tools for managing alacritty configurations"""
__version__ = "0.4.0"
class PycrittyError(Exception):
pass
# Export public API
from pycritty.api.config import Config, set_config # noqa: F401, E402
from pycritty.api.install import install # noqa: F401, E402
from pycritty.api.load import load_config # noqa: F401, E402
from pycritty.api.save import save_config # noqa: F401, E402
from pycritty.api.rm import remove # noqa: F401, E402
from pycritty.api.ls import ( # noqa: F401, E402
list_themes,
list_fonts,
list_configs,
print_list,
)
| 26.272727
| 70
| 0.735294
| 82
| 578
| 5.04878
| 0.426829
| 0.173913
| 0.217391
| 0.193237
| 0.369565
| 0.369565
| 0.23913
| 0
| 0
| 0
| 0
| 0.081761
| 0.17474
| 578
| 21
| 71
| 27.52381
| 0.786164
| 0.301038
| 0
| 0
| 0
| 0
| 0.012755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.071429
| 0.428571
| 0
| 0.5
| 0.071429
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
87ae1ab35400b530721e199d22e07a34b5c638e0
| 624
|
py
|
Python
|
main.py
|
duanyuluo/PyCookbook
|
c70fa22779997dad58ffc056f428c434a879ecca
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
duanyuluo/PyCookbook
|
c70fa22779997dad58ffc056f428c434a879ecca
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
duanyuluo/PyCookbook
|
c70fa22779997dad58ffc056f428c434a879ecca
|
[
"Apache-2.0"
] | null | null | null |
#encoding=utf-8
# Python Language Cookbook
# You can learn the Python programming language through same sample code.
# Importing a module when you want to review the sample code's result.
from tools import *
# section 1: variables
# variables name-rule, scope, casting and multi-assign
import variable
# section 2: datatype
# python have 14 datetypes that belongs to text/number/sequence/map/set/boolean/binary catalog.
# text, boolean and number are simple datatype.
# sequence, map and set are collection datatype.
# binary is a raw datatype of computer memery and storage.
import datatype
import string
| 31.2
| 97
| 0.767628
| 92
| 624
| 5.206522
| 0.684783
| 0.041754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009728
| 0.176282
| 624
| 20
| 98
| 31.2
| 0.922179
| 0.852564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
87d0bb2afdf4893b27dfc53271b0b7d49e93e73f
| 334
|
py
|
Python
|
retropath2_wrapper/__init__.py
|
brsynth/retropath2-wrapper
|
1959e09fc97be0220ef28b87384c26a8ade818da
|
[
"MIT"
] | 4
|
2021-10-13T22:12:16.000Z
|
2021-12-25T13:00:53.000Z
|
retropath2_wrapper/__init__.py
|
brsynth/retropath2-wrapper
|
1959e09fc97be0220ef28b87384c26a8ade818da
|
[
"MIT"
] | 6
|
2020-08-14T15:02:35.000Z
|
2022-03-04T13:05:21.000Z
|
retropath2_wrapper/__init__.py
|
brsynth/retropath2-wrapper
|
1959e09fc97be0220ef28b87384c26a8ade818da
|
[
"MIT"
] | null | null | null |
"""
Created on June 16 2020
@author: Joan Hérisson
"""
from retropath2_wrapper.RetroPath2 import retropath2
from retropath2_wrapper.Args import build_args_parser
from retropath2_wrapper._version import __version__
from retropath2_wrapper.__main__ import parse_and_check_args
__all__ = ["retropath2", "build_args_parser"]
| 25.692308
| 62
| 0.811377
| 42
| 334
| 5.880952
| 0.5
| 0.226721
| 0.340081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044674
| 0.128743
| 334
| 12
| 63
| 27.833333
| 0.804124
| 0.140719
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
87e93e57ac6241f54843651fe76c7909977f9d3b
| 164
|
py
|
Python
|
satchmo/projects/simple/urls.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | 16
|
2015-03-06T14:42:27.000Z
|
2019-12-23T21:37:01.000Z
|
satchmo/projects/simple/urls.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/projects/simple/urls.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | 8
|
2015-01-28T16:02:37.000Z
|
2022-03-03T21:29:40.000Z
|
from django.conf.urls.defaults import *
from satchmo_store.urls import urlpatterns
urlpatterns += patterns('',
(r'test/', include('simple.localsite.urls'))
)
| 20.5
| 48
| 0.731707
| 20
| 164
| 5.95
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 164
| 7
| 49
| 23.428571
| 0.826389
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 0.128049
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
e2133507a4953071bc10d698fb6e7ea212138716
| 137
|
py
|
Python
|
tests/interface_test.py
|
h0uter/sensor_director
|
5751aa564bccd44d0027476caef514833e013b49
|
[
"MIT"
] | null | null | null |
tests/interface_test.py
|
h0uter/sensor_director
|
5751aa564bccd44d0027476caef514833e013b49
|
[
"MIT"
] | null | null | null |
tests/interface_test.py
|
h0uter/sensor_director
|
5751aa564bccd44d0027476caef514833e013b49
|
[
"MIT"
] | null | null | null |
import sensor_director
def test_interface():
# frame_a =
point_b = (0, 0,0)
rot = sensor_director.determine_look_at_quat()
| 19.571429
| 50
| 0.693431
| 20
| 137
| 4.35
| 0.8
| 0.321839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.20438
| 137
| 7
| 50
| 19.571429
| 0.770642
| 0.065693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e21631424148ef6d8c8e9000f2f8fa24705d9e52
| 1,480
|
py
|
Python
|
cli/psym/graphql/input/survey_question_response.py
|
danielrh135568/symphony-1
|
54c92a0f8775d1a837ab7c7bd6a08ccd906d28a4
|
[
"BSD-3-Clause"
] | null | null | null |
cli/psym/graphql/input/survey_question_response.py
|
danielrh135568/symphony-1
|
54c92a0f8775d1a837ab7c7bd6a08ccd906d28a4
|
[
"BSD-3-Clause"
] | 12
|
2022-02-14T04:20:30.000Z
|
2022-03-28T04:20:17.000Z
|
cli/psym/graphql/input/survey_question_response.py
|
danielrh135568/symphony-1
|
54c92a0f8775d1a837ab7c7bd6a08ccd906d28a4
|
[
"BSD-3-Clause"
] | 1
|
2022-02-24T21:47:51.000Z
|
2022-02-24T21:47:51.000Z
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field as _field
from functools import partial
from ...config import custom_scalars, datetime
from numbers import Number
from typing import Any, AsyncGenerator, Dict, List, Generator, Optional
from dataclasses_json import DataClassJsonMixin, config
from gql_client.runtime.enum_utils import enum_field_metadata
from ..enum.survey_question_type import SurveyQuestionType
from ..input.file_input import FileInput
from ..input.survey_cell_scan_data import SurveyCellScanData
from ..input.survey_wi_fi_scan_data import SurveyWiFiScanData
@dataclass(frozen=True)
class SurveyQuestionResponse(DataClassJsonMixin):
formIndex: int
questionText: str
questionIndex: int
wifiData: List[SurveyWiFiScanData]
cellData: List[SurveyCellScanData]
imagesData: List[FileInput]
formName: Optional[str] = None
formDescription: Optional[str] = None
questionFormat: Optional[SurveyQuestionType] = None
boolData: Optional[bool] = None
emailData: Optional[str] = None
latitude: Optional[Number] = None
longitude: Optional[Number] = None
locationAccuracy: Optional[Number] = None
altitude: Optional[Number] = None
phoneData: Optional[str] = None
textData: Optional[str] = None
floatData: Optional[Number] = None
intData: Optional[int] = None
dateData: Optional[int] = None
photoData: Optional[FileInput] = None
| 34.418605
| 71
| 0.769595
| 167
| 1,480
| 6.718563
| 0.48503
| 0.04902
| 0.066845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000799
| 0.154054
| 1,480
| 42
| 72
| 35.238095
| 0.895367
| 0.04527
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.323529
| 0
| 0.970588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
3563ad358c5ed56a82b69eab00453103199f9076
| 821
|
py
|
Python
|
Post/migrations/0022_auto_20210302_1402.py
|
singh-sushil/minorproject
|
02fe8c1dce41109447d5f394bb37e10cb34d9316
|
[
"MIT"
] | 2
|
2020-12-27T11:28:02.000Z
|
2021-01-04T07:52:38.000Z
|
Post/migrations/0022_auto_20210302_1402.py
|
singh-sushil/minorproject
|
02fe8c1dce41109447d5f394bb37e10cb34d9316
|
[
"MIT"
] | 1
|
2020-12-26T13:36:12.000Z
|
2020-12-26T13:36:12.000Z
|
Post/migrations/0022_auto_20210302_1402.py
|
singh-sushil/minorproject
|
02fe8c1dce41109447d5f394bb37e10cb34d9316
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2021-03-02 08:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Post', '0021_auto_20210227_2216'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='image1',
new_name='backsideview',
),
migrations.RenameField(
model_name='post',
old_name='image2',
new_name='frontview',
),
migrations.RenameField(
model_name='post',
old_name='image3',
new_name='leftsideview',
),
migrations.RenameField(
model_name='post',
old_name='image4',
new_name='rightsideview',
),
]
| 24.147059
| 48
| 0.509135
| 72
| 821
| 5.597222
| 0.513889
| 0.208437
| 0.258065
| 0.297767
| 0.406948
| 0.406948
| 0.406948
| 0
| 0
| 0
| 0
| 0.069034
| 0.38246
| 821
| 33
| 49
| 24.878788
| 0.725838
| 0.054811
| 0
| 0.444444
| 1
| 0
| 0.152497
| 0.031039
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3579f937ad1ec2c4de70d0b2ec26bb9174b1f225
| 60
|
py
|
Python
|
tests/archive/__init__.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 326
|
2017-11-21T07:04:19.000Z
|
2022-03-26T01:25:44.000Z
|
tests/archive/__init__.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 17
|
2017-12-20T13:41:32.000Z
|
2022-02-16T16:42:41.000Z
|
tests/archive/__init__.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 147
|
2017-11-22T19:13:40.000Z
|
2022-03-29T04:47:07.000Z
|
"""THIS DIRECTORY IS TO HOLD TESTS FOR ARCHIVED SCRIPTS."""
| 30
| 59
| 0.733333
| 9
| 60
| 4.888889
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 60
| 1
| 60
| 60
| 0.862745
| 0.883333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
359254f7fa77b039c0be62b4312f3b416701e544
| 13,099
|
py
|
Python
|
tests/test_api.py
|
pik-software/apiqa-storage
|
197235d0012737f9964cd5bcf60d20b17cbd1104
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
pik-software/apiqa-storage
|
197235d0012737f9964cd5bcf60d20b17cbd1104
|
[
"MIT"
] | 2
|
2019-06-13T07:17:56.000Z
|
2020-08-05T12:56:55.000Z
|
tests/test_api.py
|
pik-software/apiqa-storage
|
197235d0012737f9964cd5bcf60d20b17cbd1104
|
[
"MIT"
] | 6
|
2019-12-05T14:58:44.000Z
|
2021-03-07T08:51:14.000Z
|
import json
import uuid
from collections import OrderedDict
from unittest.mock import patch
import faker
import pytest
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test.client import BOUNDARY, MULTIPART_CONTENT, encode_multipart
from django.urls import reverse
from django.utils.crypto import get_random_string
from minio import S3Error
from rest_framework import status
from apiqa_storage import settings
from apiqa_storage.files import file_info
from apiqa_storage.models import Attachment
from tests_storage.models import ModelWithAttachments
from .factories import AttachmentFactory, UserFactory, create_attach_with_file
@pytest.mark.django_db
def test_post_file(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_size = fake.random_int(min=1, max=settings.MAX_FILE_SIZE)
file_data = get_random_string(file_size).encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
post_data = {
'file': attachment,
}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url, data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.status_code == status.HTTP_201_CREATED
info = file_info(attachment)
attachment = Attachment.objects.get(uid=res.data['uid'])
assert attachment.user == api_client.user
assert res.data == OrderedDict([
('uid', str(attachment.uid)),
('created', attachment.created.isoformat()),
('name', info.name),
('size', info.size),
('content_type', info.content_type),
('tags', []),
('linked_from', attachment.linked_from),
])
@pytest.mark.django_db
def test_post_file_with_custom_uid(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_data = get_random_string().encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
attachment_uid = uuid.uuid4()
post_data = {'file': attachment}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url + f'?uid={attachment_uid}',
data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.status_code == status.HTTP_201_CREATED
info = file_info(attachment)
attachment = Attachment.objects.get(uid=res.data['uid'])
assert attachment.user == api_client.user
assert res.data == OrderedDict([
('uid', str(attachment_uid)),
('created', attachment.created.isoformat()),
('name', info.name),
('size', info.size),
('content_type', info.content_type),
('tags', []),
('linked_from', attachment.linked_from),
])
@pytest.mark.django_db
def test_post_file_with_incorrect_uid(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_data = get_random_string().encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
attachment_uid = 'incorrect'
post_data = {'file': attachment}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url + f'?uid={attachment_uid}',
data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_post_file_with_duplicate_uid(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_data = get_random_string().encode()
attachment = AttachmentFactory()
attachment_file = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
post_data = {'file': attachment_file}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url + f'?uid={attachment.uid}',
data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.status_code == status.HTTP_400_BAD_REQUEST
assert res.data[0] == (f'Attachment with uid = {attachment.uid} '
f'already exists.')
@pytest.mark.django_db
def test_post_file_size_validation_error(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_data = get_random_string(settings.MAX_FILE_SIZE + 1).encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
post_data = {'file': attachment}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url, data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.status_code == status.HTTP_400_BAD_REQUEST
assert res.data['file'][0] == (f'Max size of attach file:'
f' {settings.MINIO_STORAGE_MAX_FILE_SIZE}')
@pytest.mark.django_db
def test_destroy_attachment(storage, api_client):
attachment = create_attach_with_file(storage)
url = reverse('file_upload-detail', args=(str(attachment.uid),))
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.delete(url)
assert res.status_code == status.HTTP_204_NO_CONTENT
with pytest.raises(S3Error):
storage.file_get(attachment.path)
@pytest.mark.django_db
def test_destroy_related_attachment_validation_error(storage, api_client):
user = UserFactory()
attachment = AttachmentFactory(
object_content_type=ContentType.objects.get_for_model(user),
object_id=user.id
)
url = reverse('file_upload-detail', args=(str(attachment.uid),))
res = api_client.delete(url)
assert res.status_code == status.HTTP_400_BAD_REQUEST
assert res.data[0] == 'Delete attachments with relations not allowed'
@pytest.mark.django_db
def test_post_model_with_attachment(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('modelwithattachments-list')
attachments = AttachmentFactory.create_batch(
size=settings.MINIO_STORAGE_MAX_FILES_COUNT)
post_data = {
'name': fake.name(),
'attachment_ids': [str(attachment.pk) for attachment in attachments]
}
res = api_client.post(url, data=json.dumps(post_data),
content_type='application/json')
assert res.status_code == status.HTTP_201_CREATED
model_with_attachments = ModelWithAttachments.objects.get()
assert res.data == OrderedDict([
('uid', str(model_with_attachments.uid)),
('name', model_with_attachments.name),
('attachments', [OrderedDict([
('uid', str(attachment.uid)),
('created', attachment.created.isoformat()),
('name', attachment.name),
('size', attachment.size),
('content_type', attachment.content_type),
('tags', []),
('linked_from', attachment.linked_from),
]) for attachment in model_with_attachments.attachments.all()])
])
for attachment in attachments:
attachment.refresh_from_db()
assert attachment.object_id == model_with_attachments.pk
assert (attachment.object_content_type == ContentType.objects
.get_for_model(model_with_attachments))
@pytest.mark.django_db
def test_post_model_with_exising_attachments(storage, api_client):
fake = faker.Faker('ru_RU')
file_count = settings.MINIO_STORAGE_MAX_FILES_COUNT
url = reverse('modelwithattachments-list')
attachments = AttachmentFactory.create_batch(
size=file_count)
post_data = {
'name': fake.name(),
'attachment_ids': [str(attachment.pk) for attachment in attachments]
}
res = api_client.post(url, data=json.dumps(post_data),
content_type='application/json')
assert res.status_code == status.HTTP_201_CREATED
assert Attachment.objects.count() == file_count
res = api_client.post(url, data=json.dumps(post_data),
content_type='application/json')
assert res.status_code == status.HTTP_201_CREATED
for attach in res.data['attachments']:
assert attach['name'] == Attachment.objects.filter(
pk=attach['linked_from'],
).first().name
assert Attachment.objects.count() == file_count * 2
attach = Attachment.objects.first()
assert Attachment.objects.filter(path=attach.path).count() == 2
@pytest.mark.django_db
def test_post_model_with_max_files_count_validation_error(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('modelwithattachments-list')
attachments = AttachmentFactory.create_batch(
size=settings.MINIO_STORAGE_MAX_FILES_COUNT + 1)
post_data = {
'name': fake.name(),
'attachment_ids': [str(attachment.pk) for attachment in attachments]
}
res = api_client.post(url, data=json.dumps(post_data),
content_type='application/json')
assert res.status_code == status.HTTP_400_BAD_REQUEST
assert res.data['attachment_ids'][0] == (
f'Max files count: {settings.MINIO_STORAGE_MAX_FILES_COUNT}')
@pytest.mark.django_db
def test_post_file_with_tags(storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_size = fake.random_int(min=1, max=settings.MAX_FILE_SIZE)
file_data = get_random_string(file_size).encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
post_data = {
'file': attachment,
'tags': [fake.pystr(
min_chars=1, max_chars=settings.TAGS_CHARACTER_LIMIT)
for _ in range(fake.random_int(
min=1, max=settings.TAGS_COUNT_MAX))]
}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url, data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.status_code == status.HTTP_201_CREATED
info = file_info(attachment)
attachment = Attachment.objects.get(uid=res.data['uid'])
assert attachment.user == api_client.user
assert res.data == OrderedDict([
('uid', str(attachment.uid)),
('created', attachment.created.isoformat()),
('name', info.name),
('size', info.size),
('content_type', info.content_type),
('tags', post_data['tags']),
('linked_from', attachment.linked_from),
])
@pytest.mark.django_db
def test_post_file_with_tags_character_limit_validation_error(
storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_size = fake.random_int(min=1, max=settings.MAX_FILE_SIZE)
file_data = get_random_string(file_size).encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
tags_with_character_limit_error = [
fake.pystr(min_chars=settings.TAGS_CHARACTER_LIMIT + 1,
max_chars=settings.TAGS_CHARACTER_LIMIT + 20)]
post_data = {
'file': attachment,
'tags': tags_with_character_limit_error
}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url, data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.data['tags'][0][0] == (
f'Ensure this field has no more than '
f'{settings.TAGS_CHARACTER_LIMIT} characters.')
@pytest.mark.django_db
def test_post_file_with_tags_count_max_validation_error(
storage, api_client):
fake = faker.Faker('ru_RU')
url = reverse('file_upload-list')
file_size = fake.random_int(min=1, max=settings.MAX_FILE_SIZE)
file_data = get_random_string(file_size).encode()
attachment = SimpleUploadedFile(
fake.file_name(category='image', extension='jpeg'),
file_data, content_type='image/jpeg'
)
tags_with_count_max_error = [fake.pystr() for _
in range(settings.TAGS_COUNT_MAX + 1)]
post_data = {
'file': attachment,
'tags': tags_with_count_max_error
}
with patch('apiqa_storage.serializers.storage', storage):
res = api_client.post(
url, data=encode_multipart(BOUNDARY, post_data),
content_type=MULTIPART_CONTENT)
assert res.data['tags'][0] == (
f'Ensure this field has no more than {settings.TAGS_COUNT_MAX} '
f'elements.')
| 37.74928
| 79
| 0.678143
| 1,593
| 13,099
| 5.300691
| 0.091651
| 0.031975
| 0.035528
| 0.027712
| 0.782331
| 0.768119
| 0.755803
| 0.719564
| 0.691497
| 0.646613
| 0
| 0.005768
| 0.205817
| 13,099
| 346
| 80
| 37.858382
| 0.805921
| 0
| 0
| 0.629508
| 0
| 0
| 0.122681
| 0.043438
| 0
| 0
| 0
| 0
| 0.101639
| 1
| 0.042623
| false
| 0
| 0.059016
| 0
| 0.101639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
359eb2cc0eff8f260579bfd0208e7c1b219c030e
| 28
|
py
|
Python
|
sample/__init__.py
|
sonalnikam/try
|
26ef8355d652ffd35f63564c3c7665ad0776a0c8
|
[
"CC0-1.0"
] | null | null | null |
sample/__init__.py
|
sonalnikam/try
|
26ef8355d652ffd35f63564c3c7665ad0776a0c8
|
[
"CC0-1.0"
] | null | null | null |
sample/__init__.py
|
sonalnikam/try
|
26ef8355d652ffd35f63564c3c7665ad0776a0c8
|
[
"CC0-1.0"
] | null | null | null |
"""
Package for sample.
"""
| 7
| 19
| 0.571429
| 3
| 28
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 3
| 20
| 9.333333
| 0.695652
| 0.678571
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
35b9eb3e088833b9ce44e1f62fa6545924433971
| 196
|
py
|
Python
|
transforms/__init__.py
|
Abhishek-Aditya-bs/Streaming-Spark-For-Machine-Learning
|
76f9c97e66d6171bc83d1183fadc30bd492422a7
|
[
"MIT"
] | 1
|
2021-12-10T13:14:53.000Z
|
2021-12-10T13:14:53.000Z
|
transforms/__init__.py
|
iVishalr/SSML-spark-streaming-for-machine-learning
|
ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f
|
[
"MIT"
] | null | null | null |
transforms/__init__.py
|
iVishalr/SSML-spark-streaming-for-machine-learning
|
ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f
|
[
"MIT"
] | null | null | null |
from .normalize import Normalize
from .transforms import Transforms
from .random_flips import RandomHorizontalFlip,RandomVerticalFlip
from .resize import Resize
from .color_shift import ColorShift
| 39.2
| 65
| 0.872449
| 23
| 196
| 7.347826
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096939
| 196
| 5
| 66
| 39.2
| 0.954802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
35d4c318c39c8ae8035817cc2f9a7c72f926c120
| 139,019
|
py
|
Python
|
ckanext-hdx_theme/ckanext/hdx_theme/tests/mock_helper.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 58
|
2015-01-11T09:05:15.000Z
|
2022-03-17T23:44:07.000Z
|
ckanext-hdx_theme/ckanext/hdx_theme/tests/mock_helper.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 1,467
|
2015-01-01T16:47:44.000Z
|
2022-02-28T16:51:20.000Z
|
ckanext-hdx_theme/ckanext/hdx_theme/tests/mock_helper.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 17
|
2015-05-06T14:04:21.000Z
|
2021-11-11T19:58:16.000Z
|
import ckan.model as model
def populate_mock_as_c(mock_c, username):
mock_c.user = username
mock_c.userobj = model.User.by_name(username)
def mock_faq_page_content(id):
return {'topics': {'faq-Sensitive_Data': u'Sensitive Data', 'faq-Getting_Started': u'Getting Started',
'faq-Sharing_and_Using_Data': u'Sharing and Using Data', 'faq-Data_Licenses': u'Data Licenses',
'faq-Resources_for_Developers': u'Resources for Developers', 'faq-Geodata': u'Geodata',
'faq-Organisations': u'Organisations', 'faq-Contact': u'Contact',
'faq-HXL_and_HDX_Tools': u'HXL and HDX Tools',
'faq-Metadata_and_Data_Quality': u'Metadata and Data Quality', 'faq-Search': u'Search'},
'faq_data': [{'id': 'faq-Getting_Started', 'questions': [{'q': u'How does HDX define humanitarian data?',
'a': u'<p>We define humanitarian data as:</p>\n<ol>\n<li>data about the context in which a humanitarian crisis is occurring (e.g., baseline/development data, damage assessments, geospatial data)</li>\n<li>data about the people affected by the crisis and their needs</li>\n<li>data about the response by organisations and people seeking to help those who need assistance.</li>\n</ol>\n',
'id': u'How_does_HDX_define_humanitarian_data_'},
{'q': u'Is HDX open source?',
'a': u'<p>Yes. HDX uses an open-source software called <a href="http://ckan.org/" target="_blank" rel="noopener noreferrer">CKAN</a> for our technical back-end. You can find all of our code on <a href="https://github.com/OCHA-DAP" target="_blank" rel="noopener noreferrer">GitHub</a>.</p>\n',
'id': u'Is_HDX_open_source_'},
{'q': u'What browsers are best to use for HDX?',
'a': u'<p>We build and test HDX using the latest versions of Chrome and Firefox. We also test on Microsoft Edge, but do not formally support it.</p>\n',
'id': u'What_browsers_are_best_to_use_for_HDX_'},
{'q': u'How do I register an account with HDX?',
'a': u'<p>You can register by clicking on <a href="https://data.humdata.org/user/register" target="_blank" rel="noopener noreferrer">‘Sign Up’</a>.</p>\n',
'id': u'How_do_I_register_an_account_with_HDX_'},
{
'q': u'What if I forget my username and password?',
'a': u'<p>Use our <a href="https://data.humdata.org/user/reset" target="_blank" rel="noopener noreferrer">password recovery form</a> to reset your account details. Enter your username or e-mail and we will send you an e-mail with a link to create a new password.</p>\n',
'id': u'What_if_I_forget_my_username_and_password_'},
{
'q': u'What are the benefits of being a registered user?',
'a': u'<p>Anyone can view and download the data from the site, but registered users can access more features. After signing up you can:</p>\n<ol>\n<li>Contact data contributors to ask for more information about their data.</li>\n<li>Request access to the underlying data for metadata only entries (our HDX Connect feature).</li>\n<li>Join organisations to share data or to access private data, depending on your role within the organisation, e.g. an admin, editor, or member of the organisation (see more below).</li>\n<li>Request to create a new organisation and if approved, share data publicly or privately.</li>\n<li>Add data visualizations as showcase items alongside your organisations datasets.</li>\n<li>Follow the latest changes to data.</li>\n</ol>\n',
'id': u'What_are_the_benefits_of_being_a_registered_user_'},
{
'q': u'What does it mean to ‘follow’ data?',
'a': u'<p>HDX allows registered users to follow the data they are interested in. Updates to the datasets that you follow will appear as a running list in your user dashboard(accessible from your user name in the top right of every page when you are logged in). You can follow data, organisations, locations, topics and crises.</p>\n',
'id': u'What_does_it_mean_to___8216_follow__8217__data_'},
{
'q': u'How do I request access to a dataset where I can only see metadata?',
'a': u'<p>You’ll find a ‘Request Access’ button for datasets where only metadata is provided. The HDX Connect feature makes it possible to discover what data is available or what data collection initiatives are underway. Only registered users have the ability to contact the organisation through the request access module. The administrator for the contributing organisation can decide whether to accept or deny the request. Once the connection is made, HDX is not involved in the decision to share the data. Learn more about HDX Connect <a class="link faq-google-embed-marker" id="faq-google-embed-link-hdx-connect">here</a>.</p>\n<div class="modal presentation-modal" id="faq-google-embed-hdx-connect" tabindex="-1" role="dialog" aria-hidden="true">\n<div class="modal-dialog" role="document"><button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">\xd7</span></button></p>\n<div class="modal-content"><iframe load-src="https://docs.google.com/presentation/d/e/2PACX-1vQY05J7cbuRbbFyFGQ43dhPr6TfVjk0oXfdzqREIyFmkMAfZxjjiWofjhuYYieRvfHUBdRwQWqBpWov/embed?start=false&loop=false&delayms=3000" frameborder="0" width="900" height="560" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true" src="https://docs.google.com/presentation/d/e/2PACX-1vQY05J7cbuRbbFyFGQ43dhPr6TfVjk0oXfdzqREIyFmkMAfZxjjiWofjhuYYieRvfHUBdRwQWqBpWov/embed?start=false&loop=false&delayms=3000"></iframe></div>\n</div>\n</div>\n',
'id': u'How_do_I_request_access_to_a_dataset_where_I_can_only_see_metadata_'},
{'q': u'How do I contact a data contributor?',
'a': u'<p>You’ll find a ‘contact the contributor’ link below the title of the data on all the dataset pages. Please find more details <a href="https://centre.humdata.org/new-features-contact-the-contributor-and-group-message/" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'How_do_I_contact_a_data_contributor_'}],
'title': u'Getting Started'}, {'id': 'faq-Organisations', 'questions': [
{'q': u'What is an organisation?',
'a': u'<p>Organisations in HDX can be legal entities, such as WFP, or informal groups, such as the Shelter Cluster or Information Management Working Group for a specific country. Data can only be shared on HDX through an organisation. The HDX team verifies all organisations to ensure they are trusted and have relevant data to share with the HDX user community.</p>\n',
'id': u'What_is_an_organisation_'},
{'q': u'Where can I see how popular an organisation’s datasets are?',
'a': u'<p>On an organisation’s page, click on the ‘Stats’ tab to see how many visitors an organisation has received and which datasets are most popular in terms of downloads. Here’s an <a href="https://data.humdata.org/organization/stats/un-operational-satellite-appplications-programme-unosat" target="_blank" rel="noopener noreferrer">example</a>. The number of unique visitors is approximate and is based on the browser someone uses when visiting HDX. A user visiting from different browsers or from different devices will be counted separately.</p>\n<p>You can also see a timeline of how often an individual dataset has been downloaded on each dataset page. The download timeline is located on the left side of a dataset page, just beside the dataset description. Downloads for a dataset are counted as the total number of downloads of any resource in a dataset, with repeated downloads of the same resource by the same user being counted a maximum of once per day.</p>\n<p>There is a delay, usually less than one day, between when a user views a page or downloads a resource on HDX and when the activity is visible in these graphs and figures.</p>\n',
'id': u'Where_can_I_see_how_popular_an_organisation__8217_s_datasets_are_'},
{'q': u'How do I create an organisation?',
'a': u'<p>You can request an organisation through the ‘Add Data’ button. We ask you to submit the following information: an organisation name, description and link to an organisation-related website (optional). We review this information and then either accept the request or ask for more information, such as a sample dataset. Approved organisations will remain inactive and not displayed under ‘Organisations’ page until at least one dataset has been shared through HDX.</p>\n',
'id': u'How_do_I_create_an_organisation_'}, {'q': u'How do I request organisation membership?',
'a': u'<p>Registered users have an option to join an organisation during signup. You can also request membership through the organisation’s page. Please keep in mind that you need to work for the organisation in order to click on the ‘Request Membership’ button and a request will be sent to the organisation’s administrator(s). The requestor can not specify the role (i.e., admin, editor or member). Instead, the person receiving the request assigns the role. If you do not see this option displayed on organisation page, the organisation is a closed group and is not accepting new members.</p>\n',
'id': u'How_do_I_request_organisation_membership_'},
{'q': u'How does organisation membership work?',
'a': u'<p>Organisation membership includes three roles:</p>\n<ul>\n<li>Administrators can add, edit and delete datasets belonging to the organisation and accept or refuse new member requests.</li>\n<li>Editors can add, edit and delete datasets belonging to the organisation but cannot manage membership.</li>\n<li>Members can view the organisation’s private datasets, but cannot add new datasets or manage membership.</li>\n</ul>\n<p>The user who requests the creation of an organisation is assigned an administrator role. That person can invite other HDX users into their organisation and assign them one of the three roles above, or registered users on HDX can request membership from the organisation’s administrator(s).</p>\n',
'id': u'How_does_organisation_membership_work_'},
{'q': u'I’m an organisation admin. How do I add/remove members?',
'a': u'<p>Organisation admins can invite new members, remove existing members or change their roles from the ‘Members’ tab on organisation page.</p>\n<p>Registered users can also initiate a request to join your organisation during the signup process or later on from your organisation page(if you want to disable this option, read the question below<a href="https://data.humdata.org/faq#auto-faq-Organisations-I_am_an_organisation_admin__I_don_t_want_anyone_to_request_membership_and_want_to_manually_add_remove_members_-q">‘I am an organisation admin. I don’t want anyone to request membership and want to manually add/remove members.’</a>).</p>\n<p>Membership requests are sent to your email and also added as a notification on HDX. If you can confirm that the user works for your organisation (ie. by using a company directory) or is in your trusted network, then you may approve the request. If you cannot verify who the user is, you should decline the request. Please do not approve membership requests for people outside your organisation or working group. For full details on managing members, please read <a href="https://humanitarian.atlassian.net/wiki/spaces/HDXKB/pages/1254490113/For+HDX+org+admins+How+to+Manage+Organizational+Membership" target="_blank" rel="noopener noreferrer">this document</a>. Please be aware that anyone added to your organisation on HDX can view the organisation’s private datasets.</p>\n',
'id': u'I__8217_m_an_organisation_admin__How_do_I_add_remove_members_'}, {
'q': u'I am an organisation admin. I don’t want anyone to request membership and want to manually add/remove members.',
'a': u'<p>Organisation admins have the option to make the organisation an open or closed group. By default, all organisations are an open group to allow new users to request membership. If you don’t want to allow any member to join your organisation, you can turn off the ‘Allow members’ checkbox under ‘Edit organisation page’. This will make your organization a closed group with existing members. No new member will be able to send a request to join your organization on HDX. The admin(s) of your organization can still manually invite new members, remove existing members or change their roles from the ‘Members’ tab.</p>\n',
'id': u'I_am_an_organisation_admin__I_don__8217_t_want_anyone_to_request_membership_and_want_to_manually_add_remove_members_'},
{'q': u'Can I be part of more than one organisation?',
'a': u'<p>Yes. Registered users can be part of several organisations.</p>\n',
'id': u'Can_I_be_part_of_more_than_one_organisation_'},
{'q': u'I don’t see my organisation. What should I do?',
'a': u'<p>If your organisation is not listed, you can request to create one or you may want to join an existing organisation via your <a href="https://data.humdata.org/dashboard/" target="_blank" rel="noopener noreferrer">dashboard</a>. For instance, there may be a WFP organisation that was created by its staff at headquarters in Rome. You may prefer to join that one rather than creating a separate organisation for a specific location, e.g., WFP Liberia. You can see the full list of organisations by clicking <a href="https://data.humdata.org/organization">Organisations</a> in the main navigation.</p>\n<p>If you have previously created an organisation and no longer see it on the site, this is because you have not yet shared a public dataset. Once you share a dataset, your organisation will become active and visible on the site. For details on how to upload a dataset, see <a href="https://data.humdata.org/faq#auto-faq-Sharing_and_Using_Data-How_do_I_add_a_dataset_-a" target="_blank" rel="noopener noreferrer">“How do I add a dataset?”</a>.</p>\n',
'id': u'I_don__8217_t_see_my_organisation__What_should_I_do_'},
{'q': u'Can an organisation have more than one administrator?',
'a': u'<p>Yes. Each administrator is able to manage datasets and membership. If a user requests membership, the request will be sent to all organisation administrators. The decision to accept or deny a membership request will be taken by whichever administrator acts first. The other administrators are not alerted to this action. We are planning to make this process more clear in future versions of the platform, so please bear with us!</p>\n',
'id': u'Can_an_organisation_have_more_than_one_administrator_'},
{'q': u'How do I create a branded organisation page on HDX?',
'a': u'<p>HDX offers custom organisation pages to all organisations on the site. The page includes the organisation’s logo and colour palette, topline figures, space for a data visualization and the list of datasets. If you would like a custom page, send a request to <a href="mailto:hdx@un.org">hdx@un.org</a>.</p>\n',
'id': u'How_do_I_create_a_branded_organisation_page_on_HDX_'},
{'q': u'How do I use the Group Message feature?',
'a': u'<p>‘Group message’ lets members of an organisation send messages to all other members of their organisation. Please find more details <a href="https://centre.humdata.org/new-features-contact-the-contributor-and-group-message/" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'How_do_I_use_the_Group_Message_feature_'},
{'q': u'I changed my job – what happens to my account?',
'a': u'<p>You can keep your account. On the organisation page that you’re a part of, click the link to ‘Leave this organisation’. If you want to change the e-mail address associated with your account, click on your username on the upper-right corner of any HDX page and then select ‘User Settings’. From there, you can update your profile.</p>\n',
'id': u'I_changed_my_job___8211__what_happens_to_my_account_'}], 'title': u'Organisations'},
{'id': 'faq-Sharing_and_Using_Data', 'questions': [{'q': u'How do I share data on HDX?',
'a': u'<p>Data on HDX is shared through organisations. You need to be a member of an organisation (with appropriate privileges) before you can contribute data. If you have data to share, you can either request to create a new organisation or ask to join an existing one. (See the <a href="https://data.humdata.org/faq#body-faq-Organisations">Organisations section</a> above.)</p>\n<p>There are three ways to share data on HDX:</p>\n<p>Public – Data shared publicly is accessible to all users of the HDX platform, whether or not they are registered. All public data must be shared under an appropriate license. Select the ‘public’ setting in the metadata field when uploading data.</p>\n<p>Private – Organisations can share data privately with their members. The administrator of each organisation controls who can become a member. The default visibility is set to ‘private’ when uploading new data. Once shared, private datasets are only listed on your organisation page (make sure you are logged in to see them). They will not be included in search results or the <a href="https://data.humdata.org/dataset">data list</a> page.To make data accessible to HDX users, the contributing organisation needs to change the visibility to public.</p>\n<p>By Request – Organisations can share the metadata of a dataset and grant access to the underlying data when requested by a registered user. See how to share and request metadata only datasets through <a id="faq-google-embed-link-hdx-connect-2" class="link faq-google-embed-marker"></a>these walkthrough slides.</p>\n<p>Learn more about how HDX handles <a href="https://data.humdata.org/faq#body-faq-Sensitive_Data">sensitive data below</a>.</p>\n',
'id': u'How_do_I_share_data_on_HDX_'}, {
'q': u'What is the difference between a dataset and a resource?',
'a': u'<p>A dataset is a collection of related data resources. A resource is an individual file within a dataset. When sharing data, you first create a dataset and then you can add one or more resources to it. A resource can either be a file uploaded to HDX (such as a CSV or XLS file) or a link to another website with a downloadable file. A resource, such as a readme file, could also contain documentation that helps users to understand the dataset.</p>\n',
'id': u'What_is_the_difference_between_a_dataset_and_a_resource_'},
{'q': u'How do I add a dataset?',
'a': u'<p>Click on the ‘Add Data’ button from any page on HDX. You will be required to login and associate yourself with an organisation. <a id="faq-google-embed-link-1" class="link faq-google-embed-marker"></a>These slides provide a walkthrough of how to add a dataset. General information about all the metadata options in HDX is available in our <a href="https://centre.humdata.org/providing-metadata-for-your-datasets-on-hdx/" target="_blank" rel="noopener noreferrer">Guide to Metadata</a>.</p>\n',
'id': u'How_do_I_add_a_dataset_'},
{'q': u'Can I just share metadata?',
'a': u'<p>Let others know your data is available by publishing your metadata without uploading any file(s) via HDX Connect. Once users request access, you decide what to share.</p>\n<p>This is a good option if:</p>\n<ul>\n<li>You are in the process of collecting data but you are not finished.</li>\n<li>Your data contains personally identifiable information.</li>\n<li>You need to restrict access to your data.</li>\n</ul>\n<p>Learn more about HDX Connect through <a id="faq-google-embed-link-hdx-connect-3" class="link faq-google-embed-marker"></a>these walkthrough slides. <a href="https://centre.humdata.org/a-new-call-to-action-sharing-the-existence-of-data/" target="_blank" rel="noopener noreferrer">Read this blog</a> to understand the research and rationale behind HDX Connect.</p>\n',
'id': u'Can_I_just_share_metadata_'}, {
'q': u'How can I add links and formatting to my dataset page?',
'a': u'<p>There are 4 metadata fields that accept <a href="https://daringfireball.net/projects/markdown/syntax" target="_blank" rel="noopener noreferrer">markdown</a> which provides some simple formatting commands.</p>\n<p>The “description”, “methodology:other”, and “caveats/comments” fields, as well as the description field for each resource attached to the dataset, all accept markdown formatting. The most useful markdown commands are outlined here:</p>\n<p>Links can be entered like this:</p>\n<pre>[the linked text](https://data.humdata.org)</pre>\n<p>and will be rendered like this: <a href="https://data.humdata.org/">the linked text</a><br />\n<i>Italics</i> can be indicated by surrounding text with single asterisks, like this:</p>\n<pre>*A bit of italics text*</pre>\n<p><b>Bold</b> can be indicated by surrounding text with double asterisks, like this:</p>\n<pre>**A bit of bold text**</pre>\n<p>Bulleted lists must start with and be followed by a blank line. Each item in the list starts with an asterisk and a space:</p>\n<p>* item 1<br />\n* item 2<br />\n* etc.</p>\n<p>Numbered lists must also start with and be followed by a blank line. Each item starts with the number 1, a period, and a space:</p>\n<p>1. First item<br />\n1. Second item. Note that the lines always start with a one followed by a period and space.<br />\n1. 3rd item<br />\n1. etc.</p>\n',
'id': u'How_can_I_add_links_and_formatting_to_my_dataset_page_'},
{'q': u'How do I edit a dataset?',
'a': u'<p>You can only edit a dataset if you are an administrator or editor of your organisation. If you have the appropriate role, on the dataset page you will find an ‘Edit’ button just below the dataset title on the right. This will allow you to edit the dataset metadata and the resources. <a id="faq-google-embed-link-2" class="link faq-google-embed-marker"></a>These slides provide a walk-through of how to edit a dataset.</p>\n',
'id': u'How_do_I_edit_a_dataset_'}, {
'q': u'How can I add graphs and key figures to my dataset?',
'a': u'<p>If your data uses the <a href="http://hxlstandard.org/" target="_blank" rel="noopener noreferrer">HXL standard</a>, then HDX can automatically create customizable graphs and key figures to help you highlight the most important aspects of your dataset. We call these ‘Quick Charts’. For a Quick Chart to be generated, your dataset needs to be public and contain a CSV or XLSX resource with HXL tags. HXL is easy! Check out the <a href="http://hxlstandard.org/" target="_blank" rel="noopener noreferrer">30-second tutorial</a>.</p>\n<p>The resource can be stored on HDX or as a remote resource at another URL. Quick Charts will be generated from the first resource with HXL tags in the list of a dataset’s resources. The system will try to generate up to three charts based on the HXL tags, and these can be changed to best tell the story in your data. You can edit each Quick Chart’s title, axis labels, and description. Don’t forget to save the changes so they become the default view that users see when viewing your dataset. Here’s a good <a href="https://data.humdata.org/dataset/madagascar-cyclone-enawo-needs-assessment-data-5-april" target="_blank" rel="noopener noreferrer">example</a> to get you started.</p>\n<p>Learn more about HXL and HDX Tools in the section below.</p>\n',
'id': u'How_can_I_add_graphs_and_key_figures_to_my_dataset_'},
{
'q': u'How can I add data visualizations to my dataset?',
'a': u'<p>Organization admins and editors can add data visualizations to dataset pages to let users explore your data. The data visuals can be made using Tableau, Power BI or whatever software you prefer. The visuals will appear in the “Interactive Data” section at the top of the page.</p>\n<p>Learn how to do this by taking a quick look at <a class="link faq-google-embed-marker" id="faq-google-embed-link-3">these slides</a>.</p>\n<div class="modal presentation-modal" id="faq-google-embed-3" tabindex="-1" role="dialog">\n<div class="modal-dialog" role="document"><button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">\xd7</span></button></p>\n<div class="modal-content"><iframe load-src="https://docs.google.com/presentation/d/e/2PACX-1vS1A1i-fg5PucF0hIRWDc_4_IUC_TXomWho8POCefSYuNHl9wN1SvS3_EM4jOsiWY4XvzZZDzquisgk/embed?start=false&loop=false&delayms=3000" frameborder="0" width="900" height="560" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe></div>\n</div>\n</div>\n',
'id': u'How_can_I_add_data_visualizations_to_my_dataset_'},
{
'q': u'How can I check errors in my HXL-tagged spreadsheet?',
'a': u'<p>Data Check automatically detects and highlights common humanitarian data errors including validation against <a href="https://public.tableau.com/profile/ocha.field.information.services#!/vizhome/COD-Status_1/DetailedEvaluation" target="_blank" rel="noopener noreferrer">CODs</a> and other vocabularies from your <a href="https://tools.humdata.org/examples/hxl/" target="_blank" rel="noopener noreferrer">HXL-tagged spreadsheet</a>. You can access Data Check from:</p>\n<ol>\n<li>HDX via dataset pages (The “Validate with Data Check” option will appear under “More” button under HXL-tagged resources)</li>\n<li><a href="https://tools.humdata.org/wizard/#datacheck" target="_blank" rel="noopener noreferrer">HDX Tools</a>, for datasets that exist outside of HDX. For this option, you should not use Data Check to process personal or otherwise <a href="https://data.humdata.org/faq#body-faq-Sensitive_Data" target="_blank" rel="noopener noreferrer">sensitive data</a>.</li>\n</ol>\n<p>Data uploaded to HDX Tools is not retained within the HDX infrastructure, while data downloaded by HDX Tools from public URLs is cached only as long as necessary for processing.</p>\n<p>You can access both versions of Data Check without being a registered user of HDX. For instructions on how to use Data Check, review <a id="faq-google-embed-link-data-check" class="link faq-google-embed-marker"></a>these walkthrough slides.</p>\n<p>Data Check uses a generic schema that detects many kinds of common errors like possible spelling mistakes or atypical numeric values, but in some cases, an organisation will want to validate against its own more-specific rules. In that case, you can write your own, custom HXL schema and validate using the <a href="https://proxy.hxlstandard.org/" target="_blank" rel="noopener noreferrer">HXL Proxy</a> (Data Check’s backend engine) directly. 
Information is available on these pages in the HXL Proxy wiki: <a href="https://github.com/HXLStandard/hxl-proxy/wiki/HXL-schemas" target="_blank" rel="noopener noreferrer">HXL schemas</a>, <a href="https://github.com/HXLStandard/hxl-proxy/wiki/Validation-page" target="_blank" rel="noopener noreferrer">Validation page</a>, and <a href="https://github.com/HXLStandard/hxl-proxy/wiki/Validation-service" target="_blank" rel="noopener noreferrer">Validation service</a>.</p>\n',
'id': u'How_can_I_check_errors_in_my_HXL_tagged_spreadsheet_'},
{
'q': u'What are the recommended data formats?',
'a': u'<p>We define data as information that common software can read and analyse. We encourage contributions in any common data format. HDX has built-in preview support for tabular data in CSV and Microsoft Excel (xls only) formats, and for geographic data in zipped shapefile, kml and geojson formats. If multiple formats are available, each can be added as a resource to the dataset, or if you only wish to add one format, then for tabular data, csv is preferable and for geographic data, zipped shapefile is preferred.</p>\n<p>A PDF file is not data. If you have a data visualization in PDF format, you can add it as a showcase item on the dataset page. If you wish to share documents, graphics, or other types of humanitarian information that are not related to the data you are sharing, please visit our companion sites <a href="http://reliefweb.int/" target="_blank" rel="noopener noreferrer">ReliefWeb</a> and <a href="http://www.humanitarianresponse.info/" target="_blank" rel="noopener noreferrer">HumanitarianResponse</a>. A resource, such as a readme file, could also contain documentation that helps users to understand the dataset.</p>\n',
'id': u'What_are_the_recommended_data_formats_'},
{
'q': u'What are the best practices for managing resources in a dataset?',
'a': u'<p>Resources can be either different formats of the same data (such as XLSX and CSV) or different releases of the same data (such as March, April, and May needs assessments). Always put the resource with the most-recent or most-important information first, because the HDX system will by default use the first resource to create visualisations such as Quick Charts or geographic preview (this default can be overridden in the dataset edit page). </p>\n<p>If you have data that is substantially different, like a different type of assessment or data about a different province, we recommend creating a separate dataset.</p>\n',
'id': u'What_are_the_best_practices_for_managing_resources_in_a_dataset_'},
{
'q': u'What are the recommended best practices for naming datasets and resources?',
'a': u'<p>For datasets: the keywords in your dataset title are matched to the search terms users enter when looking for data in HDX. Avoid using abbreviations in the title that users may not be familiar with. Also avoid using words such as current, latest or previous when referring to the time period (e.g., latest 3W), as these terms become misleading as the dataset ages. The following is a good example of a dataset title: ‘Who is Doing What Where in Afghanistan in Dec 2016’.</p>\n<p>For resources: by default, the resource name is the name of the uploaded file. However, you can change this if needed to make it more clear to users. </p>\n<p>For zipped shapefiles: we recommend the filename be name_of_the_file.shp.zip. However, the system does not require this construction.</p>\n',
'id': u'What_are_the_recommended_best_practices_for_naming_datasets_and_resources_'},
{
'q': u'Is there a limit on file size for the data that I upload?',
'a': u'<p>If your resource is simply a link to a file hosted elsewhere, there is no size limit. If you are uploading a file onto HDX, the file size is limited to 300MB. If you have larger files that you want to share, e-mail us at <a href="mailto:hdx@un.org">hdx@un.org</a>.</p>\n',
'id': u'Is_there_a_limit_on_file_size_for_the_data_that_I_upload_'},
{'q': u'Can I share data hosted elsewhere?',
'a': u'<p>Yes. HDX can host the data for you, but it works equally well with a link to data hosted somewhere else on the web. For example, if your organisation already has a system or API that produces data for download, you can simply include a link to that data as a resource in your dataset, and the version on HDX will automatically stay up to date.</p>\n',
'id': u'Can_I_share_data_hosted_elsewhere_'},
{
'q': u'Can I drag&drop files from my computer?',
'a': u'<p>Yes. HDX allows you to drag and drop files from your computer. First, you need to click on the ‘Add Data’ link and then select files from your computer. Drop the files in the designated area. A new dataset form will appear with some fields already pre-filled.</p>\n',
'id': u'Can_I_drag__038_drop_files_from_my_computer_'},
{
'q': u'How can I share data from my Google Drive?',
'a': u'<p>First you need to be sure that the Google Drive file or files are publicly visible or accessible to anyone who has the link. For instructions on how to change, follow <a id="faq-google-embed-link-4" class="link faq-google-embed-marker"></a>this walkthrough.</p>\n<p>You can click on ‘Add Data’ and choose the option to import files from ‘Google Drive’. A ‘Google Drive’ pop-up will show and help you choose the file/files from your account. The files will not be copied into HDX. Instead, the HDX ‘Download’ button will always direct users to the live version of the Google document.</p>\n<p>The HDX Resource Picker for Google Drive will only have access to your list of Google Drive files when you are choosing Google Drive resources through the HDX interface. You can revoke this permission at any time in <a href="https://security.google.com/settings/security/permissions?pli=1" target="_blank" rel="noopener noreferrer">Google Drive’s App Manager</a>. However, this will not change the visibility of the Google Drive resources already created on HDX.</p>\n',
'id': u'How_can_I_share_data_from_my_Google_Drive_'},
{
'q': u'How do I share a live Google Sheet?',
'a': u'<p>To include a link to a Google Sheet, you must first set the sheet’s sharing permissions so that it is either publicly visible or at least accessible to anyone who has the link. We recommend creating at least two separate resources for each Google Sheet: 1) a link to the sheet itself in the regular Google Drive interface; and 2) a direct-download link to an Excel or CSV version of the sheet, so that users can preview it in HDX. The version in HDX will update automatically as you make changes to the original Google Sheet.</p>\n<p>To obtain the direct download link, select “Publish to the web…” from the “File” menu in Google Sheets, then in the dialog box that opens, under the ‘Link’ tab select your preferred file format (such as Excel or CSV), confirm, and Google Sheets will provide you the link. (Note that this process is not necessary simply for working with HXL-aware tools like Quick Charts, because they can open data directly from the regular Google Sheets link.)</p>\n',
'id': u'How_do_I_share_a_live_Google_Sheet_'},
{
'q': u'How do I share a live spreadsheet from Dropbox?',
'a': u'<p>HDX can live-link to and preview files stored in any Dropbox folder and even preview them if they are in CSV or XLS format. You must login to Dropbox via the web application and navigate to the folder containing the spreadsheet (or other file) that you want to share. Select the file and choose ‘Share link’, following the <a href="https://www.dropbox.com/en/help/167" target="_blank" rel="noopener noreferrer">instructions in the Dropbox help centre</a>. You will then receive a special link that allows anyone to download the file.</p>\n<p>Add that link as a resource to your HDX dataset. When you receive a Dropbox link, it normally looks something like this:<br />\nhttps://www.dropbox.com/etc/etc/your_file_name.csv?dl=0</p>\n<p>For HDX to be able to process and preview your file, you’ll need to change the last ‘0’ to a ‘1’ so that it looks like this:<br />\nhttps://www.dropbox.com/etc/etc/your_file_name.csv?dl=1</p>\n<p>The HDX resource will automatically track any changes you save to the Dropbox file on your own computer. Be careful not to move or rename the file after you share it.</p>\n',
'id': u'How_do_I_share_a_live_spreadsheet_from_Dropbox_'},
{
'q': u'If the dataset date on HDX did not change automatically after updating my remote resource, how do I change it to the correct date?',
'a': u'<p>The data that users download from HDX will always reflect updates made to the remote resource (such as a file on Dropbox or Google Drive). However, the metadata and activity stream will not automatically indicate the updated date of the data. This has to be done manually in HDX by the dataset owner. We are working to improve this functionality, so please bear with us!</p>\n',
'id': u'If_the_dataset_date_on_HDX_did_not_change_automatically_after_updating_my_remote_resource__how_do_I_change_it_to_the_correct_date_'}],
'title': u'Sharing and Using Data'}, {'id': 'faq-Geodata', 'questions': [
{'q': u'How can I generate a map with my geographic data?',
'a': u'<p>The HDX system will attempt to create a map, or geographic preview, from geodata formats that it recognizes. For a geographic preview to be generated, your data needs to be in either a zipped shapefile, kml or geojson format. Ensure that the ‘File type’ field for the resource also has one of the above formats. Pro tip: HDX will automatically add the correct format if the file extension is ‘.shp.zip’, ‘.kml’, or ‘.geojson’. Here are examples of geodata <a href="https://data.humdata.org/dataset/somalia-schools" target="_blank" rel="noopener noreferrer">points</a>, <a href="https://data.humdata.org/dataset/nigeria-water-courses-cod" target="_blank" rel="noopener noreferrer">lines</a>, and <a href="https://data.humdata.org/dataset/health-districts" target="_blank" rel="noopener noreferrer">polygons</a>showing the preview feature.</p>\n<p>The preview feature will continue to work when there are multiple geodata resources in a single dataset (i.e., one HDX dataset with many resources attached). The layers icon in the top-right corner of the map enables users to switch between geodata layers. Here is an <a href="https://data.humdata.org/dataset/nigeria-water-courses-cod" target="_blank" rel="noopener noreferrer">example</a>.</p>\n',
'id': u'How_can_I_generate_a_map_with_my_geographic_data_'},
{'q': u'Why is the geodata preview only working for one layer in my resource?',
'a': u'<p>To generate a map preview, a dataset can have multiple resources but each resource can only include one layer within it. Resources with multiple layers (e.g., multiple shapefiles in a single zip file) are not supported. In this case, the system will only create a preview of the first layer in the resource, however all the layers will still be available in the downloaded file. If you would like all of the layers to display, you need to create a separate resource for each layer.</p>\n',
'id': u'Why_is_the_geodata_preview_only_working_for_one_layer_in_my_resource_'}],
'title': u'Geodata'}, {'id': 'faq-Search',
'questions': [{
'q': u'How does search work on HDX?',
'a': u'<p>Searching for datasets on HDX is done in two ways: by searching for terms that you type into the search bar found at the top of almost every page on HDX, and by filtering a list of search results.</p>\n<p>Entering a search term causes HDX to look for matching terms in the titles, descriptions, locations and tags of a dataset. The resulting list of items can be further refined using the filter options on the left side of the search result. You can filter by location, tag, organisation, license and format as well as filtering for some special classes of datasets (like <a href="https://data.humdata.org/search?ext_hxl=1" target="_blank" rel="noopener noreferrer">datasets with HXL tags</a> or <a href="https://data.humdata.org/search?ext_quickcharts=1" target="_blank" rel="noopener noreferrer">datasets with Quick Charts</a>) in the ‘featured’ filters.</p>\n',
'id': u'How_does_search_work_on_HDX_'},
{
'q': u'How do I find the Common Operational Datasets in HDX?',
'a': u'<p>In 2015, HDX migrated the Common Operational Datasets (CODs) from the COD Registry on HumanitarianResponse.info to HDX. Each of these datasets has a ‘cod’ tag. To limit search results to only CODs, use the ‘CODs’ filter in the filter panel on the left side of the dataset list.You can also find all CODs datasets <a href="https://data.humdata.org/cod" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'How_do_I_find_the_Common_Operational_Datasets_in_HDX_'},
{
'q': u'How do I find a set of high quality datasets for a specific country? (How do I use the Data Grid?)',
'a': u'<p>The Data Grid is a prototype feature to help our users find the most critical and useful data. The Data Grid provides a quick way to find datasets that meet or partially meet the criteria for a set of core data categories, like internally displaced persons and refugee numbers, conflict events, transportation status, food prices, administrative divisions, health facilities, and baseline population. These categories of core data, determined from research with our users, may be customized to meet the needs of specific countries and the evolving data needs of humanitarian response. The small square to the left of the dataset name indicates if the dataset fully (solid blue) or partially (hashed blue and white) meets the criteria for the Data Grid category in which it appears. In the latter case, hovering on a dataset name displays some comments about the limitations of the dataset. Learn more in our <a href="https://centre.humdata.org/introducing-the-hdx-data-grid-a-way-to-find-and-fill-data-gaps/" target="_blank" rel="noopener noreferrer">blog post</a> about it.</p>\n<p>Data Grid is not available for all countries. Here is an <a href="https://data.humdata.org/group/som" target="_blank" rel="noopener noreferrer">overview</a>.</p>\n',
'id': u'How_do_I_find_a_set_of_high_quality_datasets_for_a_specific_country___How_do_I_use_the_Data_Grid__'},
{
'q': u'I shared a private dataset but cannot find it.',
'a': u'<p>Private datasets are only listed on your organisation page and will not be included in search results or the <a href="https://data.humdata.org/dataset" target="_blank" rel="noopener noreferrer">data list</a> page. Please make sure you are logged in to see them.</p>\n',
'id': u'I_shared_a_private_dataset_but_cannot_find_it_'}],
'title': u'Search'},
{'id': 'faq-Metadata_and_Data_Quality', 'questions': [
{'q': u'What metadata do I need to include when sharing data?',
'a': u'<p>All data on HDX must include a minimum set of metadata fields. You can read our <a href="https://centre.humdata.org/providing-metadata-for-your-datasets-on-hdx/" target="_blank" rel="noopener noreferrer">Guide to Metadata</a> to learn more. We encourage data contributors to include as much metadata as possible to make their data easier to understand and use for analysis.</p>\n',
'id': u'What_metadata_do_I_need_to_include_when_sharing_data_'},
{'q': u'How does HDX ensure data quality?',
'a': u'<p>Data quality is important to us, so we manually review every new dataset for relevance, timeliness, interpretability and comparability. We contact data contributors if we have any concerns or suggestions for improvement. You can learn more about our definition of the dimensions of data quality and our quality-assurance processes <a href="https://centre.humdata.org/wp-content/uploads/HDX_Quality_Assurance_Framework_Draft.pdf" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'How_does_HDX_ensure_data_quality_'},
{'q': u'What should I put for expected update frequency?',
'a': u'<p>This metadata field indicates how often you expect the data in your dataset to be updated. It should reflect the frequency with which you believe your data will change. This can be different from how often you check your data. It includes values like “Every day” and “Every year” as well as the following:</p>\n<p> </p>\n<ul>\n<li>Live – for datasets where updates are continuous and ongoing</li>\n<li>As needed – for datasets with an unpredictable, widely varying update frequency</li>\n<li>Never – for datasets with data that will never be changed</li>\n</ul>\n<p>We recommend you choose the nearest less frequent regular value instead of “As needed” or “Never”. This helps with our monitoring of data freshness. For example, if your data will be updated every 1-6 days, pick “Every week”, or if every 2 to 9 weeks, choose “Every three months”.</p>\n',
'id': u'What_should_I_put_for_expected_update_frequency_'},
{'q': u'What does the green leaf symbol mean?',
'a': u'<p>The green leaf symbol indicates that a dataset is up to date – that there has been an update to the data in the dataset (not the dataset metadata) within the expected update frequency plus some leeway. For more information on the expected update frequency metadata field and the number of days a dataset qualifies as being fresh, see <a href="https://humanitarian.atlassian.net/wiki/spaces/HDX/pages/442826919/Expected+Update+Frequency+vs+Freshness+Status" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'What_does_the_green_leaf_symbol_mean_'},
{'q': u'Does HDX make any changes to my dataset?',
'a': u'<p>No. HDX will never make changes to the data that has been shared. We do add tags, or make changes to dataset titles to help make your data more discoverable by HDX users. We may also add a data visualization for the data in the dataset showcase. A list of changes appears in the activity stream on the left-hand column of the dataset page.</p>\n',
'id': u'Does_HDX_make_any_changes_to_my_dataset_'},
{'q': u'What does it mean for a dataset to be ‘under review’?',
'a': u'<p>The HDX team manually reviews every dataset uploaded to the platform as part of a standard quality assurance (QA) process. This process exists to ensure compliance with the <a href="https://data.humdata.org/faqs/terms">HDX Terms of Service</a>, which prohibit the sharing of personal data. It also serves as a means to check different quality criteria, including the completeness of metadata, the relevance of the data to humanitarian action, and the integrity of the data file(s).</p>\n<p>If an issue is found, the resource(s) requiring additional review will be temporarily unavailable for download and marked as ‘under review’ in the dataset page on the public HDX interface.</p>\n',
'id': u'What_does_it_mean_for_a_dataset_to_be___8216_under_review__8217__'}],
'title': u'Metadata and Data Quality'}, {'id': 'faq-Resources_for_Developers', 'questions': [
{'q': u'How do I access the HDX API?',
'a': u'<p>Please see our <a href="https://data.humdata.org/documentation" target="_blank" rel="noopener noreferrer">Resources for Developers</a> page for more information.</p>\n',
'id': u'How_do_I_access_the_HDX_API_'}, {'q': u'Where can I read about coding with HDX?',
'a': u'<p>Please see our <a href="https://data.humdata.org/documentation" target="_blank" rel="noopener noreferrer">Resources for Developers</a> page for more information.</p>\n',
'id': u'Where_can_I_read_about_coding_with_HDX_'}],
'title': u'Resources for Developers'},
{'id': 'faq-HXL_and_HDX_Tools', 'questions': [
{'q': u'What is the Humanitarian Exchange Language?',
'a': u'<p>The Humanitarian Exchange Language (HXL) is a simple standard for messy data. It is based on spreadsheet formats such as CSV or Excel. The standard works by adding hashtags with semantic information in the row between the column header and data allow software to validate, clean, merge and analyse data more easily. To learn more about HXL and who’s currently using it, visit the <a href="http://hxlstandard.org/" target="_blank" rel="noopener noreferrer">HXL standard site</a>.</p>\n<p>HDX is currently adding features to visualise HXL-tagged data. To learn more about HXL and who’s currently using it, visit the <a href="http://hxlstandard.org/" target="_blank" rel="noopener noreferrer">HXL standard site</a>.</p>\n',
'id': u'What_is_the_Humanitarian_Exchange_Language_'}, {'q': u'What are HDX Tools?',
'a': u'<p>HDX Tools include a number of HXL-enabled support processes that help you do more with your data, more quickly. The tools include:</p>\n<ul>\n<li><a href="https://tools.humdata.org/wizard/#quickcharts" target="_blank" rel="noopener noreferrer">Quick Charts</a> – Automatically generate embeddable, live data charts, graphs and key figures from your spreadsheet.</li>\n<li><a href="https://tools.humdata.org/examples/hxl/" target="_blank" rel="noopener noreferrer">HXL Tag Assist</a> – See HXL hashtags in action and add them to your own spreadsheet.</li>\n<li><a href="https://tools.humdata.org/wizard/#datacheck" target="_blank" rel="noopener noreferrer">Data Check</a> – Data cleaning for humanitarian data, automatically detects and highlights common errors including validation against CODs and other vocabularies.</li>\n</ul>\n<p>You can find all HDX Tools through <a href="https://tools.humdata.org/" target="_blank" rel="noopener noreferrer">tools.humdata.org</a>. The tools will work with data that is stored on HDX, the cloud or local machines. The only requirement is that the data includes HXL hashtags.</p>\n',
'id': u'What_are_HDX_Tools_'},
{'q': u'How can I add Quick Charts to my dataset?',
'a': u'<p>If your data uses HXL hashtags, then the Quick Charts tool can automatically create customizable graphs and key figures to help you highlight the most important aspects of your dataset. Quick Charts require the following:</p>\n<ol>\n<li>The first resource in your dataset (stored on HDX or remotely) must have HXL hashtags.</li>\n<li>That dataset must have the HDX category tag ‘HXL’ (not to be confused with the actual HXL hashtags).</li>\n</ol>\n<p>For more details you can view <a class="link faq-google-embed-marker" id="faq-google-embed-link-5">these walkthrough slides</a>.</p>\n<div class="modal presentation-modal" id="faq-google-embed-5" tabindex="-1" role="dialog">\n<div class="modal-dialog" role="document"><button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">\xd7</span></button></p>\n<div class="modal-content"><iframe load-src="https://docs.google.com/presentation/d/e/2PACX-1vR-gSY38muZE9SA27NjAcueKoobhKi_Dc3jN4BIDPTp7FJjOCiWIkhPU4ZkPyHvfR0pBdNpfswmKZ4p/embed?start=false&loop=false&delayms=3000" frameborder="0" width="900" height="560" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe></div>\n</div>\n</div>\n',
'id': u'How_can_I_add_Quick_Charts_to_my_dataset_'},
{'q': u'How can I add Quick Charts to my own web sites or blogs?',
'a': u'<p>Every Quick Chart on HDX includes a small link icon at the bottom, that will give you HTML markup to copy into a web page or blog to add the chart. The chart will be live, and will update whenever the source data updates. If your data is not on HDX, you can also generate a Quick Chart using the standalone version of the service, available on <a href="https://tools.humdata.org/" target="_blank" rel="noopener noreferrer">https://tools.humdata.org</a>.</p>\n',
'id': u'How_can_I_add_Quick_Charts_to_my_own_web_sites_or_blogs_'},
{'q': u'Why isn’t Quick Charts recognizing the HXL hashtags in my dataset?',
'a': u'<p>At this stage, Quick Charts are working with a limited number of HXL hashtags, but we are constantly expanding the list. The current set of JSON-encoded Quick Charts recipes is available on <a href="https://github.com/OCHA-DAP/hxl-recipes/" target="_blank" rel="noopener noreferrer">GitHub</a>.</p>\n',
'id': u'Why_isn__8217_t_Quick_Charts_recognizing_the_HXL_hashtags_in_my_dataset_'},
{'q': u'How does HXL Tag Assist work?',
'a': u'<p>The <a href="https://tools.humdata.org/examples/hxl/" target="_blank" rel="noopener noreferrer">HXL Tag Assist tool</a> will show you different HXL hashtags in datasets that organisations have already uploaded to HDX. You can find a quick (and portable) list of the core HXL hashtags on the <a href="http://hxlstandard.org/standard/postcards/" target="_blank" rel="noopener noreferrer">HXL Postcard</a>. The detailed list of HXL hashtags and attributes is available in the <a href="http://hxlstandard.org/standard/1_1final/dictionary/" target="_blank" rel="noopener noreferrer">HXL hashtag dictionary</a>. Finally, an up-to-date machine-readable version of the hashtag dictionary is <a href="https://data.humdata.org/dataset/hxl-core-schemas/" target="_blank" rel="noopener noreferrer">available on HDX.</a></p>\n',
'id': u'How_does_HXL_Tag_Assist_work_'}, {'q': u'How does Data Check work?',
'a': u'<p>You can use <a href="https://centre.humdata.org/clean-your-data-with-data-check/" target="_blank" rel="noopener noreferrer">Data Check</a> to compare your HXL-tagged dataset against a collection of validation rules that you can configure. Data Check identifies the errors in your data such as spelling mistakes, incorrect geographical codes, extra whitespace, numerical outliers, and incorrect data types.</p>\n<p>For more details you can view <a class="link faq-google-embed-marker" id="faq-google-embed-link-6">these walkthrough slides</a>.</p>\n<div class="modal presentation-modal" id="faq-google-embed-6" tabindex="-1" role="dialog">\n<div class="modal-dialog" role="document"><button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">\xd7</span></button></p>\n<div class="modal-content"><iframe load-src="https://docs.google.com/presentation/d/e/2PACX-1vQmqK3qgUchHmZ5YQ8M-ktJ0UccIDeBeuqAqjIAbZ2HIXfmZ5OdqFRb7AM1YJI6N1vmimBAbOVa7QMe/embed?start=false&loop=false&delayms=3000" frameborder="0" width="900" height="560" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe></div>\n</div>\n</div>\n',
'id': u'How_does_Data_Check_work_'}],
'title': u'HXL and HDX Tools'}, {'id': 'faq-Sensitive_Data', 'questions': [
{'q': u'How does HDX define sensitive data?',
'a': u'<p>For the purpose of sharing data through HDX, we have <a href="https://centre.humdata.org/three-ways-to-share-data-on-hdx/" target="_blank" rel="noopener noreferrer">developed the following categories</a> to communicate data sensitivity:</p>\n<ol>\n<li>Non-Sensitive – This includes datasets containing country statistics, roadmaps, weather data and other data with no foreseeable risk associated with sharing.</li>\n<li>Uncertain Sensitivity – For this data, sensitivity depends on a number of factors, including other datasets collected in the same context, what technology is or could be used to extract insights, and the local context from which the data is collected or which will be impacted by use of the data.</li>\n<li>Sensitive – This includes any dataset containing personal data of affected populations or aid workers. Datasets containing demographically identifiable information (DII) or community identifiable information (CII) that can put affected populations or aid workers at risk, are also considered sensitive data. Depending on context, satellite imagery can also fall into this third category of sensitivity.</li>\n</ol>\n',
'id': u'How_does_HDX_define_sensitive_data_'}, {'q': u'Can I share personal data through HDX?',
'a': u'<p>HDX does not allow personal data or personally identifiable information (PII) to be shared in public or private datasets. All data shared through the platform must be sufficiently aggregated or anonymized so as to prevent identification of people or harm to affected people and the humanitarian community. We do allow private datasets to include contact information of aid workers if they have provided consentto the sharing of their data within the organisation. Read more in our <a href="https://data.humdata.org/faqs/terms" target="_blank" rel="noopener noreferrer">Terms of Service</a>.</p>\n',
'id': u'Can_I_share_personal_data_through_HDX_'},
{'q': u'How can I assess and manage the sensitivity of data before sharing on HDX?',
'a': u'<p>The <a href="https://centre.humdata.org/wp-content/uploads/2019/03/OCHA-DR-Guidelines-working-draft-032019.pdf" target="_blank" rel="noopener noreferrer">Working Draft of the OCHA Data Responsibility Guidelines</a> (‘the Guidelines’) helps staff better assess and manage the sensitivity of the data they handle in different crisis contexts. We recommend that HDX users familiarize themselves with the Guidelines.</p>\n<p>Different data can have different levels of sensitivity depending on the context. For example, locations of medical facilities in conflict settings can expose patients and staff to risk of attacks, whereas the same facility location data would likely not be considered sensitive in a natural disaster setting.</p>\n<p>Recognizing this complexity, the Guidelines include an <a href="https://centre.humdata.org/wp-content/uploads/2019/03/image1-768x596.png" target="_blank" rel="noopener noreferrer">Information and Data Sensitivity Classification model</a> to help colleagues assess and manage sensitivity in a standardized way.</p>\n<p>For microdata (survey and needs-assessment data), you can manage the sensitivity level by applying a Statistical Disclosure Control (SDC) process. There are several tools available online to do SDC – we use <a href="http://surveys.worldbank.org/sdcmicro" target="_blank" rel="noopener noreferrer">sdcMicro</a>.</p>\n<p>The Centre has developed a <a href="https://centre.humdata.org/guidance-note-statistical-disclosure-control/" target="_blank" rel="noopener noreferrer">Guidance Note on Statistical Disclosure Control</a> that outlines the steps involved in the SDC process, potential applications for its use, case studies and key actions for humanitarian data practitioners to take when managing sensitive microdata.</p>\n',
'id': u'How_can_I_assess_and_manage_the_sensitivity_of_data_before_sharing_on_HDX_'},
{'q': u'How does HDX assess the sensitivity of data?',
'a': u'<p>HDX endeavors not to allow publicly shared data that includes community identifiable information (CII) or demographically identifiable information (DII) that may put affected people at risk. However, this type of data is more challenging to identify within datasets during our quality assurance process without deeper analysis. In cases where we suspect that survey data may have a high risk of re-identification of affected people, we run an internal statistical disclosure control process using sdcMicro. Data is made private while we run this process. If the risk level is found to be too high for public sharing on HDX given the particular context to which the data relates, HDX will notify the data contributor to determine a course of action.</p>\n',
'id': u'How_does_HDX_assess_the_sensitivity_of_data_'}], 'title': u'Sensitive Data'},
{'id': 'faq-Data_Licenses', 'questions': [{'q': u'What data licences does HDX offer?',
'a': u'<p>HDX promotes the use of licenses developed by the <a href="http://creativecommons.org/" target="_blank" rel="noopener noreferrer">Creative Commons Foundation</a> and the <a href="http://opendatacommons.org/" target="_blank" rel="noopener noreferrer">Open Data Foundation</a>. The main difference between the two classes of licences is that the Creative Commons licences were developed for sharing creative works in general, while the Open Data Commons licences were developed more specifically for sharing databases. See the full list of licences <a href="https://data.humdata.org/faqs/licenses" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'What_data_licences_does_HDX_offer_'}],
'title': u'Data Licenses'}, {'id': 'faq-Contact', 'questions': [
{'q': u'How do I contact the HDX team?',
'a': u'<p>For general enquiries or issues with the site, e-mail <a href="mailto:hdx@un.org">hdx@un.org</a>. You can also reach us on Twitter at <a href="https://twitter.com/humdata" target="_blank" rel="noopener noreferrer">@humdata</a>. Sign up to receive our newsletter <a href="http://humdata.us14.list-manage.com/subscribe?u=ea3f905d50ea939780139789d&id=99796325d1" target="_blank" rel="noopener noreferrer">here</a>.</p>\n',
'id': u'How_do_I_contact_the_HDX_team_'}], 'title': u'Contact'}]}
def mock_documentation_page_content(id):
return {'topics': {'faq-Contact_Us': u'Contact Us', 'faq-Accessing_HDX_by_API': u'Accessing HDX by API',
'faq-Other_HDX_Libraries': u'Other HDX Libraries', 'faq-Tools': u'Tools',
'faq-Coding_with_the_Humanitarian_Exchange_Language': u'Coding with the Humanitarian Exchange Language'},
'faq_data': [
{'id': 'faq-Accessing_HDX_by_API', 'questions': [{'q': u'About the Humanitarian Data Exchange API',
'a': u'<p>This section contains information for developers who want to write code that interacts with the Humanitarian Data Exchange (HDX) and the datasets it contains. Anything that you can do by way of the HDX user interface, you can do programatically by making calls to the API and you can do a lot more. Typical uses of the API might be to script the creation and update of datasets in HDX or to read data for analysis and visualisation.</p>\n',
'id': u'About_the_Humanitarian_Data_Exchange_API'},
{'q': u'Programming Language Support',
'a': u'<p>HDX has a RESTful API largely unchanged from the underlying CKAN API which can be used from any programming language that supports HTTP GET and POST requests. However, the terminology that CKAN uses is a little different to the HDX user interface. Hence, we have developed wrappers for specific languages that harmonise the nomenclature and simplify the interaction with HDX.<br />\nThese APIs allow various operations such as searching, reading and writing dataset metadata, but not the direct querying of data within resources which can point to files or urls and of which there can be more than one per dataset.</p>\n',
'id': u'Programming_Language_Support'},
{'q': u'Python',
'a': u'<p>The recommended way of developing against HDX is to use the <a href="https://github.com/OCHA-DAP/hdx-python-api" target="_blank" rel="noopener noreferrer">HDX Python API</a>. This is a mature library that supports Python 2.7 and 3 with tests that have a high level of code coverage. The major goal of the library is to make pushing and pulling data from HDX as simple as possible for the end user. There are several ways this is achieved. It provides a simple interface that communicates with HDX using the CKAN Python API, a thin wrapper around the CKAN REST API. The HDX objects, such as datasets and resources, are represented by Python classes. This should make the learning curve gentle and enable users to quickly get started with using HDX programmatically. For example, to read a dataset and get its resources, you would simply do:</p>\n<pre><code class="python">from hdx.hdx_configuration import Configuration \r\nfrom hdx.data.dataset import Dataset\r\nConfiguration.create(hdx_site=\'prod\', user_agent=\'A_Quick_Example\', hdx_read_only=True)\'\r\ndataset = Dataset.read_from_hdx(\'reliefweb-crisis-app-data\')\r\nresources = dataset.get_resources()\r\n</code></pre>\n<p>There is <a href="http://ocha-dap.github.io/hdx-python-api/" target="_blank" rel="noopener noreferrer">library API-level documentation</a> available online.<br />\nIf you intend to push data to HDX, then it may be helpful to start with this <a href="https://github.com/OCHA-DAP/hdxscraper-template" target="_blank" rel="noopener noreferrer">scraper template</a> which shows what needs to be done to create datasets on HDX. It should be straightforward to adapt the template for your needs.</p>\n',
'id': u'Python'}, {'q': u'R',
'a': u'<p>If you wish to read data from HDX for analysis in R, then you can use the <a href="https://gitlab.com/dickoa/rhdx" target="_blank" rel="noopener noreferrer">rhdx</a> package. The goal of this package is to provide a simple interface to interact with HDX. Like the Python API, it is a wrapper around the CKAN REST API. rhdx is not yet fully mature and some breaking changes are expected.</p>\n',
'id': u'R'}, {'q': u'REST',
'a': u'<p>If you need to use another language or simply want to examine dataset metadata in detail in your web browser, then you can use <a href="http://docs.ckan.org/en/ckan-2.6.3/api/index.html" target="_blank" rel="noopener noreferrer">CKAN’s RESTful API</a>, a powerful, RPC-style interface that exposes all of CKAN’s core features to clients.</p>\n',
'id': u'REST'}],
'title': u'Accessing HDX by API'}, {'id': 'faq-Coding_with_the_Humanitarian_Exchange_Language',
'questions': [{'q': u'About the Humanitarian Exchange Language',
'a': u'<p>This section contains information for developers who want to write code to process datasets that use the Humanitarian Exchange Language (HXL). HXL is a different kind of data standard, adding hashtags to existing datasets to improve information sharing during a humanitarian crisis without adding extra reporting burdens. HXL has its <a href="http://hxlstandard.org/" target="_blank" rel="noopener noreferrer">own website</a> and of particular interest will be the <a href="http://hxlstandard.org/standard" target="_blank" rel="noopener noreferrer">documentation</a> section.</p>\n',
'id': u'About_the_Humanitarian_Exchange_Language'},
{'q': u'Python',
'a': u'<p>The most well developed HXL library, <a href="https://github.com/HXLStandard/libhxl-python" target="_blank" rel="noopener noreferrer">libhxl-python</a>, is written in Python. The most recent versions support Python 3 only, but there are earlier versions with Python 2.7 support. Features of the library include filtering, validation and the ingestion and generation of various formats. libhxl-python uses an idiom that is familiar from JQuery and other Javascript libraries; for example, to load a dataset, you would use simply</p>\n<pre><code>import hxl \r\nsource = hxl.data(\'http://example.org/dataset.xlsx\')</code></pre>\n<p>As in JQuery, you process the dataset by adding additional steps to the chain. The following example selects every row with the organisation “UNICEF” and removes the column with email addresses:</p>\n<pre><code>source.with_rows(\'#org=UNICEF\').without_columns(\'#contact+email\')</code></pre>\n<p>The library also includes a set of command-line tools for processing HXL data in shell scripts. For example, the following will perform the same operation shown above, without the need to write Python code:</p>\n<pre><code>$ cat dataset.xlsx | hxlselect -q "#org=UNICEF" | hxlcut -x \'#contact+email\'</code></pre>\n<p>There is library <a href="http://hxlstandard.github.io/libhxl-python/" target="_blank" rel="noopener noreferrer">API-level documentation</a> available online.</p>\n',
'id': u'Python'}, {'q': u'Javascript',
'a': u'<p><a href="https://github.com/HXLStandard/libhxl-js" target="_blank" rel="noopener noreferrer">libhxl-js</a> is a library for HXL written in Javascript. It supports high-level filtering and aggregation operations on HXL datasets. Its programming idiom is similar to libhxl-python, but it is smaller and contains fewer filters and no data-validation support.</p>\n',
'id': u'Javascript'},
{'q': u'R',
'a': u'<p>Third party support for R is available via the package <a href="https://github.com/dirkschumacher/rhxl" target="_blank" rel="noopener noreferrer">rhxl</a>. It has basic support for reading HXLated files to make them available for advanced data-processing and analytics inside R.</p>\n',
'id': u'R'}],
'title': u'Coding with the Humanitarian Exchange Language'},
{'id': 'faq-Tools', 'questions': [{'q': u'HDX Tools',
'a': u'<p>HDX provides a <a href="https://tools.humdata.org/" target="_blank" rel="noopener noreferrer">suite of tools</a> that leverage HXLated datasets:</p>\n<ol>\n<li>QuickCharts automatically generates embeddable, live data charts, graphs and key figures from your data. It uses the HXL hashtags to guess the best charts to display, but you can then go in and override with your own <a href="https://github.com/OCHA-DAP/hxl-recipes" target="_blank" rel="noopener noreferrer">preferences</a>.</li>\n<li>HXL Tag Assist allows you to find hashtag examples and definitions, and see how data managers are using the hashtags in their data.</li>\n<li>Data Check provides help with data cleaning for humanitarian data, automatically detecting and highlighting common errors. It includes validation against CODs and other vocabularies.</li>\n</ol>\n',
'id': u'HDX_Tools'}, {'q': u'HXL Proxy',
'a': u'<p>The <a href="https://proxy.hxlstandard.org/" target="_blank" rel="noopener noreferrer">HXL Proxy</a> is a tool for validating, cleaning, transforming, and visualising HXL-tagged data. You supply an input url pointing to a tabular or JSON dataset and then create a recipe that contains a series of steps for transforming the data. The result is a download link that you can share and use in HDX, and the output will update automatically whenever the source dataset changes. Full user documentation is available in the <a href="https://github.com/HXLStandard/hxl-proxy/wiki" target="_blank" rel="noopener noreferrer">HXL Proxy wiki</a>.<br />\nThe HXL Proxy is primarily a web wrapper around the libhxl-python library (see above), and makes the same functionality available via <a href="https://en.wikipedia.org/wiki/Representational_state_transfer" target="_blank" rel="noopener noreferrer">RESTful</a> web calls.</p>\n',
'id': u'HXL_Proxy'}], 'title': u'Tools'},
{'id': 'faq-Other_HDX_Libraries', 'questions': [{'q': u'HDX Python Country',
'a': u'<p>Humanitarian projects frequently require handling countries, locations and regions in particular dealing with inconsistent country naming between different data sources and different coding standards like ISO3 and M49. The <a href="https://github.com/OCHA-DAP/hdx-python-country" target="_blank" rel="noopener noreferrer">HDX Python Country</a> library was created to fulfill these requirements and is a dependency of the HDX Python API. It is also very useful as a standalone library and has <a href="https://ocha-dap.github.io/hdx-python-country/" target="_blank" rel="noopener noreferrer">library API-level documentation</a>available online.</p>\n',
'id': u'HDX_Python_Country'},
{'q': u'HDX Python Utilities',
'a': u'<p>All kinds of utility functions have been coded over time for use internally, so since we think these have value externally, it was decided that they should be packaged into the <a href="https://github.com/OCHA-DAP/hdx-python-utilities" target="_blank" rel="noopener noreferrer">HDX Python Utilities</a> library which has library API-level documentation available online.</p>\n',
'id': u'HDX_Python_Utilities'}],
'title': u'Other HDX Libraries'}, {'id': 'faq-Contact_Us', 'questions': [
{'q': u'How do I contact the HDX team?',
'a': u'<p>If you have any questions about these resources, we will do our best to answer them. We would also love to hear about how you are using them for your work.</p>\n<p>Please contact us at: <a href="mailto:hdx@un.org">hdx@un.org</a>. Sign up to receive our <a href="http://humdata.us14.list-manage1.com/subscribe?u=ea3f905d50ea939780139789d&id=99796325d1" target="_blank" rel="noopener noreferrer">newsletter here</a>.</p>\n',
'id': u'How_do_I_contact_the_HDX_team_'}], 'title': u'Contact Us'}]}
# Mock page-content provider for the "Data Responsibility COVID-19" FAQ page.
# Returns a static fixture mirroring the structure the real page-content
# fetcher produces: a dict with
#   - 'topics':   mapping of section id -> section title, and
#   - 'faq_data': list of sections, each {'id', 'title', 'questions'} where
#                 'questions' is a list of {'q': question text,
#                 'a': answer as a raw HTML fragment, 'id': anchor id}.
# NOTE(review): the ``id`` parameter is ignored (and shadows the builtin
# ``id``); presumably kept so the signature matches the function being
# mocked -- confirm against the caller. The typo "responsability" in the
# name is left as-is since callers reference it.
def mock_data_responsability_page_content(id):
return {'topics': {'faq-_Data_Responsibility_COVID_19_Content_': u'[Data Responsibility COVID-19 Content]'},
'faq_data': [{'id': 'faq-_Data_Responsibility_COVID_19_Content_', 'questions': [{
'q': u'What are some basic health data management precautions that all organizations should take in the COVID-19 response?',
'a': u'<p>The World Health Organization recommends the following measures to ensure the ethical and secure use of data:</p>\n<ol>\n<li>Use anonymization and other tools as appropriate.</li>\n<li>Comply with informed consent agreements where such consent is needed and respect assurances about ways in which the data (anonymized or otherwise) would be used, shared, stored or protected.</li>\n<li>Adopt appropriate security measures to foster public trust.</li>\n<li>Any platforms established to share data should have an explicit ethical framework governing data collection and use.</li>\n</ol>\n<p>In addition, consider the following:</p>\n<ol>\n<li>Ensure adequate de-identification of data within health data management activities. Consult relevant guidance to determine which tool is most appropriate for de-identification of the type of data you’re handling. When using digital (communication) technologies in healthcare, data protection is paramount. Determine which tools are used by healthcare professionals and only use tools that allow for the appropriate level of encryption.</li>\n<li>Clearly define the purpose of data management, measures for data minimisation and limitation of data retention, and the specific roles and responsibilities of different stakeholders throughout the data management process. This should include a clear overview of which parties are responsible for safeguarding data at different stages.</li>\n<li>When sharing data with specific recipients, be transparent regarding the appropriate use of the data, and make sure this is compatible with the original purpose for which the data was collected.</li>\n<li>Data can be vulnerable to interception at points of transfer between different organizations. Additionally, data may be misused intentionally or unintentionally after the transfer. 
Select the right method and tool for transfer, and to stipulate the licence or terms under which data may be used in a clear manner (see <a href="https://data.humdata.org/faq-data-responsibility-covid-19#auto-faq-Section_1-What_are_the_different_licenses_available_for_data_sharing_and_what_do_they_cover_-q">“What are the different licenses available for data sharing and what do they cover?”</a> for more information on this point).</li>\n</ol>\n<p>Following these best practices will help ensure responsible data management in the COVID-19 response.</p>\n',
'id': u'What_are_some_basic_health_data_management_precautions_that_all_organizations_should_take_in_the_COVID_19_response_'},
{
'q': u'What constitutes sensitive data generally and in the health sector specifically?',
'a': u'<p>Your organization may have standard definitions for data sensitivity included in a data policy or elsewhere. Data sensitivity definitions may also be found in applicable privacy or data protection legislation. In the absence of such guidance, any data that may put certain individuals, groups or organizations at risk of harm in a particular context should be considered sensitive. While personal data can categorically be considered sensitive, more nuanced issues arise for non-personal data. For example, locations of medical facilities in conflict settings can expose patients and staff to risk, while the same data would not necessarily be considered sensitive in a natural disaster response context.</p>\n<p>In the health sector specifically, all identifiable data concerning health, factors influencing health (for example, cultural and socio-economic details) and the history of individuals are sensitive and must be handled with care and professionalism. In addition, any data (identifiable or not) that can be voluntarily or involuntarily misused against the interests of patients, potential patients, their family, groups or communities and/or health service providers or other humanitarian organizations and their staff, or put any of them at risk for political reasons, financial gain or any other reasons shall be treated as “highly sensitive” data. Even some seemingly non-sensitive data can be highly sensitive in certain contexts (for example, details of cholera outbreaks). Finally, the metadata generated as a ‘byproduct’ of data management can create a distinct set of risks, which should not be overlooked. For more information on the risks associated with metadata, see <a href="https://www.icrc.org/en/document/digital-trails-could-endanger-people-receiving-humanitarian-aid-icrc-and-privacy" target="_blank" rel="noopener noreferrer">https://www.icrc.org/en/document/digital-trails-could-endanger-people-receiving-humanitarian-aid-icrc-and-privacy</a></p>\n',
'id': u'What_constitutes_sensitive_data_generally_and_in_the_health_sector_specifically_'},
{
'q': u'What are some common types of sensitive data in the COVID-19 response?',
'a': u'<p>In the COVID-19 response, the following common data types may be considered sensitive and should be treated with care:</p>\n<ol>\n<li>any directly identifiable data (such as datasets containing names or telephone numbers)</li>\n<li>any indirectly identifiable data (such as survey results or call detail records that have not been appropriately anonymized)</li>\n<li>non-identifiable data on sensitive topics, including but not limited to aggregated and/or anonymized data onviolence related injuries; rape; termination of pregnancy, and; patients in prisons or detention centers;</li>\n<li>information on the disease in a context where there is an obligation to abide by treatment or other related measures, such as quarantine;</li>\n<li>non-identifiable data which reveals or implies racial or ethnic origin, political opinions, religious or philosophical beliefs, offences or sex life or preferences.</li>\n</ol>\n<p>Assessing the sensitivity of data requires a clear understanding of the context and the different ways in which data may lead to harm. Data Sensitivity Classifications such as <a href="https://docs.google.com/document/d/1FYI9n2NcQAUTC-0XlQ5drPcYwfY_OXPRvaU0KMuMzHQ/edit" target="_blank" rel="noopener noreferrer">this example</a> (from the working draft <a href="https://centre.humdata.org/wp-content/uploads/2019/03/OCHA-DR-Guidelines-working-draft-032019.pdf" target="_blank" rel="noopener noreferrer">OCHA Data Responsibility Guidelines</a>) can help humanitarian organizations consistently assess and manage data sensitivity in different environments.</p>\n<p>These classifications can be developed at the country level and/or at the sector/cluster level where necessary (e.g. the health cluster may wish to establish a sensitivity classification specific to data required for COVID-19 response interventions in certain contexts). 
Humanitarians operating at the National or Sub-National level are encouraged to engage with the appropriate partners and coordinating bodies to ensure data management is conducted according to relevant standards for IM services in public health. This includes aligning with existing context-specific data sensitivity classifications.</p>\n',
'id': u'What_are_some_common_types_of_sensitive_data_in_the_COVID_19_response_'},
{
'q': u'What are the key measures I should take to ensure privacy and data protection in data management?',
'a': u'<p>Data management in the COVID-19 response should be principled and follow existing best practice in humanitarian data management. Some key measures for upholding privacy and data protection include:</p>\n<ol>\n<li><strong>Purpose limitation</strong>: clearly specify the purpose for which data is needed, explain this to the populations from whom data will be collected, and establish safeguards to ensure that data is used only for the intended purpose.</li>\n<li><strong>Privacy by design</strong>: anticipate and build-in technical and procedural measures to prevent privacy invasive events at the outset of a data management exercise.</li>\n<li><strong>Transparency</strong>: provide accurate and complete information to people about what data is being collected about them, for what purpose, how it will be used, how long it will be kept and who it will be shared with</li>\n<li><strong>Necessity and proportionality</strong>: only collect data that is relevant and necessary to achieve the purpose specified, thereby abiding by the principle of data minimisation.</li>\n<li><strong>Time limitations</strong>: ensure that any data processing is strictly limited in time and that data collected for COVID-19 response efforts is not retained beyond the time for which they are strictly needed to combat the pandemic.</li>\n</ol>\n<p>For additional resources and examples of best practice on data protection and privacy in the COVID-19 response, see this repository from UN Global Pulse: <a href="https://www.unglobalpulse.org/policy/covid-19-data-protection-and-privacy-resources/" target="_blank" rel="noopener noreferrer">https://www.unglobalpulse.org/policy/covid-19-data-protection-and-privacy-resources/</a></p>\n<p>For detailed recommendations on data privacy, data protection, and responsible data management in digital contact tracing, see this recent working paper from UNICEF: <a 
href="https://www.unicef-irc.org/publications/1096-digital-contact-tracing-surveillance-covid-19-response-child-specific-issues-iwp.html" target="_blank" rel="noopener noreferrer">https://www.unicef-irc.org/publications/1096-digital-contact-tracing-surveillance-covid-19-response-child-specific-issues-iwp.html</a></p>\n',
'id': u'What_are_the_key_measures_I_should_take_to_ensure_privacy_and_data_protection_in_data_management_'},
{
'q': u'What measures can I take to reduce the risk of re-identification of individuals and groups before publishing data?',
'a': u'<p>Data on the characteristics of units of a population (e.g. individuals, households or establishments) collected by a census, survey or experiment is referred to in statistics as ‘microdata’. In humanitarian response, this type of data is gathered through exercises such as household surveys, needs assessments, and other programme monitoring activities. Such data make up an increasingly significant volume of data in the humanitarian sector, and will play a key role in the COVID-19 response.</p>\n<p>In its raw form, microdata can contain both personal data and non-personal data on a range of topics. Most humanitarian organisations acknowledge the sensitivity of personal data such as names, biometric data, or ID numbers and anonymise data sets accordingly as a matter of standard practice. However, it is often still possible to re-identify individual respondents or groups by combining answers to different questions, even after such ‘anonymisation’ is applied.</p>\n<p>Depending on the type of data you’re managing, there are various tools available to determine and reduce the risk of re-identification in the data. For microdata, one such approach is Statistical Disclosure Control (SDC).</p>\n<p>SDC is a technique used to assess and lower the risk of a person or organization being re-identified from the analysis of microdata (data on the characteristics of a population). The purpose of applying disclosure control to microdata is to be able to share the data more widely in a responsible manner. An SDC process can lower the risk of re-identification to an acceptable level but the risk threshold may vary depending on the context to which the data relates. There are a variety of free and open source tools available for conducting SDC, including <a href="https://ihsn.org/software/disclosure-control-toolbox" target="_blank" rel="noopener noreferrer">sdcMicro</a>. 
Read this <a href="https://centre.humdata.org/wp-content/uploads/2019/07/guidance_note_sdc.pdf" target="_blank" rel="noopener noreferrer">guidance note</a> from the Centre for Humanitarian Data for more information on how to start using SDC.</p>\n',
'id': u'What_measures_can_I_take_to_reduce_the_risk_of_re_identification_of_individuals_and_groups_before_publishing_data_'},
{
'q': u'What are the existing standards for surveillance and case definition and reporting?',
'a': u'<p>The World Health Organization has published <a href="https://www.who.int/emergencies/diseases/novel-coronavirus-2019/technical-guidance/surveillance-and-case-definitions" target="_blank" rel="noopener noreferrer">technical guidance on surveillance and case definitions for COVID-19</a>. This guidance includes resources for use in case-based reporting — including a Case-based reporting form, a Data dictionary for case-based reporting form, and Template for Line list for case-based reporting — as well as aggregated reporting, including an Aggregated weekly reporting form.</p>\n',
'id': u'What_are_the_existing_standards_for_surveillance_and_case_definition_and_reporting_'},
{
'q': u'Where can I find the latest data about the ongoing COVID-19 emergency?',
'a': u'<p>The World Health Organization maintains a real-time dashboard providing an overview of the COVID-19 situation here <a href="https://who.sprinklr.com/" target="_blank" rel="noopener noreferrer">https://experience.arcgis.com/experience/685d0ace521648f8a5beeeee1b9125cd</a></p>\n<p>Their data is updated live and can be accessed here: <a href="https://data.humdata.org/dataset/coronavirus-covid-19-cases-and-deaths" target="_blank" rel="noopener noreferrer">https://data.humdata.org/dataset/coronavirus-covid-19-cases-and-deaths</a></p>\n<p>A number of humanitarian organizations are publishing data about different aspects of the global and country-level response to COVID-19. Many of these resources are available in a dedicated <a href="https://data.humdata.org/event/covid-19">COVID-19 crisis page on the Humanitarian Data Exchange</a>.</p>\n<p>Many national health authorities also provide updates on a daily basis. Visit your national health authority’s website for more information.</p>\n',
'id': u'Where_can_I_find_the_latest_data_about_the_ongoing_COVID_19_emergency_'},
{
'q': u'How can I determine the most appropriate method and/or tool for sharing or otherwise transferring data in a secure way?',
'a': u'<p>Consult the relevant guidance (such as a data policy or specific protocols for a given data management activity) or focal point within your organization to see which methods and tools are considered appropriate for the secure transfer of (sensitive) data. In general, a secure method or tool will enable encryption of the data in transit and at rest, offer secure authentication functionality and access restrictions, among other security features. For example, most email service providers allow you to turn on encryption of emails and their attachments.</p>\n',
'id': u'How_can_I_determine_the_most_appropriate_method_and_or_tool_for_sharing_or_otherwise_transferring_data_in_a_secure_way_'},
{
'q': u'What are the different licenses available for data sharing and what do they cover?',
'a': u'<p>Licenses stipulate the terms under which data is shared. This means that a license will describe how data may be used and shared further, as well as any attribution to the original source that should take place. A list of commonly used licenses is available here: <a href="https://data.humdata.org/faqs/licenses">https://data.humdata.org/faqs/licenses</a></p>\n',
'id': u'What_are_the_different_licenses_available_for_data_sharing_and_what_do_they_cover_'},
{
'q': u'How can my organization ensure responsible data practice when developing or using models in the COVID-19 response?',
'a': u'<p>Epidemic models are an essential tool in the hands of governments and policy makers for planning and responding to COVID-19. This crisis shows how predictive analytics can inform and maximise the impact of interventions, especially in resource-limited contexts. It also shows the importance of having models that are validated and ready to be deployed right before or at the beginning of a crisis.</p>\n<p>Unfortunately, translating the outputs of predictive models into timely and appropriate responses in the humanitarian sector remains a challenge for several reasons:</p>\n<ol>\n<li>First, there is no common standard for documenting predictive models and their intended use which highlights the critical aspects for the application of models in the humanitarian sector.</li>\n<li>Second, there is no common standard or mechanism for assessing the technical rigor and operational readiness of predictive models in the sector.</li>\n<li>Third, the development of predictive models is often led by technical specialists who may not consider important ethical concerns that the application of models in humanitarian contexts may entail.</li>\n</ol>\n<p>One approach for addressing these challenges is to submit models for peer review. The Centre for Humanitarian Data recently published an updated version of its <a href="https://centre.humdata.org/wp-content/uploads/2020/03/peer-review-framework-2020.pdf" target="_blank" rel="noopener noreferrer">Peer Review Framework for Predictive Analytics in Humanitarian Response</a>. The Framework aims to create standards and processes for the use of models in our sector. It is based on research with experts and stakeholders across a range of organizations that design and use predictive models. The Framework also draws on best practices from academia and the private sector.</p>\n',
'id': u'How_can_my_organization_ensure_responsible_data_practice_when_developing_or_using_models_in_the_COVID_19_response_'},
{
'q': u'What policies and guidelines currently exist to inform the management of data in public health emergencies?',
'a': u'<p>Many individual organizations have policies and guidelines specific to the safe, ethical, and effective management of different types of data. Institutional policies on personal data protection are particularly relevant to the responsible management of health data and should serve as a primary reference for staff in the COVID-19 response.</p>\n<p>In addition, many national and regional authorities have included provisions specific to health data management in national and regional data protection legislation and other relevant regulatory frameworks. National laws on medical practice may also include specific rules on health data management. Consult a local legal professional to ensure you are aware of and abide by all applicable data protection laws.</p>\n<p>The World Health Organization <a href="https://www.who.int/wer/2016/wer9118.pdf?ua=1" target="_blank" rel="noopener noreferrer">Policy statement on data sharing by WHO in the context of public health emergencies (as of 13 April 2016)</a> and <a href="https://www.who.int/medicines/publications/pharmprep/WHO_TRS_996_annex05.pdf?ua=1%22" target="_blank" rel="noopener noreferrer">Guidance on good data and record management practices</a> are the primary global frameworks of reference for the management of data in public health emergencies.</p>\n<p>The Global Health Cluster <a href="https://www.who.int/health-cluster/resources/publications/Final-PHIS-Standards.pdf" target="_blank" rel="noopener noreferrer">Standards for Public Health Information Services in Activated Health Clusters and other Humanitarian Health Coordination Mechanisms</a> should also serve as a key reference for humanitarian practitioners. 
Although this document refers to Public Health Information Services (PHIS) in activated health clusters (HCs), these PHIS Standards are by no means restricted to health clusters, and can be applied to support government led emergency coordination or other types of humanitarian sectoral coordination mechanisms.</p>\n<p>The WHO <a href="https://www.who.int/publishing/datapolicy/Policy_data_sharing_non_emergency_final.pdf" target="_blank" rel="noopener noreferrer">‘Policy on the use and sharing of data collected in Member States by the WHO, outside the context of public health emergencies’</a> contains <a href="https://www.who.int/about/who-we-are/publishing-policies/data-policy" target="_blank" rel="noopener noreferrer">extensive annexes on security, safeguards, ethics and guidance</a> on implementation and may also serve as a helpful reference. However, the policy excludes data shared in the context of public health emergencies, including Public Health Emergencies of International Concern (such as the COVID-19 pandemic) and data and reports from clinical trials and biological samples, and data collected by WHO prior to policy implementation.</p>\n<p>While there are a number of different sets of principles related to the responsible management of data in public health, international development and humanitarian action, the most directly relevant here are the <a href="https://www.go-fair.org/fair-principles/" target="_blank" rel="noopener noreferrer">FAIR data principles</a> and the <a href="https://www.unsystem.org/personal-data-protection-and-privacy-principles" target="_blank" rel="noopener noreferrer">United Nations Privacy Policy Group Personal Data Protection and Privacy Principles</a>.</p>\n<p>When data is used for purposes other than informing the response (e.g. research), additional frameworks and principles may apply. 
Researchers should refer to the <a href="https://www.who.int/about/ethics/code-of-conduct-for-responsible-research" target="_blank" rel="noopener noreferrer">WHO Code of Conduct for responsible Research</a>, which provides standards of good practice to guide individuals working on all research associated with WHO, including non-clinical research, in line with the principles of integrity, accountability, independence/impartiality, respect and professional commitment described in <a href="https://www.who.int/about/ethics/code_of_ethics_full_version.pdf?ua=1" target="_blank" rel="noopener noreferrer">WHO’s Code of Ethics and Professional Conduct</a>.</p>\n',
'id': u'What_policies_and_guidelines_currently_exist_to_inform_the_management_of_data_in_public_health_emergencies_'},
{
'q': u'Where can I learn more about data responsibility in humanitarian situations and in public health programming?',
'a': u'<p>Data responsibility entails a set of principles, processes and tools that support the safe, ethical and effective management of data in humanitarian response. This includes data privacy, protection, and security, as well as other practical measures to mitigate risk and prevent harm.</p>\n<p>There is a wealth of guidance available on how to responsibly manage data in public health emergencies and in humanitarian action more generally that should inform data management in the COVID-19 response. The following resources provide additional information and guidance on the safe, ethical, and effective management of data in humanitarian action:</p>\n<ol>\n<li><a href="https://www.accessnow.org/cms/assets/uploads/2020/03/Access-Now-recommendations-on-Covid-and-data-protection-and-privacy.pdf" target="_blank" rel="noopener noreferrer">Recommendations on privacy and data protection in the fight against COVID-19 (Access Now)</a></li>\n<li><a href="https://www.icrc.org/en/data-protection-humanitarian-action-handbook" target="_blank" rel="noopener noreferrer">Handbook on Data Protection in Humanitarian Action (ICRC and Brussels Privacy Hub)</a></li>\n<li><a href="https://centre.humdata.org/wp-content/uploads/2019/03/OCHA-DR-Guidelines-working-draft-032019.pdf" target="_blank" rel="noopener noreferrer">Working Draft Data Responsibility Guidelines (OCHA Centre for Humanitarian Data)</a></li>\n<li><a href="https://www.measureevaluation.org/resources/publications/ms-17-125a" target="_blank" rel="noopener noreferrer">mHealth Data Security, Privacy, and Confidentiality: Guidelines for Program Implementers and Policymakers</a></li>\n</ol>\n<p>For a broad range of resources related to data responsibility in development and humanitarian work, consult the <a href="https://docs.google.com/document/d/1Fa2QHusD5iJ8Woi8s7-SMFItAufKv4U5UR-PZ1szMNU/edit#heading=h.k5ayqcygtlml" target="_blank" rel="noopener noreferrer">Responsible Data Resource List</a> maintained by MERL Tech and the 
Engine Room.</p>\n',
'id': u'Where_can_I_learn_more_about_data_responsibility_in_humanitarian_situations_and_in_public_health_programming_'},
{
'q': u'How can my organization practice data responsibility when working and meeting remotely?',
'a': u'<p>Recent changes to working conditions have increased the use of online conferencing tools throughout the humanitarian sector. These conferencing technologies are invaluable when face-to-face meetings are not possible, but they also pose a significant information security and data protection risk when not used responsibly. Some steps for reducing these risks include:</p>\n<ol>\n<li>Familiarizing yourself with your organization’s approved online conferencing tools, their features and settings</li>\n<li>Using only online conferencing tools that are approved, configured and verified as secure by your organization</li>\n<li>Using a unique access code so that only those with the code for that meeting can access the room, particularly when a sensitive topic is being discussed</li>\n<li>Monitoring the dashboard of participants to ensure no uninvited parties are attending throughout the call</li>\n</ol>\n<p>For more information and additional recommendations, see <a href="https://centre.humdata.org/wp-content/uploads/2020/04/4459_002_Tip-Sheet-Responsible-Use-of-Online-Conferencing-Tools_WEB_1.pdf" target="_blank" rel="noopener noreferrer">this tip sheet</a> developed by the ICRC, IFRC and the Centre for Humanitarian Data on the responsible use of online conferencing tools</p>\n',
'id': u'How_can_my_organization_practice_data_responsibility_when_working_and_meeting_remotely_'}],
'title': u'[Data Responsibility COVID-19 Content]'}]}
def mock_faqs_license_page_content(id):
return {
'faq_data': [{'id': 'faq-_Data_Licenses_Content_', 'questions': [{'q': u'Creative Commons Attribution for Intergovernmental Organisations (CC BY-IGO)', 'a': u'<style></style><p>Under the CC BY-IGO license, you are free to share (copy and redistribute the material in any medium or format) and or adapt (remix, transform, and build upon the material) for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms. The license terms are that you must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. Additionally, you may not apply legal terms or technological measures that legally restrict others from doing anything the license permits. When the Licensor is an intergovernmental organization, disputes will be resolved by mediation and arbitration unless otherwise agreed.</p>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://creativecommons.org/licenses/by/3.0/igo/" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://creativecommons.org/licenses/by/3.0/igo/legalcode" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Creative_Commons_Attribution_for_Intergovernmental_Organisations__CC_BY_IGO_'}, {'q': u'Creative Commons Attribution International(CC BY)', 'a': u'<style></style><p>Under the CC BY license, you are free to share (copy and redistribute the material in any medium or format) and or adapt (remix, transform, and build upon the material) for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms. The license terms are that you must give appropriate credit, provide a link to the license, and indicate if changes were made. 
You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. Additionally, you may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.</p>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://creativecommons.org/licenses/by/4.0" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://creativecommons.org/licenses/by/4.0/legalcode" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Creative_Commons_Attribution_International_CC_BY_'}, {'q': u'Creative Commons Attribution-ShareAlike (CC BY-SA)', 'a': u'<style></style><p>Under the CC BY-SA license, you are free to share (copy and redistribute the material in any medium or format) and or adapt (remix, transform, and build upon the material) for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms. The license terms are that you must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. 
Additionally, you may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.</p>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://creativecommons.org/licenses/by-sa/4.0" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://creativecommons.org/licenses/by-sa/4.0/legalcode" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Creative_Commons_Attribution_ShareAlike__CC_BY_SA_'}, {'q': u'Open Database License (ODC-ODbL)', 'a': u'<style></style><p>Under the ODC-ODbL license, you are free:</p>\n<ul>\n<li>To Share: To copy, distribute and use the database.</li>\n<li>To Create: To produce works from the database.</li>\n<li>To Adapt: To modify, transform and build upon the database.</li>\n</ul>\n<p>As long as you:</p>\n<ul>\n<li>Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the ODbL. 
For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.</li>\n<li>Share-Alike: If you publicly use any adapted version of this database, or works produced from an adapted database, you must also offer that adapted database under the ODbL.</li>\n<li>Keep open: If you redistribute the database, or an adapted version of it, then you may use technological measures that restrict the work (such as DRM) as long as you also redistribute a version without such measures.</li>\n</ul>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://opendatacommons.org/licenses/odbl/summary/" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://opendatacommons.org/licenses/odbl/1.0/" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Open_Database_License__ODC_ODbL_'}, {'q': u'Open Data Commons Attribution License (ODC-BY)', 'a': u'<style></style><p>Under the ODC-BY license, you are free:</p>\n<ul>\n<li>To Share: To copy, distribute and use the database.</li>\n<li>To Create: To produce works from the database.</li>\n<li>To Adapt: To modify, transform and build upon the database.</li>\n</ul>\n<p>As long as you:</p>\n<ul>\n<li>Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. 
For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.</li>\n</ul>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://opendatacommons.org/licenses/by/summary/" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://opendatacommons.org/licenses/by/1.0/" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Open_Data_Commons_Attribution_License__ODC_BY_'}, {'q': u'Open Data Commons Public Domain Dedication and License (PDDL)', 'a': u'<style></style><p>Under the ODC-PDDL license, You are free:</p>\n<ul>\n<li>To Share: To copy, distribute and use the database.</li>\n<li>To Create: To produce works from the database.</li>\n<li>To Adapt: To modify, transform and build upon the database.</li>\n</ul>\n<p>As long as you:</p>\n<ul>\n<li>Blank: This section is intentionally left blank. 
The PDDL imposes no restrictions on your use of the PDDL licensed database.</li>\n</ul>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://opendatacommons.org/licenses/pddl/summary/" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://opendatacommons.org/licenses/pddl/1.0/" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Open_Data_Commons_Public_Domain_Dedication_and_License__PDDL_'}, {'q': u'Public Domain/No restrictions (CC0)', 'a': u'<style></style><p>Under the terms of this license you are free to use the material for any purpose without any restrictions.</p>\n<p><span class="sspRegular12">[ more information: <a class="info-item-name" href="https://creativecommons.org/publicdomain/zero/1.0/" target="_blank" rel="noopener noreferrer">deed</a> | <a class="info-item-name" href="https://creativecommons.org/publicdomain/zero/1.0/legalcode" target="_blank" rel="noopener noreferrer">license</a>]</span></p>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Public_Domain_No_restrictions__CC0_'}, {'q': u'Multiple Licenses', 'a': u'<style></style><div class="col-xs-12">\n<div class="row">\n<div class="col-xs-8 styleNo5">\n<p>The dataset contains data having different licenses or terms of use. 
The details of these licenses or terms of use should be listed in the file.</p>\n<p>\xa0</p>\n</div>\n</div>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Multiple_Licenses'}, {'q': u'Other', 'a': u"<style></style><p>Any other license or terms of use which are listed in the description of the dataset, or in the metadata fields of the dataset, or any other place in the dataset such as a specific license or terms of use file that is included as part of the dataset files.</p>\n<div class='ewd-ufaq-faq-custom-fields'>\n\n\t\n</div>", 'id': u'Other'}], 'title': u'[Data Licenses Content]'}],
'topics': {'faq-_Data_Licenses_Content_': u'[Data Licenses Content]'}
}
def mock_faqs_terms_page_content(id):
return {
'faq_data': [{'id': 'faq-_HDX_Terms_of_Service_Content_', 'questions': [{'q': u'Account Management', 'a': u'<style></style><div class="col-xs-8 styleNo5">\n<ol>\n<li>User account. HDX is an open platform and anyone can use it without creating a user account. Signing up with HDX gives users access to additional features such as the ability to receive notifications about data; joining an organization as a member, editor or admin; and requesting access to datasets shared via HDX Connect, among other benefits.</li>\n<li>Organization account. Data can only be shared on HDX by approved organizations. Organizations can represent a formal legal entity such as a non-governmental organization, or an informal collective such as an Information Management Working Group. OCHA reviews requests to create an organization account to: (1) verify the identity of the requester and (2) determine whether the data that will be shared meets the requirements set out in the DATA SCOPE AND CRITERIA section below.</li>\n<li>You may delete your user or organization account at any time. When you delete your account, OCHA will delete any personal data we collected in order to create the account. When an organization account is deleted, the data shared by the organization is also deleted from HDX.</li>\n</ol>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Account_Management'}, {'q': u'Data Scope and Criteria', 'a': u'<style></style><div class="col-xs-8 styleNo5">\n<ol start="4">\n<li aria-level="1">There are three categories of humanitarian data which may be shared on HDX:<br />\na.\xa0Data about the context in which a humanitarian crisis is occurring (e.g. administrative boundaries, locations of schools, health facilities and other physical infrastructure, and baseline socio-economic indicators).<br />\nb. Data about the people affected by the crisis and their needs (e.g. needs assessment data, movement data and locations of affected people).<br />\nc. 
Data about the response by organizations seeking to help those who need assistance (e.g. who-is-doing-what-where, community perception surveys, and funding levels).</li>\n<li aria-level="1">All data shared on HDX must meet the following criteria:<br />\na. Public and private datasets may not contain any personal data. Aid worker contact details may be shared within a private dataset, if those aid workers have provided consent. Personal data is information, in any form, that relates to an identified or identifiable natural person.<br />\nb. Public and private datasets may not contain any sensitive non-personal data. This includes information which, while not relating to an identified or identifiable natural person, may, by reason of its sensitive context, put certain individuals or groups of individuals at risk of harm.<br />\nc. Data must have been collected in a fair and legitimate manner with a defined purpose and in line with principles of necessity and proportionality.<br />\nd. Data must be shared in a supported data format. HDX supports <a href="https://github.com/OCHA-DAP/hdx-ckan/blob/dev/ckanext-hdx_package/ckanext/hdx_package/config/resource_formats.json">all common data formats</a> and offers built-in preview support for CSV, TXT, XLS, and JSON formats. Map previews are possible from geographic data in zipped shapefile, KML and GeoJSON formats.</li>\n<li aria-level="1">Organizations should keep their data on HDX up-to-date in order to present the latest available information.</li>\n</ol>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Data_Scope_and_Criteria'}, {'q': u'Sharing Data', 'a': u'<style></style><div class="col-xs-8 styleNo5">\n<ol start="7">\n<li aria-level="1">There are three ways to share data on HDX:<br />\na. <i>Public</i>: Data is accessible to anyone who visits HDX, whether or not they are a registered user.<br />\n<i>b. 
Private</i>: Data is accessible only to registered users who are members of the organization that uploaded the data on HDX.<br />\n<i>c. HDX Connect:</i> The metadata of a dataset is available and the contributing organization can decide whether or not to grant access to the full dataset when requested by a registered user.</li>\n<li aria-level="1">Organizations must specify an appropriate license for all data they share publicly. Organizations are free to choose the license for their data. We have suggested some options <a href="https://data.humdata.org/about/license">here</a>.</li>\n<li aria-level="1">Organizations may use HDX to share data from other sources if the applicable license allows for onward sharing.</li>\n<li aria-level="1">After downloading a public dataset, users must follow the applicable license when using and sharing the data.</li>\n<li aria-level="1">Organizations may use the HDX Connect feature to direct users to data hosted outside of HDX. In such cases, organizations should link directly to the specific dataset described on HDX and not to a more general landing page of an external platform.</li>\n<li aria-level="1">When an organization grants access to data requested via HDX Connect, the data does not pass through the HDX infrastructure.</li>\n</ol>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Sharing_Data'}, {'q': u'Data Review', 'a': u'<style></style><div class="col-xs-8 styleNo5">\n<ol start="13">\n<li aria-level="1">In order to ensure data quality and to prevent any sensitive data from being exposed through HDX, OCHA reviews all datasets that are shared publicly or privately on the platform. This review consists of:<br />\na. An automated scan for sensitive data using Google\u2019s Data Loss Prevention (DLP) tool, to flag and prioritize data for manual review by OCHA.<br />\nb. 
A manual review based on a <a href="https://data.humdata.org/dataset/2048a947-5714-4220-905b-e662cbcd14c8/resource/658d5c4f-1680-4cb5-9fbf-10a0a64e2c39/download/hdx-qa-checklist.pdf">quality assurance checklist</a> that includes the completeness of metadata, the relevance of the data to humanitarian action, the integrity of the data resources, and the absence of any sensitive data, among other criteria.</li>\n<li aria-level="1">If the manual review under 13(b) shows that a dataset contains personal or sensitive data, the dataset is placed \u2018under review\u2019. While data is under review, users will only be able to consult the metadata.</li>\n<li aria-level="1">For microdata such as household survey results, OCHA runs a disclosure risk assessment to assess the risk of a person or group being re-identified. All datasets labeled as \u2018microdata\u2019 by the contributing organization at the point of upload are automatically placed under review. The dataset will remain under review until OCHA is able to determine that the risk of re-identification is below the risk threshold and that any sensitive data has been removed from the dataset by the organization. More information about this process is available <a href="https://humanitarian.atlassian.net/wiki/spaces/HDXKB/pages/1381498881/Statistical+Disclosure+Control+on+HDX">here</a>.</li>\n<li aria-level="1">If a user notices personal or sensitive data shared through the HDX platform they should contact <a href="mailto:hdx@un.org">hdx@un.org</a> immediately to request that the data be removed.</li>\n</ol>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Data_Review'}, {'q': u'Data Management', 'a': u'<style></style><div class="col-xs-8 styleNo5">\n<ol start="17">\n<li aria-level="1">HDX is built using <a href="https://ckan.org/">CKAN</a>, an open-source data management system.</li>\n<li aria-level="1">Data that is uploaded to HDX is stored by OCHA on servers provided by Amazon Web Services. 
Data is encrypted in transit and at rest. The servers are located in Virginia, the United States of America.</li>\n<li aria-level="1">All data uploaded to HDX is sent via Google\u2019s DLP API for automated scanning for sensitive data using the DLP algorithm. Data is encrypted in transit and scanned through DLP\u2019s <a href="https://cloud.google.com/dlp/docs/concepts-method-types">content method</a>. Data is not retained by Google in this process.</li>\n<li aria-level="1">OCHA will never alter the values within datasets shared through HDX without prior permission from the contributing organization.</li>\n<li aria-level="1">Data shared privately through the HDX platform will never be shared further by OCHA without prior permission from the contributing organization.</li>\n<li aria-level="1">OCHA will make a dataset private if it is found to violate these Terms and will contact the contributing organization to discuss next steps.</li>\n<li aria-level="1">Deleted datasets cannot be retrieved by users, but will continue to exist in backups of the HDX database which are maintained for 30 days.</li>\n</ol>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Data_Management'}, {'q': u'Generic Disclaimer of Liability', 'a': u'<style></style><ol start="24">\n<li aria-level="1">Organizations are responsible for the data they share on HDX. OCHA assumes no liability whatsoever for data shared on HDX. While OCHA upholds a high standard for the quality and timeliness of the data shared on HDX, we cannot verify data accuracy. Sharing data through HDX does not imply the transfer of any rights over this data to OCHA. OCHA disclaims all warranties, whether express or implied.</li>\n<li aria-level="1">Data and information on HDX do not imply the expression or endorsement of any opinion on the part of OCHA or the United Nations. 
This includes opinions concerning the legal status of any country, territory, city or area or of its authorities, or concerning the delimitation of its frontiers or boundaries.</li>\n</ol>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Generic_Disclaimer_of_Liability'}, {'q': u'Privacy Notice', 'a': u'<style></style><ol start="26">\n<li aria-level="1">User contact details are only shared with the administrator of an HDX organization if the user requests access to an HDX Connect dataset.</li>\n<li aria-level="1">OCHA upholds the highest standard of data protection for the personal data of HDX users and organization administrators. In case such personal data is exposed, OCHA will notify all affected individuals and remedy the incident.</li>\n<li aria-level="1">OCHA continually seeks to understand the behavior of users on the HDX platform in order to make improvements. To do so, OCHA uses third-party analytics services, including Google Analytics and Mixpanel. Both of these services use cookies stored on users\u2019 devices to send encrypted information to Google Analytics and Mixpanel about how users arrived at HDX, what pages they visited on HDX, and their actions within those pages. Similar tracking is performed when users access HDX via our API or when directly downloading files from a shared link. OCHA does not send identifying information (including names, usernames, or email addresses) to either Google Analytics or Mixpanel. Google Analytics\u2019 and Mixpanel\u2019s use of the data collected from the HDX platform is governed by their respective Terms of Use.</li>\n<li aria-level="1">If you would like to disable the tracking described above under clause 28, you can install the <a href="https://tools.google.com/dlpage/gaoptout">Google Analytics Opt-out Browser Add-on</a> to disable Google Analytics tracking. Mixpanel respects <a href="https://allaboutdnt.com/">\u201cDo Not Track\u201d</a> settings in web browsers. 
Follow the instructions in <a href="https://allaboutdnt.com/#adjust-settings">this guide</a> to prevent your browser from sending data to Mixpanel. The data collected by these tracking systems will be retained indefinitely in order to understand how user behavior is changing over time.</li>\n<li aria-level="1">Emails sent by OCHA to registered HDX users may contain <a href="https://en.wikipedia.org/wiki/Web_beacon">web beacons</a>, which allow OCHA to track information about how many people have viewed its email campaigns. OCHA will never share personal data from this tracking with third parties other than with MailChimp, our mailing list provider, which has access by default. The data collected by this tracking system will be retained indefinitely in order to understand how readership of the emails is changing over time.</li>\n</ol>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Privacy_Notice'}, {'q': u'Applicable Guidance and Policy', 'a': u'<style></style><div class="col-xs-8 styleNo5">\n<ol start="31">\n<li aria-level="1">OCHA is mandated by <a href="https://undocs.org/A/RES/46/182">United Nations General Assembly Resolution 46/182</a> and guided by the <a href="https://www.unocha.org/sites/dms/Documents/OOM-humanitarianprinciples_eng_June12.pdf">Humanitarian Principles</a>. OCHA is governed by the applicable guidance and policies established by the United Nations General Assembly and the United Nations Secretariat. Notably, personal data is processed according to the <a href="http://www.refworld.org/pdfid/3ddcafaac.pdf">1990 Guidelines for the Regulation of Computerized Data Files</a> and in line with the <a href="https://www.unsystem.org/privacy-principles">UN Principles on Personal Data Protection and Privacy</a>.</li>\n</ol>\n</div>\n<div class=\'ewd-ufaq-faq-custom-fields\'>\n\n\t\n</div>', 'id': u'Applicable_Guidance_and_Policy'}], 'title': u'[HDX Terms of Service Content]'}],
'topics': {'faq-_HDX_Terms_of_Service_Content_': u'[HDX Terms of Service Content]'}
}
| 385.094183
| 13,548
| 0.665902
| 20,015
| 139,019
| 4.561329
| 0.108718
| 0.004359
| 0.019475
| 0.030604
| 0.397897
| 0.353174
| 0.309765
| 0.270409
| 0.248414
| 0.228643
| 0
| 0.014087
| 0.244247
| 139,019
| 360
| 13,549
| 386.163889
| 0.85486
| 0
| 0
| 0.022857
| 0
| 0.274286
| 0.818838
| 0.105763
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017143
| false
| 0.011429
| 0.025714
| 0.014286
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
35d92ece8021861f54e3487b58486d09fb120c2f
| 1,049
|
py
|
Python
|
AtCoder Beginner Contest 208/B - Factorial Yen Coin.py
|
codedreamer-dg/AtCoder
|
6a4a9a2bc558bb0b21505877e00858d0c7981701
|
[
"MIT"
] | null | null | null |
AtCoder Beginner Contest 208/B - Factorial Yen Coin.py
|
codedreamer-dg/AtCoder
|
6a4a9a2bc558bb0b21505877e00858d0c7981701
|
[
"MIT"
] | null | null | null |
AtCoder Beginner Contest 208/B - Factorial Yen Coin.py
|
codedreamer-dg/AtCoder
|
6a4a9a2bc558bb0b21505877e00858d0c7981701
|
[
"MIT"
] | null | null | null |
#                _            _                                            _
#   ___ ___   __| | ___    __| |_ __ ___  __ _ _ __ ___   ___ _ __    __| | __ _
#  / __/ _ \ / _` |/ _ \/ _` | '__/ _ \/ _` | '_ ` _ \ / _ \ '__|/ _` |/ _` |
# | (_| (_) | (_| |  __/ (_| | | |  __/ (_| | | | | | |  __/ |  | (_| | (_| |
#  \___\___/ \__,_|\___|\__,_|_|  \___|\__,_|_| |_| |_|\___|_|___\__,_|\__, |
#                                                                      |___/
from sys import *
'''sys.stdin = open('input.txt', 'r')
sys.stdout = open('output.txt', 'w') '''
from collections import defaultdict as dd
from math import *
from bisect import *
#sys.setrecursionlimit(10 ** 8)


def sinp():
    """Read one line of input as a string."""
    return input()


def inp():
    """Read one line of input and parse it as an int."""
    return int(sinp())


def minp():
    """Read one line of whitespace-separated ints as a map iterator."""
    return map(int, sinp().split())


def linp():
    """Read one line of whitespace-separated ints as a list."""
    return list(minp())


def strl():
    """Read one line as a list of its characters."""
    return list(sinp())


def pr(x):
    """Print a single value."""
    print(x)


mod = int(1e9 + 7)  # contest template constant; unused by this problem


def solve(n):
    """Return the minimum number of factorial-valued coins (1!..10!) summing to n.

    Greedy on the largest factorial <= n is optimal here because every
    factorial divides the next one (i! | (i+1)!).
    """
    res = 0
    while n:
        # Find the largest i! (i <= 10) that still fits in n.
        p = 1
        i = 1
        while p * i <= n and i <= 10:
            p *= i
            i += 1
        res += 1
        n -= p
    return res


if __name__ == '__main__':
    # Guarding the I/O keeps stdin untouched on import, so solve() is testable.
    pr(solve(inp()))
| 28.351351
| 77
| 0.418494
| 93
| 1,049
| 3.462366
| 0.483871
| 0.055901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019908
| 0.377502
| 1,049
| 37
| 78
| 28.351351
| 0.473201
| 0.448046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.142857
| 0.178571
| 0.535714
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ea1c0f80b7ecc37d6f8726ceeea5e84500b5ce4e
| 109
|
py
|
Python
|
EstruturaDeDecisao/18 terminar.py
|
TheCarvalho/atividades-wikipython
|
9163d5de40dbed0d73917f6257e64a651a77e085
|
[
"Unlicense"
] | null | null | null |
EstruturaDeDecisao/18 terminar.py
|
TheCarvalho/atividades-wikipython
|
9163d5de40dbed0d73917f6257e64a651a77e085
|
[
"Unlicense"
] | null | null | null |
EstruturaDeDecisao/18 terminar.py
|
TheCarvalho/atividades-wikipython
|
9163d5de40dbed0d73917f6257e64a651a77e085
|
[
"Unlicense"
] | null | null | null |
# ex18 - Faça um Programa que peça uma data no formato dd/mm/aaaa e determine se a mesma é uma data válida.
# (EN: Write a program that asks for a date in dd/mm/aaaa format and determines whether it is a valid date.)
| 36.333333
| 107
| 0.743119
| 22
| 109
| 3.681818
| 0.909091
| 0.17284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.211009
| 109
| 2
| 108
| 54.5
| 0.918605
| 0.963303
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ea4d0b99f30daba9cd24669e5a90702fdadbdf87
| 279
|
py
|
Python
|
doc/samples/uptodate_callable.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 1,390
|
2015-01-01T21:11:47.000Z
|
2022-03-31T11:35:44.000Z
|
doc/samples/uptodate_callable.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 393
|
2015-01-05T11:18:29.000Z
|
2022-03-20T11:46:46.000Z
|
doc/samples/uptodate_callable.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 176
|
2015-01-07T16:58:56.000Z
|
2022-03-28T12:12:11.000Z
|
def fake_get_value_from_db():
    """Stand-in for a real database query; always reports five records."""
    return 5


def check_outdated():
    """Uptodate callable: the task is considered done once the DB holds more than 10 items."""
    return fake_get_value_from_db() > 10


def task_put_more_stuff_in_db():
    """Doit task whose up-to-date status is decided by the check_outdated callable."""
    def put_stuff():
        pass

    return {
        'actions': [put_stuff],
        'uptodate': [check_outdated],
    }
| 18.6
| 41
| 0.634409
| 38
| 279
| 4.210526
| 0.526316
| 0.0875
| 0.15
| 0.2
| 0.3
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.258065
| 279
| 14
| 42
| 19.928571
| 0.758454
| 0
| 0
| 0
| 0
| 0
| 0.053957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.1
| 0
| 0.1
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
ea5e0f4d176116cbcd7a26e9b143873c3890f228
| 152
|
py
|
Python
|
app/cmuxovik/templatetags/remove_newlines.py
|
artem343/cmuxovik
|
6f923f66ba47d0c513659c332fd8c89d21ea4abf
|
[
"MIT"
] | 2
|
2020-03-31T18:01:55.000Z
|
2020-03-31T18:45:02.000Z
|
app/cmuxovik/templatetags/remove_newlines.py
|
artem343/cmuxovik
|
6f923f66ba47d0c513659c332fd8c89d21ea4abf
|
[
"MIT"
] | 35
|
2020-03-31T17:47:09.000Z
|
2022-03-12T00:22:54.000Z
|
app/cmuxovik/templatetags/remove_newlines.py
|
artem343/cmuxovik
|
6f923f66ba47d0c513659c332fd8c89d21ea4abf
|
[
"MIT"
] | null | null | null |
from django import template

register = template.Library()


@register.filter
def remove_newlines(cmux: str) -> str:
    """Template filter: collapse every newline in *cmux* into a single space."""
    return " ".join(cmux.split("\n"))
| 16.888889
| 38
| 0.703947
| 19
| 152
| 5.578947
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 152
| 8
| 39
| 19
| 0.828125
| 0
| 0
| 0
| 0
| 0
| 0.019737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ea63c7514a362bbf268f9a9ea9b6ae94c0c88422
| 170
|
py
|
Python
|
sample/sample/views.py
|
Fhall21/django-datetimepicker
|
3f6b38d83bf52d3f48dca0ad843e4fdbf342a0f7
|
[
"Apache-2.0"
] | 1
|
2020-11-13T06:48:23.000Z
|
2020-11-13T06:48:23.000Z
|
sample/sample/views.py
|
Fhall21/django-datetimepicker
|
3f6b38d83bf52d3f48dca0ad843e4fdbf342a0f7
|
[
"Apache-2.0"
] | null | null | null |
sample/sample/views.py
|
Fhall21/django-datetimepicker
|
3f6b38d83bf52d3f48dca0ad843e4fdbf342a0f7
|
[
"Apache-2.0"
] | 6
|
2018-01-24T00:21:21.000Z
|
2022-03-09T06:06:51.000Z
|
from django.views.generic.edit import FormView
from .forms import SampleForm


class SampleView(FormView):
    """Form page backed by :class:`SampleForm`, rendered with ``sample.html``."""
    # Form class that FormView instantiates and validates.
    form_class = SampleForm
    # Template name used to render the page.
    template_name = 'sample.html'
| 18.888889
| 46
| 0.770588
| 21
| 170
| 6.142857
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158824
| 170
| 8
| 47
| 21.25
| 0.902098
| 0
| 0
| 0
| 0
| 0
| 0.064706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ea75a121a7a7f2458731307018d9a50750c50d0b
| 12,133
|
py
|
Python
|
tests/test_region.py
|
khurrumsaleem/dassh
|
8823e4b5256975a375391787558e5b6aba816251
|
[
"BSD-3-Clause"
] | 11
|
2021-08-12T17:08:37.000Z
|
2021-12-09T22:35:48.000Z
|
tests/test_region.py
|
khurrumsaleem/dassh
|
8823e4b5256975a375391787558e5b6aba816251
|
[
"BSD-3-Clause"
] | 3
|
2021-11-24T21:15:36.000Z
|
2022-03-25T14:00:52.000Z
|
tests/test_region.py
|
khurrumsaleem/dassh
|
8823e4b5256975a375391787558e5b6aba816251
|
[
"BSD-3-Clause"
] | 2
|
2021-08-23T08:00:55.000Z
|
2021-09-16T02:26:59.000Z
|
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2022-01-05
author: matz
Test the base Region class and its methods
"""
########################################################################
import numpy as np
import pytest
from tests import conftest
def test_activate_to_rr(c_fuel_params, c_lrefl_simple):
    """Test activation from simple model to pin bundle model (single
    adiabatic duct wall)
    - All subchannels in pin bundle equal simple model temperature.
    - Average coolant temperature should be maintained.
    - Duct wall temperatures are recalculated to maintain energy cons.
    - Since adiabatic, average duct temperature should be close.
    """
    # Need to activate RR
    flowrate = 1.0
    t_gap = np.ones(2) # Making adiabatic so it doesn't matter
    htc_gap = np.ones(2) # Making adiabatic so it doesn't matter
    rr = conftest.make_rodded_region_fixture('conceptual_fuel',
                                             c_fuel_params[0],
                                             c_fuel_params[1],
                                             flowrate)
    # SETUP THE PREVIOUS REGION: Update the flow rate on the simple region
    ur = c_lrefl_simple.clone(new_flowrate=flowrate)
    # Activate simple region manually (set temps equal to something)
    # Randomize coolant temps in [623.15, 633.15) K so the transfer has
    # a nontrivial state to carry over.
    k = 'coolant_int'
    ur.temp[k] = np.random.random((ur.temp[k].shape)) * 10 + 623.15
    ur._update_coolant_params(ur.temp['coolant_int'][0])
    ur._calc_duct_temp(t_gap, htc_gap, True)
    # Now activate RR based on UR and see what happened
    rr.activate(ur, t_gap, htc_gap, True)
    # Subchannel temperatures: every pin-bundle subchannel should carry
    # the simple region's single coolant temperature.
    msg = 'Subchannel coolant temperature error'
    diff = rr.temp['coolant_int'] - ur.temp['coolant_int'][0]
    assert np.all(np.abs(diff) < 1e-9), msg
    # Average coolant interior temperature
    msg = 'Average interior coolant temperature error'
    diff = rr.avg_coolant_int_temp - ur.avg_coolant_int_temp
    assert np.abs(diff) < 1e-9, msg
    # Overall avg coolant temp (since no bypass, same as above)
    msg = 'Average interior coolant temperature error'
    diff = rr.avg_coolant_temp - ur.avg_coolant_temp
    assert np.abs(diff) < 1e-9, msg
    # Average duct MW temp: for the outer duct, this is the only
    # thing that might be different.
    msg = 'Average duct midwall temperature'
    diff = rr.avg_duct_mw_temp - ur.avg_duct_mw_temp
    # Fails at tolerance 1e-9; 1e-8 K is still pretty close tho
    assert np.abs(diff) < 1e-8, msg
    # Corner duct MW and surface temperatures
    # idx selects the last duct subchannel in each sixth of the hex
    # perimeter - presumably the corner subchannels (TODO confirm
    # against the Subchannel numbering convention).
    idx = np.arange(0, 6, 1, dtype=int) + 1
    idx *= int(rr.subchannel.n_sc['duct']['total'] / 6)
    idx -= 1
    msg = 'Corner duct midwall temperatures'
    rr_corner_temps = rr.temp['duct_mw'][-1, idx]
    diff = rr_corner_temps - ur.temp['duct_mw'][-1]
    assert np.all(np.isclose(diff, 0.0)), msg
    msg = 'Corner duct surface temperatures'
    rr_corner_temps = rr.temp['duct_surf'][:, :, idx]
    diff = rr_corner_temps - ur.temp['duct_surf']
    assert np.all(np.isclose(diff, 0.0)), msg
def test_activate_from_rr(c_fuel_params, c_lrefl_simple):
    """Test activation from pin bundle model to simple bundle model
    (single adiabatic duct wall)
    - Simple model coolant temperature equals average pin bundle model
      coolant temperature
    - Duct wall temperatures are recalculated to maintain energy cons.
      Therefore, duct wall temperatures may not be maintained (see
      note below).
    Note
    ----
    Average duct temperature won't be preserved because the
    new duct temperatures are calculated based on the new
    coolant temperature (which for the simple model region
    is only 1 value). The old average duct temperature was
    based on the temperatures of only the edge and corner
    subchannels immediately adjacent to it.
    """
    # Need to activate RR
    flowrate = 1.0
    rr = conftest.make_rodded_region_fixture('conceptual_fuel',
                                             c_fuel_params[0],
                                             c_fuel_params[1],
                                             flowrate)
    rr = conftest.activate_rodded_region(rr, 650.0)
    # Muss up the temperatures so it's like it did something
    # NOTE(review): the 54-element arrays appear sized to the duct
    # subchannel count of this assembly - confirm against the fixture.
    t_gap = np.ones(54) # Making adiabatic so it doesn't matter
    htc_gap = np.ones(54) # Making adiabatic so it doesn't matter
    p_duct = np.zeros(54) # Zero power in duct
    k = 'coolant_int'
    rr.temp[k] = np.random.random((rr.temp[k].shape)) * 10 + 650.15
    rr._update_coolant_int_params(rr.avg_coolant_int_temp)
    rr._calc_duct_temp(p_duct, t_gap, htc_gap, True)
    # Update the flow rate on the simple region and activate
    ur = c_lrefl_simple.clone(new_flowrate=flowrate)
    ur.activate(rr, t_gap, htc_gap, True)
    # Average coolant interior temperature
    msg = 'Average interior coolant temperature error'
    diff = rr.avg_coolant_int_temp - ur.avg_coolant_int_temp
    assert np.abs(diff) < 1e-9, msg
    # Overall avg coolant temp (w/ no bypass, should be same as above)
    msg = 'Average interior coolant temperature error'
    diff = rr.avg_coolant_temp - ur.avg_coolant_temp
    assert np.abs(diff) < 1e-9, msg
def test_activate_to_rr_dd(c_ctrl_params, c_lrefl_simple):
    """Test activation from simple model to pin bundle model (double
    duct with adiabatic boundary on outer duct wall surface)
    - All interior subchannels in pin bundle equal simple model temp.
    - All interior duct wall temperatures in pin bundle model equal
      simple model coolant temp.
    - All bypass gap temperatures in pin bundle model equal simple
      model coolant temp.
    - Outer duct wall is recalculated - may not match exactly.
    """
    # Need to activate RR
    flowrate = 1.0
    rr = conftest.make_rodded_region_fixture('conceptual_ctrl',
                                             c_ctrl_params[0],
                                             c_ctrl_params[1],
                                             flowrate)
    t_gap = np.ones(2) # Making adiabatic so it doesn't matter
    htc_gap = np.ones(2) # Making adiabatic so it doesn't matter
    # SETUP THE PREVIOUS REGION: Update the flow rate on the simple region
    ur = c_lrefl_simple.clone(new_flowrate=flowrate)
    # Activate simple region manually (set temps equal to something)
    # Randomize coolant temps in [623.15, 633.15) K for a nontrivial state.
    k = 'coolant_int'
    ur.temp[k] = np.random.random((ur.temp[k].shape)) * 10 + 623.15
    ur._update_coolant_params(ur.temp['coolant_int'][0])
    ur._calc_duct_temp(t_gap, htc_gap, True)
    # Now activate RR based on UR and see what happened
    rr.activate(ur, t_gap, htc_gap, True)
    # Subchannel temperatures
    msg = 'Interior subchannel temperatures error'
    diff = rr.temp['coolant_int'] - ur.temp['coolant_int'][0]
    assert np.all(np.abs(diff) < 1e-9), msg
    # Interior duct wall temperatures
    msg = 'Interior duct wall temperatures error'
    diff = rr.temp['duct_mw'] - ur.temp['coolant_int'][0]
    # Note: this fails at 1e-9.
    assert np.all(np.abs(diff) < 2e-8), msg
    # Bypass gap temperatures
    msg = 'Subchannels between ducts temperature error'
    diff = rr.temp['coolant_byp'][0] - ur.temp['coolant_int'][0]
    assert np.all(np.abs(diff) < 1e-9), msg
    # Average coolant interior temperature; should be the same when
    # activated double-duct assembly because all coolant is the same
    # temp.
    msg = 'Average interior coolant temperature error'
    diff = rr.avg_coolant_int_temp - ur.avg_coolant_int_temp
    assert np.abs(diff) < 1e-9, msg
    # Overall avg coolant temp
    msg = 'Average interior coolant temperature error'
    diff = rr.avg_coolant_temp - ur.avg_coolant_temp
    assert np.abs(diff) < 1e-9, msg
    # Average duct MW temp
    msg = 'Average outer duct midwall temperature'
    diff = rr.avg_duct_mw_temp[-1] - ur.avg_duct_mw_temp
    # This fails at 1e-9, but 2e-8 degrees K is pretty close
    assert np.abs(diff) < 2e-8, msg
    msg = 'Average inner duct midwall temperature'
    diff = rr.avg_duct_mw_temp[0] - ur.avg_coolant_temp
    assert np.abs(diff) < 1e-9, msg
    # Corner duct MW and surface temperatures
    # idx: last duct subchannel in each sixth of the hex perimeter,
    # presumably the corner subchannels (TODO confirm numbering).
    idx = np.arange(0, 6, 1, dtype=int) + 1
    idx *= int(rr.subchannel.n_sc['duct']['total'] / 6)
    idx -= 1
    msg = 'Corner duct midwall temperatures'
    rr_corner_temps = rr.temp['duct_mw'][-1, idx]
    diff = rr_corner_temps - ur.temp['duct_mw'][-1]
    # This fails at 1e-9, but 2e-8 degrees K is pretty close
    assert np.all(np.abs(diff) < 2e-8), msg
    msg = 'Corner duct surface temperatures'
    rr_corner_temps = rr.temp['duct_surf'][:, :, idx]
    diff = rr_corner_temps[-1] - ur.temp['duct_surf'][-1]
    # This fails at 1e-9, but 2e-8 degrees K is pretty close
    assert np.all(np.abs(diff) < 2e-8), msg
def test_activate_from_rr_dd(c_ctrl_params, c_lrefl_simple):
    """Test activation from double-ducted pin bundle model to
    simple bundle model
    - Pin bundle overall average coolant temperature (interior and
      double-duct bypass) --> simple model coolant temperature
    - Simple model outer duct wall temperature recalculated to
      maintain energy conservation; temps may not be maintained.
    Note
    ----
    Average duct temperature won't be preserved because the
    new duct temperatures are calculated based on the new
    coolant temperature (which for the simple model region
    is only 1 value). The old average duct temperature was
    based on the temperatures of only the edge and corner
    subchannels immediately adjacent to it.
    """
    # Need to activate RR
    flowrate = 1.0
    t_gap = np.ones(54) # Making adiabatic so it doesn't matter
    htc_gap = np.ones(54) # Making adiabatic so it doesn't matter
    rr = conftest.make_rodded_region_fixture('conceptual_ctrl',
                                             c_ctrl_params[0],
                                             c_ctrl_params[1],
                                             flowrate)
    # NOTE(review): base=False appears to select the non-base (double
    # duct) activation path - confirm against conftest.
    rr = conftest.activate_rodded_region(rr, 650.0, base=False)
    # Muss up the temperatures so it's like it did something
    p_duct = np.zeros(54) # Zero power in duct
    k = 'coolant_int'
    rr.temp[k] = np.random.random((rr.temp[k].shape)) * 10 + 650.15
    rr._update_coolant_int_params(rr.avg_coolant_int_temp)
    rr._calc_duct_temp(p_duct, t_gap, htc_gap, True)
    # Update the flow rate on the simple region and activate
    ur = c_lrefl_simple.clone(new_flowrate=flowrate)
    ur.activate(rr, t_gap, htc_gap, True)
    # Average coolant interior temperature not the same when activating
    # from double-duct assembly because it's assumed that all coolant
    # will mix. However, overall avg coolant temp should be same
    msg = 'Average coolant temperature error'
    diff = rr.avg_coolant_temp - ur.avg_coolant_temp
    assert np.abs(diff) < 1e-9, msg
def test_material_update_errors(c_fuel_params, caplog):
    """Coolant material update with an unphysical temperature should exit
    and log an identifying error message."""
    mfr = 1.0
    region = conftest.make_rodded_region_fixture('conceptual_fuel',
                                                 c_fuel_params[0],
                                                 c_fuel_params[1],
                                                 mfr)
    region = conftest.activate_rodded_region(region, 650.0)
    # A negative temperature cannot be a valid coolant state; the update
    # is expected to abort the run.
    with pytest.raises(SystemExit):
        region._update_coolant(-50.0)
    expected = "Coolant material update failure; Name: conceptual_fuel"
    assert expected in caplog.text
| 40.989865
| 74
| 0.654496
| 1,749
| 12,133
| 4.399085
| 0.1498
| 0.028594
| 0.018716
| 0.017156
| 0.760333
| 0.734598
| 0.717962
| 0.697427
| 0.696127
| 0.655316
| 0
| 0.019214
| 0.245034
| 12,133
| 295
| 75
| 41.128814
| 0.820742
| 0.398582
| 0
| 0.782609
| 0
| 0
| 0.152286
| 0
| 0
| 0
| 0
| 0
| 0.137681
| 1
| 0.036232
| false
| 0
| 0.021739
| 0
| 0.057971
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ea99e88b70709ab303e6cbe4083c21e63503e33d
| 191
|
py
|
Python
|
dice.py
|
key999/oop-lab-term3
|
11c91f967029363b5c023253d7a629d097cca366
|
[
"Unlicense"
] | 2
|
2021-03-02T12:14:13.000Z
|
2021-12-12T02:32:05.000Z
|
dice.py
|
key999/oop-lab-term3
|
11c91f967029363b5c023253d7a629d097cca366
|
[
"Unlicense"
] | null | null | null |
dice.py
|
key999/oop-lab-term3
|
11c91f967029363b5c023253d7a629d097cca366
|
[
"Unlicense"
] | null | null | null |
def toss(y):
    """Return a uniformly distributed integer in [1, y] (a die toss).

    Prefers the cryptographically secure ``secrets.randbelow`` and falls
    back to ``random.randint`` on interpreters without the ``secrets``
    module (Python < 3.6).

    Raises:
        ValueError: if y < 1 (empty range, from either backend).
    """
    # Keep the try body minimal: only the import can raise ImportError.
    # The original wrapped the RNG call too, which would have masked the
    # distinction between a missing module and a bad argument path.
    try:
        from secrets import randbelow
    except ImportError:  # pre-3.6 fallback
        from random import randint
        return randint(1, y)
    return randbelow(y) + 1
| 21.222222
| 38
| 0.570681
| 22
| 191
| 4.954545
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.350785
| 191
| 8
| 39
| 23.875
| 0.862903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.428571
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
eaa0d8cfffe3f1a31dbce83b7ffe626f1fe87aa6
| 992
|
py
|
Python
|
simple_rl/abstraction/__init__.py
|
dwhit/simple_rl
|
32ba356680f2ea6c8913702fe2c7fee3ee511b3b
|
[
"Apache-2.0"
] | 10
|
2021-11-22T12:29:30.000Z
|
2022-03-28T10:23:16.000Z
|
simple_rl/abstraction/__init__.py
|
samlobel/simple_rl_mbrl
|
ed868916d06dbf68f4af23bea83b0e852e88df6e
|
[
"Apache-2.0"
] | null | null | null |
simple_rl/abstraction/__init__.py
|
samlobel/simple_rl_mbrl
|
ed868916d06dbf68f4af23bea83b0e852e88df6e
|
[
"Apache-2.0"
] | 2
|
2022-03-19T07:42:56.000Z
|
2022-03-28T10:36:33.000Z
|
# Classes.
from simple_rl.abstraction.AbstractionWrapperClass import AbstractionWrapper
from simple_rl.abstraction.AbstractValueIterationClass import AbstractValueIteration
from simple_rl.abstraction.state_abs.StateAbstractionClass import StateAbstraction
from simple_rl.abstraction.state_abs.ProbStateAbstractionClass import ProbStateAbstraction
from simple_rl.abstraction.action_abs.ActionAbstractionClass import ActionAbstraction
from simple_rl.abstraction.action_abs.InListPredicateClass import InListPredicate
from simple_rl.abstraction.action_abs.OptionClass import Option
from simple_rl.abstraction.action_abs.PolicyClass import Policy
from simple_rl.abstraction.action_abs.PolicyFromDictClass import PolicyFromDict
from simple_rl.abstraction.action_abs.PredicateClass import Predicate
# Scripts.
from simple_rl.abstraction.state_abs import sa_helpers, indicator_funcs
from simple_rl.abstraction.action_abs import aa_helpers
from simple_rl.abstraction.abstr_mdp import abstr_mdp_funcs
| 62
| 90
| 0.903226
| 118
| 992
| 7.347458
| 0.313559
| 0.149942
| 0.179931
| 0.344867
| 0.365629
| 0.365629
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05746
| 992
| 16
| 91
| 62
| 0.927273
| 0.017137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
57abc3e2ee45059b4a663df6a16aeb59ed8e234a
| 98
|
py
|
Python
|
PythonClub/PythonClub/PythonApp/apps.py
|
isadoracabral/PythonClub
|
75ca7d92d17e68d9e32523c473de13e7ef1d8628
|
[
"Apache-2.0"
] | null | null | null |
PythonClub/PythonClub/PythonApp/apps.py
|
isadoracabral/PythonClub
|
75ca7d92d17e68d9e32523c473de13e7ef1d8628
|
[
"Apache-2.0"
] | null | null | null |
PythonClub/PythonClub/PythonApp/apps.py
|
isadoracabral/PythonClub
|
75ca7d92d17e68d9e32523c473de13e7ef1d8628
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class PythonappConfig(AppConfig):
    """Django application configuration for the 'PythonApp' app."""
    name = 'PythonApp'
| 16.333333
| 34
| 0.72449
| 10
| 98
| 7.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 98
| 5
| 35
| 19.6
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
57ae5642a9cd5e851474795eab4fc5ad3d75d21e
| 387
|
py
|
Python
|
commands/slotsjackpot.py
|
Dabomstew/goldenrod
|
36c4f9e5321788779840371b09a78cc6b26b38b1
|
[
"MIT"
] | 2
|
2015-05-20T00:42:03.000Z
|
2015-05-23T04:18:42.000Z
|
commands/slotsjackpot.py
|
Dabomstew/goldenrod
|
36c4f9e5321788779840371b09a78cc6b26b38b1
|
[
"MIT"
] | null | null | null |
commands/slotsjackpot.py
|
Dabomstew/goldenrod
|
36c4f9e5321788779840371b09a78cc6b26b38b1
|
[
"MIT"
] | null | null | null |
import config
import random
import datetime, time
import math
def execute(parser, bot, user, args):
    """Report the current slots jackpot amount to the requesting user."""
    # Single-row table holding the running jackpot total.
    pool_row = bot.execQuerySelectOne("SELECT * FROM slotspool")
    message = "The current slots jackpot is %d %s." % (pool_row["slotspool"], config.currencyPlural)
    bot.addressUser(user, message)
def requiredPerm():
    """Permission level required for this command: open to everyone."""
    return "anyone"
def canUseByWhisper():
    """Whether the command may be invoked via whisper (it may)."""
    return True
| 24.1875
| 115
| 0.684755
| 43
| 387
| 6.162791
| 0.697674
| 0.090566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211886
| 387
| 15
| 116
| 25.8
| 0.868852
| 0
| 0
| 0
| 0
| 0
| 0.196766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.363636
| 0.181818
| 0.818182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
57c639a59e8e6f6fab6583cd007695587135a57d
| 144
|
py
|
Python
|
python/1619.A.py
|
arechesk/cf
|
8d2209398f0fc4a73c139f4101634a8ed8c62ff6
|
[
"BSD-3-Clause"
] | null | null | null |
python/1619.A.py
|
arechesk/cf
|
8d2209398f0fc4a73c139f4101634a8ed8c62ff6
|
[
"BSD-3-Clause"
] | null | null | null |
python/1619.A.py
|
arechesk/cf
|
8d2209398f0fc4a73c139f4101634a8ed8c62ff6
|
[
"BSD-3-Clause"
] | null | null | null |
# Codeforces 1619A: for each of t input strings, print YES when the
# first half equals the second half (odd lengths can never match
# because the halves differ in length), otherwise NO.
t = int(input())
for _ in range(t):
    s = input()
    # Compute the midpoint once with floor division; the original
    # evaluated int(len(s) / 2) twice via a float round-trip.
    half = len(s) // 2
    print("YES" if s[:half] == s[half:] else "NO")
| 18
| 44
| 0.458333
| 26
| 144
| 2.538462
| 0.576923
| 0.121212
| 0.212121
| 0.242424
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 0.270833
| 144
| 7
| 45
| 20.571429
| 0.609524
| 0
| 0
| 0
| 0
| 0
| 0.034722
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57cc30c51d33d6b2eaabc16495589e81fff6b361
| 76
|
py
|
Python
|
blue_st_sdk/features/audio/opus/__init__.py
|
cchangeur/BlueSTSDK_Python
|
e5c6e4bc5a58680bad0d867633dd9d92012b9baf
|
[
"BSD-3-Clause"
] | 43
|
2019-03-08T08:03:19.000Z
|
2022-01-20T11:51:11.000Z
|
blue_st_sdk/features/audio/opus/__init__.py
|
cchangeur/BlueSTSDK_Python
|
e5c6e4bc5a58680bad0d867633dd9d92012b9baf
|
[
"BSD-3-Clause"
] | 24
|
2019-04-01T20:50:40.000Z
|
2022-03-16T17:00:54.000Z
|
blue_st_sdk/features/audio/opus/__init__.py
|
cchangeur/BlueSTSDK_Python
|
e5c6e4bc5a58680bad0d867633dd9d92012b9baf
|
[
"BSD-3-Clause"
] | 19
|
2019-02-20T08:41:20.000Z
|
2021-11-21T11:39:50.000Z
|
__all__ = [
'feature_audio_opus', \
'feature_audio_opus_conf'
]
| 15.2
| 30
| 0.631579
| 8
| 76
| 4.875
| 0.625
| 0.615385
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 76
| 4
| 31
| 19
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0.569444
| 0.319444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57cd1515a5bf4b72f0c01bf2d150a95bc66f1b6b
| 2,618
|
py
|
Python
|
tests/fixtures/srt.py
|
rlaPHOENiX/pycaption
|
9824701ed74dfd3b0c39d0dc1fb2f00f4619c4a6
|
[
"Apache-2.0"
] | 1
|
2021-08-28T07:03:27.000Z
|
2021-08-28T07:03:27.000Z
|
tests/fixtures/srt.py
|
rlaphoenix/pycaption
|
9824701ed74dfd3b0c39d0dc1fb2f00f4619c4a6
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures/srt.py
|
rlaphoenix/pycaption
|
9824701ed74dfd3b0c39d0dc1fb2f00f4619c4a6
|
[
"Apache-2.0"
] | null | null | null |
import pytest
@pytest.fixture(scope="session")
def sample_srt():
    """SRT document with seven cues, including musical-note escapes."""
    return """1
00:00:09,209 --> 00:00:12,312
( clock ticking )
2
00:00:14,848 --> 00:00:17,000
MAN:
When we think
\u266a ...say bow, wow, \u266a
3
00:00:17,000 --> 00:00:18,752
we have this vision of Einstein
4
00:00:18,752 --> 00:00:20,887
as an old, wrinkly man
with white hair.
5
00:00:20,887 --> 00:00:26,760
MAN 2:
E equals m c-squared is
not about an old Einstein.
6
00:00:26,760 --> 00:00:32,200
MAN 2:
It's all about an eternal Einstein.
7
00:00:32,200 --> 00:00:36,200
<LAUGHING & WHOOPS!>
"""
@pytest.fixture(scope="session")
def sample_srt_ascii():
    """ASCII-only SRT variant of sample_srt, with eight cues."""
    return """1
00:00:09,209 --> 00:00:12,312
( clock ticking )
2
00:00:14,848 --> 00:00:17,000
MAN:
When we think
of "E equals m c-squared",
3
00:00:17,000 --> 00:00:18,752
we have this vision of Einstein
4
00:00:18,752 --> 00:00:20,887
as an old, wrinkly man
with white hair.
5
00:00:20,887 --> 00:00:26,760
MAN 2:
E equals m c-squared is
not about an old Einstein.
6
00:00:26,760 --> 00:00:32,200
MAN 2:
It's all about an eternal Einstein.
7
00:00:32,200 --> 00:00:34,400
<LAUGHING & WHOOPS!>
8
00:00:34,400 --> 00:00:38,400
some more text
"""
@pytest.fixture(scope="session")
def sample_srt_numeric():
    """SRT document whose final cue's caption text is purely numeric."""
    return """35
00:00:32,290 --> 00:00:32,890
TO FIND HIM. IF
36
00:00:32,990 --> 00:00:33,590
YOU HAVE ANY INFORMATION
37
00:00:33,690 --> 00:00:34,290
THAT CAN HELP, CALL THE
38
00:00:34,390 --> 00:00:35,020
STOPPERS LINE. THAT
39
00:00:35,120 --> 00:00:35,760
NUMBER IS 662-429-84-77.
40
00:00:35,860 --> 00:00:36,360
STD OUT
41
00:00:36,460 --> 00:02:11,500
3
"""
@pytest.fixture(scope="session")
def sample_srt_empty():
    """SRT document containing no cues at all (whitespace only)."""
    return """
"""
@pytest.fixture(scope="session")
def sample_srt_blank_lines():
    """SRT document where the first cue (35) carries no caption text."""
    return """35
00:00:32,290 --> 00:00:32,890
36
00:00:32,990 --> 00:00:33,590
YOU HAVE ANY INFORMATION
"""
@pytest.fixture(scope="session")
def sample_srt_trailing_blanks():
    """Two-cue SRT document; per its name, trailing blank lines follow
    the final cue (presumably - confirm against the parser tests)."""
    return """35
00:00:32,290 --> 00:00:32,890
HELP I SAY
36
00:00:32,990 --> 00:00:33,590
YOU HAVE ANY INFORMATION
"""
@pytest.fixture(scope="session")
def samples_srt_same_time():
    """SRT document in which cues 1/2 and 4/5 share identical
    start/end timestamps."""
    return """1
00:00:05,213 --> 00:00:10,552
SO NO ONE TOLD YOU
2
00:00:05,213 --> 00:00:10,552
LIFE WAS GONNA BE THIS WAY
3
00:00:10,566 --> 00:00:10,580
YOUR JOB IS A JOKE, YOUR ARE BROKE
4
00:00:10,594 --> 00:00:10,600
IT IS LIKE YOU ARE ALWAYS STUCK
5
00:00:10,594 --> 00:00:10,600
IN A SECOND GEAR
"""
@pytest.fixture(scope="session")
def sample_srt_empty_cue_output():
    """Minimal single-cue SRT document (expected writer output for an
    input containing an empty cue, per the fixture name)."""
    return """\
1
00:00:01,209 --> 00:00:02,312
abc
"""
| 14.384615
| 35
| 0.646677
| 535
| 2,618
| 3.127103
| 0.302804
| 0.150628
| 0.046623
| 0.119546
| 0.698147
| 0.688583
| 0.688583
| 0.59474
| 0.501494
| 0.501494
| 0
| 0.296434
| 0.175325
| 2,618
| 181
| 36
| 14.464088
| 0.478462
| 0
| 0
| 0.552239
| 0
| 0
| 0.770053
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| true
| 0
| 0.007463
| 0.059701
| 0.126866
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57cd875f32afc94819661b070ae0dc9db05505f9
| 231
|
py
|
Python
|
scrubby/execution/execution_step.py
|
typerandom/scrubby
|
5cccfad6c735828e6eec1452162a4e58aea917a9
|
[
"MIT"
] | 2
|
2019-05-27T22:28:21.000Z
|
2021-02-19T11:37:11.000Z
|
scrubby/execution/execution_step.py
|
typerandom/scrubby
|
5cccfad6c735828e6eec1452162a4e58aea917a9
|
[
"MIT"
] | 1
|
2021-03-25T21:27:34.000Z
|
2021-03-25T21:27:34.000Z
|
scrubby/execution/execution_step.py
|
typerandom/scrubby
|
5cccfad6c735828e6eec1452162a4e58aea917a9
|
[
"MIT"
] | null | null | null |
class ExecutionStep(object):
    """Abstract base for one step of an execution plan.

    Concrete subclasses must override both ``run`` and ``explain``.
    """

    def run(self, db):
        """Execute this step against *db*; must be overridden."""
        raise NotImplementedError('Method run(self, db) is not implemented.')

    def explain(self):
        """Describe what this step would do; must be overridden."""
        raise NotImplementedError('Method explain(self) is not implemented.')
| 38.5
| 77
| 0.701299
| 27
| 231
| 6
| 0.518519
| 0.08642
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 231
| 6
| 78
| 38.5
| 0.86631
| 0
| 0
| 0
| 0
| 0
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
57d60d1c7cbc15aeef2cb33b932fce96ef62f017
| 104
|
py
|
Python
|
tests/test_stub.py
|
PlaidWeb/Subl
|
2fa3f30aa935df61c03ce614e6ea22eab519ec6c
|
[
"MIT"
] | null | null | null |
tests/test_stub.py
|
PlaidWeb/Subl
|
2fa3f30aa935df61c03ce614e6ea22eab519ec6c
|
[
"MIT"
] | 1
|
2020-07-20T08:28:47.000Z
|
2020-07-20T08:28:47.000Z
|
tests/test_stub.py
|
PlaidWeb/Subl
|
2fa3f30aa935df61c03ce614e6ea22eab519ec6c
|
[
"MIT"
] | null | null | null |
""" stub test, remove this when there's actual testing """
def test_nothing():
    """Placeholder so the suite is never empty; intentionally does nothing."""
| 17.333333
| 58
| 0.625
| 14
| 104
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 104
| 5
| 59
| 20.8
| 0.780488
| 0.596154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
17acd44b2823516e5d98e96db26b6112f10205bc
| 965
|
py
|
Python
|
plugins/tff_backend/migrations/_007_referral_in_user_data.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | null | null | null |
plugins/tff_backend/migrations/_007_referral_in_user_data.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 178
|
2017-08-02T12:58:06.000Z
|
2017-12-20T15:01:12.000Z
|
plugins/tff_backend/migrations/_007_referral_in_user_data.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 2
|
2018-01-10T10:43:12.000Z
|
2018-03-18T10:42:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from framework.bizz.job import run_job
from plugins.tff_backend.bizz.user import store_referral_in_user_data
from plugins.tff_backend.models.user import TffProfile
def migrate(dry_run=False):
    """Fan out a job that stores referral info in each profile's user data.

    NOTE(review): *dry_run* is accepted for migration-runner compatibility
    but is not consulted here.
    """
    query_fn = _profiles_with_referrer
    worker = store_referral_in_user_data
    run_job(query_fn, [], worker, [])
def _profiles_with_referrer():
    """Return the TffProfile query feeding the migration job."""
    return TffProfile.query()
| 33.275862
| 74
| 0.767876
| 147
| 965
| 4.904762
| 0.62585
| 0.083218
| 0.036061
| 0.044383
| 0.0638
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013382
| 0.148187
| 965
| 28
| 75
| 34.464286
| 0.863747
| 0.621762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.428571
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
17b1c69a71056b42a57e141ebecede51d3855608
| 134
|
py
|
Python
|
project_e/dealers/forms.py
|
ElectricFleming/project-e
|
cf05d2a835a09555e3dba5813d635d329684a71c
|
[
"bzip2-1.0.6"
] | null | null | null |
project_e/dealers/forms.py
|
ElectricFleming/project-e
|
cf05d2a835a09555e3dba5813d635d329684a71c
|
[
"bzip2-1.0.6"
] | 1
|
2020-01-17T14:23:09.000Z
|
2020-01-17T14:23:09.000Z
|
project_e/dealers/forms.py
|
ElectricFleming/project-e
|
cf05d2a835a09555e3dba5813d635d329684a71c
|
[
"bzip2-1.0.6"
] | 1
|
2019-12-27T22:45:45.000Z
|
2019-12-27T22:45:45.000Z
|
from django import forms
class DealerCreationForm(forms.Form):
    """Form collecting the fields needed to create a dealer."""
    name = forms.CharField()
    address = forms.CharField()  # Address as free text (no extra validation)
| 22.333333
| 40
| 0.738806
| 15
| 134
| 6.6
| 0.666667
| 0.282828
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 134
| 5
| 41
| 26.8
| 0.883929
| 0.052239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
17e8df55205e733065c94ba24fbd9e7e676ba5cf
| 89
|
py
|
Python
|
GreedDice_main.py
|
tjbruce19/codewars
|
ecc22421916d88589635d8781400acbd71c53c01
|
[
"Apache-2.0"
] | null | null | null |
GreedDice_main.py
|
tjbruce19/codewars
|
ecc22421916d88589635d8781400acbd71c53c01
|
[
"Apache-2.0"
] | null | null | null |
GreedDice_main.py
|
tjbruce19/codewars
|
ecc22421916d88589635d8781400acbd71c53c01
|
[
"Apache-2.0"
] | null | null | null |
from GreedDice import score
# Script entry point: score an example Greed dice roll and print the result.
if __name__ == "__main__":
    print(score([2, 4, 6, 1, 1]))
| 22.25
| 33
| 0.640449
| 14
| 89
| 3.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069444
| 0.191011
| 89
| 4
| 33
| 22.25
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
17ef92a3a3fee906c46cb2b903ba87e92a25aa97
| 11,361
|
py
|
Python
|
sdk/python/pulumi_aws/sagemaker/_inputs.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sagemaker/_inputs.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sagemaker/_inputs.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'EndpointConfigurationProductionVariantArgs',
'ModelContainerArgs',
'ModelPrimaryContainerArgs',
'ModelVpcConfigArgs',
]
@pulumi.input_type
class EndpointConfigurationProductionVariantArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) - see the file
    # header; do not edit by hand.  @pulumi.input_type derives the input
    # property machinery from the typed getters/setters below.
    def __init__(__self__, *,
                 initial_instance_count: pulumi.Input[float],
                 instance_type: pulumi.Input[str],
                 model_name: pulumi.Input[str],
                 accelerator_type: Optional[pulumi.Input[str]] = None,
                 initial_variant_weight: Optional[pulumi.Input[float]] = None,
                 variant_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[float] initial_instance_count: Initial number of instances used for auto-scaling.
        :param pulumi.Input[str] instance_type: The type of instance to start.
        :param pulumi.Input[str] model_name: The name of the model to use.
        :param pulumi.Input[str] accelerator_type: The size of the Elastic Inference (EI) instance to use for the production variant.
        :param pulumi.Input[float] initial_variant_weight: Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0.
        :param pulumi.Input[str] variant_name: The name of the variant. If omitted, this provider will assign a random, unique name.
        """
        pulumi.set(__self__, "initial_instance_count", initial_instance_count)
        pulumi.set(__self__, "instance_type", instance_type)
        pulumi.set(__self__, "model_name", model_name)
        # Optional inputs are set only when provided so unset values stay
        # absent from the resulting input map.
        if accelerator_type is not None:
            pulumi.set(__self__, "accelerator_type", accelerator_type)
        if initial_variant_weight is not None:
            pulumi.set(__self__, "initial_variant_weight", initial_variant_weight)
        if variant_name is not None:
            pulumi.set(__self__, "variant_name", variant_name)
    @property
    @pulumi.getter(name="initialInstanceCount")
    def initial_instance_count(self) -> pulumi.Input[float]:
        """
        Initial number of instances used for auto-scaling.
        """
        return pulumi.get(self, "initial_instance_count")
    @initial_instance_count.setter
    def initial_instance_count(self, value: pulumi.Input[float]):
        pulumi.set(self, "initial_instance_count", value)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        """
        The type of instance to start.
        """
        return pulumi.get(self, "instance_type")
    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)
    @property
    @pulumi.getter(name="modelName")
    def model_name(self) -> pulumi.Input[str]:
        """
        The name of the model to use.
        """
        return pulumi.get(self, "model_name")
    @model_name.setter
    def model_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "model_name", value)
    @property
    @pulumi.getter(name="acceleratorType")
    def accelerator_type(self) -> Optional[pulumi.Input[str]]:
        """
        The size of the Elastic Inference (EI) instance to use for the production variant.
        """
        return pulumi.get(self, "accelerator_type")
    @accelerator_type.setter
    def accelerator_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "accelerator_type", value)
    @property
    @pulumi.getter(name="initialVariantWeight")
    def initial_variant_weight(self) -> Optional[pulumi.Input[float]]:
        """
        Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0.
        """
        return pulumi.get(self, "initial_variant_weight")
    @initial_variant_weight.setter
    def initial_variant_weight(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "initial_variant_weight", value)
    @property
    @pulumi.getter(name="variantName")
    def variant_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the variant. If omitted, this provider will assign a random, unique name.
        """
        return pulumi.get(self, "variant_name")
    @variant_name.setter
    def variant_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "variant_name", value)
@pulumi.input_type
class ModelContainerArgs:
    def __init__(__self__, *,
                 image: pulumi.Input[str],
                 container_hostname: Optional[pulumi.Input[str]] = None,
                 environment: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 model_data_url: Optional[pulumi.Input[str]] = None):
        """
        Input properties describing a container of a SageMaker model.

        :param pulumi.Input[str] image: The registry path where the inference code image is stored in Amazon ECR.
        :param pulumi.Input[str] container_hostname: The DNS host name for the container.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment: Environment variables for the Docker container.
               A list of key value pairs.
        :param pulumi.Input[str] model_data_url: The URL for the S3 location where model artifacts are stored.
        """
        # The image is mandatory and always recorded.
        pulumi.set(__self__, "image", image)
        # Record only the optional arguments the caller actually supplied.
        optional_args = {
            "container_hostname": container_hostname,
            "environment": environment,
            "model_data_url": model_data_url,
        }
        for arg_name, arg_value in optional_args.items():
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter
    def image(self) -> pulumi.Input[str]:
        """
        The registry path where the inference code image is stored in Amazon ECR.
        """
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: pulumi.Input[str]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="containerHostname")
    def container_hostname(self) -> Optional[pulumi.Input[str]]:
        """
        The DNS host name for the container.
        """
        return pulumi.get(self, "container_hostname")

    @container_hostname.setter
    def container_hostname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_hostname", value)

    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Environment variables for the Docker container.
        A list of key value pairs.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment", value)

    @property
    @pulumi.getter(name="modelDataUrl")
    def model_data_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL for the S3 location where model artifacts are stored.
        """
        return pulumi.get(self, "model_data_url")

    @model_data_url.setter
    def model_data_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "model_data_url", value)
@pulumi.input_type
class ModelPrimaryContainerArgs:
    def __init__(__self__, *,
                 image: pulumi.Input[str],
                 container_hostname: Optional[pulumi.Input[str]] = None,
                 environment: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 model_data_url: Optional[pulumi.Input[str]] = None):
        """
        Input properties describing the primary container of a SageMaker model.

        :param pulumi.Input[str] image: The registry path where the inference code image is stored in Amazon ECR.
        :param pulumi.Input[str] container_hostname: The DNS host name for the container.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment: Environment variables for the Docker container.
               A list of key value pairs.
        :param pulumi.Input[str] model_data_url: The URL for the S3 location where model artifacts are stored.
        """
        # The image is required; everything else is stored only when provided.
        pulumi.set(__self__, "image", image)
        for key, value in (
            ("container_hostname", container_hostname),
            ("environment", environment),
            ("model_data_url", model_data_url),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def image(self) -> pulumi.Input[str]:
        """
        The registry path where the inference code image is stored in Amazon ECR.
        """
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: pulumi.Input[str]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="containerHostname")
    def container_hostname(self) -> Optional[pulumi.Input[str]]:
        """
        The DNS host name for the container.
        """
        return pulumi.get(self, "container_hostname")

    @container_hostname.setter
    def container_hostname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_hostname", value)

    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Environment variables for the Docker container.
        A list of key value pairs.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment", value)

    @property
    @pulumi.getter(name="modelDataUrl")
    def model_data_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL for the S3 location where model artifacts are stored.
        """
        return pulumi.get(self, "model_data_url")

    @model_data_url.setter
    def model_data_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "model_data_url", value)
@pulumi.input_type
class ModelVpcConfigArgs:
    def __init__(__self__, *,
                 security_group_ids: pulumi.Input[List[pulumi.Input[str]]],
                 subnets: pulumi.Input[List[pulumi.Input[str]]]):
        """
        VPC configuration for a SageMaker model.

        :param pulumi.Input[List[pulumi.Input[str]]] security_group_ids: IDs of the security groups to associate.
        :param pulumi.Input[List[pulumi.Input[str]]] subnets: IDs of the subnets to place the model in.
        """
        pulumi.set(__self__, "security_group_ids", security_group_ids)
        pulumi.set(__self__, "subnets", subnets)

    @property
    @pulumi.getter(name="securityGroupIds")
    def security_group_ids(self) -> pulumi.Input[List[pulumi.Input[str]]]:
        """
        IDs of the security groups to associate.
        """
        return pulumi.get(self, "security_group_ids")

    @security_group_ids.setter
    def security_group_ids(self, value: pulumi.Input[List[pulumi.Input[str]]]):
        pulumi.set(self, "security_group_ids", value)

    @property
    @pulumi.getter
    def subnets(self) -> pulumi.Input[List[pulumi.Input[str]]]:
        """
        IDs of the subnets to place the model in.
        """
        return pulumi.get(self, "subnets")

    @subnets.setter
    def subnets(self, value: pulumi.Input[List[pulumi.Input[str]]]):
        pulumi.set(self, "subnets", value)
| 39.311419
| 206
| 0.66165
| 1,373
| 11,361
| 5.286963
| 0.105608
| 0.121229
| 0.104147
| 0.054553
| 0.828213
| 0.716765
| 0.678468
| 0.627084
| 0.599807
| 0.587684
| 0
| 0.001027
| 0.228941
| 11,361
| 288
| 207
| 39.447917
| 0.827626
| 0.235103
| 0
| 0.539773
| 1
| 0
| 0.111573
| 0.024345
| 0
| 0
| 0
| 0
| 0
| 1
| 0.204545
| false
| 0
| 0.028409
| 0.011364
| 0.346591
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
17fa53f9a3919169c875057b943e2dffc7e0f871
| 128
|
py
|
Python
|
chess_py/core/algebraic/__init__.py
|
Aubhro/chess_py
|
14bebc2f8c49ae25c59375cc83d0b38d8ff7281d
|
[
"MIT"
] | 14
|
2016-07-02T01:54:00.000Z
|
2020-12-16T19:26:48.000Z
|
chess_py/core/algebraic/__init__.py
|
Aubhro/chess_py
|
14bebc2f8c49ae25c59375cc83d0b38d8ff7281d
|
[
"MIT"
] | 18
|
2016-09-01T04:27:49.000Z
|
2019-03-29T04:52:03.000Z
|
chess_py/core/algebraic/__init__.py
|
Aubhro/chess_py
|
14bebc2f8c49ae25c59375cc83d0b38d8ff7281d
|
[
"MIT"
] | 7
|
2016-05-14T20:55:05.000Z
|
2020-10-30T05:42:02.000Z
|
from .location import Location, Direction
from .move import Move
__all__ = ['converter', 'Location', 'Move', 'notation_const']
| 25.6
| 61
| 0.742188
| 15
| 128
| 6
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 128
| 4
| 62
| 32
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0.273438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aa1c87f8e19ed40e3868ca3075fd0c0134127786
| 220
|
py
|
Python
|
katas/beta/builtin_product_function.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/beta/builtin_product_function.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/beta/builtin_product_function.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
from functools import reduce
from operator import mul
def product(iterable=(), start=1):
""" kata currently supports only Python 3.4.3 """
return reduce(mul, iterable, start)
# __builtins__.product = product
| 20
| 53
| 0.718182
| 29
| 220
| 5.310345
| 0.689655
| 0.168831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022099
| 0.177273
| 220
| 10
| 54
| 22
| 0.828729
| 0.336364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aa33cff724b4996ddbfd95978f87253e4f36e010
| 26
|
py
|
Python
|
dash_bootstrap_components/_version.py
|
sthagen/dash-bootstrap-components
|
d79ad7f8fdf4c26165038e6989e24f2ac17663b1
|
[
"Apache-2.0"
] | 1
|
2021-09-05T10:01:30.000Z
|
2021-09-05T10:01:30.000Z
|
dash_bootstrap_components/_version.py
|
sthagen/dash-bootstrap-components
|
d79ad7f8fdf4c26165038e6989e24f2ac17663b1
|
[
"Apache-2.0"
] | null | null | null |
dash_bootstrap_components/_version.py
|
sthagen/dash-bootstrap-components
|
d79ad7f8fdf4c26165038e6989e24f2ac17663b1
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "1.0.3-dev"
| 13
| 25
| 0.653846
| 5
| 26
| 2.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.115385
| 26
| 1
| 26
| 26
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa3d4f11d90fe7be4ffb8b2fe71431c9922f7b7c
| 164
|
py
|
Python
|
bin/django-admin.py
|
zmac12/saleor
|
ef833c22a8260e31ba70c5b676061d78fcfe961a
|
[
"CC-BY-4.0"
] | null | null | null |
bin/django-admin.py
|
zmac12/saleor
|
ef833c22a8260e31ba70c5b676061d78fcfe961a
|
[
"CC-BY-4.0"
] | null | null | null |
bin/django-admin.py
|
zmac12/saleor
|
ef833c22a8260e31ba70c5b676061d78fcfe961a
|
[
"CC-BY-4.0"
] | null | null | null |
#!/Users/zachmcquiston/ReactProjects/saleor/bin/python3.7
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 27.333333
| 57
| 0.79878
| 20
| 164
| 6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013423
| 0.091463
| 164
| 5
| 58
| 32.8
| 0.791946
| 0.341463
| 0
| 0
| 0
| 0
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a4ce9911c666642b70ee4c139bbc4ffad626a47c
| 91
|
py
|
Python
|
src/1672_maximum_wealth.py
|
soamsy/leetcode
|
091f3b33e44613fac130ff1018c8b63493798f09
|
[
"MIT"
] | null | null | null |
src/1672_maximum_wealth.py
|
soamsy/leetcode
|
091f3b33e44613fac130ff1018c8b63493798f09
|
[
"MIT"
] | null | null | null |
src/1672_maximum_wealth.py
|
soamsy/leetcode
|
091f3b33e44613fac130ff1018c8b63493798f09
|
[
"MIT"
] | null | null | null |
def maximumWealth(accounts: list[list[int]]) -> int:
return sum(max(accounts, key=sum))
| 45.5
| 52
| 0.703297
| 13
| 91
| 4.923077
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120879
| 91
| 2
| 53
| 45.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
a4d35c2f694f582dd39b062032d3b664a442bb08
| 331
|
py
|
Python
|
functions/arg_nomeados.py
|
Brunokrk/Learning-Python
|
36a3b1c4782dbb21af189760a451fd2e9c083bb6
|
[
"MIT"
] | null | null | null |
functions/arg_nomeados.py
|
Brunokrk/Learning-Python
|
36a3b1c4782dbb21af189760a451fd2e9c083bb6
|
[
"MIT"
] | null | null | null |
functions/arg_nomeados.py
|
Brunokrk/Learning-Python
|
36a3b1c4782dbb21af189760a451fd2e9c083bb6
|
[
"MIT"
] | null | null | null |
def describe_pet (animal_type,pet_name):
"""Exibe informações sobre um animal de estimação"""
print("\nI have a "+ animal_type)
print("My "+animal_type+"'s name is "+pet_name.title())
#argumentos devem ser fornecidos na posição de seus respectivos parametros
describe_pet(animal_type='hamster', pet_name='harry')
| 41.375
| 74
| 0.731118
| 48
| 331
| 4.854167
| 0.645833
| 0.171674
| 0.145923
| 0.180258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151057
| 331
| 7
| 75
| 47.285714
| 0.829181
| 0.362538
| 0
| 0
| 0
| 0
| 0.180488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
a4e1bcac6f0b35525f3426143180ef3dd92e3830
| 224
|
py
|
Python
|
test/test_project/backends.py
|
radopetrik/django-otp
|
ee8373fd9ceb02f8b53a21dd1806334c254d6200
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_project/backends.py
|
radopetrik/django-otp
|
ee8373fd9ceb02f8b53a21dd1806334c254d6200
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_project/backends.py
|
radopetrik/django-otp
|
ee8373fd9ceb02f8b53a21dd1806334c254d6200
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
class DummyBackend(object):
def authenticate(self, request):
return None
def get_user(self, user_id):
return None
| 22.4
| 82
| 0.732143
| 27
| 224
| 5.740741
| 0.777778
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205357
| 224
| 9
| 83
| 24.888889
| 0.870787
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 1
| 0.166667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
3505e029551be6102fa9337ece600ebf58ef67a1
| 335
|
py
|
Python
|
Python/DocStruct/ASAPI/__init__.py
|
appcove/DocStruct
|
16c15bb59f9aab29abb78b0aa9f2ab63c10b8da4
|
[
"Apache-2.0"
] | 1
|
2015-06-18T07:30:02.000Z
|
2015-06-18T07:30:02.000Z
|
Python/DocStruct/ASAPI/__init__.py
|
appcove/DocStruct
|
16c15bb59f9aab29abb78b0aa9f2ab63c10b8da4
|
[
"Apache-2.0"
] | null | null | null |
Python/DocStruct/ASAPI/__init__.py
|
appcove/DocStruct
|
16c15bb59f9aab29abb78b0aa9f2ab63c10b8da4
|
[
"Apache-2.0"
] | null | null | null |
# vim:fileencoding=utf-8:ts=2:sw=2:expandtab
'''
A DocStruct_Release table in the schema
Every time we do an upgrade, we'll replace the column in this table with the correct version number.
ALTER TABLE DocStruct_Release RENAME 1.0.1 TO 1.0.2 ... this will fail if you are out of sync.
'''
from .Client import Client
| 12.407407
| 100
| 0.716418
| 60
| 335
| 3.966667
| 0.75
| 0.134454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0.202985
| 335
| 26
| 101
| 12.884615
| 0.857678
| 0.838806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
35404fe562dde5ec897f9e61b58efa79fa47b9d5
| 87
|
py
|
Python
|
fedota/apps.py
|
fedota/fl-webserver
|
8015f59445529edf13589d7c9339a6e48e58640f
|
[
"MIT"
] | null | null | null |
fedota/apps.py
|
fedota/fl-webserver
|
8015f59445529edf13589d7c9339a6e48e58640f
|
[
"MIT"
] | 1
|
2022-02-10T15:02:06.000Z
|
2022-02-10T15:02:06.000Z
|
fedota/apps.py
|
fedota/fl-webserver
|
8015f59445529edf13589d7c9339a6e48e58640f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class FedotaConfig(AppConfig):
name = 'fedota'
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
354923cf5d29963d203829eb86638d49e168a079
| 80
|
py
|
Python
|
src/tcgmanager/helpers/__init__.py
|
BenjaminLSmith/TCGManager
|
f367dc33bcdd59ab89ac0066ab5f7cf330ccaa38
|
[
"Apache-2.0"
] | null | null | null |
src/tcgmanager/helpers/__init__.py
|
BenjaminLSmith/TCGManager
|
f367dc33bcdd59ab89ac0066ab5f7cf330ccaa38
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:54:41.000Z
|
2021-06-01T23:54:41.000Z
|
src/tcgmanager/helpers/__init__.py
|
BenjaminLSmith/TCGManager
|
f367dc33bcdd59ab89ac0066ab5f7cf330ccaa38
|
[
"Apache-2.0"
] | null | null | null |
from .tcgplayer import TCGPlayerBase
from .esconnection import ESConnectionBase
| 26.666667
| 42
| 0.875
| 8
| 80
| 8.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 80
| 2
| 43
| 40
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
101bdafd290a2e452f78b159e4e987041c4404ab
| 1,232
|
py
|
Python
|
data/train/python/101bdafd290a2e452f78b159e4e987041c4404aburls.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/101bdafd290a2e452f78b159e4e987041c4404aburls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/101bdafd290a2e452f78b159e4e987041c4404aburls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
#encoding:utf-8
urls = (
'/admin/?', 'controller.admin.index',
'/admin/login', 'controller.admin.login',
'/admin/logout', 'controller.admin.logout',
#--------------user -----------
#----用户信息表----
"/admin/user_list", "controller.admin.user.user_list",
"/admin/user_read/(\d+)", "controller.admin.user.user_read",
"/admin/user_edit/(\d+)", "controller.admin.user.user_edit",
"/admin/user_delete/(\d+)", "controller.admin.user.user_delete",
#--------------end user -------
#--------------area -----------
#----区域表----
"/admin/area_list", "controller.admin.area.area_list",
"/admin/area_read/(\d+)", "controller.admin.area.area_read",
"/admin/area_edit/(\d+)", "controller.admin.area.area_edit",
"/admin/area_delete/(\d+)", "controller.admin.area.area_delete",
#--------------end area -------
#--------------policy -----------
#----政策传递----
"/admin/policy_list", "controller.admin.policy.policy_list",
"/admin/policy_read/(\d+)", "controller.admin.policy.policy_read",
"/admin/policy_edit/(\d+)", "controller.admin.policy.policy_edit",
"/admin/policy_delete/(\d+)", "controller.admin.policy.policy_delete",
#--------------end policy -------
)
| 36.235294
| 73
| 0.568994
| 135
| 1,232
| 5.014815
| 0.155556
| 0.332349
| 0.212703
| 0.135894
| 0.33678
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000921
| 0.118506
| 1,232
| 33
| 74
| 37.333333
| 0.622468
| 0.189935
| 0
| 0
| 0
| 0
| 0.763158
| 0.67915
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
10410e756a285fea48e1a27560f7895332b5d0d9
| 368
|
py
|
Python
|
helloflask/guestbook/forms.py
|
walterfan/helloworld
|
8d2e6465f36500ba8e28308b17b6c1a2c2059be1
|
[
"Apache-2.0"
] | null | null | null |
helloflask/guestbook/forms.py
|
walterfan/helloworld
|
8d2e6465f36500ba8e28308b17b6c1a2c2059be1
|
[
"Apache-2.0"
] | 9
|
2020-03-04T23:40:56.000Z
|
2022-03-02T02:34:58.000Z
|
helloflask/guestbook/forms.py
|
walterfan/helloworld
|
8d2e6465f36500ba8e28308b17b6c1a2c2059be1
|
[
"Apache-2.0"
] | 5
|
2018-11-10T16:13:40.000Z
|
2021-09-18T06:09:15.000Z
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length
class MessageForm(FlaskForm):
subject = StringField('Subject', validators=[DataRequired(), Length(1, 32)])
content = TextAreaField('Content', validators=[DataRequired(), Length(1 ,4096)])
submit = SubmitField()
| 40.888889
| 84
| 0.769022
| 38
| 368
| 7.421053
| 0.526316
| 0.191489
| 0.198582
| 0.205674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024768
| 0.122283
| 368
| 9
| 85
| 40.888889
| 0.848297
| 0
| 0
| 0
| 0
| 0
| 0.03794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
52ca20dc89e31cbfbc1f79a4df4c90d1a1930775
| 167
|
py
|
Python
|
weather/forms.py
|
BianSuma/weather_prediction
|
ab4b9744fef5ceb8cfdf9d06439ae849c132736e
|
[
"Unlicense"
] | 1
|
2021-06-28T14:47:25.000Z
|
2021-06-28T14:47:25.000Z
|
weather/forms.py
|
BianSuma/weather_prediction
|
ab4b9744fef5ceb8cfdf9d06439ae849c132736e
|
[
"Unlicense"
] | null | null | null |
weather/forms.py
|
BianSuma/weather_prediction
|
ab4b9744fef5ceb8cfdf9d06439ae849c132736e
|
[
"Unlicense"
] | null | null | null |
from django import forms
from weather.models import Weathers
class WeatherForm(forms.ModelForm):
class Meta:
model = Weathers
fields = "__all__"
| 18.555556
| 35
| 0.700599
| 19
| 167
| 5.947368
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239521
| 167
| 8
| 36
| 20.875
| 0.889764
| 0
| 0
| 0
| 0
| 0
| 0.041916
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
52d7fda17aa30a801b7d21bd536e64574c9058af
| 83
|
py
|
Python
|
Lunchtime/Lproj/Lapp/apps.py
|
WildernessBear/447-COVID-lunch-proj
|
1ab440bb9025d2c2eaf6daca90bb35503265be60
|
[
"Apache-2.0"
] | null | null | null |
Lunchtime/Lproj/Lapp/apps.py
|
WildernessBear/447-COVID-lunch-proj
|
1ab440bb9025d2c2eaf6daca90bb35503265be60
|
[
"Apache-2.0"
] | null | null | null |
Lunchtime/Lproj/Lapp/apps.py
|
WildernessBear/447-COVID-lunch-proj
|
1ab440bb9025d2c2eaf6daca90bb35503265be60
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class LappConfig(AppConfig):
name = 'Lapp'
| 13.833333
| 33
| 0.73494
| 10
| 83
| 6.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 83
| 5
| 34
| 16.6
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
52d8f0426494784af131233604040efd8dd75298
| 32
|
py
|
Python
|
__init__.py
|
loop333/ha_dunehd_media_player
|
f6b50434b25213b573390c390466b642d5ce3dc7
|
[
"MIT"
] | 1
|
2019-10-24T16:07:59.000Z
|
2019-10-24T16:07:59.000Z
|
__init__.py
|
loop333/ha_dunehd_media_player
|
f6b50434b25213b573390c390466b642d5ce3dc7
|
[
"MIT"
] | null | null | null |
__init__.py
|
loop333/ha_dunehd_media_player
|
f6b50434b25213b573390c390466b642d5ce3dc7
|
[
"MIT"
] | null | null | null |
""" Custom DuneHD component """
| 16
| 31
| 0.65625
| 3
| 32
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.777778
| 0.71875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5e0c2967dd9151bf5117b4cdc60758a73c084a77
| 186
|
py
|
Python
|
test.py
|
saibotshamtul/rimp
|
1d35b85aa84ab4430ba56afdd6ef4737c214adcd
|
[
"MIT"
] | 3
|
2018-12-26T15:19:48.000Z
|
2020-10-02T00:22:09.000Z
|
test.py
|
saibotshamtul/rimp
|
1d35b85aa84ab4430ba56afdd6ef4737c214adcd
|
[
"MIT"
] | 1
|
2020-10-02T12:34:33.000Z
|
2020-10-02T12:34:33.000Z
|
test.py
|
saibotshamtul/rimp
|
1d35b85aa84ab4430ba56afdd6ef4737c214adcd
|
[
"MIT"
] | 4
|
2020-08-31T13:50:03.000Z
|
2021-09-26T14:50:16.000Z
|
from rimp import load_repl
load_repl("21natzil", "Permissions", verbose=False)
load_repl("21natzil", "discordy", force_reinstall=True)
import perms
print(dir(perms))
import discordy
| 16.909091
| 55
| 0.77957
| 25
| 186
| 5.64
| 0.64
| 0.170213
| 0.22695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023952
| 0.102151
| 186
| 10
| 56
| 18.6
| 0.820359
| 0
| 0
| 0
| 0
| 0
| 0.188172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.166667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5e1c75266ed37d4bea14363b9a2ca6a019aae48e
| 317
|
py
|
Python
|
homura/optim.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | 1
|
2020-06-30T01:55:41.000Z
|
2020-06-30T01:55:41.000Z
|
homura/optim.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | null | null | null |
homura/optim.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | null | null | null |
from functools import partial
import torch
def Adam(lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
return partial(torch.optim.Adam, **locals())
def SGD(lr=1e-1, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
return partial(torch.optim.SGD, **locals())
| 22.642857
| 48
| 0.66877
| 50
| 317
| 4.2
| 0.56
| 0.038095
| 0.114286
| 0.219048
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061069
| 0.173502
| 317
| 13
| 49
| 24.384615
| 0.740458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
5e308dda1a8fb32d560a360848b9f6a8f72925bf
| 346
|
py
|
Python
|
pic/data/__init__.py
|
hankyul2/pytorch-image-classification
|
2da942aaf806de961941d57e9daa0b9a37798530
|
[
"Apache-2.0"
] | null | null | null |
pic/data/__init__.py
|
hankyul2/pytorch-image-classification
|
2da942aaf806de961941d57e9daa0b9a37798530
|
[
"Apache-2.0"
] | null | null | null |
pic/data/__init__.py
|
hankyul2/pytorch-image-classification
|
2da942aaf806de961941d57e9daa0b9a37798530
|
[
"Apache-2.0"
] | null | null | null |
from .custom_dataset import MyImageFolder, MiTIndoor, CUB200, TinyImageNet, MyCaltech101
from .mix import MixUP, CutMix
from .sampler import RepeatAugSampler
from .cifar import MyCIFAR100
from .transforms import TrainTransform, ValTransform
from .dataloader import get_dataloader
from .dataset import get_dataset, _dataset_dict, register_dataset
| 43.25
| 88
| 0.852601
| 41
| 346
| 7.04878
| 0.560976
| 0.089965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029032
| 0.104046
| 346
| 7
| 89
| 49.428571
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5e39618dca4ad6c3f0d4c8cb20af59ab85fb0eba
| 98
|
py
|
Python
|
Funções Analíticas/Virtualenv/Lib/site-packages/setuptools/tests/textwrap.py
|
Leonardo-Maciel/PSO_Maciel
|
3939448da45716260f3ac7811afdd13be670f346
|
[
"MIT"
] | 1,744
|
2016-03-29T15:46:26.000Z
|
2022-03-31T23:51:04.000Z
|
Funções Analíticas/Virtualenv/Lib/site-packages/setuptools/tests/textwrap.py
|
Leonardo-Maciel/PSO_Maciel
|
3939448da45716260f3ac7811afdd13be670f346
|
[
"MIT"
] | 2,404
|
2016-03-29T16:24:00.000Z
|
2022-03-31T22:25:20.000Z
|
Funções Analíticas/Virtualenv/Lib/site-packages/setuptools/tests/textwrap.py
|
Leonardo-Maciel/PSO_Maciel
|
3939448da45716260f3ac7811afdd13be670f346
|
[
"MIT"
] | 1,042
|
2016-03-29T15:28:34.000Z
|
2022-03-31T16:27:27.000Z
|
import textwrap
def DALS(s):
"dedent and left-strip"
return textwrap.dedent(s).lstrip()
| 14
| 38
| 0.683673
| 14
| 98
| 4.785714
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193878
| 98
| 6
| 39
| 16.333333
| 0.848101
| 0.214286
| 0
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
eaa6206d06f3187fc7d345293454afcdc53d26c6
| 211
|
py
|
Python
|
jewels_and_stones_771.py
|
cthi/LeetCode
|
fbeb077e382ab4c4e8d8cc4707b9f1a9f33c5a89
|
[
"MIT"
] | null | null | null |
jewels_and_stones_771.py
|
cthi/LeetCode
|
fbeb077e382ab4c4e8d8cc4707b9f1a9f33c5a89
|
[
"MIT"
] | null | null | null |
jewels_and_stones_771.py
|
cthi/LeetCode
|
fbeb077e382ab4c4e8d8cc4707b9f1a9f33c5a89
|
[
"MIT"
] | null | null | null |
class Solution:
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
J = set(J)
return sum(1 for stone in S if stone in J)
| 21.1
| 50
| 0.469194
| 29
| 211
| 3.413793
| 0.655172
| 0.141414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.421801
| 211
| 9
| 51
| 23.444444
| 0.803279
| 0.175355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
eab41880287b15414e0b5e2002c88c67a15b1310
| 416
|
py
|
Python
|
books_scrapy/utils/misc.py
|
hdtls/books-scrapy
|
d8e72463df05de16fafc4207e3c292284a7c126d
|
[
"Apache-2.0"
] | null | null | null |
books_scrapy/utils/misc.py
|
hdtls/books-scrapy
|
d8e72463df05de16fafc4207e3c292284a7c126d
|
[
"Apache-2.0"
] | null | null | null |
books_scrapy/utils/misc.py
|
hdtls/books-scrapy
|
d8e72463df05de16fafc4207e3c292284a7c126d
|
[
"Apache-2.0"
] | null | null | null |
import json
import re
def eval_js_variable(label, text):
match = re.findall(r"var %s ?= ?(.*?);" % (label), text)
if not match:
return None
return json.loads(match[0])
def list_extend(lhs, rhs):
lhs = lhs or []
rhs = rhs or []
return list(set(lhs + rhs)) or None
def formatted_meta(arg):
return {"__meta__": arg}
def revert_formatted_meta(arg):
return arg["__meta__"]
| 17.333333
| 60
| 0.617788
| 61
| 416
| 3.983607
| 0.491803
| 0.08642
| 0.131687
| 0.18107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003155
| 0.237981
| 416
| 23
| 61
| 18.086957
| 0.763407
| 0
| 0
| 0
| 0
| 0
| 0.079327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0
| 0.133333
| 0.133333
| 0.733333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
eaef353a679d961958704468cd3dcdd8028176ca
| 247
|
py
|
Python
|
lnked/colleges/admin.py
|
NewsNerdsAtCoJMC/ProjectTicoTeam4
|
26d2430a8ab63b585c00ac8530bc476c15597685
|
[
"MIT"
] | null | null | null |
lnked/colleges/admin.py
|
NewsNerdsAtCoJMC/ProjectTicoTeam4
|
26d2430a8ab63b585c00ac8530bc476c15597685
|
[
"MIT"
] | null | null | null |
lnked/colleges/admin.py
|
NewsNerdsAtCoJMC/ProjectTicoTeam4
|
26d2430a8ab63b585c00ac8530bc476c15597685
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import SignificantMajors, College, Blog

# Register the app's models so they are manageable in the Django admin site.
# (The duplicate `from django.contrib import admin` line was removed.)
admin.site.register(SignificantMajors)
admin.site.register(College)
admin.site.register(Blog)
| 19
| 52
| 0.813765
| 32
| 247
| 6.28125
| 0.40625
| 0.134328
| 0.253731
| 0.228856
| 0.278607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109312
| 247
| 12
| 53
| 20.583333
| 0.913636
| 0.105263
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
dc215ffa1b85cdeb8869900176cc31d71e7ee65f
| 116
|
py
|
Python
|
userena/compat.py
|
WisChrendel/django-userena-ce
|
38aeb22900aba3945ec37369bb627c84f1a507fe
|
[
"BSD-3-Clause"
] | null | null | null |
userena/compat.py
|
WisChrendel/django-userena-ce
|
38aeb22900aba3945ec37369bb627c84f1a507fe
|
[
"BSD-3-Clause"
] | 1
|
2022-03-10T16:20:49.000Z
|
2022-03-10T16:20:49.000Z
|
userena/compat.py
|
WisChrendel/django-userena-ce
|
38aeb22900aba3945ec37369bb627c84f1a507fe
|
[
"BSD-3-Clause"
] | 2
|
2016-01-13T02:52:24.000Z
|
2019-03-15T18:37:02.000Z
|
# -*- coding: utf-8 -*-
# SiteProfileNotAvailable compatibility
class SiteProfileNotAvailable(Exception):
    """Compatibility placeholder exception.

    NOTE(review): carries no behavior of its own — kept so existing code
    that imports/catches this name keeps working.
    """
| 19.333333
| 41
| 0.75
| 9
| 116
| 9.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.137931
| 116
| 5
| 42
| 23.2
| 0.86
| 0.508621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
dc8bee9b56f7f5bd4aaf68375b1a1a0b202348e8
| 724
|
py
|
Python
|
src/sfcparse/__xml/xmlbuildmanual.py
|
aaronater10/sfconfig
|
f1ebd0a4dc5e6ec235d30b0ef1540fb65422729a
|
[
"MIT"
] | null | null | null |
src/sfcparse/__xml/xmlbuildmanual.py
|
aaronater10/sfconfig
|
f1ebd0a4dc5e6ec235d30b0ef1540fb65422729a
|
[
"MIT"
] | null | null | null |
src/sfcparse/__xml/xmlbuildmanual.py
|
aaronater10/sfconfig
|
f1ebd0a4dc5e6ec235d30b0ef1540fb65422729a
|
[
"MIT"
] | null | null | null |
# xmlbuildmanual
#########################################################################################################
# Imports
import xml.etree.ElementTree as __xml_etree

#########################################################################################################
# Build manual xml data
def xmlbuildmanual() -> __xml_etree:
    """Return the ``xml.etree.ElementTree`` module for manual xml building.

    Assign the output to a var and use the ElementTree API on it to
    build/work with xml data. This is the native xml library via etree
    shipped with the python standard library.

    For more information on the xml.etree api, visit:
    https://docs.python.org/3/library/xml.etree.elementtree.html#module-xml.etree.ElementTree
    """
    etree_module = __xml_etree
    return etree_module
| 40.222222
| 143
| 0.524862
| 75
| 724
| 4.946667
| 0.586667
| 0.150943
| 0.153639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001597
| 0.135359
| 724
| 17
| 144
| 42.588235
| 0.591054
| 0.504144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f4f4a2f56f7c778d95aef45a681962974fd48420
| 182
|
py
|
Python
|
crazy-filters/main.py
|
isabella232/je-code-crazy-filters
|
f9b654cab445bd2ad42e75fcb69a18c17241dd83
|
[
"Apache-2.0"
] | 6
|
2018-06-28T08:52:42.000Z
|
2019-04-05T20:46:25.000Z
|
crazy-filters/main.py
|
criteo/je-code-crazy-filters
|
f9b654cab445bd2ad42e75fcb69a18c17241dd83
|
[
"Apache-2.0"
] | 4
|
2018-11-20T13:40:04.000Z
|
2022-03-11T23:24:26.000Z
|
crazy-filters/main.py
|
isabella232/je-code-crazy-filters
|
f9b654cab445bd2ad42e75fcb69a18c17241dd83
|
[
"Apache-2.0"
] | 2
|
2019-01-17T14:41:33.000Z
|
2022-02-21T11:14:25.000Z
|
"""
Nothing very interesting to modify here. Go look at transforms.py instead.
"""
from ui.crazyfiltersapp import CrazyFiltersApp

# Instantiate and run the app only when executed as a script, not on import.
if __name__ == '__main__':
    CrazyFiltersApp().run()
| 20.222222
| 69
| 0.741758
| 22
| 182
| 5.772727
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 182
| 8
| 70
| 22.75
| 0.824675
| 0.379121
| 0
| 0
| 0
| 0
| 0.07619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
521d97e4653663d34f0babeb18fedc140dec34be
| 82
|
py
|
Python
|
lessnless/__init__.py
|
codyd51/lessnless
|
7279158aabd9136b49a485ed61bfc46a836e9232
|
[
"MIT"
] | null | null | null |
lessnless/__init__.py
|
codyd51/lessnless
|
7279158aabd9136b49a485ed61bfc46a836e9232
|
[
"MIT"
] | null | null | null |
lessnless/__init__.py
|
codyd51/lessnless
|
7279158aabd9136b49a485ed61bfc46a836e9232
|
[
"MIT"
] | null | null | null |
from mingus.midi import fluidsynth

# Initialize fluidsynth with this SoundFont at import time.
# NOTE(review): module-level side effect — every import of this package starts
# the synth; assumes the .sf2 file is resolvable from the working directory — TODO confirm.
fluidsynth.init('Nice-Keys-Ultimate-V2.3.sf2')
| 27.333333
| 46
| 0.804878
| 13
| 82
| 5.076923
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038961
| 0.060976
| 82
| 2
| 47
| 41
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.329268
| 0.329268
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
52264d1188d9165f456acfe10d098370f48a1446
| 271
|
py
|
Python
|
code/28 - subst.py
|
alaudo/coderdojo-python
|
c0a4c284810d8da5217398ae8964d12f27c8ecc2
|
[
"CC0-1.0"
] | null | null | null |
code/28 - subst.py
|
alaudo/coderdojo-python
|
c0a4c284810d8da5217398ae8964d12f27c8ecc2
|
[
"CC0-1.0"
] | null | null | null |
code/28 - subst.py
|
alaudo/coderdojo-python
|
c0a4c284810d8da5217398ae8964d12f27c8ecc2
|
[
"CC0-1.0"
] | null | null | null |
def subst(text):
    """Substitute "leet"-style symbols in *text* with plain latin letters.

    Characters without a table entry pass through unchanged.
    """
    table = str.maketrans(
        {'т': 't', '$': 's', '@': 'a', '!': 'i', 'Я': 'r',
         '1': 'l', 'ш': 'w', '0': 'o', 'п': 'n'}
    )
    return text.translate(table)
# Demo: decode a leet-speak sentence using the substitution table above.
print(subst("тhe$e @Яe que$т!0п$ f0Я @cт!0п, п0т $pecu1@т!0п, шh!ch !$ !d1e."))
| 45.166667
| 108
| 0.409594
| 52
| 271
| 2.134615
| 0.730769
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.236162
| 271
| 5
| 109
| 54.2
| 0.492754
| 0
| 0
| 0
| 0
| 0.25
| 0.298893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5261e5982339728e92a80d035718256946539c9b
| 882
|
py
|
Python
|
check_tools_version.py
|
mikedlr/arnparse
|
626662f8a6ffb31d6f514e2f3c3e2785d2a170ef
|
[
"MIT"
] | 30
|
2018-05-22T23:03:58.000Z
|
2022-03-19T18:43:56.000Z
|
check_tools_version.py
|
mikedlr/arnparse
|
626662f8a6ffb31d6f514e2f3c3e2785d2a170ef
|
[
"MIT"
] | 7
|
2018-05-25T18:18:12.000Z
|
2020-11-12T22:49:52.000Z
|
check_tools_version.py
|
mikedlr/arnparse
|
626662f8a6ffb31d6f514e2f3c3e2785d2a170ef
|
[
"MIT"
] | 7
|
2018-05-23T00:48:27.000Z
|
2021-02-18T11:49:45.000Z
|
from distutils.version import StrictVersion

import setuptools
import twine
import wheel

if __name__ == '__main__':
    # Ensure that all packaging tools are correctly installed and recent
    # enough. See https://stackoverflow.com/a/26737258
    # The same check was previously copy-pasted three times; it is now
    # data-driven, with identical AssertionError behavior and messages.
    _REQUIREMENTS = (
        (setuptools, '38.6.0'),
        (twine, '1.11.0'),
        (wheel, '0.31.0'),
    )
    for _module, _minimum in _REQUIREMENTS:
        # Compare parsed version objects, not strings, so '10.0' > '9.0'.
        assert StrictVersion(_module.__version__) >= StrictVersion(_minimum), (
            'Please upgrade %s. See https://stackoverflow.com/a/26737258'
            % _module.__name__
        )
| 51.882353
| 119
| 0.55102
| 78
| 882
| 5.974359
| 0.423077
| 0.06867
| 0.180258
| 0.206009
| 0.405579
| 0.405579
| 0.334764
| 0.334764
| 0
| 0
| 0
| 0.076257
| 0.345805
| 882
| 16
| 120
| 55.125
| 0.731369
| 0
| 0
| 0.272727
| 0
| 0
| 0.278562
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0
| true
| 0
| 0.363636
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
52878a824818296676a13c8bdcf3758188815fde
| 42
|
py
|
Python
|
backend/const.py
|
sshaman1101/what-about-blank
|
66df54fcc3d715aafb50ed9be347698b7a4b14d3
|
[
"BSD-3-Clause"
] | null | null | null |
backend/const.py
|
sshaman1101/what-about-blank
|
66df54fcc3d715aafb50ed9be347698b7a4b14d3
|
[
"BSD-3-Clause"
] | 1
|
2018-06-16T23:19:26.000Z
|
2018-06-17T10:48:38.000Z
|
backend/const.py
|
sshaman1101/what-about-blank
|
66df54fcc3d715aafb50ed9be347698b7a4b14d3
|
[
"BSD-3-Clause"
] | null | null | null |
# Identifier string for the GitHub pull-requests provider; presumably used
# as a registry/lookup key elsewhere in the backend — verify against callers.
GITHUB_PULLS_PROVIDER_ID = 'github_pulls'
| 21
| 41
| 0.857143
| 6
| 42
| 5.333333
| 0.666667
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
872040306dccb8a6a6460b1affa4df7a6cf79306
| 6,603
|
py
|
Python
|
migrations/versions/c2f65a03dbdb_.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | null | null | null |
migrations/versions/c2f65a03dbdb_.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | 22
|
2018-02-06T19:53:11.000Z
|
2021-04-30T20:35:01.000Z
|
migrations/versions/c2f65a03dbdb_.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: c2f65a03dbdb
Revises:
Create Date: 2018-05-03 18:18:27.470606
"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = 'c2f65a03dbdb'   # this migration's unique id
down_revision = None        # None: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema.

    Tables are created parents-first so every ForeignKeyConstraint target
    exists before it is referenced: User, Team, Collection, team_members
    (user<->team association), Request, ResponseSummary, Header,
    RequestAssertion, Response, ResponseAssertion.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('User',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=250), nullable=False),
        sa.Column('email', sa.String(length=250), nullable=True),
        sa.Column('image_url', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email')
    )
    op.create_table('Team',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=128), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['User.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('Collection',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=128), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('team_id', sa.Integer(), nullable=True),
        sa.Column('interval', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['team_id'], ['Team.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['User.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Association table linking users to the teams they belong to.
    op.create_table('team_members',
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('team_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['team_id'], ['Team.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['User.id'], )
    )
    op.create_table('Request',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('collection_id', sa.Integer(), nullable=True),
        sa.Column('method', sa.String(length=128), nullable=False),
        sa.Column('body', sa.String(length=255), nullable=True),
        sa.Column('url', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['collection_id'], ['Collection.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('ResponseSummary',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('status', sa.String(), nullable=False),
        sa.Column('failures', sa.Integer(), nullable=False),
        sa.Column('run_from', sa.String(), nullable=True),
        sa.Column('collection_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['collection_id'], ['Collection.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('Header',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('key', sa.String(), nullable=False),
        sa.Column('value', sa.String(), nullable=False),
        sa.Column('request_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['request_id'], ['Request.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('RequestAssertion',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('assertion_type', sa.String(), nullable=False),
        sa.Column('comparison', sa.String(), nullable=False),
        sa.Column('value', sa.Integer(), nullable=False),
        sa.Column('request_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['request_id'], ['Request.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('Response',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('status_code', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(), nullable=True),
        sa.Column('failures', sa.Integer(), nullable=False),
        sa.Column('response_time', sa.Integer(), nullable=True),
        sa.Column('data', sa.String(), nullable=False),
        sa.Column('headers', sa.String(), nullable=False),
        sa.Column('request_id', sa.Integer(), nullable=False),
        sa.Column('response_summary_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['request_id'], ['Request.id'], ),
        sa.ForeignKeyConstraint(['response_summary_id'], ['ResponseSummary.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('ResponseAssertion',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date_created', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('assertion_type', sa.String(), nullable=False),
        sa.Column('comparison', sa.String(), nullable=False),
        sa.Column('value', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(), nullable=True),
        sa.Column('request_assertion_id', sa.Integer(), nullable=False),
        sa.Column('response_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['request_assertion_id'], ['RequestAssertion.id'], ),
        sa.ForeignKeyConstraint(['response_id'], ['Response.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), children first."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse dependency order: tables holding foreign keys are dropped
    # before the tables they reference.
    for table_name in (
        'ResponseAssertion',
        'Response',
        'RequestAssertion',
        'Header',
        'ResponseSummary',
        'Request',
        'team_members',
        'Collection',
        'Team',
        'User',
    ):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| 44.918367
| 93
| 0.670301
| 822
| 6,603
| 5.277372
| 0.110706
| 0.123559
| 0.134855
| 0.150069
| 0.78769
| 0.764408
| 0.738589
| 0.72107
| 0.659521
| 0.62402
| 0
| 0.008894
| 0.131607
| 6,603
| 146
| 94
| 45.226027
| 0.747646
| 0.042859
| 0
| 0.476563
| 0
| 0
| 0.180169
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.015625
| false
| 0
| 0.015625
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.