hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b18ca143545f1459ed655bf933c4f2268f25c74c
| 35,953
|
py
|
Python
|
tests/functional/test_cluster_service_config_functions.py
|
dimuha-rs/adcm
|
0f49cc9ece16c1e257be12375a64b65a34b3a3ae
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_cluster_service_config_functions.py
|
dimuha-rs/adcm
|
0f49cc9ece16c1e257be12375a64b65a34b3a3ae
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_cluster_service_config_functions.py
|
dimuha-rs/adcm
|
0f49cc9ece16c1e257be12375a64b65a34b3a3ae
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import allure
import coreapi
import pytest
from adcm_pytest_plugin import utils
from adcm_pytest_plugin.docker_utils import DockerWrapper
from jsonschema import validate
# pylint: disable=E0401, W0601, W0611, W0621
from tests.library import errorcodes as err
from tests.library import steps
from tests.library.utils import (
get_random_service,
get_random_cluster_service_component,
get_action_by_name, wait_until
)
BUNDLES = os.path.join(os.path.dirname(__file__), "../stack/")
SCHEMAS = os.path.join(os.path.dirname(__file__), "schemas/")
@pytest.fixture(scope="module")
def adcm(image, request, adcm_credentials):
    """Run an ADCM container for the whole module, authenticate, and tear it down."""
    repo, tag = image
    container = DockerWrapper().run_adcm(image=repo, tag=tag, pull=False)
    container.api.auth(**adcm_credentials)
    yield container
    # Finalizer part of the fixture: stop the container after the module finishes.
    container.stop()
@pytest.fixture(scope="module")
def client(adcm):
    """Upload the test bundles once per module and return the ADCM API objects client."""
    for bundle_dir in ("cluster_bundle", "hostprovider_bundle"):
        steps.upload_bundle(adcm.api.objects, BUNDLES + bundle_dir)
    return adcm.api.objects
class TestClusterServiceConfig:
    """Positive and negative scenarios for creating cluster *service* configs.

    Almost every test needs a cluster with one random service attached and then
    attempts to create a config version for it, so that boilerplate lives in the
    private helpers below. Each test cleans up with ``steps.delete_all_data``.
    """

    @staticmethod
    def _cluster_with_service(client, step_title='Create service on the cluster'):
        """Create a cluster plus one random service on it; return (cluster, service)."""
        cluster = steps.create_cluster(client)
        with allure.step(step_title):
            service = client.cluster.service.create(
                cluster_id=cluster['id'], prototype_id=get_random_service(client)['id'])
        return cluster, service

    @staticmethod
    def _create_config(client, cluster, service, config, **extra):
        """Create a new config version for *service*; API errors propagate to the caller."""
        return client.cluster.service.config.history.create(cluster_id=cluster['id'],
                                                            service_id=service['id'],
                                                            config=config,
                                                            **extra)

    def test_create_cluster_service_config(self, client):
        """A valid config is persisted and can be read back unchanged."""
        cluster, service = self._cluster_with_service(client, step_title='Create service')
        cfg_json = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                    "zoo.cfg": {"autopurge.purgeInterval": 30, "dataDir": "/dev/0", "port": 80},
                    "required-key": "value"}
        with allure.step('Create config'):
            config = self._create_config(client, cluster, service, cfg_json,
                                         description='simple desc')
        with allure.step('Check created config'):
            expected = client.cluster.service.config.history.read(cluster_id=cluster['id'],
                                                                  service_id=service['id'],
                                                                  version=config['id'])
            assert config == expected
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_not_json(self, client):
        """A bare (non-JSON) string must be rejected."""
        cluster, service = self._cluster_with_service(client)
        with allure.step('Try to create config from non-json string'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, utils.random_string())
        with allure.step('Check error that config should not be just one string'):
            err.JSON_ERROR.equal(e, 'config should not be just one string')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_is_number(self, client):  # ADCM-86
        """A bare number must be rejected."""
        cluster, service = self._cluster_with_service(client)
        with allure.step('Try to create config from a number'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, random.randint(0, 9))
        with allure.step('Check error that config should not be just one int or float'):
            err.JSON_ERROR.equal(e, 'should not be just one int or float')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_doesnt_have_one_req_sub(self, client):
        """Missing a required subkey inside a structured parameter is an error."""
        cluster, service = self._cluster_with_service(client)
        config_wo_required_sub = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                  "zoo.cfg": {"autopurge.purgeInterval": 34},
                                  "required-key": "110"}
        with allure.step('Try to create config when config doesn\'t have required'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_wo_required_sub)
        with allure.step('Check error about no required subkey'):
            err.CONFIG_KEY_ERROR.equal(e, 'There is no required subkey')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_doesnt_have_one_req_key(self, client):
        """Missing a required top-level key is an error."""
        cluster, service = self._cluster_with_service(client)
        config_wo_required_key = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                  "zoo.cfg": {"autopurge.purgeInterval": 34,
                                              "dataDir": "/zookeeper", "port": 80}}
        with allure.step('Try to create config without required key'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_wo_required_key)
        with allure.step('Check error about no required key'):
            err.CONFIG_KEY_ERROR.equal(e, 'There is no required key')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_parameter_is_not_integer(self, client):
        """A string in an integer-typed parameter is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_w_illegal_param = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                  "zoo.cfg": {"autopurge.purgeInterval": "blabla",
                                              "dataDir": "/zookeeper", "port": 80},
                                  "required-key": "value"}
        with allure.step('Try to create config when parameter is not integer'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_w_illegal_param)
        with allure.step('Check error that parameter is not integer'):
            err.CONFIG_VALUE_ERROR.equal(e, 'should be integer')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_parameter_is_not_float(self, client):
        """A string in a float-typed parameter is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_w_illegal_param = {"ssh-key": "TItbmlzHyNTAAIbmzdHAyNTYAAA", "float-key": "blah",
                                  "zoo.cfg": {"autopurge.purgeInterval": 30,
                                              "dataDir": "/zookeeper", "port": 80},
                                  "required-key": "value"}
        with allure.step('Try to create config when param is not float'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_w_illegal_param)
        with allure.step('Check error that parameter is not float'):
            err.CONFIG_VALUE_ERROR.equal(e, 'should be float')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_parameter_is_not_string(self, client):
        """A number in a string-typed parameter is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_w_illegal_param = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTY", "float-key": 5.7,
                                  "zoo.cfg": {"autopurge.purgeInterval": 30,
                                              "dataDir": "/zookeeper", "port": 80},
                                  "required-key": 500}
        # Step text fixed: the original said "not float" (copy-paste from the test above).
        with allure.step('Try to create config when param is not string'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_w_illegal_param)
        with allure.step('Check error that parameter is not string'):
            err.CONFIG_VALUE_ERROR.equal(e, 'should be string')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_parameter_is_not_in_option_list(self, client):
        """A value outside an option parameter's allowed list is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_w_illegal_param = {"ssh-key": "TItbmlzdHAyNTYAIbmlzdHAyNTYAAA", "float-key": 4.5,
                                  "zoo.cfg": {"autopurge.purgeInterval": 30,
                                              "dataDir": "/zookeeper", "port": 500},
                                  "required-key": "value"}
        with allure.step('Try to create config has not option in a list'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_w_illegal_param)
        with allure.step('Check CONFIG_VALUE_ERROR'):
            assert e.value.error.title == '400 Bad Request'
            assert e.value.error['code'] == 'CONFIG_VALUE_ERROR'
            # Plain membership assert; the original `(...) is True` was redundant.
            assert 'not in option list' in e.value.error['desc']
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_integer_param_bigger_than_boundary(self, client):
        """An integer above its declared max boundary is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_int_bigger_boundary = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                      "zoo.cfg": {"autopurge.purgeInterval": 999,
                                                  "dataDir": "/zookeeper", "port": 80},
                                      "required-key": "value"}
        with allure.step('Try to create config when integer bigger than boundary'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_int_bigger_boundary)
        with allure.step('Check error that integer bigger than boundary'):
            err.CONFIG_VALUE_ERROR.equal(e, 'Value', 'should be less than')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_integer_param_less_than_boundary(self, client):
        """An integer below its declared min boundary is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_int_less_boundary = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                    "zoo.cfg": {"autopurge.purgeInterval": 0,
                                                "dataDir": "/zookeeper", "port": 80},
                                    "required-key": "value"}
        with allure.step('Try to create config when integer less than boundary'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_int_less_boundary)
        with allure.step('Check error that integer less than boundary'):
            err.CONFIG_VALUE_ERROR.equal(e, 'Value', 'should be more than')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_float_param_bigger_than_boundary(self, client):
        """A float above its declared max boundary is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_float_bigger_boundary = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                        "zoo.cfg": {"autopurge.purgeInterval": 24,
                                                    "dataDir": "/zookeeper", "port": 80},
                                        "float-key": 50.5, "required-key": "value"}
        with allure.step('Try to create config when float bigger than boundary'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_float_bigger_boundary)
        with allure.step('Check error that float bigger than boundary'):
            err.CONFIG_VALUE_ERROR.equal(e, 'Value', 'should be less than')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_float_param_less_than_boundary(self, client):
        """A float below its declared min boundary is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_float_less_boundary = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                      "zoo.cfg": {"autopurge.purgeInterval": 24,
                                                  "dataDir": "/zookeeper", "port": 80},
                                      "float-key": 3.3, "required-key": "value"}
        with allure.step('Try to create config when float less than boundary'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_float_less_boundary)
        with allure.step('Check error that float less than boundary'):
            err.CONFIG_VALUE_ERROR.equal(e, 'Value', 'should be more than')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_doesnt_have_all_req_param(self, client):
        """A config missing several required parameters at once is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_wo_required_param = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA"}
        with allure.step('Try to create config when config doesnt have all params'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_wo_required_param)
        with allure.step('Check error about params'):
            err.CONFIG_KEY_ERROR.equal(e)
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_have_unknown_subkey(self, client):
        """A subkey not declared in the prototype is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_w_unknown_subkey = {"ssh-key": "TItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAA",
                                   "zoo.cfg": {"autopurge.purgeInterval": 24,
                                               "dataDir": "/zookeeper", "portium": "http"},
                                   "required-key": "value"}
        with allure.step('Try to create config with unknown subkey'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_w_unknown_subkey)
        with allure.step('Check error about unknown subkey'):
            err.CONFIG_KEY_ERROR.equal(e, 'There is unknown subkey')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_have_unknown_param(self, client):
        """A top-level key not declared in the prototype is rejected."""
        cluster, service = self._cluster_with_service(client)
        config_w_unknown_param = {"name": "foo"}
        with allure.step('Try to create config with unknown parameter'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_w_unknown_param)
        with allure.step('Check error about unknown key'):
            err.CONFIG_KEY_ERROR.equal(e, 'There is unknown key')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_key_shouldnt_have_any_subkeys(self, client):
        """Subkeys under a flat parameter are rejected."""
        cluster, service = self._cluster_with_service(client)
        config_shouldnt_have_subkeys = {"ssh-key": {"key": "value"},
                                        "zoo.cfg": {"autopurge.purgeInterval": "24",
                                                    "dataDir": "/zookeeper", "port": "http"}}
        with allure.step('Try to create config where param shouldn\'t have any subkeys'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config_shouldnt_have_subkeys)
        with allure.step('Check error about unknown subkey'):
            err.CONFIG_KEY_ERROR.equal(e, 'input config should not have any subkeys')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_try_to_put_dictionary_in_flat_key(self, client):
        """A dict value in a flat (scalar) parameter is rejected."""
        cluster, service = self._cluster_with_service(client)
        config = {"ssh-key": "as32fKj14fT88",
                  "zoo.cfg": {"autopurge.purgeInterval": 24, "dataDir": "/zookeeper",
                              "port": {"foo": "bar"}}, "required-key": "value"}
        with allure.step('Try to create config where in flat param we put a dictionary'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                self._create_config(client, cluster, service, config)
        with allure.step('Check error about flat param'):
            err.CONFIG_VALUE_ERROR.equal(e, 'should be flat')
        steps.delete_all_data(client)

    def test_when_delete_host_all_children_cannot_be_deleted(self, client):
        """A host bound into a hostcomponent map cannot be removed from the cluster."""
        # NOTE(review): this test should fail if the randomly picked service has no components.
        cluster = steps.create_cluster(client)
        with allure.step('Create provider'):
            provider = client.provider.create(prototype_id=client.stack.provider.list()[0]['id'],
                                              name=utils.random_string())
        with allure.step('Create host'):
            host = client.host.create(prototype_id=client.stack.host.list()[0]['id'],
                                      provider_id=provider['id'],
                                      fqdn=utils.random_string())
        steps.add_host_to_cluster(client, host, cluster)
        with allure.step('Create random service'):
            service = steps.create_random_service(client, cluster['id'])
        with allure.step('Create random service component'):
            component = get_random_cluster_service_component(client, cluster, service)
        with allure.step('Create hostcomponent'):
            steps.create_hostcomponent_in_cluster(client, cluster, host, service, component)
        with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
            client.cluster.host.delete(host_id=host['id'], cluster_id=cluster['id'])
        with allure.step('Check host conflict'):
            err.HOST_CONFLICT.equal(e)

    def test_should_throws_exception_when_havent_previous_config(self, client):
        """Requesting the previous config version before one exists raises an error."""
        cluster = steps.create_cluster(client)
        service = steps.create_random_service(client, cluster['id'])
        with allure.step('Try to get previous version of the service config'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.service.config.previous.list(cluster_id=cluster['id'],
                                                            service_id=service['id'])
        with allure.step('Check error that config version doesn\'t exist'):
            err.CONFIG_NOT_FOUND.equal(e, 'config version doesn\'t exist')
        steps.delete_all_data(client)
class TestClusterServiceConfigHistory:
    """Checks for the service config-history endpoint."""

    def test_config_history_url_must_point_to_the_service_config(self, client):
        """Every history entry URL must reference its cluster/service path."""
        cluster = steps.create_cluster(client)
        service = steps.create_random_service(client, cluster['id'])
        config_str = {"ssh-key": "eulav", "integer-key": 23, "required-key": "10",
                      "float-key": 38.5, "zoo.cfg": {"autopurge.purgeInterval": 40,
                                                     "dataDir": "/opt/data", "port": 80}}
        # Create a random number of config versions (for-range replaces the
        # original manual i/while counter; note randint(0, 10) may yield zero).
        for _ in range(random.randint(0, 10)):
            client.cluster.service.config.history.create(cluster_id=cluster['id'],
                                                         service_id=service['id'],
                                                         description=utils.random_string(),
                                                         config=config_str)
        history = client.cluster.service.config.history.list(cluster_id=cluster['id'],
                                                             service_id=service['id'])
        with allure.step('Check config history'):
            for conf in history:
                # Plain membership assert; the original `(...) is True` was redundant.
                assert 'cluster/{0}/service/'.format(cluster['id']) in conf['url']
        steps.delete_all_data(client)

    def test_get_config_from_nonexistant_cluster_service(self, client):
        """Asking for a config of a service id that was never created must 404."""
        cluster = steps.create_cluster(client)
        with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
            client.cluster.service.config.list(cluster_id=cluster['id'],
                                               service_id=random.randint(100, 500))
        with allure.step('Check error that service doesn\'t exist'):
            err.SERVICE_NOT_FOUND.equal(e, "service doesn\'t exist")
        steps.delete_all_data(client)
class TestClusterConfig:
    """Positive and negative scenarios for cluster-level configuration."""

    def test_config_history_url_must_point_to_the_cluster_config(self, client):
        """Every history entry URL must reference its cluster config path."""
        cluster = steps.create_cluster(client)
        config_str = {"required": 10, "int_key": 50, "bool": False, "str-key": "eulav"}
        with allure.step('Create config history'):
            # for-range replaces the original manual i/while counter.
            for _ in range(random.randint(0, 10)):
                client.cluster.config.history.create(cluster_id=cluster['id'],
                                                     description=utils.random_string(),
                                                     config=config_str)
        history = client.cluster.config.history.list(cluster_id=cluster['id'])
        with allure.step('Check config history'):
            for conf in history:
                # Plain membership assert; the original `(...) is True` was redundant.
                assert 'api/v1/cluster/{0}/config/'.format(cluster['id']) in conf['url']
        steps.delete_all_data(client)

    def test_read_default_cluster_config(self, client):
        """The default cluster config must validate against the stored JSON schema."""
        cluster = steps.create_cluster(client)
        config = client.cluster.config.current.list(cluster_id=cluster['id'])
        # Original left config_json unbound (NameError) when the API returned an
        # empty config; default to an empty dict so the schema check still runs.
        config_json = utils.ordered_dict_to_dict(config) if config else {}
        with allure.step('Load schema'):
            # Context manager closes the file (original leaked the handle);
            # os.path.join avoids the double slash from SCHEMAS + '/...'.
            with open(os.path.join(SCHEMAS, 'config_item_schema.json')) as schema_file:
                schema = json.load(schema_file)
        with allure.step('Check schema'):
            # jsonschema.validate returns None on success and raises otherwise.
            assert validate(config_json, schema) is None
        steps.delete_all_data(client)

    def test_create_new_config_version_with_one_req_parameter(self, client):
        """A config containing only the required parameter is accepted and readable."""
        cluster = steps.create_cluster(client)
        cfg = {"required": random.randint(0, 9)}
        with allure.step('Create new config'):
            new_config = client.cluster.config.history.create(cluster_id=cluster['id'], config=cfg)
        with allure.step('Create config history'):
            expected = client.cluster.config.history.read(cluster_id=cluster['id'],
                                                          version=new_config['id'])
        with allure.step('Check new config'):
            assert new_config == expected
        steps.delete_all_data(client)

    def test_create_new_config_version_with_other_parameters(self, client):
        """A config with required plus optional parameters is accepted and readable."""
        cluster = steps.create_cluster(client)
        cfg = {"required": 99, "str-key": utils.random_string()}
        with allure.step('Create new config'):
            new_config = client.cluster.config.history.create(cluster_id=cluster['id'], config=cfg)
        with allure.step('Create config history'):
            expected = client.cluster.config.history.read(cluster_id=cluster['id'],
                                                          version=new_config['id'])
        with allure.step('Check new config'):
            assert new_config == expected
        steps.delete_all_data(client)

    def test_shouldnt_create_cluster_config_when_config_not_json(self, client):
        """A bare (non-JSON) string must be rejected."""
        cluster = steps.create_cluster(client)
        with allure.step('Try to create the cluster config from non-json string'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.history.create(cluster_id=cluster['id'],
                                                     config=utils.random_string())
        with allure.step('Check that config should not be just one string'):
            err.JSON_ERROR.equal(e, 'config should not be just one string')
        steps.delete_all_data(client)

    def test_shouldnt_create_service_config_when_config_is_number(self, client):  # ADCM-86
        """A bare number must be rejected."""
        cluster = steps.create_cluster(client)
        with allure.step('Try to create config from number'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.history.create(cluster_id=cluster['id'],
                                                     config=random.randint(0, 9))
        with allure.step('Check that config should not be just one int or float'):
            err.JSON_ERROR.equal(e, 'config should not be just one int or float')
        steps.delete_all_data(client)

    def test_shouldnt_create_config_when_config_doesnt_have_required_key(self, client):
        """Missing the required key is an error."""
        cluster = steps.create_cluster(client)
        config_wo_required_key = {"str-key": "value"}
        with allure.step('Try to create config wo required key'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.history.create(cluster_id=cluster['id'],
                                                     config=config_wo_required_key)
        with allure.step('Check that no required key'):
            err.CONFIG_KEY_ERROR.equal(e, 'There is no required key')
        steps.delete_all_data(client)

    def test_shouldnt_create_config_when_key_is_not_in_option_list(self, client):
        """A value outside an option parameter's allowed list is rejected."""
        cluster = steps.create_cluster(client)
        config_key_not_in_list = {"option": "bluh", "required": 10}
        with allure.step('Try to create config has not option in a list'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.history.create(cluster_id=cluster['id'],
                                                     config=config_key_not_in_list)
        with allure.step('Check that not in option list'):
            err.CONFIG_VALUE_ERROR.equal(e, 'Value', 'not in option list')
        steps.delete_all_data(client)

    def test_shouldnt_create_config_with_unknown_key(self, client):
        """A key that is not defined in the prototype is rejected."""
        cluster = steps.create_cluster(client)
        config = {"new_key": "value"}
        with allure.step('Try to create config with unknown key'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.history.create(cluster_id=cluster['id'], config=config)
        with allure.step('Check that unknown key'):
            err.CONFIG_KEY_ERROR.equal(e, 'There is unknown key')
        steps.delete_all_data(client)

    def test_shouldnt_create_config_when_try_to_put_map_in_option(self, client):
        """A key:value map in a parameter with the option datatype is rejected."""
        cluster = steps.create_cluster(client)
        config_with_deep_depth = {"str-key": "{1bbb}", "option": {"http": "string"},
                                  "sub": {"sub1": "f"}}
        with allure.step('Try to create config with map in flat key'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.history.create(cluster_id=cluster['id'],
                                                     config=config_with_deep_depth)
        with allure.step('Check that input config should not have any subkeys'):
            err.CONFIG_KEY_ERROR.equal(e, 'input config should not have any subkeys')
        steps.delete_all_data(client)

    def test_get_nonexistant_cluster_config(self, client):
        """Requesting the config of a cluster id that was never created must 404."""
        with allure.step('Get cluster config from non existant cluster'):
            with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
                client.cluster.config.list(cluster_id=random.randint(100, 500))
        with allure.step('Check that cluster doesn\'t exist'):
            err.CLUSTER_NOT_FOUND.equal(e, 'cluster doesn\'t exist')
        steps.delete_all_data(client)

    # (datatype, parameter name) pairs expected to be exposed by the stack prototype.
    check_types = [
        ('file', 'input_file'),
        ('text', 'textarea'),
        ('password', 'password_phrase'),
    ]

    @pytest.mark.parametrize(('datatype', 'name'), check_types)
    def test_verify_that_supported_type_is(self, client, datatype, name):
        """Each named prototype parameter must carry its expected datatype."""
        with allure.step('Create stack'):
            stack = client.stack.cluster.read(prototype_id=client.stack.cluster.list()[0]['id'])
        with allure.step('Check stack config'):
            for item in stack['config']:
                if item['name'] == name:
                    assert item['type'] == datatype
        steps.delete_all_data(client)

    def test_check_that_file_field_put_correct_data_in_file_inside_docker(self, client):
        """Data written into a 'file' field must be materialised inside the container."""
        cluster = steps.create_cluster(client)
        test_data = "lorem ipsum"
        with allure.step('Create config data'):
            config_data = utils.ordered_dict_to_dict(
                client.cluster.config.current.list(cluster_id=cluster['id'])['config'])
            config_data['input_file'] = test_data
            config_data['required'] = random.randint(0, 99)
        with allure.step('Create config history'):
            client.cluster.config.history.create(cluster_id=cluster['id'], config=config_data)
        with allure.step('Check file type'):
            # The bundle action 'check-file-type' verifies the file contents in-container.
            action = client.cluster.action.run.create(
                action_id=get_action_by_name(client, cluster, 'check-file-type')['id'],
                cluster_id=cluster['id']
            )
            wait_until(client, action)
        with allure.step('Check that state is success'):
            expected = client.task.read(task_id=action['id'])
            assert expected['status'] == 'success'
| 60.834179
| 99
| 0.596724
| 4,024
| 35,953
| 5.126243
| 0.079523
| 0.053229
| 0.063797
| 0.050611
| 0.811179
| 0.781608
| 0.766676
| 0.732403
| 0.68693
| 0.637871
| 0
| 0.006144
| 0.302812
| 35,953
| 590
| 100
| 60.937288
| 0.816804
| 0.022668
| 0
| 0.522727
| 0
| 0.007576
| 0.169766
| 0.023121
| 0
| 0
| 0
| 0
| 0.020833
| 1
| 0.070076
| false
| 0.001894
| 0.022727
| 0
| 0.102273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
49403d84368e33b30b78a18303c4c43429e4a38d
| 152
|
py
|
Python
|
pyrite/keybindings/__init__.py
|
wkeeling/pyrite
|
01c8d283a5e653775d66cb38d3b0e46f006e31b6
|
[
"MIT"
] | null | null | null |
pyrite/keybindings/__init__.py
|
wkeeling/pyrite
|
01c8d283a5e653775d66cb38d3b0e46f006e31b6
|
[
"MIT"
] | 29
|
2020-11-01T13:53:25.000Z
|
2020-11-05T13:39:19.000Z
|
pyrite/keybindings/__init__.py
|
wkeeling/pyrite
|
01c8d283a5e653775d66cb38d3b0e46f006e31b6
|
[
"MIT"
] | null | null | null |
import platform
if platform.system() == 'Linux':
from .linux import *
else:
raise RuntimeError(f"Unsupported platform: '{platform.system()}'")
| 21.714286
| 70
| 0.690789
| 17
| 152
| 6.176471
| 0.647059
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 152
| 6
| 71
| 25.333333
| 0.820313
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0.138158
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
494f32a19b39e30925bfb3e07ff2fcdf72424a80
| 19
|
py
|
Python
|
datalab_utils/__about__.py
|
pmerienne/datalab-utils
|
f6e6607a8737327554c1c1b67a1c0122fd70e33a
|
[
"BSD-3-Clause"
] | null | null | null |
datalab_utils/__about__.py
|
pmerienne/datalab-utils
|
f6e6607a8737327554c1c1b67a1c0122fd70e33a
|
[
"BSD-3-Clause"
] | null | null | null |
datalab_utils/__about__.py
|
pmerienne/datalab-utils
|
f6e6607a8737327554c1c1b67a1c0122fd70e33a
|
[
"BSD-3-Clause"
] | null | null | null |
version = '0.0.23'
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
499e40eca84f776c4d11f1b58a557516261072e3
| 345
|
py
|
Python
|
app/test/test_resultsets.py
|
piculator/piculator_gamma
|
3dc0f71dda663c05d606689e1167359a69bf11fd
|
[
"BSD-3-Clause"
] | 414
|
2015-01-17T21:40:32.000Z
|
2022-03-31T11:42:37.000Z
|
app/test/test_resultsets.py
|
piculator/piculator_gamma
|
3dc0f71dda663c05d606689e1167359a69bf11fd
|
[
"BSD-3-Clause"
] | 112
|
2015-01-18T20:06:13.000Z
|
2022-02-12T23:15:11.000Z
|
app/test/test_resultsets.py
|
piculator/piculator_gamma
|
3dc0f71dda663c05d606689e1167359a69bf11fd
|
[
"BSD-3-Clause"
] | 94
|
2015-01-08T13:10:24.000Z
|
2022-02-22T03:16:34.000Z
|
from __future__ import absolute_import
from app.logic import resultsets
from sympy import sympify, I, sqrt
def test_predicates():
assert not resultsets.is_approximatable_constant(sqrt(2))
assert not resultsets.is_approximatable_constant(sympify('2'))
assert resultsets.is_complex(2 * I + 3)
assert not resultsets.is_complex(3)
| 31.363636
| 66
| 0.782609
| 48
| 345
| 5.375
| 0.458333
| 0.186047
| 0.22093
| 0.244186
| 0.333333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0.016892
| 0.142029
| 345
| 10
| 67
| 34.5
| 0.85473
| 0
| 0
| 0
| 0
| 0
| 0.002899
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.125
| true
| 0
| 0.375
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b8d1c107211ae47d9aec5f8fef7c2c54d43fa57f
| 118
|
py
|
Python
|
queue/exceptions.py
|
senavs/AbstractDataTypes
|
83afdb3b5c7e4a303cf2b0f0cf65152e43e2778d
|
[
"MIT"
] | 1
|
2021-04-14T10:03:59.000Z
|
2021-04-14T10:03:59.000Z
|
queue/exceptions.py
|
senavs/AbstractDataTypes
|
83afdb3b5c7e4a303cf2b0f0cf65152e43e2778d
|
[
"MIT"
] | 2
|
2019-07-02T14:59:12.000Z
|
2019-07-07T14:37:41.000Z
|
queue/exceptions.py
|
senavs/AbstractDataTypes
|
83afdb3b5c7e4a303cf2b0f0cf65152e43e2778d
|
[
"MIT"
] | null | null | null |
class QueueException(Exception):
pass
class Empty(QueueException):
pass
class Full(QueueException):
pass
| 14.75
| 32
| 0.737288
| 12
| 118
| 7.25
| 0.5
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186441
| 118
| 8
| 33
| 14.75
| 0.90625
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b8d8f311ed2bc6983e8a3557ab226c68ca7b46da
| 57
|
py
|
Python
|
wordfinds/__init__.py
|
GrandMoff100/WordFinds
|
4b56532f399178e5f2b18b246084644061c5bfc2
|
[
"MIT"
] | 2
|
2021-05-22T19:19:56.000Z
|
2021-08-16T11:34:11.000Z
|
wordfinds/__init__.py
|
GrandMoff100/WordFinds
|
4b56532f399178e5f2b18b246084644061c5bfc2
|
[
"MIT"
] | null | null | null |
wordfinds/__init__.py
|
GrandMoff100/WordFinds
|
4b56532f399178e5f2b18b246084644061c5bfc2
|
[
"MIT"
] | 1
|
2021-11-09T13:55:43.000Z
|
2021-11-09T13:55:43.000Z
|
from .main import WordFind
from .wordbank import WordBank
| 28.5
| 30
| 0.842105
| 8
| 57
| 6
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 57
| 2
| 30
| 28.5
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b8e2a9d44a1dea56fdff0e8c68efe167c5935674
| 66
|
py
|
Python
|
gymgif/__init__.py
|
tianhaoz95/gymgif
|
aa0583c93953df43756d5962f90331ca5e74c4c4
|
[
"MIT"
] | null | null | null |
gymgif/__init__.py
|
tianhaoz95/gymgif
|
aa0583c93953df43756d5962f90331ca5e74c4c4
|
[
"MIT"
] | null | null | null |
gymgif/__init__.py
|
tianhaoz95/gymgif
|
aa0583c93953df43756d5962f90331ca5e74c4c4
|
[
"MIT"
] | null | null | null |
from .generate_gif import generate_gif
__all__ = ['generate_gif']
| 22
| 38
| 0.80303
| 9
| 66
| 5.111111
| 0.555556
| 0.717391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106061
| 66
| 3
| 39
| 22
| 0.779661
| 0
| 0
| 0
| 1
| 0
| 0.179104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b8ef897a87928d157e4d94d754f18420a62da138
| 16
|
py
|
Python
|
tests/scripts/script4.py
|
SomberNight/distlib
|
d0e3f49df5d1aeb9daeaaabf0391c9e13e4a6562
|
[
"PSF-2.0"
] | 17
|
2021-12-10T12:59:38.000Z
|
2022-02-11T11:30:13.000Z
|
tests/scripts/script4.py
|
SomberNight/distlib
|
d0e3f49df5d1aeb9daeaaabf0391c9e13e4a6562
|
[
"PSF-2.0"
] | 11
|
2021-12-11T07:54:47.000Z
|
2022-03-30T18:43:38.000Z
|
tests/scripts/script4.py
|
SomberNight/distlib
|
d0e3f49df5d1aeb9daeaaabf0391c9e13e4a6562
|
[
"PSF-2.0"
] | 9
|
2015-04-24T14:26:02.000Z
|
2021-06-03T21:31:57.000Z
|
#!pythonw
pass
| 4
| 9
| 0.6875
| 2
| 16
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 16
| 3
| 10
| 5.333333
| 0.846154
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
773a24c748c53cb9a6cfd7a44a81c6fe79900d2e
| 24,538
|
py
|
Python
|
heat/tests/test_os_database.py
|
redhat-openstack/heat
|
6b9be0a868b857e942c1cc90594d0f3a0d0725d0
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/test_os_database.py
|
redhat-openstack/heat
|
6b9be0a868b857e942c1cc90594d0f3a0d0725d0
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/test_os_database.py
|
redhat-openstack/heat
|
6b9be0a868b857e942c1cc90594d0f3a0d0725d0
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient.openstack.common.apiclient import exceptions as troveexc
import uuid
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import trove
from heat.engine import parser
from heat.engine.resources import os_database
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
db_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "MySQL instance running on openstack DBaaS cloud",
"Resources" : {
"MySqlCloudDB": {
"Type": "OS::Trove::Instance",
"Properties" : {
"name" : "test",
"flavor" : "1GB",
"size" : 30,
"users" : [{"name": "testuser", "password": "pass", "databases":
["validdb"]}],
"databases" : [{"name": "validdb"}],
"datastore_type": "SomeDStype",
"datastore_version": "MariaDB-5.5"
}
}
}
}
'''
db_template_with_nics = '''
heat_template_version: 2013-05-23
description: MySQL instance running on openstack DBaaS cloud
resources:
MySqlCloudDB:
type: OS::Trove::Instance
properties:
name: test
flavor: 1GB
size: 30
networks:
- port: someportname
fixed_ip: 1.2.3.4
'''
class FakeDBInstance(object):
def __init__(self):
self.id = 12345
self.hostname = "testhost"
self.links = [
{"href": "https://adga23dd432a.rackspacecloud.com/132345245",
"rel": "self"}]
self.resource_id = 12345
self.status = 'ACTIVE'
def get(self):
pass
def delete(self):
pass
class FakeFlavor(object):
def __init__(self, id, name):
self.id = id
self.name = name
class FakeVersion(object):
def __init__(self, name="MariaDB-5.5"):
self.name = name
class OSDBInstanceTest(HeatTestCase):
def setUp(self):
super(OSDBInstanceTest, self).setUp()
self.stub_keystoneclient()
self.fc = self.m.CreateMockAnything()
self.nova = self.m.CreateMockAnything()
self.m.StubOutWithMock(trove.TroveClientPlugin, '_create')
def _setup_test_clouddbinstance(self, name, t):
stack_name = '%s_stack' % name
template = parser.Template(t)
stack = parser.Stack(utils.dummy_context(),
stack_name,
template,
stack_id=str(uuid.uuid4()))
instance = os_database.OSDBInstance(
'%s_name' % name,
template.resource_definitions(stack)['MySqlCloudDB'],
stack)
return instance
def _stubout_common_create(self):
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'flavors')
self.m.StubOutWithMock(trove.TroveClientPlugin, 'get_flavor_id')
trove.TroveClientPlugin.get_flavor_id('1GB').AndReturn(1)
self.m.StubOutWithMock(self.fc, 'instances')
self.m.StubOutWithMock(self.fc.instances, 'create')
def _stubout_create(self, instance, fake_dbinstance):
self._stubout_common_create()
users = [{"name": "testuser", "password": "pass", "host": "%",
"databases": [{"name": "validdb"}]}]
databases = [{"collate": "utf8_general_ci",
"character_set": "utf8",
"name": "validdb"}]
self.fc.instances.create('test', 1, volume={'size': 30},
databases=databases,
users=users,
restorePoint=None,
availability_zone=None,
datastore="SomeDStype",
datastore_version="MariaDB-5.5",
nics=[]
).AndReturn(fake_dbinstance)
self.m.ReplayAll()
def _stubout_validate(self, instance, neutron=None):
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'datastore_versions')
self.m.StubOutWithMock(self.fc.datastore_versions, 'list')
self.fc.datastore_versions.list(instance.properties['datastore_type']
).AndReturn([FakeVersion()])
if neutron is not None:
self.m.StubOutWithMock(instance, 'is_using_neutron')
instance.is_using_neutron().AndReturn(bool(neutron))
self.m.ReplayAll()
def test_osdatabase_create(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_create', t)
self._stubout_create(instance, fake_dbinstance)
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_osdatabase_restore_point(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['restore_point'] = "1234"
instance = self._setup_test_clouddbinstance('dbinstance_create', t)
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'flavors')
self.m.StubOutWithMock(self.fc.flavors, "list")
self.fc.flavors.list().AndReturn([FakeFlavor(1, '1GB'),
FakeFlavor(2, '2GB')])
self.m.StubOutWithMock(self.fc, 'instances')
self.m.StubOutWithMock(self.fc.instances, 'create')
users = [{"name": "testuser", "password": "pass", "host": "%",
"databases": [{"name": "validdb"}]}]
databases = [{"collate": "utf8_general_ci",
"character_set": "utf8",
"name": "validdb"}]
self.fc.instances.create('test', 1, volume={'size': 30},
databases=databases,
users=users,
restorePoint={"backupRef": "1234"},
availability_zone=None,
datastore="SomeDStype",
datastore_version="MariaDB-5.5",
nics=[]
).AndReturn(fake_dbinstance)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_osdatabase_create_overlimit(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_create', t)
self._stubout_create(instance, fake_dbinstance)
# Simulate an OverLimit exception
self.m.StubOutWithMock(fake_dbinstance, 'get')
fake_dbinstance.get().AndRaise(
troveexc.RequestEntityTooLarge)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_osdatabase_create_fails(self):
fake_dbinstance = FakeDBInstance()
fake_dbinstance.status = 'ERROR'
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_create', t)
self._stubout_create(instance, fake_dbinstance)
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(instance.create))
self.m.VerifyAll()
def test_osdatabase_delete(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_del', t)
self._stubout_create(instance, fake_dbinstance)
scheduler.TaskRunner(instance.create)()
self.m.StubOutWithMock(self.fc.instances, 'get')
self.fc.instances.get(12345).AndReturn(fake_dbinstance)
self.m.StubOutWithMock(fake_dbinstance, 'delete')
fake_dbinstance.delete().AndReturn(None)
self.m.StubOutWithMock(fake_dbinstance, 'get')
fake_dbinstance.get().AndReturn(None)
fake_dbinstance.get().AndRaise(troveexc.NotFound(404))
self.m.ReplayAll()
scheduler.TaskRunner(instance.delete)()
self.m.VerifyAll()
def test_osdatabase_delete_overlimit(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_del', t)
self._stubout_create(instance, fake_dbinstance)
scheduler.TaskRunner(instance.create)()
self.m.StubOutWithMock(self.fc.instances, 'get')
self.fc.instances.get(12345).AndReturn(fake_dbinstance)
self.m.StubOutWithMock(fake_dbinstance, 'delete')
fake_dbinstance.delete().AndReturn(None)
# Simulate an OverLimit exception
self.m.StubOutWithMock(fake_dbinstance, 'get')
fake_dbinstance.get().AndRaise(
troveexc.RequestEntityTooLarge)
fake_dbinstance.get().AndReturn(None)
fake_dbinstance.get().AndRaise(troveexc.NotFound(404))
self.m.ReplayAll()
scheduler.TaskRunner(instance.delete)()
self.m.VerifyAll()
def test_osdatabase_delete_resource_none(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_del', t)
self._stubout_create(instance, fake_dbinstance)
scheduler.TaskRunner(instance.create)()
instance.resource_id = None
self.m.ReplayAll()
scheduler.TaskRunner(instance.delete)()
self.assertIsNone(instance.resource_id)
self.m.VerifyAll()
def test_osdatabase_resource_not_found(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_del', t)
self._stubout_create(instance, fake_dbinstance)
scheduler.TaskRunner(instance.create)()
self.m.StubOutWithMock(self.fc.instances, 'get')
self.fc.instances.get(12345).AndRaise(
troveexc.NotFound(404))
self.m.ReplayAll()
scheduler.TaskRunner(instance.delete)()
self.m.VerifyAll()
def test_osdatabase_invalid_attribute(self):
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance("db_invalid_attrib", t)
attrib = instance._resolve_attribute("invalid_attrib")
self.assertIsNone(attrib)
self.m.VerifyAll()
def test_osdatabase_get_hostname(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
instance.resource_id = 12345
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'instances')
self.m.StubOutWithMock(self.fc.instances, 'get')
self.fc.instances.get(12345).AndReturn(fake_dbinstance)
self.m.ReplayAll()
attrib = instance._resolve_attribute('hostname')
self.assertEqual(fake_dbinstance.hostname, attrib)
self.m.VerifyAll()
def test_osdatabase_get_href(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
instance.resource_id = 12345
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'instances')
self.m.StubOutWithMock(self.fc.instances, 'get')
self.fc.instances.get(12345).AndReturn(fake_dbinstance)
self.m.ReplayAll()
attrib = instance._resolve_attribute('href')
self.assertEqual(fake_dbinstance.links[0]['href'], attrib)
self.m.VerifyAll()
def test_osdatabase_get_href_links_none(self):
fake_dbinstance = FakeDBInstance()
fake_dbinstance.links = None
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
instance.resource_id = 12345
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'instances')
self.m.StubOutWithMock(self.fc.instances, 'get')
self.fc.instances.get(12345).AndReturn(fake_dbinstance)
self.m.ReplayAll()
attrib = instance._resolve_attribute('href')
self.assertIsNone(attrib)
self.m.VerifyAll()
def test_osdatabase_prop_validation_success(self):
t = template_format.parse(db_template)
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance)
ret = instance.validate()
self.assertIsNone(ret)
self.m.VerifyAll()
def test_osdatabase_prop_validation_invaliddb(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = [
{"name": "onedb"}]
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser",
"password": "pass",
"databases": ["invaliddb"]}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertRaises(exception.StackValidationFailed, instance.validate)
self.m.VerifyAll()
def test_osdatabase_prop_validation_users_none(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['users'] = []
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance)
ret = instance.validate()
self.assertIsNone(ret)
self.m.VerifyAll()
def test_osdatabase_prop_validation_databases_none(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = []
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser",
"password": "pass",
"databases": ["invaliddb"]}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertRaises(exception.StackValidationFailed, instance.validate)
self.m.VerifyAll()
def test_osdatabase_prop_validation_user_no_db(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = [
{"name": "validdb"}]
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser", "password": "pass", "databases": []}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self.assertRaises(exception.StackValidationFailed, instance.validate)
self.m.VerifyAll()
def test_osdatabase_prop_validation_no_datastore_yes_version(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'].pop('datastore_type')
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
exp_msg = "Not allowed - datastore_version without datastore_type."
self.assertEqual(exp_msg, six.text_type(ex))
self.m.VerifyAll()
def test_osdatabase_prop_validation_no_dsversion(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_type'] = 'mysql'
t['Resources']['MySqlCloudDB']['Properties'].pop('datastore_version')
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertIsNone(instance.validate())
self.m.VerifyAll()
def test_osdatabase_prop_validation_wrong_dsversion(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_type'] = 'mysql'
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_version'] = 'SomeVersion'
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
expected_msg = ("Datastore version SomeVersion for datastore type "
"mysql is not valid. "
"Allowed versions are MariaDB-5.5.")
self.assertEqual(expected_msg, six.text_type(ex))
self.m.VerifyAll()
def test_osdatabase_prop_validation_implicit_version_fail(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_type'] = 'mysql'
t['Resources']['MySqlCloudDB']['Properties'].pop('datastore_version')
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
trove.TroveClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc, 'datastore_versions')
self.m.StubOutWithMock(self.fc.datastore_versions, 'list')
self.fc.datastore_versions.list(
instance.properties['datastore_type']
).AndReturn([FakeVersion(), FakeVersion('MariaDB-5.0')])
self.m.ReplayAll()
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
expected_msg = ("Multiple active datastore versions exist for "
"datastore type mysql. "
"Explicit datastore version must be provided. "
"Allowed versions are MariaDB-5.5, MariaDB-5.0.")
self.assertEqual(expected_msg, six.text_type(ex))
self.m.VerifyAll()
def test_osdatabase_prop_validation_net_with_port_fail(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['networks'] = [
{
"port": "someportuuid",
"network": "somenetuuid"
}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance, neutron=True)
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Either network or port must be provided.',
six.text_type(ex))
self.m.VerifyAll()
def test_osdatabase_prop_validation_no_net_no_port_fail(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['networks'] = [
{
"fixed_ip": "1.2.3.4"
}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance, neutron=True)
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Either network or port must be provided.',
six.text_type(ex))
self.m.VerifyAll()
def test_osdatabase_prop_validation_nic_port_on_novanet_fails(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['networks'] = [
{
"port": "someportuuid",
}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_validate(instance, neutron=False)
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Can not use port property on Nova-network.',
six.text_type(ex))
self.m.VerifyAll()
def test_osdatabase_create_with_port(self):
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template_with_nics)
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_common_create()
self.m.StubOutWithMock(neutron.NeutronClientPlugin,
'find_neutron_resource')
neutron.NeutronClientPlugin.find_neutron_resource(
instance.properties, 'port', 'port').AndReturn('someportid')
self.fc.instances.create('test', 1, volume={'size': 30},
databases=[],
users=[],
restorePoint=None,
availability_zone=None,
datastore=None,
datastore_version=None,
nics=[{'port-id': 'someportid',
'v4-fixed-ip': '1.2.3.4'}]
).AndReturn(fake_dbinstance)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_osdatabase_create_with_net_id(self):
net_id = '034aa4d5-0f36-4127-8481-5caa5bfc9403'
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template_with_nics)
t['resources']['MySqlCloudDB']['properties']['networks'] = [
{'network': net_id}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_common_create()
self.fc.instances.create('test', 1, volume={'size': 30},
databases=[],
users=[],
restorePoint=None,
availability_zone=None,
datastore=None,
datastore_version=None,
nics=[{'net-id': net_id}]
).AndReturn(fake_dbinstance)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
def test_osdatabase_create_with_net_name(self):
class FakeNet(object):
id = 'somenetid'
fake_dbinstance = FakeDBInstance()
t = template_format.parse(db_template_with_nics)
t['resources']['MySqlCloudDB']['properties']['networks'] = [
{'network': 'somenetname'}]
instance = self._setup_test_clouddbinstance('dbinstance_test', t)
self._stubout_common_create()
self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
nova.NovaClientPlugin._create().AndReturn(self.nova)
self.m.StubOutWithMock(self.nova, 'networks')
self.m.StubOutWithMock(self.nova.networks, 'find')
self.nova.networks.find(label='somenetname').AndReturn(FakeNet())
self.fc.instances.create('test', 1, volume={'size': 30},
databases=[],
users=[],
restorePoint=None,
availability_zone=None,
datastore=None,
datastore_version=None,
nics=[{'net-id': 'somenetid'}]
).AndReturn(fake_dbinstance)
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
| 42.017123
| 78
| 0.618958
| 2,421
| 24,538
| 6.061545
| 0.116481
| 0.025894
| 0.043612
| 0.036797
| 0.790392
| 0.7754
| 0.741329
| 0.734174
| 0.72184
| 0.687155
| 0
| 0.010557
| 0.266525
| 24,538
| 583
| 79
| 42.089194
| 0.804812
| 0.024859
| 0
| 0.634343
| 0
| 0
| 0.15115
| 0.005312
| 0
| 0
| 0
| 0
| 0.060606
| 1
| 0.074747
| false
| 0.016162
| 0.026263
| 0
| 0.113131
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
774895188348038a86b9e3f59ceb17bf0d9d8f11
| 43
|
py
|
Python
|
onadata/libs/baseviewset.py
|
childhelpline/myhelpline
|
d72120ee31b6713cbaec79f299f5ee8bcb7ea429
|
[
"BSD-3-Clause"
] | 1
|
2018-07-15T13:13:43.000Z
|
2018-07-15T13:13:43.000Z
|
onadata/libs/baseviewset.py
|
aondiaye/myhelpline
|
d72120ee31b6713cbaec79f299f5ee8bcb7ea429
|
[
"BSD-3-Clause"
] | 14
|
2018-07-10T12:48:46.000Z
|
2022-03-11T23:24:51.000Z
|
onadata/libs/baseviewset.py
|
aondiaye/myhelpline
|
d72120ee31b6713cbaec79f299f5ee8bcb7ea429
|
[
"BSD-3-Clause"
] | 5
|
2018-07-04T07:59:14.000Z
|
2020-01-28T07:50:18.000Z
|
class DefaultBaseViewset(object):
pass
| 14.333333
| 33
| 0.767442
| 4
| 43
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 34
| 21.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
775cf2a3e118d5a9160a331896f5cf873abe2608
| 224
|
py
|
Python
|
courier/forms.py
|
ejplatform/django-courier
|
f58bb10f134bc788e07febdbd8b4a2fdca809014
|
[
"BSD-3-Clause"
] | 1
|
2019-02-15T20:12:50.000Z
|
2019-02-15T20:12:50.000Z
|
courier/forms.py
|
ejplatform/django-courier
|
f58bb10f134bc788e07febdbd8b4a2fdca809014
|
[
"BSD-3-Clause"
] | 1
|
2018-09-07T19:59:27.000Z
|
2018-09-07T19:59:27.000Z
|
courier/forms.py
|
ejplatform/django-courier
|
f58bb10f134bc788e07febdbd8b4a2fdca809014
|
[
"BSD-3-Clause"
] | 3
|
2018-05-04T21:37:37.000Z
|
2019-04-01T03:31:08.000Z
|
from django import forms
class NotificationSendInBulkForm(forms.Form):
sender = forms.CharField(max_length=50)
title = forms.CharField(max_length=200)
short_description = forms.CharField(widget=forms.Textarea)
| 28
| 62
| 0.78125
| 27
| 224
| 6.37037
| 0.666667
| 0.244186
| 0.197674
| 0.267442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.129464
| 224
| 7
| 63
| 32
| 0.85641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
621cb474fb129aba5c504718c52e17b6757b8dde
| 27
|
py
|
Python
|
test.py
|
LucasLibereco/Test01
|
d99a2a3100fb97d4fc5ae0e8c9ceb51f4bb5649b
|
[
"MIT"
] | null | null | null |
test.py
|
LucasLibereco/Test01
|
d99a2a3100fb97d4fc5ae0e8c9ceb51f4bb5649b
|
[
"MIT"
] | null | null | null |
test.py
|
LucasLibereco/Test01
|
d99a2a3100fb97d4fc5ae0e8c9ceb51f4bb5649b
|
[
"MIT"
] | null | null | null |
print("hello")
raw_input()
| 9
| 14
| 0.703704
| 4
| 27
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 27
| 2
| 15
| 13.5
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6222bf5e6710cde72d0eb381cf704cced06fb1cd
| 11,030
|
py
|
Python
|
torch_rl/memory/sequential.py
|
JimmyMVP/plain_rl
|
4780f05fffb62533a339197b49de487cdc9d9954
|
[
"MIT"
] | 10
|
2018-03-18T21:27:59.000Z
|
2020-08-13T20:15:05.000Z
|
torch_rl/memory/sequential.py
|
JimmyMVP/plain_rl
|
4780f05fffb62533a339197b49de487cdc9d9954
|
[
"MIT"
] | 1
|
2018-03-19T09:51:04.000Z
|
2018-03-19T10:11:02.000Z
|
torch_rl/memory/sequential.py
|
JimmyMVP/plain_rl
|
4780f05fffb62533a339197b49de487cdc9d9954
|
[
"MIT"
] | 3
|
2018-03-10T09:17:10.000Z
|
2019-12-17T20:19:56.000Z
|
from torch_rl.memory.core import *
class SequentialMemory(Memory):
    """Replay memory backed by ring buffers, sampled as windowed experiences.

    Stores (observation, action, reward, terminal) transitions plus an
    optional per-transition goal (for goal-conditioned agents).
    """

    def __init__(self, limit, **kwargs):
        """:param limit: maximum number of transitions kept; oldest are overwritten."""
        super(SequentialMemory, self).__init__(**kwargs)
        self.limit = limit
        # Do not use deque to implement the memory. This data structure may seem convenient but
        # it is way too slow on random access. Instead, we use our own ring buffer implementation.
        self.actions = RingBuffer(limit)
        self.rewards = RingBuffer(limit)
        self.terminals = RingBuffer(limit)
        self.observations = RingBuffer(limit)
        self.goals = RingBuffer(limit)

    def sample(self, batch_size, batch_idxs=None):
        """Sample `batch_size` experiences, each with a `window_length`-long state window.

        :param batch_size: number of experiences to return.
        :param batch_idxs: optional explicit indexes to sample; drawn at random when None.
        :return: list of `Experience` tuples (state0, goal, action, reward, state1, terminal1).
        """
        if batch_idxs is None:
            # Draw random indexes such that we have at least a single entry before each
            # index.
            batch_idxs = sample_batch_indexes(0, self.nb_entries - 1, size=batch_size)
        batch_idxs = np.array(batch_idxs) + 1
        assert np.min(batch_idxs) >= 1
        assert np.max(batch_idxs) < self.nb_entries
        assert len(batch_idxs) == batch_size
        # Create experiences
        experiences = []
        for idx in batch_idxs:
            terminal0 = self.terminals[idx - 2] if idx >= 2 else False
            while terminal0:
                # Skip this transition because the environment was reset here. Select a new, random
                # transition and use this instead. This may cause the batch to contain the same
                # transition twice.
                idx = sample_batch_indexes(1, self.nb_entries, size=1)[0]
                terminal0 = self.terminals[idx - 2] if idx >= 2 else False
            assert 1 <= idx < self.nb_entries
            # This code is slightly complicated by the fact that subsequent observations might be
            # from different episodes. We ensure that an experience never spans multiple episodes.
            # This is probably not that important in practice but it seems cleaner.
            state0 = [self.observations[idx - 1]]
            for offset in range(0, self.window_length - 1):
                current_idx = idx - 2 - offset
                current_terminal = self.terminals[current_idx - 1] if current_idx - 1 > 0 else False
                if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
                    # The previously handled observation was terminal, don't add the current one.
                    # Otherwise we would leak into a different episode.
                    break
                state0.insert(0, self.observations[current_idx])
            while len(state0) < self.window_length:
                state0.insert(0, zeroed_observation(state0[0]))
            action = self.actions[idx - 1]
            reward = self.rewards[idx - 1]
            terminal1 = self.terminals[idx - 1]
            goal = self.goals[idx - 1] if self.goals.length > 0 else None
            # Okay, now we need to create the follow-up state. This is state0 shifted on timestep
            # to the right. Again, we need to be careful to not include an observation from the next
            # episode if the last state is terminal.
            state1 = [np.copy(x) for x in state0[1:]]
            state1.append(self.observations[idx])
            assert len(state0) == self.window_length
            assert len(state1) == len(state0)
            experiences.append(Experience(state0=state0, goal=goal, action=action, reward=reward,
                                          state1=state1, terminal1=terminal1))
        assert len(experiences) == batch_size
        return experiences

    def sample_and_split(self, batch_size, batch_idxs=None):
        """Sample experiences and return them as flat per-field numpy batches.

        NOTE: terminal flags are encoded inverted (0.0 if terminal else 1.0),
        i.e. the returned array is effectively a "continue" mask.
        """
        experiences = self.sample(batch_size, batch_idxs)
        state0_batch = []
        goal_batch = []
        reward_batch = []
        action_batch = []
        terminal1_batch = []
        state1_batch = []
        for e in experiences:
            state0_batch.append(e.state0)
            state1_batch.append(e.state1)
            reward_batch.append(e.reward)
            action_batch.append(e.action)
            goal_batch.append(e.goal)
            terminal1_batch.append(0. if e.terminal1 else 1.)
        # Prepare and validate parameters.
        state0_batch = np.array(state0_batch, dtype=np.float32).reshape(batch_size, -1)
        state1_batch = np.array(state1_batch, dtype=np.float32).reshape(batch_size, -1)
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # `bool` is the documented drop-in replacement (they were aliases).
        terminal1_batch = np.array(terminal1_batch, dtype=bool).reshape(batch_size, -1)
        reward_batch = np.array(reward_batch, dtype=np.float32).reshape(batch_size, -1)
        action_batch = np.array(action_batch, dtype=np.float32).reshape(batch_size, -1)
        if self.goals.length > 0:
            return state0_batch, goal_batch, action_batch, reward_batch, state1_batch, terminal1_batch
        else:
            return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch

    def _append(self, observation, action, reward, terminal, training=True):
        """Store one transition (without a goal)."""
        super(SequentialMemory, self).append(observation, action, reward, terminal, training=training)
        # This needs to be understood as follows: in `observation`, take `action`, obtain `reward`
        # and whether the next state is `terminal` or not.
        if training:
            self.observations.append(observation)
            self.actions.append(action)
            self.rewards.append(reward)
            self.terminals.append(terminal)

    def _appendg(self, observation, goal, action, reward, terminal, training=True):
        """Store one goal-conditioned transition."""
        self._append(observation, action, reward, terminal, training=training)
        if training:
            self.goals.append(goal)

    def append(self, *args, **kwargs):
        """Append a transition; accepts the goal-conditioned or the plain signature."""
        try:
            self._appendg(*args, **kwargs)
        except Exception:
            # An argument mismatch (no goal supplied) falls back to the goal-less append.
            # NOTE(review): this broad catch also masks genuine errors raised inside
            # _appendg; narrowing to TypeError may be safer — confirm before changing.
            self._append(*args, **kwargs)

    @property
    def nb_entries(self):
        """Number of transitions currently stored."""
        return len(self.observations)

    def get_config(self):
        """Return the memory configuration dict, including `limit`."""
        config = super(SequentialMemory, self).get_config()
        config['limit'] = self.limit
        return config
class GeneralisedMemory(SequentialMemory):
    """SequentialMemory variant that also records arbitrary per-transition extra_info.

    Unlike the parent, `sample` returns plain lists
    [state0, action, reward, state1, terminal1, extra0] instead of `Experience` tuples.
    """

    def __init__(self, limit,**kwargs):
        super(GeneralisedMemory, self).__init__(limit, **kwargs)
        # Extra per-transition payload, kept in lockstep with the parent's buffers.
        self.extra_info = RingBuffer(limit)
        self.limit = limit

    def sample(self, batch_size, batch_idxs=None):
        """Sample experiences as lists; mirrors SequentialMemory.sample plus extra_info.

        :param batch_size: number of samples to draw.
        :param batch_idxs: optional explicit indexes; drawn at random when None.
        """
        if batch_idxs is None:
            # Draw random indexes such that we have at least a single entry before each
            # index.
            batch_idxs = sample_batch_indexes(0, self.nb_entries - 1, size=batch_size)
        batch_idxs = np.array(batch_idxs) + 1
        assert np.min(batch_idxs) >= 1
        assert np.max(batch_idxs) < self.nb_entries
        assert len(batch_idxs) == batch_size
        # Create experiences
        experiences = []
        for idx in batch_idxs:
            terminal0 = self.terminals[idx - 2] if idx >= 2 else False
            while terminal0:
                # Skip this transition because the environment was reset here. Select a new, random
                # transition and use this instead. This may cause the batch to contain the same
                # transition twice.
                idx = sample_batch_indexes(1, self.nb_entries, size=1)[0]
                terminal0 = self.terminals[idx - 2] if idx >= 2 else False
            assert 1 <= idx < self.nb_entries
            # This code is slightly complicated by the fact that subsequent observations might be
            # from different episodes. We ensure that an experience never spans multiple episodes.
            # This is probably not that important in practice but it seems cleaner.
            state0 = [self.observations[idx - 1]]
            for offset in range(0, self.window_length - 1):
                current_idx = idx - 2 - offset
                current_terminal = self.terminals[current_idx - 1] if current_idx - 1 > 0 else False
                if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
                    # The previously handled observation was terminal, don't add the current one.
                    # Otherwise we would leak into a different episode.
                    break
                state0.insert(0, self.observations[current_idx])
            while len(state0) < self.window_length:
                state0.insert(0, zeroed_observation(state0[0]))
            action = self.actions[idx - 1]
            reward = self.rewards[idx - 1]
            terminal1 = self.terminals[idx - 1]
            # NOTE(review): `goal` is computed here but never included in the emitted list.
            goal = self.goals[idx - 1] if self.goals.length > 0 else None
            # Okay, now we need to create the follow-up state. This is state0 shifted on timestep
            # to the right. Again, we need to be careful to not include an observation from the next
            # episode if the last state is terminal.
            state1 = [np.copy(x) for x in state0[1:]]
            state1.append(self.observations[idx])
            # NOTE(review): extra_info is read at `idx` while the other buffers use `idx - 1`;
            # this looks like it pairs the extra info with the follow-up state — confirm intended.
            extra0 = self.extra_info[idx]
            assert len(state0) == self.window_length
            assert len(state1) == len(state0)
            experiences.append([state0, action,reward,
                                state1, terminal1, extra0])
        assert len(experiences) == batch_size
        return experiences

    def append(self, observation, action, reward, terminal, extra_info, training=True):
        """Append a transition plus its extra_info payload."""
        super(GeneralisedMemory, self).append(observation, action, reward, terminal, training=training)
        # This needs to be understood as follows: in `observation`, take `action`, obtain `reward`
        # and whether the next state is `terminal` or not.
        if training:
            self.extra_info.append(extra_info)

    def sample_and_split(self, batch_size, batch_idxs=None):
        """Sample and split into flat numpy batches (cf. SequentialMemory.sample_and_split).

        NOTE(review): when goals are in use, the extra_info batch is built but not
        returned, and `goal_batch` is always empty here — confirm whether intended.
        """
        experiences = self.sample(batch_size, batch_idxs)
        state0_batch = []
        goal_batch = []
        reward_batch = []
        action_batch = []
        terminal1_batch = []
        state1_batch = []
        extra_info = []
        for e in experiences:
            state0_batch.append(e[0])
            reward_batch.append(e[2])
            action_batch.append(e[1])
            state1_batch.append(e[3])
            # Inverted encoding: 0.0 marks a terminal transition, 1.0 a continuing one.
            terminal1_batch.append(0. if e[4] else 1.)
            extra_info.append(e[-1])
        # Prepare and validate parameters.
        state0_batch = np.array(state0_batch).reshape(batch_size, -1)
        state1_batch = np.array(state1_batch).reshape(batch_size, -1)
        terminal1_batch = np.array(terminal1_batch).reshape(batch_size, -1)
        reward_batch = np.array(reward_batch).reshape(batch_size, -1)
        action_batch = np.array(action_batch).reshape(batch_size, -1)
        extra_info_batch = np.array(extra_info).reshape(batch_size, -1)
        if self.goals.length > 0:
            return state0_batch, goal_batch, action_batch, reward_batch, state1_batch, terminal1_batch
        else:
            return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch, extra_info_batch
| 46.150628
| 108
| 0.623572
| 1,367
| 11,030
| 4.890271
| 0.1485
| 0.030965
| 0.019746
| 0.027973
| 0.798654
| 0.789529
| 0.774271
| 0.761556
| 0.71997
| 0.71997
| 0
| 0.021895
| 0.291931
| 11,030
| 238
| 109
| 46.344538
| 0.834059
| 0.201451
| 0
| 0.595092
| 0
| 0
| 0.00057
| 0
| 0
| 0
| 0
| 0
| 0.08589
| 1
| 0.07362
| false
| 0
| 0.006135
| 0.006135
| 0.141104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
622ada20d653849806cec4220158699d58bd91cd
| 173
|
py
|
Python
|
gitspammer/utils.py
|
a-random-lemurian/GitSpammer
|
721cf4117508c350e6e276c6ff9c7d56d41646b0
|
[
"MIT"
] | null | null | null |
gitspammer/utils.py
|
a-random-lemurian/GitSpammer
|
721cf4117508c350e6e276c6ff9c7d56d41646b0
|
[
"MIT"
] | null | null | null |
gitspammer/utils.py
|
a-random-lemurian/GitSpammer
|
721cf4117508c350e6e276c6ff9c7d56d41646b0
|
[
"MIT"
] | null | null | null |
import random
# Alphanumeric alphabet used for random string generation.
LETTERS = 'abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letters(length: int):
    """Build a random alphanumeric string of the requested length."""
    picked = random.choices(LETTERS, k=length)
    return ''.join(picked)
| 28.833333
| 74
| 0.815029
| 16
| 173
| 8.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063291
| 0.086705
| 173
| 6
| 75
| 28.833333
| 0.822785
| 0
| 0
| 0
| 0
| 0
| 0.356322
| 0.356322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6274397c6a5806b28b9710386a8cdb04c33108fa
| 94
|
py
|
Python
|
server/core/admin.py
|
webclinic017/trading-bot-1
|
c567b1d865a88aa025e589b8c403cc7749ab52ad
|
[
"MIT"
] | 1
|
2021-08-29T03:34:30.000Z
|
2021-08-29T03:34:30.000Z
|
server/core/admin.py
|
webclinic017/trading-bot-1
|
c567b1d865a88aa025e589b8c403cc7749ab52ad
|
[
"MIT"
] | null | null | null |
server/core/admin.py
|
webclinic017/trading-bot-1
|
c567b1d865a88aa025e589b8c403cc7749ab52ad
|
[
"MIT"
] | 1
|
2021-08-16T22:33:24.000Z
|
2021-08-16T22:33:24.000Z
|
from django.contrib import admin
from .models import Strategy
# Expose the Strategy model in the Django admin using the default ModelAdmin.
admin.site.register(Strategy)
| 15.666667
| 32
| 0.819149
| 13
| 94
| 5.923077
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 94
| 5
| 33
| 18.8
| 0.927711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
628681e0d84cdbb5573ae44ff442740795eeb002
| 517
|
py
|
Python
|
test.py
|
ndsvw/Maximum-Sum-Subarray-Python
|
e34e81d939f7cd47b9e5a8cd7cf03db239d2f332
|
[
"MIT"
] | null | null | null |
test.py
|
ndsvw/Maximum-Sum-Subarray-Python
|
e34e81d939f7cd47b9e5a8cd7cf03db239d2f332
|
[
"MIT"
] | null | null | null |
test.py
|
ndsvw/Maximum-Sum-Subarray-Python
|
e34e81d939f7cd47b9e5a8cd7cf03db239d2f332
|
[
"MIT"
] | null | null | null |
from max_sum_subarray import max_sum_dac
print("Tests:")
# Each case pairs an input array with the expected maximum-subarray sum;
# one boolean is printed per case, exactly as before.
cases = [
    ([3, -5, 0, 2, 100, -2, -5, 9], 104),
    ([-3, 5, -1, 2, -5, 3], 6),
    ([13, -5, -1, 7, -5, 4], 14),
    ([3, -5, 0, 1, 10, -2, -5, 6], 11),
    ([3, -5, 0, 1, 0, -2, -5, 6], 6),
    ([-3, -9, -4, -5, -2, -1], -1),
    ([3, 5, 10, 9, 2, 3], 32),
    ([3, -5, -10, -9, -2, -3], 3),
    ([3, -5, 10, 9, -2, -3], 19),
]
for a, expected in cases:
    print(max_sum_dac(a) == expected)
| 24.619048
| 40
| 0.562863
| 133
| 517
| 2.022556
| 0.195489
| 0.245353
| 0.334572
| 0.468401
| 0.669145
| 0.464684
| 0.460967
| 0.390335
| 0.256506
| 0.256506
| 0
| 0.179039
| 0.11412
| 517
| 21
| 41
| 24.619048
| 0.408297
| 0
| 0
| 0.1
| 0
| 0
| 0.011583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
655c6c3c1ba64736cb205029c603266416359b57
| 28,730
|
py
|
Python
|
sdk/python/pulumi_azure/sql/database.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/sql/database.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/sql/database.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Database']
class Database(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
collation: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
edition: Optional[pulumi.Input[str]] = None,
elastic_pool_name: Optional[pulumi.Input[str]] = None,
extended_auditing_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']]] = None,
import_: Optional[pulumi.Input[pulumi.InputType['DatabaseImportArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
max_size_bytes: Optional[pulumi.Input[str]] = None,
max_size_gb: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
read_scale: Optional[pulumi.Input[bool]] = None,
requested_service_objective_id: Optional[pulumi.Input[str]] = None,
requested_service_objective_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
source_database_deletion_date: Optional[pulumi.Input[str]] = None,
source_database_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
threat_detection_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Allows you to manage an Azure SQL Database
> **NOTE:** The Database Extended Auditing Policy Can be set inline here as well as with the mssql_database_extended_auditing_policy resource resource. You can only use one or the other and using both will cause a conflict.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
example_sql_server = azure.sql.SqlServer("exampleSqlServer",
resource_group_name=example_resource_group.name,
location="West US",
version="12.0",
administrator_login="4dm1n157r470r",
administrator_login_password="4-v3ry-53cr37-p455w0rd",
tags={
"environment": "production",
})
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_database = azure.sql.Database("exampleDatabase",
resource_group_name=example_resource_group.name,
location="West US",
server_name=example_sql_server.name,
extended_auditing_policy=azure.sql.DatabaseExtendedAuditingPolicyArgs(
storage_endpoint=example_account.primary_blob_endpoint,
storage_account_access_key=example_account.primary_access_key,
storage_account_access_key_is_secondary=True,
retention_in_days=6,
),
tags={
"environment": "production",
})
```
## Import
SQL Databases can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:sql/database:Database database1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver/databases/database1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] collation: The name of the collation. Applies only if `create_mode` is `Default`. Azure default is `SQL_LATIN1_GENERAL_CP1_CI_AS`. Changing this forces a new resource to be created.
:param pulumi.Input[str] create_mode: Specifies how to create the database. Valid values are: `Default`, `Copy`, `OnlineSecondary`, `NonReadableSecondary`, `PointInTimeRestore`, `Recovery`, `Restore` or `RestoreLongTermRetentionBackup`. Must be `Default` to create a new database. Defaults to `Default`. Please see [Azure SQL Database REST API](https://docs.microsoft.com/en-us/rest/api/sql/databases/createorupdate#createmode)
:param pulumi.Input[str] edition: The edition of the database to be created. Applies only if `create_mode` is `Default`. Valid values are: `Basic`, `Standard`, `Premium`, `DataWarehouse`, `Business`, `BusinessCritical`, `Free`, `GeneralPurpose`, `Hyperscale`, `Premium`, `PremiumRS`, `Standard`, `Stretch`, `System`, `System2`, or `Web`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
:param pulumi.Input[str] elastic_pool_name: The name of the elastic database pool.
:param pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']] extended_auditing_policy: A `extended_auditing_policy` block as defined below.
:param pulumi.Input[pulumi.InputType['DatabaseImportArgs']] import_: A Database Import block as documented below. `create_mode` must be set to `Default`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] max_size_bytes: The maximum size that the database can grow to. Applies only if `create_mode` is `Default`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
:param pulumi.Input[str] name: The name of the database.
:param pulumi.Input[bool] read_scale: Read-only connections will be redirected to a high-available replica. Please see [Use read-only replicas to load-balance read-only query workloads](https://docs.microsoft.com/en-us/azure/sql-database/sql-database-read-scale-out).
:param pulumi.Input[str] requested_service_objective_id: A GUID/UUID corresponding to a configured Service Level Objective for the Azure SQL database which can be used to configure a performance level.
.
:param pulumi.Input[str] requested_service_objective_name: The service objective name for the database. Valid values depend on edition and location and may include `S0`, `S1`, `S2`, `S3`, `P1`, `P2`, `P4`, `P6`, `P11` and `ElasticPool`. You can list the available names with the cli: ```shell az sql db list-editions -l westus -o table ```. For further information please see [Azure CLI - az sql db](https://docs.microsoft.com/en-us/cli/azure/sql/db?view=azure-cli-latest#az-sql-db-list-editions).
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the database. This must be the same as Database Server resource group currently.
:param pulumi.Input[str] restore_point_in_time: The point in time for the restore. Only applies if `create_mode` is `PointInTimeRestore` e.g. 2013-11-08T22:00:40Z
:param pulumi.Input[str] server_name: The name of the SQL Server on which to create the database.
:param pulumi.Input[str] source_database_deletion_date: The deletion date time of the source database. Only applies to deleted databases where `create_mode` is `PointInTimeRestore`.
:param pulumi.Input[str] source_database_id: The URI of the source database if `create_mode` value is not `Default`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']] threat_detection_policy: Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below.
:param pulumi.Input[bool] zone_redundant: Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['collation'] = collation
__props__['create_mode'] = create_mode
__props__['edition'] = edition
__props__['elastic_pool_name'] = elastic_pool_name
if extended_auditing_policy is not None:
warnings.warn("""the `extended_auditing_policy` block has been moved to `azurerm_mssql_server_extended_auditing_policy` and `azurerm_mssql_database_extended_auditing_policy`. This block will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("extended_auditing_policy is deprecated: the `extended_auditing_policy` block has been moved to `azurerm_mssql_server_extended_auditing_policy` and `azurerm_mssql_database_extended_auditing_policy`. This block will be removed in version 3.0 of the provider.")
__props__['extended_auditing_policy'] = extended_auditing_policy
__props__['import_'] = import_
__props__['location'] = location
__props__['max_size_bytes'] = max_size_bytes
__props__['max_size_gb'] = max_size_gb
__props__['name'] = name
__props__['read_scale'] = read_scale
__props__['requested_service_objective_id'] = requested_service_objective_id
__props__['requested_service_objective_name'] = requested_service_objective_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['restore_point_in_time'] = restore_point_in_time
if server_name is None:
raise TypeError("Missing required property 'server_name'")
__props__['server_name'] = server_name
__props__['source_database_deletion_date'] = source_database_deletion_date
__props__['source_database_id'] = source_database_id
__props__['tags'] = tags
__props__['threat_detection_policy'] = threat_detection_policy
__props__['zone_redundant'] = zone_redundant
__props__['creation_date'] = None
__props__['default_secondary_location'] = None
__props__['encryption'] = None
super(Database, __self__).__init__(
'azure:sql/database:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
collation: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_date: Optional[pulumi.Input[str]] = None,
default_secondary_location: Optional[pulumi.Input[str]] = None,
edition: Optional[pulumi.Input[str]] = None,
elastic_pool_name: Optional[pulumi.Input[str]] = None,
encryption: Optional[pulumi.Input[str]] = None,
extended_auditing_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']]] = None,
import_: Optional[pulumi.Input[pulumi.InputType['DatabaseImportArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
max_size_bytes: Optional[pulumi.Input[str]] = None,
max_size_gb: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
read_scale: Optional[pulumi.Input[bool]] = None,
requested_service_objective_id: Optional[pulumi.Input[str]] = None,
requested_service_objective_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
source_database_deletion_date: Optional[pulumi.Input[str]] = None,
source_database_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
threat_detection_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None) -> 'Database':
"""
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] collation: The name of the collation. Applies only if `create_mode` is `Default`. Azure default is `SQL_LATIN1_GENERAL_CP1_CI_AS`. Changing this forces a new resource to be created.
:param pulumi.Input[str] create_mode: Specifies how to create the database. Valid values are: `Default`, `Copy`, `OnlineSecondary`, `NonReadableSecondary`, `PointInTimeRestore`, `Recovery`, `Restore` or `RestoreLongTermRetentionBackup`. Must be `Default` to create a new database. Defaults to `Default`. Please see [Azure SQL Database REST API](https://docs.microsoft.com/en-us/rest/api/sql/databases/createorupdate#createmode)
:param pulumi.Input[str] creation_date: The creation date of the SQL Database.
:param pulumi.Input[str] default_secondary_location: The default secondary location of the SQL Database.
:param pulumi.Input[str] edition: The edition of the database to be created. Applies only if `create_mode` is `Default`. Valid values are: `Basic`, `Standard`, `Premium`, `DataWarehouse`, `Business`, `BusinessCritical`, `Free`, `GeneralPurpose`, `Hyperscale`, `Premium`, `PremiumRS`, `Standard`, `Stretch`, `System`, `System2`, or `Web`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
:param pulumi.Input[str] elastic_pool_name: The name of the elastic database pool.
:param pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']] extended_auditing_policy: A `extended_auditing_policy` block as defined below.
:param pulumi.Input[pulumi.InputType['DatabaseImportArgs']] import_: A Database Import block as documented below. `create_mode` must be set to `Default`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] max_size_bytes: The maximum size that the database can grow to. Applies only if `create_mode` is `Default`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
:param pulumi.Input[str] name: The name of the database.
:param pulumi.Input[bool] read_scale: Read-only connections will be redirected to a high-available replica. Please see [Use read-only replicas to load-balance read-only query workloads](https://docs.microsoft.com/en-us/azure/sql-database/sql-database-read-scale-out).
:param pulumi.Input[str] requested_service_objective_id: A GUID/UUID corresponding to a configured Service Level Objective for the Azure SQL database which can be used to configure a performance level.
.
:param pulumi.Input[str] requested_service_objective_name: The service objective name for the database. Valid values depend on edition and location and may include `S0`, `S1`, `S2`, `S3`, `P1`, `P2`, `P4`, `P6`, `P11` and `ElasticPool`. You can list the available names with the cli: ```shell az sql db list-editions -l westus -o table ```. For further information please see [Azure CLI - az sql db](https://docs.microsoft.com/en-us/cli/azure/sql/db?view=azure-cli-latest#az-sql-db-list-editions).
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the database. This must be the same as Database Server resource group currently.
:param pulumi.Input[str] restore_point_in_time: The point in time for the restore. Only applies if `create_mode` is `PointInTimeRestore` e.g. 2013-11-08T22:00:40Z
:param pulumi.Input[str] server_name: The name of the SQL Server on which to create the database.
:param pulumi.Input[str] source_database_deletion_date: The deletion date time of the source database. Only applies to deleted databases where `create_mode` is `PointInTimeRestore`.
:param pulumi.Input[str] source_database_id: The URI of the source database if `create_mode` value is not `Default`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']] threat_detection_policy: Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below.
:param pulumi.Input[bool] zone_redundant: Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["collation"] = collation
__props__["create_mode"] = create_mode
__props__["creation_date"] = creation_date
__props__["default_secondary_location"] = default_secondary_location
__props__["edition"] = edition
__props__["elastic_pool_name"] = elastic_pool_name
__props__["encryption"] = encryption
__props__["extended_auditing_policy"] = extended_auditing_policy
__props__["import_"] = import_
__props__["location"] = location
__props__["max_size_bytes"] = max_size_bytes
__props__["max_size_gb"] = max_size_gb
__props__["name"] = name
__props__["read_scale"] = read_scale
__props__["requested_service_objective_id"] = requested_service_objective_id
__props__["requested_service_objective_name"] = requested_service_objective_name
__props__["resource_group_name"] = resource_group_name
__props__["restore_point_in_time"] = restore_point_in_time
__props__["server_name"] = server_name
__props__["source_database_deletion_date"] = source_database_deletion_date
__props__["source_database_id"] = source_database_id
__props__["tags"] = tags
__props__["threat_detection_policy"] = threat_detection_policy
__props__["zone_redundant"] = zone_redundant
return Database(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def collation(self) -> pulumi.Output[str]:
"""
The name of the collation. Applies only if `create_mode` is `Default`. Azure default is `SQL_LATIN1_GENERAL_CP1_CI_AS`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "collation")
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> pulumi.Output[Optional[str]]:
"""
Specifies how to create the database. Valid values are: `Default`, `Copy`, `OnlineSecondary`, `NonReadableSecondary`, `PointInTimeRestore`, `Recovery`, `Restore` or `RestoreLongTermRetentionBackup`. Must be `Default` to create a new database. Defaults to `Default`. Please see [Azure SQL Database REST API](https://docs.microsoft.com/en-us/rest/api/sql/databases/createorupdate#createmode)
"""
return pulumi.get(self, "create_mode")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
    """
    The creation date of the SQL Database.
    """
    # Backed by the "creationDate" resource output.
    return pulumi.get(self, "creation_date")
@property
@pulumi.getter(name="defaultSecondaryLocation")
def default_secondary_location(self) -> pulumi.Output[str]:
    """
    The default secondary location of the SQL Database.
    """
    # Backed by the "defaultSecondaryLocation" resource output.
    return pulumi.get(self, "default_secondary_location")
@property
@pulumi.getter
def edition(self) -> pulumi.Output[str]:
    """
    The edition of the database to be created. Applies only if `create_mode` is `Default`. Valid values are: `Basic`, `Standard`, `Premium`, `DataWarehouse`, `Business`, `BusinessCritical`, `Free`, `GeneralPurpose`, `Hyperscale`, `Premium`, `PremiumRS`, `Standard`, `Stretch`, `System`, `System2`, or `Web`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
    """
    # Read-only view over the resource's "edition" output.
    return pulumi.get(self, "edition")
@property
@pulumi.getter(name="elasticPoolName")
def elastic_pool_name(self) -> pulumi.Output[str]:
    """
    The name of the elastic database pool.
    """
    # Backed by the "elasticPoolName" resource output.
    return pulumi.get(self, "elastic_pool_name")
@property
@pulumi.getter
def encryption(self) -> pulumi.Output[str]:
    """
    The encryption state reported for the database.

    NOTE(review): this output is undocumented upstream -- the description is
    inferred from the attribute name; confirm against the provider schema.
    """
    return pulumi.get(self, "encryption")
@property
@pulumi.getter(name="extendedAuditingPolicy")
def extended_auditing_policy(self) -> pulumi.Output['outputs.DatabaseExtendedAuditingPolicy']:
    """
    A `extended_auditing_policy` block as defined below.
    """
    # Backed by the "extendedAuditingPolicy" resource output.
    return pulumi.get(self, "extended_auditing_policy")
@property
@pulumi.getter(name="import")
def import_(self) -> pulumi.Output[Optional['outputs.DatabaseImport']]:
    """
    A Database Import block as documented below. `create_mode` must be set to `Default`.
    """
    # Trailing underscore avoids clashing with the Python `import` keyword;
    # the provider-side name is plain "import" (see the getter decorator).
    return pulumi.get(self, "import_")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """
    Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
    """
    # Read-only view over the resource's "location" output.
    return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxSizeBytes")
def max_size_bytes(self) -> pulumi.Output[str]:
    """
    The maximum size that the database can grow to. Applies only if `create_mode` is `Default`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
    """
    # Backed by the "maxSizeBytes" resource output.
    return pulumi.get(self, "max_size_bytes")
@property
@pulumi.getter(name="maxSizeGb")
def max_size_gb(self) -> pulumi.Output[str]:
    """
    The maximum database size, expressed in gigabytes.

    NOTE(review): this output is undocumented upstream -- the description is
    inferred from the attribute name; confirm against the provider schema.
    """
    return pulumi.get(self, "max_size_gb")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    The name of the database.
    """
    # Read-only view over the resource's "name" output.
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="readScale")
def read_scale(self) -> pulumi.Output[Optional[bool]]:
    """
    Read-only connections will be redirected to a high-available replica. Please see [Use read-only replicas to load-balance read-only query workloads](https://docs.microsoft.com/en-us/azure/sql-database/sql-database-read-scale-out).
    """
    # Optional output: may resolve to None when the provider reports no value.
    return pulumi.get(self, "read_scale")
@property
@pulumi.getter(name="requestedServiceObjectiveId")
def requested_service_objective_id(self) -> pulumi.Output[str]:
    """
    A GUID/UUID corresponding to a configured Service Level Objective for the Azure SQL database which can be used to configure a performance level.
    """
    return pulumi.get(self, "requested_service_objective_id")
@property
@pulumi.getter(name="requestedServiceObjectiveName")
def requested_service_objective_name(self) -> pulumi.Output[str]:
    """
    The service objective name for the database. Valid values depend on edition and location and may include `S0`, `S1`, `S2`, `S3`, `P1`, `P2`, `P4`, `P6`, `P11` and `ElasticPool`. You can list the available names with the cli: ```shell az sql db list-editions -l westus -o table ```. For further information please see [Azure CLI - az sql db](https://docs.microsoft.com/en-us/cli/azure/sql/db?view=azure-cli-latest#az-sql-db-list-editions).
    """
    # Backed by the "requestedServiceObjectiveName" resource output.
    return pulumi.get(self, "requested_service_objective_name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
    """
    The name of the resource group in which to create the database. This must be the same as Database Server resource group currently.
    """
    # Backed by the "resourceGroupName" resource output.
    return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="restorePointInTime")
def restore_point_in_time(self) -> pulumi.Output[str]:
    """
    The point in time for the restore. Only applies if `create_mode` is `PointInTimeRestore` e.g. 2013-11-08T22:00:40Z
    """
    # Backed by the "restorePointInTime" resource output.
    return pulumi.get(self, "restore_point_in_time")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Output[str]:
    """
    The name of the SQL Server on which to create the database.
    """
    # Backed by the "serverName" resource output.
    return pulumi.get(self, "server_name")
@property
@pulumi.getter(name="sourceDatabaseDeletionDate")
def source_database_deletion_date(self) -> pulumi.Output[str]:
    """
    The deletion date time of the source database. Only applies to deleted databases where `create_mode` is `PointInTimeRestore`.
    """
    # Backed by the "sourceDatabaseDeletionDate" resource output.
    return pulumi.get(self, "source_database_deletion_date")
@property
@pulumi.getter(name="sourceDatabaseId")
def source_database_id(self) -> pulumi.Output[str]:
    """
    The URI of the source database if `create_mode` value is not `Default`.
    """
    # Backed by the "sourceDatabaseId" resource output.
    return pulumi.get(self, "source_database_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    A mapping of tags to assign to the resource.
    """
    # Optional output: may resolve to None when no tags are set.
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="threatDetectionPolicy")
def threat_detection_policy(self) -> pulumi.Output['outputs.DatabaseThreatDetectionPolicy']:
    """
    Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below.
    """
    # Backed by the "threatDetectionPolicy" resource output.
    return pulumi.get(self, "threat_detection_policy")
@property
@pulumi.getter(name="zoneRedundant")
def zone_redundant(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
    """
    # Optional output: may resolve to None when the provider reports no value.
    return pulumi.get(self, "zone_redundant")
def translate_output_property(self, prop):
    """Translate a camelCase provider property name to its snake_case SDK name.

    Unmapped names (or names mapping to a falsy value) pass through unchanged.
    """
    return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
    """Translate a snake_case SDK property name to its camelCase provider name.

    Unmapped names (or names mapping to a falsy value) pass through unchanged.
    """
    return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 63.421634
| 505
| 0.69189
| 3,517
| 28,730
| 5.434746
| 0.1089
| 0.053521
| 0.050539
| 0.03296
| 0.776185
| 0.737993
| 0.722245
| 0.715078
| 0.687716
| 0.679816
| 0
| 0.006241
| 0.20811
| 28,730
| 452
| 506
| 63.561947
| 0.833897
| 0.495162
| 0
| 0.304527
| 1
| 0.00823
| 0.191488
| 0.094723
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115226
| false
| 0.004115
| 0.057613
| 0.016461
| 0.288066
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
655ecca9098cc69f81b455c4453a49e74ad86738
| 111
|
py
|
Python
|
CAIL2020/cocr/torchocr/networks/__init__.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 71
|
2020-07-16T01:49:27.000Z
|
2022-03-27T16:55:00.000Z
|
CAIL2020/cocr/torchocr/networks/__init__.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 11
|
2020-09-18T14:26:25.000Z
|
2022-02-09T23:49:33.000Z
|
CAIL2020/cocr/torchocr/networks/__init__.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 16
|
2020-07-15T07:24:30.000Z
|
2022-03-19T05:41:11.000Z
|
from .architectures import build_model
from .losses import build_loss
__all__ = ['build_model', 'build_loss']
| 22.2
| 39
| 0.792793
| 15
| 111
| 5.333333
| 0.533333
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117117
| 111
| 4
| 40
| 27.75
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
65776b77d363b933b5d7d38d1e6f56f0c9086252
| 58
|
py
|
Python
|
private_sleepin/settings.py
|
turtle1229/private_sleepin
|
fdc352aff7430ab324fcaa30d181545037dd95c4
|
[
"MIT"
] | null | null | null |
private_sleepin/settings.py
|
turtle1229/private_sleepin
|
fdc352aff7430ab324fcaa30d181545037dd95c4
|
[
"MIT"
] | 2
|
2021-02-15T04:41:03.000Z
|
2021-02-15T04:48:43.000Z
|
private_sleepin/settings.py
|
turtle1229/private_sleepin
|
fdc352aff7430ab324fcaa30d181545037dd95c4
|
[
"MIT"
] | null | null | null |
from .settings_common import *
from .settings_dev import *
| 29
| 30
| 0.810345
| 8
| 58
| 5.625
| 0.625
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 2
| 31
| 29
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
65811a49049a815aeb4f65a3c03d236b9c8aa86b
| 49
|
py
|
Python
|
tests/gpio/__init__.py
|
Brinfer/ClearWay
|
208abe652350ae457f4195ac3e73e6006738974a
|
[
"MIT"
] | null | null | null |
tests/gpio/__init__.py
|
Brinfer/ClearWay
|
208abe652350ae457f4195ac3e73e6006738974a
|
[
"MIT"
] | null | null | null |
tests/gpio/__init__.py
|
Brinfer/ClearWay
|
208abe652350ae457f4195ac3e73e6006738974a
|
[
"MIT"
] | null | null | null |
"""Module to test the module `clearway.gpio`."""
| 24.5
| 48
| 0.673469
| 7
| 49
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 49
| 1
| 49
| 49
| 0.767442
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6597664efb20e6bf73140adf6eda43f38bf39ab3
| 107
|
py
|
Python
|
gsfarc/gptool/parameter/templates/long64.py
|
geospatial-services-framework/gsfpyarc
|
5ef69299fbc0b763ad4c1857ceac3ff087c0dc14
|
[
"MIT"
] | 1
|
2021-11-06T18:36:28.000Z
|
2021-11-06T18:36:28.000Z
|
gsfarc/gptool/parameter/templates/long64.py
|
geospatial-services-framework/gsfpyarc
|
5ef69299fbc0b763ad4c1857ceac3ff087c0dc14
|
[
"MIT"
] | null | null | null |
gsfarc/gptool/parameter/templates/long64.py
|
geospatial-services-framework/gsfpyarc
|
5ef69299fbc0b763ad4c1857ceac3ff087c0dc14
|
[
"MIT"
] | null | null | null |
"""
"""
from .basic import BASIC
class LONG64(BASIC): pass
def template():
    """Return a LONG64 parameter template bound to the `GPLong` geoprocessing type."""
    return LONG64('GPLong')
| 9.727273
| 27
| 0.64486
| 13
| 107
| 5.307692
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 0.196262
| 107
| 11
| 27
| 9.727273
| 0.755814
| 0
| 0
| 0
| 0
| 0
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
|
0
| 5
|
65bc350cab81c3b5a3170f10e338723988f4d798
| 164
|
py
|
Python
|
kale/tests/assets/functions/func03.out.py
|
klolos/kale
|
c64daa96e043f17077d66f1a7fc1aac96484ff77
|
[
"Apache-2.0"
] | 12
|
2019-05-14T15:16:54.000Z
|
2019-06-19T15:51:11.000Z
|
kale/tests/assets/functions/func03.out.py
|
klolos/kale
|
c64daa96e043f17077d66f1a7fc1aac96484ff77
|
[
"Apache-2.0"
] | 1
|
2022-01-22T11:32:46.000Z
|
2022-01-22T11:32:46.000Z
|
kale/tests/assets/functions/func03.out.py
|
klolos/kale
|
c64daa96e043f17077d66f1a7fc1aac96484ff77
|
[
"Apache-2.0"
] | 1
|
2019-06-11T17:45:28.000Z
|
2019-06-11T17:45:28.000Z
|
def test():
    """Snapshot the pipeline step via kale's pod utilities.

    NOTE(review): argument semantics (here "T", "test", "/path/to/nb") are
    defined by `snapshot_pipeline_step` in the kale package -- confirm there.
    """
    # Function-local import: kale is only required when the step actually runs.
    from kale.utils import pod_utils as _kale_pod_utils
    _kale_pod_utils.snapshot_pipeline_step(
        "T",
        "test",
        "/path/to/nb")
| 23.428571
| 55
| 0.621951
| 23
| 164
| 4.043478
| 0.652174
| 0.258065
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268293
| 164
| 6
| 56
| 27.333333
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
65bf0ae52fd26bcaf84fa9dac9ffe2c9d63c874f
| 189
|
py
|
Python
|
main/admin.py
|
AriAlavi/meet_me_up
|
a5dbf82097125e924d66429db91c072070336e97
|
[
"BSD-3-Clause"
] | null | null | null |
main/admin.py
|
AriAlavi/meet_me_up
|
a5dbf82097125e924d66429db91c072070336e97
|
[
"BSD-3-Clause"
] | 4
|
2021-03-18T22:35:45.000Z
|
2021-09-22T18:21:46.000Z
|
main/admin.py
|
AriAlavi/meet_me_up
|
a5dbf82097125e924d66429db91c072070336e97
|
[
"BSD-3-Clause"
] | 2
|
2020-01-11T05:19:45.000Z
|
2020-01-11T05:21:08.000Z
|
from django.contrib import admin
from main.models import Event, Profile, Free
# Register your models here.
# Expose Event, Profile and Free in the Django admin with the default
# ModelAdmin options (no custom admin classes are supplied).
admin.site.register(Event)
admin.site.register(Profile)
admin.site.register(Free)
| 27
| 44
| 0.809524
| 28
| 189
| 5.464286
| 0.5
| 0.176471
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 189
| 7
| 45
| 27
| 0.894737
| 0.137566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
65c54f884f7760f05ec9fe54bc83f46b41650165
| 11,275
|
py
|
Python
|
shalchemy/test/test_redirection.py
|
mechaform/shalchemy
|
0a81c7084cbaa065934a2ac97ac988725df85acc
|
[
"MIT"
] | 6
|
2022-02-07T23:15:12.000Z
|
2022-03-09T12:14:49.000Z
|
shalchemy/test/test_redirection.py
|
mechaform/shalchemy
|
0a81c7084cbaa065934a2ac97ac988725df85acc
|
[
"MIT"
] | 3
|
2022-02-07T12:07:19.000Z
|
2022-03-01T05:03:31.000Z
|
shalchemy/test/test_redirection.py
|
mechaform/shalchemy
|
0a81c7084cbaa065934a2ac97ac988725df85acc
|
[
"MIT"
] | null | null | null |
# Note that you need the -s flag on pytest when you run these tests otherwise
# some of the tests will fail. In other words, you need to type `pytest -s`
import io
import tempfile
from shalchemy import sh, bin
from shalchemy.test.base import TestCase
# Shortcuts to the shalchemyprobe test binary's subcommands.
# NOTE(review): inferred from usage in this module -- `complain` emits its
# arguments on stderr (both streams with --both), `errcat` echoes input to
# stdout and stderr; confirm against the probe's implementation.
complain = bin.shalchemyprobe.complain
errcat = bin.shalchemyprobe.errcat
class TestRedirectionSimple(TestCase):
    """Single-operator redirection tests.

    Covers `>` (stdout to file, truncating), `>>` (stdout, appending),
    `<` (stdin from file), and `>=` (stderr), each against a filename,
    an open file object, and an in-memory stream, plus the explicit
    `out_`/`in_`/`err_` method forms.
    """

    # Tests for >
    def test_filename_out(self):
        # Works
        sh.run(bin.echo('-n', self.content) > self.filename)
        self.assertEqual(self.read_file(), self.content)
        # Appending
        # NOTE: `>` truncates, so two runs still leave a single copy.
        sh.run(bin.echo('-n', self.content) > self.filename)
        sh.run(bin.echo('-n', self.content) > self.filename)
        self.assertEqual(self.read_file(), self.content)

    def test_file_out(self):
        # Works
        sh.run(bin.echo('-n', self.content) > self.fileobj)
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(bin.echo('-n', self.content) > self.fileobj)
        sh.run(bin.echo('-n', self.content) > self.fileobj)
        self.assertEqual(self.read_file(), self.content)

    def test_stream_out(self):
        with io.StringIO() as stream:
            # Works
            sh.run(bin.echo('-n', self.content) > stream)
            stream.seek(0)
            self.assertEqual(stream.read(), self.content)
            # Appending
            sh.run(bin.echo('-n', self.content) > stream)
            sh.run(bin.echo('-n', self.content) > stream)
            stream.seek(0)
            self.assertEqual(stream.read(), self.content)

    def test_filename_out_explicit(self):
        # Works
        sh.run(bin.echo('-n', self.content).out_(self.filename))
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(bin.echo('-n', self.content).out_(self.filename))
        sh.run(bin.echo('-n', self.content).out_(self.filename))
        self.assertEqual(self.read_file(), self.content)

    # Tests for >>
    def test_filename_out_append(self):
        # Works
        sh.run(bin.echo('-n', self.content) >> self.filename)
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(bin.echo('-n', self.content) >> self.filename)
        sh.run(bin.echo('-n', self.content) >> self.filename)
        self.assertEqual(self.read_file(), self.content * 3)

    def test_file_out_append(self):
        # Works
        sh.run(bin.echo('-n', self.content) >> self.fileobj)
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(bin.echo('-n', self.content) >> self.fileobj)
        sh.run(bin.echo('-n', self.content) >> self.fileobj)
        self.assertEqual(self.read_file(), self.content * 3)

    def test_stream_out_append(self):
        with io.StringIO() as stream:
            # Works
            sh.run(bin.echo('-n', self.content) >> stream)
            stream.seek(0)
            self.assertEqual(stream.read(), self.content)
            # # Appending
            sh.run(bin.echo('-n', self.content) >> stream)
            sh.run(bin.echo('-n', self.content) >> stream)
            stream.seek(0)
            self.assertEqual(stream.read(), self.content * 3)

    def test_filename_out_append_explicit(self):
        # Works
        sh.run(bin.echo('-n', self.content).out_(self.filename, append=True))
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(bin.echo('-n', self.content).out_(self.filename, append=True))
        sh.run(bin.echo('-n', self.content).out_(self.filename, append=True))
        self.assertEqual(self.read_file(), self.content * 3)

    # Tests for <
    def test_filename_in(self):
        self.write_file(self.content)
        self.assertEqual(str(bin.cat < self.filename), self.content)
        # Passing the filename should make it open its own copy so no need to re-seek
        self.assertEqual(str(bin.cat < self.filename), self.content)

    def test_file_in(self):
        self.write_file(self.content)
        self.fileobj.seek(0)
        self.assertEqual(str(bin.cat < self.fileobj), self.content)
        # Passing a file object should pass it our actual file descriptor
        # So it moves our pointer as well
        self.assertEqual(str(bin.cat < self.fileobj), '')
        self.fileobj.seek(0)
        self.assertEqual(str(bin.cat < self.fileobj), self.content)

    def test_stream_in(self):
        with io.StringIO(self.content) as stream:
            # Like the file-object case, the stream's read position is shared.
            self.assertEqual(str(bin.cat < stream), self.content)
            self.assertEqual(str(bin.cat < stream), '')
            stream.seek(0)
            self.assertEqual(str(bin.cat < stream), self.content)

    def test_stream_in_explicit(self):
        self.write_file(self.content)
        self.assertEqual(str(bin.cat.in_(self.filename)), self.content)

    # Tests for >=
    def test_stderr_to_stdout(self):
        # Plain stderr redirect
        self.assertEqual(str(complain('--both', '-n', self.content) >= '&1'), self.content * 2)
        # Using the err_ function
        self.assertEqual(str(complain('--both', '-n', self.content).err_('&1')), self.content * 2)
        # Setting append=True should do nothing
        self.assertEqual(str(complain('--both', '-n', self.content).err_('&1', append=True)), self.content * 2)

    def test_filename_err(self):
        # Works
        sh.run(complain('-n', self.content) >= self.filename)
        self.assertEqual(self.read_file(), self.content)
        # Appending
        # NOTE: `>=` truncates like `>`, so two runs leave a single copy.
        sh.run(complain('-n', self.content) >= self.filename)
        sh.run(complain('-n', self.content) >= self.filename)
        self.assertEqual(self.read_file(), self.content)

    def test_file_err(self):
        # Works
        sh.run(complain('-n', self.content) >= self.fileobj)
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(complain('-n', self.content) >= self.fileobj)
        sh.run(complain('-n', self.content) >= self.fileobj)
        self.assertEqual(self.read_file(), self.content)

    def test_stream_err(self):
        with io.StringIO() as stream:
            # Works
            sh.run(complain('-n', self.content) >= stream)
            stream.seek(0)
            self.assertEqual(stream.read(), self.content)
            # Appending
            sh.run(complain('-n', self.content) >= stream)
            sh.run(complain('-n', self.content) >= stream)
            stream.seek(0)
            self.assertEqual(stream.read(), self.content)

    def test_filename_err_explicit(self):
        # Works
        sh.run(complain('-n', self.content).err_(self.filename))
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(complain('-n', self.content).err_(self.filename))
        sh.run(complain('-n', self.content).err_(self.filename))
        self.assertEqual(self.read_file(), self.content)

    # Tests for 2>>
    def test_filename_err_append(self):
        # Works
        sh.run(complain('-n', self.content).err_(self.filename, append=True))
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(complain('-n', self.content).err_(self.filename, append=True))
        sh.run(complain('-n', self.content).err_(self.filename, append=True))
        self.assertEqual(self.read_file(), self.content * 3)

    def test_file_err_append(self):
        # Works
        sh.run(complain('-n', self.content).err_(self.fileobj, append=True))
        self.assertEqual(self.read_file(), self.content)
        # Appending
        sh.run(complain('-n', self.content).err_(self.fileobj, append=True))
        sh.run(complain('-n', self.content).err_(self.fileobj, append=True))
        self.assertEqual(self.read_file(), self.content * 3)

    def test_stream_err_append(self):
        with io.StringIO() as stream:
            # Works
            sh.run(complain('-n', self.content).err_(stream, append=True))
            stream.seek(0)
            self.assertEqual(stream.read(), self.content)
            # # Appending
            sh.run(complain('-n', self.content).err_(stream, append=True))
            sh.run(complain('-n', self.content).err_(stream, append=True))
            stream.seek(0)
            self.assertEqual(stream.read(), self.content * 3)
class TestRedirectionChained(TestCase):
    """Tests that combine several redirections on a single command."""

    def test_stderr_stdout_devnull(self):
        # Baseline: stderr folded into stdout doubles the output.
        sh.run(complain('--both', '-n', self.content).err_('&1', append=True))
        self.assertEqual(self.read_stdout(), self.content * 2)
        # With stdout sent to /dev/null nothing reaches our stdout,
        # regardless of the order the redirections are chained in.
        sh.run(complain('--both', '-n', self.content).err_('&1', append=True).out_('/dev/null'))
        self.assertEqual(self.read_stdout(), '')
        sh.run(complain('--both', '-n', self.content).out_('/dev/null').err_('&1', append=True))
        self.assertEqual(self.read_stdout(), '')

    def test_all_mixed(self):
        stream_in = io.StringIO(self.content)
        with tempfile.TemporaryFile('rt') as tferr:
            # Go crazy and do every possible way of redirection together
            sh.run(((errcat('--both') < stream_in) >= tferr) > self.filename)
            tferr.seek(0)
            self.assertEqual(tferr.read(), self.content)
            self.assertEqual(self.fileobj.read(), self.content)

    def test_all_files(self):
        stream_in = io.StringIO(self.content)
        stream_out = io.StringIO()
        stream_err = io.StringIO()
        # Same as above but via the explicit in_/out_/err_ methods.
        sh.run(
            errcat('--both')
            .in_(stream_in)
            .out_(stream_out)
            .err_(stream_err)
        )
        stream_out.seek(0)
        stream_err.seek(0)
        self.assertEqual(stream_out.read(), self.content)
        self.assertEqual(stream_err.read(), self.content)
        stream_in.close()
        stream_out.close()
        stream_err.close()

    def test_file_redirects(self):
        some_input = tempfile.TemporaryFile('w+')
        some_output = tempfile.TemporaryFile('w+')
        some_input.write('hello world')
        some_input.seek(0)
        sh.run((sh('tr [a-z] [A-Z]') < some_input) > some_output)
        some_output.seek(0)
        result = some_output.read()
        some_input.close()
        some_output.close()
        self.assertEqual(result, 'HELLO WORLD')

    def test_io_strings(self):
        some_input = io.StringIO('hello world')
        some_output = io.StringIO()
        sh.run((sh('tr [a-z] [A-Z]') < some_input) > some_output)
        some_output.seek(0)
        result = some_output.read()
        some_input.close()
        some_output.close()
        self.assertEqual(result, 'HELLO WORLD')
        stream = io.StringIO()
        sh.run(bin.echo('-n', 'hello') > stream)
        stream.seek(0)
        self.assertEqual(stream.read(), 'hello')
        # An anonymous StringIO can be used inline as stdin.
        self.assertEqual(str(bin.cat < io.StringIO('hello world')), 'hello world')

    def test_file_redirects_alt(self):
        some_input = tempfile.TemporaryFile('w+')
        some_output = tempfile.TemporaryFile('w+')
        some_input.write('hello world')
        some_input.seek(0)
        sh.run(sh('tr [a-z] [A-Z]').in_(some_input).out_(some_output))
        some_output.seek(0)
        output_text = some_output.read()
        some_input.close()
        some_output.close()
        self.assertEqual(output_text, 'HELLO WORLD')
| 39.423077
| 111
| 0.607894
| 1,442
| 11,275
| 4.634535
| 0.089459
| 0.167889
| 0.091576
| 0.04489
| 0.805477
| 0.764776
| 0.745174
| 0.725722
| 0.688613
| 0.667963
| 0
| 0.004443
| 0.241419
| 11,275
| 285
| 112
| 39.561404
| 0.77692
| 0.068293
| 0
| 0.631841
| 0
| 0
| 0.031746
| 0
| 0
| 0
| 0
| 0
| 0.268657
| 1
| 0.129353
| false
| 0
| 0.019901
| 0
| 0.159204
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f30859f2df4810c43f6ad0e16b25c30dc3f8a73c
| 572
|
py
|
Python
|
samples/q20.for-and-arithmetics.py
|
mtanji/intro-algorithm-python
|
ed4fe6f729bbfd3ca15e97a5b851039fc0644f86
|
[
"MIT"
] | null | null | null |
samples/q20.for-and-arithmetics.py
|
mtanji/intro-algorithm-python
|
ed4fe6f729bbfd3ca15e97a5b851039fc0644f86
|
[
"MIT"
] | null | null | null |
samples/q20.for-and-arithmetics.py
|
mtanji/intro-algorithm-python
|
ed4fe6f729bbfd3ca15e97a5b851039fc0644f86
|
[
"MIT"
] | null | null | null |
# Q20
print("======= a =======")
# a) multiples of 4 in [1, 200), printed with two decimal places.
for x in range(1,200):
    if x % 4 == 0:
        print('{0:.2f}'.format(x))
print("======= b =======")
# b) left as a commented-out while-loop sketch of the same exercise.
#while x < 200:
#    if (x % 4 == 0):
#        x = x + 1
#        print('{0:d}'.format(x))
#    x = x + 2
print("======= c =======")
# c) even numbers in [1, 200).
for x in range(1,200):
    if x % 2 == 0:
        print('{0:d}'.format(x))
print("======= d =======")
# d) multiples of 4 in [1, 200), printed as integers.
for x in range(1,200):
    if x % 4 == 0:
        print('{0:d}'.format(x))
print("======= e =======")
# e) numbers in [1, 200) NOT divisible by 4; the increment of the loop
# variable has no effect on the next iteration of `range`.
for x in range(1,200):
    if x % 4 != 0:
        print('{0:d}'.format(x))
        x = x + 1
| 19.066667
| 34
| 0.361888
| 92
| 572
| 2.25
| 0.217391
| 0.120773
| 0.144928
| 0.21256
| 0.78744
| 0.748792
| 0.748792
| 0.541063
| 0.454106
| 0.454106
| 0
| 0.1
| 0.300699
| 572
| 29
| 35
| 19.724138
| 0.4175
| 0.18007
| 0
| 0.5
| 0
| 0
| 0.231602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f30f4e387eb2f4ded64e206dd0a0e1d0d1e7401d
| 106
|
py
|
Python
|
tests/integration/conftest.py
|
Chen188/chalice
|
293350669e4b870cd1d156f9afc0c2eefd6c24b0
|
[
"Apache-2.0"
] | 6,240
|
2017-07-31T22:56:40.000Z
|
2022-03-31T17:29:45.000Z
|
tests/integration/conftest.py
|
Chen188/chalice
|
293350669e4b870cd1d156f9afc0c2eefd6c24b0
|
[
"Apache-2.0"
] | 1,337
|
2017-07-31T22:27:10.000Z
|
2022-03-31T16:04:49.000Z
|
tests/integration/conftest.py
|
Chen188/chalice
|
293350669e4b870cd1d156f9afc0c2eefd6c24b0
|
[
"Apache-2.0"
] | 845
|
2017-08-01T10:28:07.000Z
|
2022-03-26T08:26:44.000Z
|
from pytest import fixture
@fixture(autouse=True)
def ensure_no_local_config(no_local_config):
    """Autouse fixture that forces `no_local_config` to run for every test.

    The body is intentionally empty: merely depending on `no_local_config`
    triggers that fixture's setup/teardown around each test.
    """
    pass
| 15.142857
| 44
| 0.801887
| 16
| 106
| 5
| 0.75
| 0.175
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 6
| 45
| 17.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b82cd9778c76e5e1d635af6b1a8771a071c2b612
| 49
|
py
|
Python
|
cuenca/http/__init__.py
|
andreshndz/cuenca-python
|
ca9f0f078584f1458e71baeb4cd15fcc55b40397
|
[
"MIT"
] | 6
|
2020-11-02T21:03:11.000Z
|
2022-01-13T23:12:01.000Z
|
cuenca/http/__init__.py
|
andreshndz/cuenca-python
|
ca9f0f078584f1458e71baeb4cd15fcc55b40397
|
[
"MIT"
] | 220
|
2020-05-13T19:20:57.000Z
|
2022-03-30T22:03:03.000Z
|
cuenca/http/__init__.py
|
andreshndz/cuenca-python
|
ca9f0f078584f1458e71baeb4cd15fcc55b40397
|
[
"MIT"
] | 14
|
2020-07-15T15:32:03.000Z
|
2021-09-17T19:11:14.000Z
|
from .client import Session
session = Session()
| 12.25
| 27
| 0.755102
| 6
| 49
| 6.166667
| 0.666667
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 3
| 28
| 16.333333
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b83fcf004ac5d02f987bb1919020a161eef20fb7
| 1,602
|
py
|
Python
|
Irctc LogIn.py
|
RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2-
|
b2626a5a9bd5630dfadba252bbc00760597fe304
|
[
"MIT"
] | null | null | null |
Irctc LogIn.py
|
RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2-
|
b2626a5a9bd5630dfadba252bbc00760597fe304
|
[
"MIT"
] | null | null | null |
Irctc LogIn.py
|
RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2-
|
b2626a5a9bd5630dfadba252bbc00760597fe304
|
[
"MIT"
] | null | null | null |
# This is a prototype; the COMPLETE version is NOT OPEN SOURCE.
# Contact the author to get the Enterprise Edition; this may require
# accepting certain Terms & Conditions under the User License Agreement.
import pyautogui,time,webbrowser

# Open the IRCTC login page and wait for it to load.
# NOTE(review): all of the Tab counts, sleeps and screen coordinates below
# are tied to one specific page layout and screen resolution -- fragile.
webbrowser.open("https://www.irctc.co.in/eticketing/loginHome.jsf")
time.sleep(5)
# Tab to the login control and activate it.
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite(["enter"])
# Fill in the credentials (placeholder values).
pyautogui.typewrite("\t")
pyautogui.typewrite("username")#Username morphed to keep it protected
pyautogui.typewrite("\t")
pyautogui.typewrite("password")#Password morphed to keep it protected
pyautogui.typewrite("\t")
time.sleep(8)
pyautogui.typewrite(["enter"])
time.sleep(8)
# Journey search form: source, destination and travel date (placeholders).
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite("SRC")
pyautogui.typewrite("\t")
pyautogui.typewrite("DES")
pyautogui.typewrite("\t")
pyautogui.typewrite("DD-MM-YYYY")
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite(["enter"])
time.sleep(8)
# Advance through the results page.
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite(["enter"])
time.sleep(8)
# Hard-coded screen click -- presumably selects a train/booking control.
pyautogui.click(541,467)
time.sleep(8)
# Passenger details form (placeholder values), then scroll and submit.
pyautogui.typewrite("XXXXXXXXXXXXXXXXXXXXXX")
pyautogui.typewrite("\t")
pyautogui.typewrite("XX")
pyautogui.typewrite("\t")
pyautogui.typewrite(["down"])
pyautogui.typewrite("\t")
pyautogui.typewrite("\t")
pyautogui.typewrite(["pagedown"])
pyautogui.typewrite(["pagedown"])
time.sleep(10)
pyautogui.typewrite(["enter"])
time.sleep(5)
# Move the mouse to a fixed final position.
pyautogui.moveTo(1222,165)
| 30.807692
| 111
| 0.741573
| 197
| 1,602
| 6.030457
| 0.324873
| 0.545455
| 0.335859
| 0.47138
| 0.661616
| 0.431818
| 0.431818
| 0.364478
| 0.31734
| 0.265993
| 0
| 0.015048
| 0.087391
| 1,602
| 51
| 112
| 31.411765
| 0.797538
| 0.158552
| 0
| 0.729167
| 0
| 0
| 0.147833
| 0.017028
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.020833
| 0.020833
| 0
| 0.020833
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b894fd73545bfbc6da275a29ff989b6dafc2022d
| 100
|
py
|
Python
|
src/gapp_calendar/__init__.py
|
decentfox/gapp-calendar
|
23f0a49d222dbf5f1536c8aa1bc1ccd187975a22
|
[
"BSD-3-Clause"
] | null | null | null |
src/gapp_calendar/__init__.py
|
decentfox/gapp-calendar
|
23f0a49d222dbf5f1536c8aa1bc1ccd187975a22
|
[
"BSD-3-Clause"
] | null | null | null |
src/gapp_calendar/__init__.py
|
decentfox/gapp-calendar
|
23f0a49d222dbf5f1536c8aa1bc1ccd187975a22
|
[
"BSD-3-Clause"
] | null | null | null |
from .endpoint import init_app as init_calendar_app
def init_app(app):
    """Initialize the calendar endpoint on *app* (delegates to `endpoint.init_app`)."""
    init_calendar_app(app)
| 16.666667
| 51
| 0.79
| 17
| 100
| 4.294118
| 0.470588
| 0.191781
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 100
| 5
| 52
| 20
| 0.858824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b8a87d3e2615ea1be30e2d7b69d547a2498a7009
| 61
|
py
|
Python
|
src/settings/__init__.py
|
PetrushynskyiOleksii/shooter-stats
|
d7b5326c88bc33792a958623dab9ecda7992d6d6
|
[
"MIT"
] | null | null | null |
src/settings/__init__.py
|
PetrushynskyiOleksii/shooter-stats
|
d7b5326c88bc33792a958623dab9ecda7992d6d6
|
[
"MIT"
] | 8
|
2018-09-03T07:04:02.000Z
|
2018-09-15T16:01:27.000Z
|
src/settings/__init__.py
|
PetrushynskyiOleksii/shooter-stats
|
d7b5326c88bc33792a958623dab9ecda7992d6d6
|
[
"MIT"
] | null | null | null |
"""Configuration package."""
from .config import app_config
| 15.25
| 30
| 0.754098
| 7
| 61
| 6.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 61
| 3
| 31
| 20.333333
| 0.833333
| 0.360656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b8b3b19a11c29679f59eab886233134b057bfdd4
| 57
|
py
|
Python
|
wrinkler/utils.py
|
brandongk-ubco/wrinkler
|
79090c42849a2db77997bcd9171d6d885cc45d4f
|
[
"MIT"
] | null | null | null |
wrinkler/utils.py
|
brandongk-ubco/wrinkler
|
79090c42849a2db77997bcd9171d6d885cc45d4f
|
[
"MIT"
] | null | null | null |
wrinkler/utils.py
|
brandongk-ubco/wrinkler
|
79090c42849a2db77997bcd9171d6d885cc45d4f
|
[
"MIT"
] | null | null | null |
def RoundUp(x, mul):
    """Round integer *x* up to the nearest multiple of *mul*.

    The previous implementation used the bit trick
    ``(x + mul - 1) & (-mul)``, which is only correct when *mul* is a
    power of two; for any other positive *mul* it silently returned a
    wrong value (e.g. ``RoundUp(5, 3)`` gave 5 instead of 6).  The
    arithmetic form below is correct for every positive *mul* and is
    bit-for-bit identical to the old trick whenever *mul* is a power
    of two, so existing callers are unaffected.

    :param x: integer value to round
    :param mul: positive integer step to round up to
    :return: smallest multiple of *mul* that is >= *x*
    """
    return ((x + mul - 1) // mul) * mul
| 19
| 35
| 0.491228
| 9
| 57
| 3.111111
| 0.666667
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.263158
| 57
| 2
| 36
| 28.5
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
b21c84a51cf3ee2528ecd694050e78c414d8e746
| 123
|
py
|
Python
|
sdo/util.py
|
zkdev/cc-utils
|
042c6632ca6f61a484bc0a71f85957aeba7f7278
|
[
"BSD-3-Clause"
] | null | null | null |
sdo/util.py
|
zkdev/cc-utils
|
042c6632ca6f61a484bc0a71f85957aeba7f7278
|
[
"BSD-3-Clause"
] | null | null | null |
sdo/util.py
|
zkdev/cc-utils
|
042c6632ca6f61a484bc0a71f85957aeba7f7278
|
[
"BSD-3-Clause"
] | null | null | null |
import functools
import logging
@functools.lru_cache
def component_logger(name: str):
    """Return the ``logging.Logger`` registered under *name*.

    The ``lru_cache`` memoizes the lookup per name; ``logging.getLogger``
    already returns one logger object per name, so the cache only
    short-circuits the registry lookup on repeat calls.
    """
    return logging.getLogger(name)
| 15.375
| 34
| 0.796748
| 16
| 123
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130081
| 123
| 7
| 35
| 17.571429
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
b25803843b186785a90dcac50331bfc880423479
| 99
|
py
|
Python
|
producer/app/__init__.py
|
abhayalekal74/halflife-regression
|
e11018427d1c38cd561f3bf7714c76eb9f7274a1
|
[
"MIT"
] | 1
|
2021-10-31T02:56:58.000Z
|
2021-10-31T02:56:58.000Z
|
producer/app/__init__.py
|
abhayalekal74/halflife-regression
|
e11018427d1c38cd561f3bf7714c76eb9f7274a1
|
[
"MIT"
] | null | null | null |
producer/app/__init__.py
|
abhayalekal74/halflife-regression
|
e11018427d1c38cd561f3bf7714c76eb9f7274a1
|
[
"MIT"
] | null | null | null |
from flask import Flask
# Create the module-level application object first so that the modules
# imported below can refer back to it (e.g. ``from app import app``).
app = Flask(__name__)
# These imports are deliberately placed *after* the app is created --
# presumably to avoid a circular import (TODO confirm against the apis /
# celery_tasks modules); importing them registers their handlers as a
# side effect, which is why the imports themselves appear "unused".
from app import apis
from app import celery_tasks
| 14.142857
| 29
| 0.787879
| 16
| 99
| 4.5625
| 0.5
| 0.191781
| 0.356164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 99
| 6
| 30
| 16.5
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a2c5384cfa51e2505876c586934e14588be2c1c5
| 153
|
py
|
Python
|
naive.py
|
ghazniali95/twitter_live_bot_detection
|
c1e30fe921321bc36a0243225997d719a625196f
|
[
"MIT"
] | null | null | null |
naive.py
|
ghazniali95/twitter_live_bot_detection
|
c1e30fe921321bc36a0243225997d719a625196f
|
[
"MIT"
] | null | null | null |
naive.py
|
ghazniali95/twitter_live_bot_detection
|
c1e30fe921321bc36a0243225997d719a625196f
|
[
"MIT"
] | null | null | null |
from sklearn.naive_bayes import GaussianNB
class Naive:
    """Placeholder wrapper for a naive-Bayes classifier (stub)."""

    def naive(self, data):
        """Stub entry point.

        *data* is expected to be a multilist (per the original comment),
        but the stub ignores it and always returns 0.
        """
        return 0
| 25.5
| 43
| 0.633987
| 19
| 153
| 5.052632
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009615
| 0.320261
| 153
| 6
| 44
| 25.5
| 0.913462
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a2d13436db0904442ff783931ee170a04e856e3c
| 248
|
py
|
Python
|
tests/fail/datetime_.py
|
Prowler-io/numpy-stubs
|
ceef07c4ad5c246dc3d25b0084b8f138c20ebe5b
|
[
"BSD-3-Clause"
] | 2
|
2019-07-08T15:40:55.000Z
|
2021-03-23T20:44:30.000Z
|
tests/fail/datetime_.py
|
Prowler-io/numpy-stubs
|
ceef07c4ad5c246dc3d25b0084b8f138c20ebe5b
|
[
"BSD-3-Clause"
] | 1
|
2019-06-20T13:16:48.000Z
|
2019-06-20T13:16:48.000Z
|
tests/fail/datetime_.py
|
Prowler-io/numpy-stubs
|
ceef07c4ad5c246dc3d25b0084b8f138c20ebe5b
|
[
"BSD-3-Clause"
] | 1
|
2019-07-08T09:29:51.000Z
|
2019-07-08T09:29:51.000Z
|
import numpy as np
np.datetime64('2001') + np.datetime64('2002') # E: Unsupported operand types for + ("datetime64" and "datetime64")
np.timedelta64(1) - np.datetime64('2001') # E: Unsupported operand types for - ("timedelta64" and "datetime64")
| 62
| 115
| 0.717742
| 32
| 248
| 5.5625
| 0.46875
| 0.202247
| 0.179775
| 0.269663
| 0.303371
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134259
| 0.129032
| 248
| 4
| 116
| 62
| 0.689815
| 0.540323
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a2f88cc2f934dfef8fc930266a77c96b9590b0ba
| 30,788
|
py
|
Python
|
storm_control/test/hal/tcp_tests.py
|
emanuega/storm-5
|
f41dba34d1ea219d80954f8b32f0c25b9e7a876c
|
[
"MIT"
] | null | null | null |
storm_control/test/hal/tcp_tests.py
|
emanuega/storm-5
|
f41dba34d1ea219d80954f8b32f0c25b9e7a876c
|
[
"MIT"
] | null | null | null |
storm_control/test/hal/tcp_tests.py
|
emanuega/storm-5
|
f41dba34d1ea219d80954f8b32f0c25b9e7a876c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import storm_analysis.sa_library.datareader as datareader
import storm_control.sc_library.halExceptions as halExceptions
import storm_control.hal4000.testing.testActions as testActions
import storm_control.hal4000.testing.testActionsTCP as testActionsTCP
import storm_control.hal4000.testing.testing as testing
import storm_control.test as test
#
# Test "Check Focus Lock" message.
#
class CheckFocusLockAction1(testActionsTCP.CheckFocusLock):
    """'Check Focus Lock' TCP action that expects the lock to be held."""
    def checkMessage(self, tcp_message):
        # The response must carry a truthy "focus_status", i.e. HAL
        # reports the focus lock as locked.
        assert(tcp_message.getResponse("focus_status"))
class CheckFocusLock1(testing.TestingTCP):
"""
Check that the focus lock returns that it is locked.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActions.Timer(100),
CheckFocusLockAction1(focus_scan = False,
num_focus_checks = 6)]
class CheckFocusLockAction2(testActionsTCP.CheckFocusLock):
def checkMessage(self, tcp_message):
assert not (tcp_message.getResponse("focus_status"))
class CheckFocusLock2(testing.TestingTCP):
"""
Check that the focus lock returns that it is not locked. Note that
moving the stage x um offsets the locked center position by x um.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActionsTCP.MoveStage(x = 10, y = 0),
testActions.Timer(200),
CheckFocusLockAction2(focus_scan = False,
num_focus_checks = 6)]
class CheckFocusLock3(testing.TestingTCP):
"""
Check that the focus lock can find (and hold) lock again.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActionsTCP.MoveStage(x = -6, y = 0),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = True,
num_focus_checks = 6,
scan_range = 8),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = False,
num_focus_checks = 6)]
class CheckFocusLock4(testing.TestingTCP):
"""
Check that the focus lock can't find lock again
if the scan range is too small.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActionsTCP.MoveStage(x = -6, y = 0),
testActions.Timer(200),
CheckFocusLockAction2(focus_scan = True,
num_focus_checks = 6,
scan_range = 1)]
class CheckFocusLockAction3(testActionsTCP.CheckFocusLock):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("duration") is not None)
class CheckFocusLock5(testing.TestingTCP):
"""
Check that the focus lock returns a duration in test mode.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetFocusLockModeAction1(mode_name = "Always On",
locked = False),
CheckFocusLockAction3(focus_scan = False,
num_focus_checks = 6,
test_mode = True)]
class CheckFocusLock6(testing.TestingTCP):
"""
Check that a search for offset starts from the last good z position.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActionsTCP.MoveStage(x = -6, y = 0),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = True,
num_focus_checks = 6,
scan_range = 8),
testActionsTCP.MoveStage(x = -12, y = 0),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = True,
num_focus_checks = 6,
scan_range = 8),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = False,
num_focus_checks = 6)]
class CheckFocusLock7(testing.TestingTCP):
"""
Check that the entire range is searched when scan_range = None.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActionsTCP.MoveStage(x = -44, y = 0),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = True,
num_focus_checks = 6),
testActions.Timer(200),
CheckFocusLockAction1(focus_scan = False,
num_focus_checks = 6)]
#
# Test "Find Sum" message.
#
class FindSumAction1(testActionsTCP.FindSum):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("found_sum") is not None)
assert(tcp_message.getResponse("focus_status"))
class FindSum1(testing.TestingTCP):
"""
Test that find sum returns immediately if it already has sum.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActions.Timer(100),
FindSumAction1(min_sum = 50)]
class FindSum2(testing.TestingTCP):
"""
Test that find sum can find sum & relock.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActionsTCP.MoveStage(x = -40.5, y = 0),
testActions.Timer(200),
FindSumAction1(min_sum = 100),
testActions.Timer(400),
CheckFocusLockAction1(focus_scan = False,
num_focus_checks = 6)]
class FindSumAction3(testActionsTCP.FindSum):
def checkMessage(self, tcp_message):
assert not (tcp_message.getResponse("focus_status"))
class FindSum3(testing.TestingTCP):
"""
Test failure if minimum sum cannot be found.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
FindSumAction3(min_sum = 1000)]
class FindSumAction4(testActionsTCP.FindSum):
def checkMessage(self, tcp_message):
assert (tcp_message.getResponse("duration") is not None)
class FindSum4(testing.TestingTCP):
"""
Test that we get a 'duration' response in test mode.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
FindSumAction4(min_sum = 100,
test_mode = True)]
#
# Test "Get Mosaic Settings" message.
#
class GetMosaicSettingsAction1(testActionsTCP.GetMosaicSettings):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("obj1") == "100x,0.160,0.0,0.0")
class GetMosaicSettings1(testing.TestingTCP):
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [GetMosaicSettingsAction1()]
class GetMosaicSettingsAction2(testActionsTCP.GetMosaicSettings):
def checkMessage(self, tcp_message):
assert not tcp_message.hasError()
class GetMosaicSettings2(testing.TestingTCP):
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [GetMosaicSettingsAction2(test_mode = True)]
#
# Test "Get Objective" message.
#
class GetObjectiveAction1(testActionsTCP.GetObjective):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("objective") == "100x")
class GetObjective1(testing.TestingTCP):
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [GetObjectiveAction1()]
class GetObjectiveAction2(testActionsTCP.GetObjective):
def checkMessage(self, tcp_message):
assert not tcp_message.hasError()
class GetObjective2(testing.TestingTCP):
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [GetObjectiveAction2(test_mode = True)]
#
# Test "Get Stage Position" message.
#
class GetStagePositionAction1(testActionsTCP.GetStagePosition):
    """'Get Stage Position' action that checks the reported (x, y).

    The expected coordinates are captured at construction time and
    compared against the TCP response in checkMessage().
    """
    def __init__(self, x = 0.0, y = 0.0, **kwds):
        super().__init__(**kwds)
        # Expected stage coordinates for the equality check below.
        self.x = x
        self.y = y
    def checkMessage(self, tcp_message):
        # Exact equality -- assumes the stage echoes back the commanded
        # position without rounding (TODO confirm for real hardware).
        assert(tcp_message.getResponse("stage_x") == self.x)
        assert(tcp_message.getResponse("stage_y") == self.y)
class GetStagePosition1(testing.TestingTCP):
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [GetStagePositionAction1()]
class GetStagePositionAction2(testActionsTCP.GetStagePosition):
def checkMessage(self, tcp_message):
assert not tcp_message.hasError()
class GetStagePosition2(testing.TestingTCP):
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [GetStagePositionAction2(test_mode = True)]
#
# Test "Move Stage" message.
#
class MoveStageAction1(testActionsTCP.MoveStage):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("duration") == 1)
class MoveStage1(testing.TestingTCP):
"""
This tests both 'actual' movement and test_mode movement.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
#
# We have to pause for 500ms so that the stage
# a chance to update it's position.
#
x = 10.0
y = 10.0
self.test_actions = [testActionsTCP.MoveStage(x = x, y = y),
testActions.Timer(500),
GetStagePositionAction1(x = x, y = y),
MoveStageAction1(test_mode = True, x = 0.0, y = 0.0)]
class MoveStage2(testing.TestingTCP):
"""
This tests the stage move watchdog timer.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
#
# We have to pause for 500ms so that the stage move watchdog
# timer has a chance to go off.
#
x = 10.0
y = 10.0
self.test_actions = [testActionsTCP.MoveStage(x = x, y = y),
testActions.Timer(500),
GetStagePositionAction1(x = x, y = y)]
#
# Test handling of messages that are not supported.
#
class NoSuchMessageAction1(testActionsTCP.NoSuchMessage):
def checkMessage(self, tcp_message):
assert tcp_message.hasError()
class NoSuchMessage1(testing.TestingTCP):
"""
Test sending a message that HAL does not handle.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [NoSuchMessageAction1()]
# test_mode = True]
#
# Test "Set Focus Lock Mode" message.
#
class SetFocusLockModeAction1(testActionsTCP.SetFocusLockMode):
def checkMessage(self, tcp_message):
assert not tcp_message.hasError()
class SetFocusLockMode1(testing.TestingTCP):
"""
Test changing to "Always On" and starting the focus lock.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActions.Timer(2000)]
class SetFocusLockModeAction2(testActionsTCP.SetFocusLockMode):
def checkMessage(self, tcp_message):
assert tcp_message.hasError()
class SetFocusLockMode2(testing.TestingTCP):
"""
Test changing trying to change to a mode that does not exist.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetFocusLockModeAction2(mode_name = "XYZZY")]
#
# Test "Set Lock Target" message.
#
class SetLockTarget1(testing.TestingTCP):
"""
Test setting the lock target.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [testActions.ShowGUIControl(control_name = "focus lock"),
testActions.Timer(100),
SetFocusLockModeAction1(mode_name = "Always On",
locked = True),
testActions.Timer(1000),
testActionsTCP.SetLockTarget(lock_target = 0.5),
testActions.Timer(1000)]
#
# Test "Set Parameters" message.
#
class SetParametersAction1(testActionsTCP.SetParameters):
def checkMessage(self, tcp_message):
assert not tcp_message.hasError()
class SetParameters1(testing.TestingTCP):
"""
Test setting the parameters to ones that exist.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
fname = "256x512"
self.test_actions = [testActions.LoadParameters(filename = test.halXmlFilePathAndName(fname + ".xml")),
SetParametersAction1(name_or_index = fname)]
class SetParametersAction2(testActionsTCP.SetParameters):
def checkMessage(self, tcp_message):
assert tcp_message.hasError()
class SetParameters2(testing.TestingTCP):
"""
Test setting parameters to ones that don't exist (in test mode).
"""
def __init__(self, **kwds):
super().__init__(**kwds)
fname = "256x512"
self.test_actions = [SetParametersAction2(name_or_index = fname, test_mode = True)]
class SetParameters3(testing.TestingTCP):
"""
Test setting parameters to ones that don't exist.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
fname = "256x512"
self.test_actions = [SetParametersAction2(name_or_index = fname)]
class SetParameters4(testing.TestingTCP):
"""
Test setting the parameters to ones that exist (in test mode).
"""
def __init__(self, **kwds):
super().__init__(**kwds)
fname = "256x512"
self.test_actions = [testActions.LoadParameters(filename = test.halXmlFilePathAndName(fname + ".xml")),
SetParametersAction1(name_or_index = fname, test_mode = True)]
class SetParameters5(testing.TestingTCP):
"""
Test setting the parameters to ones that exist, and
are already the current parameters.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetParametersAction1(name_or_index = 0)]
#
# Test "Set Progression" message.
#
# FIXME: These are really just checking that there are no communication errors. They
# don't check whether the progressions actually work.
#
class SetProgressionAction1(testActionsTCP.SetProgression):
def checkMessage(self, tcp_message):
assert not tcp_message.hasError()
class SetProgression1(testing.TestingTCP):
"""
Test loading a power file that exists.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetProgressionAction1(filename = test.halXmlFilePathAndName("prog_test.power"),
prog_type = "file"),
testActions.Timer(1000)]
class SetProgressionAction2(testActionsTCP.SetProgression):
def checkMessage(self, tcp_message):
assert tcp_message.hasError()
class SetProgression2(testing.TestingTCP):
"""
Test loading a power file that does not exist.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetProgressionAction2(filename = "foo.power",
prog_type = "file")]
class SetProgression3(testing.TestingTCP):
"""
Test loading a power file that exists in test_mode.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetProgressionAction1(filename = test.halXmlFilePathAndName("prog_test.power"),
prog_type = "file",
test_mode = True)]
class SetProgression4(testing.TestingTCP):
"""
Test loading a power file that does not exist in test_mode.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetProgressionAction2(filename = "foo.power",
prog_type = "file",
test_mode = True)]
class SetProgression5(testing.TestingTCP):
"""
Test locking out the progression and that it resets after filming.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.test_actions = [SetProgressionAction1(filename = test.halXmlFilePathAndName("prog_test.power"),
prog_type = "file"),
testActions.Timer(500),
SetProgressionAction1(prog_type = "lockedout"),
testActions.Timer(500),
TakeMovieAction1(directory = test.dataDirectory(),
length = 5,
name = "movie_01"),
testActions.Timer(500)]
#
# Test "Take Movie" message.
#
class TakeMovieAction1(testActionsTCP.TakeMovie):
    """'Take Movie' action that verifies the movie file on disk."""
    def checkMessage(self, tcp_message):
        # Open the .dax movie that the action requested (self.directory /
        # self.name come from the TakeMovie base action's kwargs).
        movie = datareader.inferReader(os.path.join(self.directory, self.name + ".dax"))
        # Expect a full-frame (512x512) film with the requested length.
        assert(movie.filmSize() == [512, 512, self.length])
class TakeMovie1(testing.TestingTCP):
"""
Request a movie by TCP and verify that it is taken & the correct size.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
self.test_actions = [TakeMovieAction1(directory = directory,
length = 5,
name = filename)]
class TakeMovieAction2(testActionsTCP.TakeMovie):
def checkMessage(self, tcp_message):
assert tcp_message.hasError()
class TakeMovie2(testing.TestingTCP):
"""
Test TCP movie overwrite handling.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
self.test_actions = [TakeMovieAction1(directory = directory,
length = 5,
name = filename),
TakeMovieAction2(directory = directory,
length = 2,
name = filename,
overwrite = False),
TakeMovieAction2(directory = directory,
length = 2,
name = filename,
overwrite = False,
test_mode = True),
TakeMovieAction1(directory = directory,
length = 5,
name = filename)]
class TakeMovieAction3(testActionsTCP.TakeMovie):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("disk_usage") == 25.0)
assert(tcp_message.getResponse("duration") == 1.0)
class TakeMovie3(testing.TestingTCP):
"""
Simple test of test_mode.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
self.test_actions = [TakeMovieAction3(directory = directory,
length = 50,
name = filename,
test_mode = True)]
class TakeMovieAction4(testActionsTCP.TakeMovie):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("disk_usage") == 6.25)
assert(tcp_message.getResponse("duration") == 1.0)
class TakeMovie4(testing.TestingTCP):
"""
Test test_mode w/ parameters request.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
p_name = "256x256"
self.test_actions = [testActions.LoadParameters(filename = test.halXmlFilePathAndName(p_name + ".xml")),
TakeMovieAction4(directory = directory,
length = 50,
name = filename,
parameters = p_name,
test_mode = True)]
class TakeMovie5(testing.TestingTCP):
"""
Test test_mode w/ parameters request.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
self.test_actions = [TakeMovieAction3(directory = directory,
length = 50,
name = filename,
parameters = "default",
test_mode = True)]
class TakeMovie6(testing.TestingTCP):
"""
Request a movie by TCP with parameters that exist.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
self.test_actions = [TakeMovieAction1(directory = directory,
length = 5,
name = filename,
parameters = "default")]
class TakeMovieAction7(testActionsTCP.TakeMovie):
def checkMessage(self, tcp_message):
movie = datareader.inferReader(os.path.join(self.directory, self.name + ".dax"))
assert(movie.filmSize() == [256, 256, self.length])
class TakeMovie7(testing.TestingTCP):
"""
Request a movie by TCP with parameters that exist but that
are not the current parameters.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
p_name = "256x256"
self.test_actions = [testActions.LoadParameters(filename = test.halXmlFilePathAndName(p_name + ".xml")),
TakeMovieAction7(directory = directory,
length = 5,
name = filename,
parameters = p_name)]
class TakeMovie8(testing.TestingTCP):
"""
Request a movie by TCP with parameters that don't exist (test_mode).
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
p_name = "256x256"
self.test_actions = [TakeMovieAction2(directory = directory,
length = 5,
name = filename,
parameters = p_name,
test_mode = True)]
class TakeMovie9(testing.TestingTCP):
"""
Request a movie by TCP with parameters that don't exist.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
p_name = "256x256"
self.test_actions = [TakeMovieAction2(directory = directory,
length = 5,
name = filename,
parameters = p_name)]
class TakeMovieAction10(testActionsTCP.TakeMovie):
def checkMessage(self, tcp_message):
assert(tcp_message.getResponse("found_spots") == None)
class TakeMovie10(testing.TestingTCP):
"""
Request a movie by TCP without a spot counter and verify that
'found_spots' is None.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
self.test_actions = [TakeMovieAction10(directory = directory,
length = 5,
name = filename)]
class TakeMovieAction11(testActionsTCP.TakeMovie):
def checkMessage(self, tcp_message):
print(tcp_message)
assert(tcp_message.getResponse("found_spots") == 0)
class TakeMovie11(testing.TestingTCP):
"""
Request a movie by TCP with a spot counter and verify that
'found_spots' is 0.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
directory = test.dataDirectory()
filename = "movie_01"
# Remove old movie (if any).
fullname = os.path.join(directory, filename + ".dax")
if os.path.exists(fullname):
os.remove(fullname)
self.test_actions = [TakeMovieAction11(directory = directory,
length = 5,
name = filename)]
| 36.093787
| 124
| 0.533455
| 2,691
| 30,788
| 5.873653
| 0.120401
| 0.036062
| 0.031317
| 0.048399
| 0.759332
| 0.737568
| 0.727509
| 0.71049
| 0.653043
| 0.610022
| 0
| 0.024147
| 0.371833
| 30,788
| 852
| 125
| 36.13615
| 0.793123
| 0.104716
| 0
| 0.707661
| 0
| 0
| 0.027645
| 0
| 0
| 0
| 0
| 0.001174
| 0.0625
| 1
| 0.145161
| false
| 0
| 0.014113
| 0
| 0.302419
| 0.002016
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a2fd9e19593c67eca88d032b4622feb48b16b57d
| 1,037
|
py
|
Python
|
AutotestWebD/all_models/models/__init__.py
|
yangjourney/sosotest
|
2e88099a829749910ca325253c9b1a2e368d21a0
|
[
"MIT"
] | 422
|
2019-08-18T05:04:20.000Z
|
2022-03-31T06:49:19.000Z
|
AutotestWebD/all_models/models/__init__.py
|
LinSongJian1985/sosotest
|
091863dee531b5726650bb63efd6f169267cbeb4
|
[
"MIT"
] | 10
|
2019-10-24T09:55:38.000Z
|
2021-09-29T17:28:43.000Z
|
AutotestWebD/all_models/models/__init__.py
|
LinSongJian1985/sosotest
|
091863dee531b5726650bb63efd6f169267cbeb4
|
[
"MIT"
] | 202
|
2019-08-18T05:04:27.000Z
|
2022-03-30T05:57:18.000Z
|
from all_models.models.A0001_user import *
from all_models.models.A0002_config import *
from all_models.models.A0003_attribute import *
from all_models.models.A0004_globals import *
from all_models.models.A0005_interface import *
from all_models.models.A0006_testcase import *
from all_models.models.A0007_task import *
from all_models.models.A0008_standard_interface import *
from all_models.models.A0009_python_manage import *
from all_models.models.A0010_webprotal import *
from all_models.models.A0011_version_manage import *
from all_models.models.A0012_admin import *
from all_models.models.A0013_ui_test import *
from all_models.models.A0014_ui_task import *
from all_models.models.A0014_ui_testcase import *
from all_models.models.A0015_ui_globals import *
from all_models.models.A0016_ui_version_manage import *
from all_models.models.A0017_ui_package_manage import *
from all_models.models.A0018_ui_mobile_server import *
from all_models.models.A0020_deployment_tool import *
from all_models.models.A0021_task_suite import *
| 43.208333
| 56
| 0.856316
| 162
| 1,037
| 5.12963
| 0.277778
| 0.176895
| 0.32852
| 0.480144
| 0.731649
| 0.521059
| 0.168472
| 0
| 0
| 0
| 0
| 0.088328
| 0.082932
| 1,037
| 23
| 57
| 45.086957
| 0.785489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0c165f25cb0559c177c08bb8c60c048ece8aadcd
| 404
|
py
|
Python
|
desdeo_emo/selection/__init__.py
|
giomara-larraga/desdeo-emo
|
d891e9a12b25d02af5dfba5b17b4c2c4c0bd3d53
|
[
"MIT"
] | 3
|
2021-05-02T17:42:39.000Z
|
2022-02-16T05:22:56.000Z
|
desdeo_emo/selection/__init__.py
|
giomara-larraga/desdeo-emo
|
d891e9a12b25d02af5dfba5b17b4c2c4c0bd3d53
|
[
"MIT"
] | 32
|
2019-10-30T08:33:13.000Z
|
2022-03-12T00:54:02.000Z
|
desdeo_emo/selection/__init__.py
|
giomara-larraga/desdeo-emo
|
d891e9a12b25d02af5dfba5b17b4c2c4c0bd3d53
|
[
"MIT"
] | 12
|
2019-10-16T10:00:47.000Z
|
2022-03-17T13:31:41.000Z
|
"""This module provides implementations of various selection operators.
"""
__all__ = ["APD_Select", "NSGAIII_select", "tour_select", "MOEAD_select"]
from desdeo_emo.selection.APD_Select_constraints import APD_Select
from desdeo_emo.selection.NSGAIII_select import NSGAIII_select
from desdeo_emo.selection.tournament_select import tour_select
from desdeo_emo.selection.MOEAD_select import MOEAD_select
| 40.4
| 73
| 0.846535
| 54
| 404
| 5.944444
| 0.37037
| 0.124611
| 0.199377
| 0.23676
| 0.34891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081683
| 404
| 9
| 74
| 44.888889
| 0.865229
| 0.168317
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0c47ee6f50a3d4a88a91bddd272870b1b7636e29
| 221
|
py
|
Python
|
build/lib/sui/ml/__init__.py
|
AdrianLeonLhy/sui
|
06e0636e98e862a0fa95d97cfd56b1f0ef51b299
|
[
"MIT"
] | null | null | null |
build/lib/sui/ml/__init__.py
|
AdrianLeonLhy/sui
|
06e0636e98e862a0fa95d97cfd56b1f0ef51b299
|
[
"MIT"
] | null | null | null |
build/lib/sui/ml/__init__.py
|
AdrianLeonLhy/sui
|
06e0636e98e862a0fa95d97cfd56b1f0ef51b299
|
[
"MIT"
] | null | null | null |
"""sui.ml
Machine learning algorithm implementations
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
model_class_list = ["sui.ml.mf.FunkSVD() => FunkSVD"]
| 24.555556
| 53
| 0.80543
| 28
| 221
| 5.785714
| 0.642857
| 0.185185
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108597
| 221
| 8
| 54
| 27.625
| 0.822335
| 0.221719
| 0
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0c4f81eb72dabf3458024ea2e90cf1904ac2dfa1
| 3,075
|
py
|
Python
|
src/insalata/builder/config/interface.py
|
tumi8/INSALATA
|
e4c552023334f709b9586f664b7e049036133d33
|
[
"Apache-2.0"
] | 6
|
2016-11-25T17:31:57.000Z
|
2018-02-15T16:39:38.000Z
|
src/insalata/builder/config/interface.py
|
tumi8/INSALATA
|
e4c552023334f709b9586f664b7e049036133d33
|
[
"Apache-2.0"
] | null | null | null |
src/insalata/builder/config/interface.py
|
tumi8/INSALATA
|
e4c552023334f709b9586f664b7e049036133d33
|
[
"Apache-2.0"
] | null | null | null |
import os
import uuid
import json
import subprocess
from insalata.builder.decorator import builderFor
from insalata.helper.ansibleWrapper import addToKnownHosts
@builderFor(action="configureInterface", template=["ubuntu"])
def configureInterfaceAnsibleDebian(logger, interface):
"""
Configure a single interface on the host
:param logger: A logger used for logging possible errors.
:type logger: seealso:: :class:`logging:Logger`
:param interface: The interface to configure
:type interface: Interface
"""
host = interface.getHost()
target = host.getID() if host.getNameApplied() else host.getTemplate().getID()
addToKnownHosts(target)
#build json with host.template, host.id and host.interfaces
data = {
"target": target,
"interfaces": [{
"iface": interface.getID(),
"type": "interface",
"inet": "dhcp" if interface.isDhcp() else "static",
"gateway": None if interface.isDhcp() else list(interface.getAddresses())[0].getGateway(),
"addresses": None if interface.isDhcp() else list(interface.getAddresses())[0].getID() + "/" + str(list(interface.getAddresses())[0].getPrefix())
}]
}
filename = str(uuid.uuid4()) + ".json"
with open(filename, 'w') as outfile:
json.dump(data, outfile)
#run with json
logger.info("[{0}] Configure interface {1} on machine named '{2}'.".format(host.getID(), interface.getID(), target))
subprocess.call('ansible-playbook /etc/insalata/template/ansible/interfaces/debian_interfaces.yml --extra-vars "@' + filename + '" -v -c paramiko', shell=True)
#remove json
if os.path.exists(filename):
os.remove(filename)
@builderFor(action="unconfigureInterface", template=["ubuntu"])
def unconfigureInterfaceAnsibleDebian(logger, interface):
"""
Remove/unconfigure a single interface on the host
:param logger: A logger used for logging possible errors.
:type logger: seealso:: :class:`logging:Logger`
:param interface: The interface to unconfigure
:type interface: Interface
"""
host = interface.getHost()
target = host.getID() if host.getNameApplied() else host.getTemplate().getID()
addToKnownHosts(target)
#build json with host.template, host.id and host.interfaces
data = {
"target": target,
"interfaces": [{
"iface": interface.getID(),
"type": "interface",
"inet": "dhcp" if interface.isDhcp() else "static",
"delete": True
}]
}
filename = str(uuid.uuid4()) + ".json"
with open(filename, 'w') as outfile:
json.dump(data, outfile)
#run with json
logger.info("[{0}] Unconfigure interface {1} on machine named '{2}'.".format(host.getID(), interface.getID(), target))
subprocess.call('ansible-playbook /etc/insalata/template/ansible/interfaces/debian_interfaces.yml --extra-vars "@' + filename + '"', shell=True)
#remove json
if os.path.exists(filename):
os.remove(filename)
| 35.755814
| 163
| 0.653659
| 338
| 3,075
| 5.940828
| 0.289941
| 0.025896
| 0.033865
| 0.041833
| 0.753984
| 0.753984
| 0.753984
| 0.753984
| 0.753984
| 0.703187
| 0
| 0.004519
| 0.208455
| 3,075
| 85
| 164
| 36.176471
| 0.82046
| 0.20065
| 0
| 0.612245
| 0
| 0.040816
| 0.209068
| 0.052897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.122449
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0c565760a5a18ef387d995b298f902ba98187603
| 69
|
py
|
Python
|
python_runtime/exonum_runtime/runtime/__init__.py
|
alekseysidorov/exonum-python-backend
|
fae38042acba4c7fd9ca05f6afa1e9bec54dd86d
|
[
"Apache-2.0"
] | 2
|
2019-10-06T17:23:08.000Z
|
2019-10-07T09:35:59.000Z
|
python_runtime/exonum_runtime/runtime/__init__.py
|
alekseysidorov/exonum-python-backend
|
fae38042acba4c7fd9ca05f6afa1e9bec54dd86d
|
[
"Apache-2.0"
] | null | null | null |
python_runtime/exonum_runtime/runtime/__init__.py
|
alekseysidorov/exonum-python-backend
|
fae38042acba4c7fd9ca05f6afa1e9bec54dd86d
|
[
"Apache-2.0"
] | 1
|
2020-01-18T09:29:30.000Z
|
2020-01-18T09:29:30.000Z
|
from .runtime import PythonRuntime
from .config import Configuration
| 23
| 34
| 0.855072
| 8
| 69
| 7.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 35
| 34.5
| 0.967213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a74c7808ba50ec0e8036ecc5afd174ee08035aba
| 2,088
|
py
|
Python
|
leetcode_python/Array/sort-array-by-parity-ii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 18
|
2019-08-01T07:45:02.000Z
|
2022-03-31T18:05:44.000Z
|
leetcode_python/Array/sort-array-by-parity-ii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Array/sort-array-by-parity-ii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 15
|
2019-12-29T08:46:20.000Z
|
2022-03-08T14:14:05.000Z
|
# V0
class Solution:
    def sortArrayByParityII(self, A):
        """
        Sort A in place by parity (evens first, stable), then interleave:
        take evens from the front and odds from the back of the sorted list.
        :type A: List[int]
        :rtype: List[int]
        """
        A.sort(key=lambda v: v % 2)
        half = len(A) // 2
        interleaved = []
        for idx in range(half):
            interleaved.append(A[idx])
            interleaved.append(A[len(A) - 1 - idx])
        return interleaved
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/83045735
class Solution(object):
    def sortArrayByParityII(self, A):
        """
        Split A into odd and even values, then rebuild the answer by
        alternately popping from the even and odd lists (even slot first).
        :type A: List[int]
        :rtype: List[int]
        """
        odds = [v for v in A if v % 2 == 1]
        evens = [v for v in A if v % 2 == 0]
        rebuilt = []
        take_even = True
        while odds or evens:
            source = evens if take_even else odds
            rebuilt.append(source.pop())
            take_even = not take_even
        return rebuilt
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/83045735
class Solution:
    def sortArrayByParityII(self, A):
        """
        Stable in-place sort by parity puts evens in the first half and
        odds in the second; walk two cursors from both ends to interleave.
        :type A: List[int]
        :rtype: List[int]
        """
        A.sort(key=lambda item: item % 2)
        n = len(A)
        paired = []
        front, back = 0, n - 1
        while front < n // 2:
            paired.append(A[front])
            paired.append(A[back])
            front += 1
            back -= 1
        return paired
# V1''
# https://blog.csdn.net/fuxuemingzhu/article/details/83045735
class Solution:
    def sortArrayByParityII(self, A):
        """
        Single pass: write each value into the next free slot of matching
        parity (evens at indices 0,2,4,..., odds at 1,3,5,...).
        :type A: List[int]
        :rtype: List[int]
        """
        placed = [0] * len(A)
        next_even, next_odd = 0, 1
        for value in A:
            if value % 2:
                placed[next_odd] = value
                next_odd += 2
            else:
                placed[next_even] = value
                next_even += 2
        return placed
# V2
# Time: O(n)
# Space: O(1)
class Solution(object):
    def sortArrayByParityII(self, A):
        """
        In-place two-pointer fix-up: scan the even indices; whenever an odd
        value sits there, advance an odd-index cursor to the next misplaced
        even value and swap. Time O(n), space O(1); returns the same list.
        :type A: List[int]
        :rtype: List[int]
        """
        odd_cursor = 1
        for even_cursor in range(0, len(A), 2):
            if A[even_cursor] % 2 == 0:
                continue  # even slot already holds an even value
            while A[odd_cursor] % 2:
                odd_cursor += 2
            A[even_cursor], A[odd_cursor] = A[odd_cursor], A[even_cursor]
        return A
| 24
| 61
| 0.426724
| 264
| 2,088
| 3.375
| 0.19697
| 0.078563
| 0.145903
| 0.151515
| 0.744108
| 0.744108
| 0.744108
| 0.744108
| 0.717172
| 0.717172
| 0
| 0.044877
| 0.434387
| 2,088
| 87
| 62
| 24
| 0.709568
| 0.197318
| 0
| 0.566038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0
| 0
| 0.283019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a78a98604544ef2fc9039ecdc16cacaab1edfa4b
| 25
|
py
|
Python
|
bsp2obj/__init__.py
|
JakobCh/BSP2OBJ
|
1567604f86177fd94bd2141c82a3b585d36aae5d
|
[
"MIT"
] | 19
|
2018-03-22T21:20:35.000Z
|
2022-03-28T15:47:18.000Z
|
bsp2obj/__init__.py
|
JakobCh/BSP2OBJ
|
1567604f86177fd94bd2141c82a3b585d36aae5d
|
[
"MIT"
] | 1
|
2021-08-28T02:27:07.000Z
|
2021-08-28T02:27:07.000Z
|
bsp2obj/__init__.py
|
JakobCh/BSP2OBJ
|
1567604f86177fd94bd2141c82a3b585d36aae5d
|
[
"MIT"
] | 3
|
2019-10-28T03:21:51.000Z
|
2021-08-28T02:26:32.000Z
|
from bsp2obj.bsp import *
| 25
| 25
| 0.8
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.12
| 25
| 1
| 25
| 25
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a7aa61961b5b52e0e0c297a5ad0efd3cd50204a4
| 47
|
py
|
Python
|
dotenv_manager/InteractorException.py
|
danilocgsilva/dotenv_manager
|
3d7bf249bd8fd6b2bd8ce88b9c6fcf7214ae958f
|
[
"MIT"
] | null | null | null |
dotenv_manager/InteractorException.py
|
danilocgsilva/dotenv_manager
|
3d7bf249bd8fd6b2bd8ce88b9c6fcf7214ae958f
|
[
"MIT"
] | null | null | null |
dotenv_manager/InteractorException.py
|
danilocgsilva/dotenv_manager
|
3d7bf249bd8fd6b2bd8ce88b9c6fcf7214ae958f
|
[
"MIT"
] | null | null | null |
class InteractorException(Exception):
    """Base exception raised when an interactor operation fails."""
| 15.666667
| 37
| 0.787234
| 4
| 47
| 9.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 38
| 23.5
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
38f2d71715a3ffa2c3d97a74186d97e0e7b29e8a
| 191
|
py
|
Python
|
my_wallet/wallets/apps.py
|
ibolorino/wallet-backend
|
99b00caebdbcf4b5e7f0f7b183615231405b15b6
|
[
"MIT"
] | null | null | null |
my_wallet/wallets/apps.py
|
ibolorino/wallet-backend
|
99b00caebdbcf4b5e7f0f7b183615231405b15b6
|
[
"MIT"
] | null | null | null |
my_wallet/wallets/apps.py
|
ibolorino/wallet-backend
|
99b00caebdbcf4b5e7f0f7b183615231405b15b6
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class WalletsConfig(AppConfig):
    """Django app configuration for the wallets app."""

    verbose_name = "Wallets"
    name = "my_wallet.wallets"

    def ready(self):
        # Imported for its side effect: registers the app's signal handlers
        # once Django has finished loading the app registry.
        import my_wallet.wallets.signals
| 19.1
| 40
| 0.712042
| 23
| 191
| 5.782609
| 0.695652
| 0.120301
| 0.225564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204188
| 191
| 9
| 41
| 21.222222
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.125654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
38fc832232003e2863f6a3e26277e7e1929264bd
| 23
|
py
|
Python
|
casbin/persist/__init__.py
|
goodrain/pycasbin
|
1a481ba1af7619e1cc7e83896581d14976927d80
|
[
"Apache-2.0"
] | 1,674
|
2015-08-17T03:54:10.000Z
|
2022-03-29T12:07:43.000Z
|
casbin/persist/__init__.py
|
goodrain/pycasbin
|
1a481ba1af7619e1cc7e83896581d14976927d80
|
[
"Apache-2.0"
] | 183
|
2015-08-25T11:34:21.000Z
|
2022-03-22T15:33:59.000Z
|
casbin/persist/__init__.py
|
goodrain/pycasbin
|
1a481ba1af7619e1cc7e83896581d14976927d80
|
[
"Apache-2.0"
] | 359
|
2015-08-21T20:37:48.000Z
|
2022-03-23T15:41:12.000Z
|
from .adapter import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ac141153c77949777d926ccfe0ecea157878fe24
| 206
|
py
|
Python
|
lms/chat/views.py
|
nag-gd9/lms
|
070f674976807ec93fc40792d4e23afb0640220e
|
[
"MIT"
] | null | null | null |
lms/chat/views.py
|
nag-gd9/lms
|
070f674976807ec93fc40792d4e23afb0640220e
|
[
"MIT"
] | null | null | null |
lms/chat/views.py
|
nag-gd9/lms
|
070f674976807ec93fc40792d4e23afb0640220e
|
[
"MIT"
] | 1
|
2021-12-15T04:49:47.000Z
|
2021-12-15T04:49:47.000Z
|
from django.shortcuts import render
def chat(request):
    """Render the chat landing page."""
    template_name = 'chat.html'
    return render(request, template_name)
def room(request, room_name):
    """Render a single chat room page, passing the room name to the template."""
    context = {'room_name': room_name}
    return render(request, 'room.html', context)
| 22.888889
| 41
| 0.679612
| 27
| 206
| 5.074074
| 0.444444
| 0.175182
| 0.277372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194175
| 206
| 9
| 42
| 22.888889
| 0.825301
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
ac321dcfb873d0e8d998c7a73148eea13e0c835f
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/aiohttp/http_writer.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/aiohttp/http_writer.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/aiohttp/http_writer.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/a1/f0/c0/107f0826bcb4d775ec6ebadf36257cd85afb1acdddeecc9db8cc75bd85
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.302083
| 0
| 96
| 1
| 96
| 96
| 0.59375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac32ff945e1a1759349b7281b7fdd838bdf5b03e
| 5,303
|
py
|
Python
|
kmip/demos/pie/register_certificate.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | 12
|
2016-09-14T21:59:10.000Z
|
2020-03-11T07:37:25.000Z
|
kmip/demos/pie/register_certificate.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:43:48.000Z
|
2021-06-25T15:43:48.000Z
|
kmip/demos/pie/register_certificate.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client
from kmip.pie import objects
if __name__ == '__main__':
    logger = utils.build_console_logger(logging.INFO)
    # Parse command-line options; only the KMIP config section name is used.
    parser = utils.build_cli_parser()
    opts, args = parser.parse_args(sys.argv[1:])
    config = opts.config
    # DER-encoded X.509 certificate used as the demo payload.
    value = (
        b'\x30\x82\x03\x12\x30\x82\x01\xFA\xA0\x03\x02\x01\x02\x02\x01\x01'
        b'\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30'
        b'\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D'
        b'\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x54\x45\x53\x54\x31\x0E\x30'
        b'\x0C\x06\x03\x55\x04\x0B\x13\x05\x4F\x41\x53\x49\x53\x31\x0D\x30'
        b'\x0B\x06\x03\x55\x04\x03\x13\x04\x4B\x4D\x49\x50\x30\x1E\x17\x0D'
        b'\x31\x30\x31\x31\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x17\x0D\x32'
        b'\x30\x31\x31\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x3B\x31\x0B'
        b'\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D\x30\x0B\x06'
        b'\x03\x55\x04\x0A\x13\x04\x54\x45\x53\x54\x31\x0E\x30\x0C\x06\x03'
        b'\x55\x04\x0B\x13\x05\x4F\x41\x53\x49\x53\x31\x0D\x30\x0B\x06\x03'
        b'\x55\x04\x03\x13\x04\x4B\x4D\x49\x50\x30\x82\x01\x22\x30\x0D\x06'
        b'\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F'
        b'\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42'
        b'\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76\x00'
        b'\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55\xF8\x00'
        b'\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46\x48\x34\x6D'
        b'\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83\xBC\x4D\x7D\xC7'
        b'\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7\xD0\x3F\xC6\x26\x7F'
        b'\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7\xC2\xD8\x33\xE5\xA5\xF4'
        b'\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11\x00\xF8\xAA\x21\x49\x00\xDF'
        b'\x8B\x65\x08\x9F\x98\x13\x5B\x1C\x67\xB7\x01\x67\x5A\xBD\xBC\x7D'
        b'\x57\x21\xAA\xC9\xD1\x4A\x7F\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC'
        b'\xC8\x29\x53\x53\xC7\x95\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7'
        b'\xF4\xE8\xAC\x8C\x81\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D'
        b'\x0C\xA3\x41\x42\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE'
        b'\x64\xC5\x41\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC'
        b'\xE8\x94\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73'
        b'\xA1\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC'
        b'\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01\xA3'
        b'\x21\x30\x1F\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x04\xE5'
        b'\x7B\xD2\xC4\x31\xB2\xE8\x16\xE1\x80\xA1\x98\x23\xFA\xC8\x58\x27'
        b'\x3F\x6B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05'
        b'\x00\x03\x82\x01\x01\x00\xA8\x76\xAD\xBC\x6C\x8E\x0F\xF0\x17\x21'
        b'\x6E\x19\x5F\xEA\x76\xBF\xF6\x1A\x56\x7C\x9A\x13\xDC\x50\xD1\x3F'
        b'\xEC\x12\xA4\x27\x3C\x44\x15\x47\xCF\xAB\xCB\x5D\x61\xD9\x91\xE9'
        b'\x66\x31\x9D\xF7\x2C\x0D\x41\xBA\x82\x6A\x45\x11\x2F\xF2\x60\x89'
        b'\xA2\x34\x4F\x4D\x71\xCF\x7C\x92\x1B\x4B\xDF\xAE\xF1\x60\x0D\x1B'
        b'\xAA\xA1\x53\x36\x05\x7E\x01\x4B\x8B\x49\x6D\x4F\xAE\x9E\x8A\x6C'
        b'\x1D\xA9\xAE\xB6\xCB\xC9\x60\xCB\xF2\xFA\xE7\x7F\x58\x7E\xC4\xBB'
        b'\x28\x20\x45\x33\x88\x45\xB8\x8D\xD9\xAE\xEA\x53\xE4\x82\xA3\x6E'
        b'\x73\x4E\x4F\x5F\x03\xB9\xD0\xDF\xC4\xCA\xFC\x6B\xB3\x4E\xA9\x05'
        b'\x3E\x52\xBD\x60\x9E\xE0\x1E\x86\xD9\xB0\x9F\xB5\x11\x20\xC1\x98'
        b'\x34\xA9\x97\xB0\x9C\xE0\x8D\x79\xE8\x13\x11\x76\x2F\x97\x4B\xB1'
        b'\xC8\xC0\x91\x86\xC4\xD7\x89\x33\xE0\xDB\x38\xE9\x05\x08\x48\x77'
        b'\xE1\x47\xC7\x8A\xF5\x2F\xAE\x07\x19\x2F\xF1\x66\xD1\x9F\xA9\x4A'
        b'\x11\xCC\x11\xB2\x7E\xD0\x50\xF7\xA2\x7F\xAE\x13\xB2\x05\xA5\x74'
        b'\xC4\xEE\x00\xAA\x8B\xD6\x5D\x0D\x70\x57\xC9\x85\xC8\x39\xEF\x33'
        b'\x6A\x44\x1E\xD5\x3A\x53\xC6\xB6\xB6\x96\xF1\xBD\xEB\x5F\x7E\xA8'
        b'\x11\xEB\xB2\x5A\x7F\x86')
    usage_mask = [
        enums.CryptographicUsageMask.ENCRYPT,
        enums.CryptographicUsageMask.VERIFY]
    name = 'Demo X.509 Certificate'
    cert = objects.X509Certificate(value, usage_mask, name)
    # Build the client and connect to the server.
    # FIX: bind the proxy to its own name instead of 'as client', which
    # shadowed the imported 'client' module inside this block.
    with client.ProxyKmipClient(config=config) as proxy:
        try:
            uid = proxy.register(cert)
            logger.info("Successfully registered certificate with ID: "
                        "{0}".format(uid))
        except Exception as e:
            logger.error(e)
| 53.03
| 76
| 0.661889
| 1,047
| 5,303
| 3.338109
| 0.330468
| 0.013734
| 0.015451
| 0.017167
| 0.148212
| 0.140773
| 0.136481
| 0.136481
| 0.136481
| 0.128755
| 0
| 0.251327
| 0.147652
| 5,303
| 99
| 77
| 53.565657
| 0.521903
| 0.124269
| 0
| 0
| 0
| 0.662162
| 0.699654
| 0.6828
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.081081
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac3e976afb20849d9b8f05759ca5acdf8788e6c4
| 1,617
|
py
|
Python
|
src/ebay_rest/api/sell_finances/models/__init__.py
|
gbm001/ebay_rest
|
077d3478423ccd80ff35e0361821d6a11180bc54
|
[
"MIT"
] | 3
|
2021-12-12T04:28:03.000Z
|
2022-03-10T03:29:18.000Z
|
src/ebay_rest/api/sell_finances/models/__init__.py
|
jdavv/ebay_rest
|
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
|
[
"MIT"
] | 33
|
2021-06-16T20:44:36.000Z
|
2022-03-30T14:55:06.000Z
|
src/ebay_rest/api/sell_finances/models/__init__.py
|
jdavv/ebay_rest
|
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
|
[
"MIT"
] | 7
|
2021-06-03T09:30:23.000Z
|
2022-03-08T19:51:33.000Z
|
# coding: utf-8
# flake8: noqa
"""
eBay Finances API
This API is used to retrieve seller payouts and monetary transaction details related to those payouts. # noqa: E501
OpenAPI spec version: 1.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from ...sell_finances.models.amount import Amount
from ...sell_finances.models.balance_adjustment import BalanceAdjustment
from ...sell_finances.models.buyer import Buyer
from ...sell_finances.models.charge import Charge
from ...sell_finances.models.error import Error
from ...sell_finances.models.error_parameter import ErrorParameter
from ...sell_finances.models.fee import Fee
from ...sell_finances.models.funding_source import FundingSource
from ...sell_finances.models.order_line_item import OrderLineItem
from ...sell_finances.models.payout import Payout
from ...sell_finances.models.payout_instrument import PayoutInstrument
from ...sell_finances.models.payout_summary_response import PayoutSummaryResponse
from ...sell_finances.models.payouts import Payouts
from ...sell_finances.models.reference import Reference
from ...sell_finances.models.seller_funds_summary_response import SellerFundsSummaryResponse
from ...sell_finances.models.transaction import Transaction
from ...sell_finances.models.transaction_summary_response import TransactionSummaryResponse
from ...sell_finances.models.transactions import Transactions
from ...sell_finances.models.transfer import Transfer
from ...sell_finances.models.transfer_detail import TransferDetail
| 43.702703
| 120
| 0.825603
| 206
| 1,617
| 6.291262
| 0.368932
| 0.123457
| 0.246914
| 0.339506
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005468
| 0.095238
| 1,617
| 36
| 121
| 44.916667
| 0.880383
| 0.179963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac6da18f1d507fc4996ecf23da64c4e36382a19e
| 42
|
py
|
Python
|
tests/__init__.py
|
samarpan-rai/serveitlearn
|
8871e18ab6350cd7f2b96813a81c00c37a9ad8e1
|
[
"MIT"
] | 1
|
2020-07-12T11:33:20.000Z
|
2020-07-12T11:33:20.000Z
|
tests/__init__.py
|
samarpan-rai/serveitlearn
|
8871e18ab6350cd7f2b96813a81c00c37a9ad8e1
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
samarpan-rai/serveitlearn
|
8871e18ab6350cd7f2b96813a81c00c37a9ad8e1
|
[
"MIT"
] | null | null | null |
"""Unit test package for serveitlearn."""
| 21
| 41
| 0.714286
| 5
| 42
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.810811
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3bb2264f44487b516fd795526e0b6878b8c76a55
| 150
|
py
|
Python
|
pokemongo_bot/cell_workers/migrations/evolve_log.py
|
timgates42/PokemonGo-Bot
|
5e80f20760f32478a84a8f0ced7ca24cdf41fe03
|
[
"MIT"
] | 5,362
|
2016-07-21T02:38:46.000Z
|
2022-03-23T13:34:51.000Z
|
pokemongo_bot/cell_workers/migrations/evolve_log.py
|
timgates42/PokemonGo-Bot
|
5e80f20760f32478a84a8f0ced7ca24cdf41fe03
|
[
"MIT"
] | 5,897
|
2016-07-21T05:05:49.000Z
|
2022-03-17T09:21:35.000Z
|
pokemongo_bot/cell_workers/migrations/evolve_log.py
|
timgates42/PokemonGo-Bot
|
5e80f20760f32478a84a8f0ced7ca24cdf41fe03
|
[
"MIT"
] | 3,379
|
2016-07-21T02:38:48.000Z
|
2022-03-30T02:46:57.000Z
|
from yoyo import step
# yoyo database migration: create the evolve_log table (one row per recorded
# pokemon evolution, timestamped by default with the insertion time).
step(
    "CREATE TABLE IF NOT EXISTS evolve_log (pokemon text, iv real, cp real, dated datetime DEFAULT CURRENT_TIMESTAMP)"
)
| 25
| 118
| 0.76
| 23
| 150
| 4.869565
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18
| 150
| 5
| 119
| 30
| 0.910569
| 0
| 0
| 0
| 0
| 0.25
| 0.746667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3bfbc1d5f204ca5c91311939bd6a3bb4dc55f0a9
| 21
|
py
|
Python
|
src/main.py
|
GitPracticeRepo/ltelearn
|
44230baef53d7366a646e860a9beb825d98c388c
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
GitPracticeRepo/ltelearn
|
44230baef53d7366a646e860a9beb825d98c388c
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
GitPracticeRepo/ltelearn
|
44230baef53d7366a646e860a9beb825d98c388c
|
[
"Apache-2.0"
] | null | null | null |
# Code for feature 1
| 10.5
| 20
| 0.714286
| 4
| 21
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.238095
| 21
| 1
| 21
| 21
| 0.875
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3bff250daf312db8e63bd74bee5aa1a4763b321a
| 243
|
py
|
Python
|
core/admin.py
|
adamw00000/MAK-Datahub
|
9735f2f970439c49cec3ad693a056d7a151156ce
|
[
"MIT"
] | 1
|
2022-03-16T09:05:42.000Z
|
2022-03-16T09:05:42.000Z
|
core/admin.py
|
adamw00000/MAK-Datahub
|
9735f2f970439c49cec3ad693a056d7a151156ce
|
[
"MIT"
] | 8
|
2021-02-08T20:43:06.000Z
|
2021-06-09T18:54:14.000Z
|
core/admin.py
|
adamw00000/MAK-Datahub
|
9735f2f970439c49cec3ad693a056d7a151156ce
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import DataFileInfo, Device, ProfileInfo, ProfileCreationRun
# Expose the core models in the Django admin site (same registration
# order as before: DataFileInfo, Device, ProfileInfo, ProfileCreationRun).
for model in (DataFileInfo, Device, ProfileInfo, ProfileCreationRun):
    admin.site.register(model)
| 30.375
| 73
| 0.847737
| 28
| 243
| 7.357143
| 0.428571
| 0.174757
| 0.330097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065844
| 243
| 8
| 74
| 30.375
| 0.907489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ce05fc079800dcb22e23cadddf3d483ba708380b
| 98
|
py
|
Python
|
qstrader/price_handler/iterator/pandas/__init__.py
|
ivanliu1989/qstrader
|
95cbe6d0abdf53bc145daa96d5352c60b5030540
|
[
"MIT"
] | 113
|
2019-01-11T05:55:41.000Z
|
2022-03-27T23:49:47.000Z
|
qstrader/price_handler/iterator/pandas/__init__.py
|
ivanliu1989/qstrader
|
95cbe6d0abdf53bc145daa96d5352c60b5030540
|
[
"MIT"
] | 7
|
2019-04-09T05:30:24.000Z
|
2020-09-09T04:52:49.000Z
|
qstrader/price_handler/iterator/pandas/__init__.py
|
ivanliu1989/qstrader
|
95cbe6d0abdf53bc145daa96d5352c60b5030540
|
[
"MIT"
] | 54
|
2019-01-10T17:22:14.000Z
|
2022-03-15T23:47:43.000Z
|
# flake8: noqa
from .bar import PandasBarEventIterator
from .tick import PandasTickEventIterator
| 19.6
| 41
| 0.836735
| 10
| 98
| 8.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.122449
| 98
| 4
| 42
| 24.5
| 0.94186
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce1fce322d5cff73afa299d435833b1253408722
| 96
|
py
|
Python
|
tests/test_datamodule.py
|
Actis92/lit-tracking
|
9e7b243ba77c80ca260bff479e54db271d10c195
|
[
"MIT"
] | null | null | null |
tests/test_datamodule.py
|
Actis92/lit-tracking
|
9e7b243ba77c80ca260bff479e54db271d10c195
|
[
"MIT"
] | 14
|
2021-11-01T08:48:23.000Z
|
2022-01-08T14:20:17.000Z
|
tests/test_datamodule.py
|
Actis92/lit-tracking
|
9e7b243ba77c80ca260bff479e54db271d10c195
|
[
"MIT"
] | null | null | null |
from lit_tracking import TrackingDataModule
def test():
    """Smoke test: TrackingDataModule can be constructed with defaults.

    The original test built the instance and discarded it without any
    assertion, so it could never fail on a broken constructor result.
    """
    datamodule = TrackingDataModule()
    assert datamodule is not None
| 16
| 43
| 0.78125
| 9
| 96
| 8.222222
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 96
| 5
| 44
| 19.2
| 0.91358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce2b7f96fb5c0d4e1e98abc0fc7f5f86687c5c02
| 443
|
py
|
Python
|
daemon/core/gui/graph/shapeutils.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/graph/shapeutils.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/graph/shapeutils.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
import enum
class ShapeType(enum.Enum):
    """Kinds of annotation shapes supported by the canvas."""

    MARKER = "marker"
    OVAL = "oval"
    RECTANGLE = "rectangle"
    TEXT = "text"
# Shape types rendered as geometric figures (as opposed to text or marker).
SHAPES = {ShapeType.OVAL, ShapeType.RECTANGLE}
def is_draw_shape(shape_type: ShapeType) -> bool:
    """Return True when the given type is a drawable geometric figure."""
    drawable = shape_type in SHAPES
    return drawable
def is_shape_text(shape_type: ShapeType) -> bool:
    """Return True when the given type denotes a text shape."""
    return ShapeType.TEXT == shape_type
def is_marker(shape_type: ShapeType) -> bool:
    """Return True when the given type denotes a marker shape."""
    return ShapeType.MARKER == shape_type
| 18.458333
| 49
| 0.704289
| 57
| 443
| 5.280702
| 0.298246
| 0.179402
| 0.299003
| 0.219269
| 0.428571
| 0.428571
| 0.428571
| 0.305648
| 0
| 0
| 0
| 0
| 0.191874
| 443
| 23
| 50
| 19.26087
| 0.840782
| 0
| 0
| 0
| 0
| 0
| 0.051919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.076923
| 0.230769
| 0.923077
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
02111558f13b347fd0453954107da8330e07bef6
| 142
|
py
|
Python
|
redis_metrics/__init__.py
|
remohammadi/django-redis-metrics
|
bd6a4fa8ecf40416652071d480f159d9044640cf
|
[
"MIT"
] | 1
|
2019-02-10T19:33:41.000Z
|
2019-02-10T19:33:41.000Z
|
redis_metrics/__init__.py
|
remohammadi/django-redis-metrics
|
bd6a4fa8ecf40416652071d480f159d9044640cf
|
[
"MIT"
] | null | null | null |
redis_metrics/__init__.py
|
remohammadi/django-redis-metrics
|
bd6a4fa8ecf40416652071d480f159d9044640cf
|
[
"MIT"
] | null | null | null |
__version__ = "0.7.1"
try:
from .utils import gauge, metric # NOQA
except ImportError: # pragma: no cover
pass # pragma: no cover
| 20.285714
| 44
| 0.661972
| 20
| 142
| 4.5
| 0.85
| 0.177778
| 0.288889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.239437
| 142
| 6
| 45
| 23.666667
| 0.805556
| 0.267606
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
026903cd88da68e71bba58c3e7d89b2cafc1b26f
| 56
|
py
|
Python
|
src/dl/models/modules/normalization/__init__.py
|
okunator/Dippa
|
dcbb7056511dd6f66bcc7b095716c385d0b0a8bb
|
[
"MIT"
] | 13
|
2021-01-25T07:47:03.000Z
|
2022-01-20T16:02:51.000Z
|
src/dl/models/modules/normalization/__init__.py
|
okunator/Dippa
|
dcbb7056511dd6f66bcc7b095716c385d0b0a8bb
|
[
"MIT"
] | 1
|
2022-02-12T15:03:23.000Z
|
2022-02-12T15:03:23.000Z
|
src/dl/models/modules/normalization/__init__.py
|
okunator/Dippa
|
dcbb7056511dd6f66bcc7b095716c385d0b0a8bb
|
[
"MIT"
] | null | null | null |
from .bcn import EstBN, BCNorm
from .gn import GroupNorm
| 28
| 30
| 0.803571
| 9
| 56
| 5
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 31
| 28
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5a067d437b64217ad3a11b1f8264d1a32480e2c4
| 225
|
py
|
Python
|
kare_dev/kare_dev/doctype/service_provider_type/test_service_provider_type.py
|
umaepoch/kare-dev
|
5d900cda45a8d62bf4e90772c9ae09e7b1132c78
|
[
"MIT"
] | null | null | null |
kare_dev/kare_dev/doctype/service_provider_type/test_service_provider_type.py
|
umaepoch/kare-dev
|
5d900cda45a8d62bf4e90772c9ae09e7b1132c78
|
[
"MIT"
] | null | null | null |
kare_dev/kare_dev/doctype/service_provider_type/test_service_provider_type.py
|
umaepoch/kare-dev
|
5d900cda45a8d62bf4e90772c9ae09e7b1132c78
|
[
"MIT"
] | 1
|
2021-12-10T06:03:28.000Z
|
2021-12-10T06:03:28.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Yashwanth A N and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestServiceProviderType(unittest.TestCase):
pass
| 20.454545
| 52
| 0.768889
| 28
| 225
| 6
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025907
| 0.142222
| 225
| 10
| 53
| 22.5
| 0.84456
| 0.453333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
5a187f5a76bdc9c5da512654c8d3b1919c3f9062
| 158
|
py
|
Python
|
wlke/index/admin.py
|
HAUDRAUFHAUN/wlke
|
87a90343ac00d94912094ca06e7c8dbdafae442d
|
[
"MIT"
] | 2
|
2020-05-19T10:15:43.000Z
|
2020-07-15T20:21:31.000Z
|
wlke/index/admin.py
|
HAUDRAUFHAUN/wlke
|
87a90343ac00d94912094ca06e7c8dbdafae442d
|
[
"MIT"
] | 4
|
2020-05-20T18:44:47.000Z
|
2021-05-07T18:20:25.000Z
|
wlke/index/admin.py
|
HAUDRAUFHAUN/wlke
|
87a90343ac00d94912094ca06e7c8dbdafae442d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Jumbo, Impressum
# Register your models here.
admin.site.register(Jumbo)
admin.site.register(Impressum)
| 22.571429
| 36
| 0.810127
| 22
| 158
| 5.818182
| 0.545455
| 0.140625
| 0.265625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107595
| 158
| 6
| 37
| 26.333333
| 0.907801
| 0.164557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5a399ca10ba445cc20d02ab8726e418745e3c34d
| 100
|
py
|
Python
|
code/tmp_rtrip/test/test_lib2to3.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/test/test_lib2to3.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/test/test_lib2to3.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
from lib2to3.tests import load_tests
import unittest
if __name__ == '__main__':
unittest.main()
| 20
| 36
| 0.76
| 13
| 100
| 5.153846
| 0.692308
| 0.328358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023529
| 0.15
| 100
| 4
| 37
| 25
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5a3ab2ab116a7eafa917d9aca7495429a7474682
| 141
|
py
|
Python
|
py33.py
|
afq984/syntaxguard
|
977916fa664acb393781c0e94e9aa5e925a3e17f
|
[
"Apache-2.0"
] | 1
|
2020-12-23T03:35:45.000Z
|
2020-12-23T03:35:45.000Z
|
py33.py
|
afq984/syntaxguard
|
977916fa664acb393781c0e94e9aa5e925a3e17f
|
[
"Apache-2.0"
] | null | null | null |
py33.py
|
afq984/syntaxguard
|
977916fa664acb393781c0e94e9aa5e925a3e17f
|
[
"Apache-2.0"
] | null | null | null |
*Please, = u'use Python 3.3 or later'
# https://docs.python.org/3/whatsnew/3.3.html
# https://docs.python.org/3/whatsnew/3.0.html#new-syntax
| 35.25
| 56
| 0.70922
| 28
| 141
| 3.571429
| 0.535714
| 0.04
| 0.3
| 0.36
| 0.56
| 0.56
| 0.56
| 0
| 0
| 0
| 0
| 0.061538
| 0.078014
| 141
| 3
| 57
| 47
| 0.707692
| 0.687943
| 0
| 0
| 0
| 0
| 0.575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5a5719bea78e9bd27b95ebff36755284d8cf6c9b
| 10,591
|
py
|
Python
|
FEV_KEGG/Experiments/32.py
|
ryhaberecht/FEV-KEGG
|
f55f294aae07b76954ed823f0c2e6d189fb2b1bb
|
[
"MIT"
] | null | null | null |
FEV_KEGG/Experiments/32.py
|
ryhaberecht/FEV-KEGG
|
f55f294aae07b76954ed823f0c2e6d189fb2b1bb
|
[
"MIT"
] | 2
|
2019-05-30T06:42:08.000Z
|
2021-05-06T10:37:40.000Z
|
FEV_KEGG/Experiments/32.py
|
ryhaberecht/FEV-KEGG
|
f55f294aae07b76954ed823f0c2e6d189fb2b1bb
|
[
"MIT"
] | null | null | null |
"""
Context
-------
As in the experiments before, the set of EC numbers predicted by our approach is to be compared with the set predicted by the approach of Oh et al. (2007).
Question
--------
Does the consensus/majority graph approach to core metabolism yield a similar set of EC numbers as the approach of Oh et al. (2007)?
Method
------
- extract EC numbers from Oh et al. (2007) by hand
- remove EC numbers with wildcards
- get group of organisms 'Bacillus subtilis'
- REPEAT for varying majority-percentages:
- calculate EC numbers occuring in group's core metabolism
- remove EC numbers with wildcards
- overlap Oh's set with ours and print amount of EC numbers inside the intersection and falling off either side
Result
------
::
Maj. % others both ours
100%: 135 313 235
90%: 114 334 261
80%: 108 340 267
70%: 108 340 268
60%: 107 341 268
50%: 107 341 268
40%: 107 341 270
30%: 107 341 270
20%: 107 341 271
10%: 107 341 274
1%: 106 342 283
Conclusion
----------
Even at only 1% (effectively 1 organism) majority, there are many EC numbers predicted by Oh et al., but not occuring anywhere in our model organisms.
This may be caused by:
1. EC number is associated with the organism, but not listed in one of KEGG's hand-drawn pathways. For example 1.13.11.24 is associated with all our 15 organisms, but not present in any pathway.
2. As seen in experiment :mod:`30`, there may be EC numbers predicted by Oh et al. which are outdated.
3. Oh et al. used a compilation of several sources, some may have predicted EC numbers for B. subtilis which never made their way into KEGG at all, which is our only source.
"""
from FEV_KEGG.Graph.Elements import EcNumber
from FEV_KEGG.Evolution.Taxonomy import NCBI
from FEV_KEGG.KEGG import Organism
if __name__ == '__main__':
output = ['Maj. %\tothers\tboth\tours']
#- extract EC numbers from Oh et al. (2007) by hand
theirECnumberStrings = ['5.3.1.23', '1.4.1.13', '6.3.5.5', '6.3.1.2', '2.3.1.1', '3.5.1.47', '1.4.1.1', '1.5.1.2', '2.1.3.3', '2.7.2.8', '2.6.1.11', '3.5.3.11', '1.2.1.38', '2.6.1.2', '4.1.1.19', '1.2.1.11', '6.3.5.4', '2.7.2.4', '2.6.1.1', '4.1.1.20', '5.1.1.7', '1.3.1.26', '4.2.1.52', '1.1.1.3', '2.7.1.39', '4.2.3.1', '1.2.1.41', '2.7.2.11', '4.1.3.18', '1.2.1.27', '1.4.1.2', '4.1.3.27', '2.4.2.18', '5.4.99.5', '4.2.3.5', '4.1.2.15', '4.2.1.10', '4.2.3.4', '4.1.1.48', '1.3.1.12', '4.2.1.51', '5.3.1.24', '2.5.1.19', '1.1.1.25', '2.7.1.71', '4.2.1.20', '2.6.1.5', '4.2.1.22', '4.1.99.1', '2.1.2.1', '2.7.1.25', '4.2.99.8', '4.4.1.1', '4.4.1.8', '1.1.1.95', '2.6.1.52', '2.3.1.30', '2.6.1.9', '2.6.1.58', '1.4.3.3', '2.5.1.6', '2.6.1.16', '2.1.1.13', '2.3.2.2', '1.5.3.1', '3.2.2.16', '4.1.1.50', '2.7.1.100', '2.5.1.16', '2.5.1.22', '2.6.1.19', '3.5.1.16', '4.2.99.8', '4.2.99.8', '5.1.1.1', '3.5.3.1', '4.3.2.1', '6.3.4.5', '2.4.2.17', '4.2.1.9', '4.2.1.9', '3.5.3.8', '3.5.1.31', '3.5.1.2', '5.1.1.3', '2.3.1.29', '1.1.1.31', '4.3.1.3', '1.1.1.23', '3.1.3.15', '1.5.1.12', '2.3.1.46', '4.2.1.19', '1.4.1.9', '2.6.1.42', '1.1.1.85', '4.2.1.33', '4.1.3.12', '3.5.2.7', '1.1.1.86', '1.1.1.86', '2.6.1.42', '1.4.1.9', '5.4.3.2', '4.1.1.18', '1.2.1.27', '1.2.1.27', '2.6.1.13', '2.3.1.35', '1.5.1.12', '3.5.4.19', '3.6.1.31', '5.3.1.16', '1.5.1.2', '3.1.3.3', '3.5.1.18', '4.3.1.18', '4.3.1.17', '4.2.99.9', '4.2.99.9', '4.2.99.9', '1.2.1.16', '1.1.1.103', '4.2.1.49', '3.5.1.5', '1.4.1.9', '2.6.1.42', '3.5.1.1', '4.3.1.1', '2.3.1.57', '2.3.1.57', '1.2.1.27', '2.1.1.37', '3.1.2.4', '3.1.3.3', '4.1.1.29', '4.2.99.9', '1.13.11.24', '1.1.1.2', '1.1.1.4', '4.1.1.5', '1.1.1.94', '2.7.1.12', '2.7.1.15', '1.1.1.14', '6.2.1.1', '6.4.1.1', '1.2.1.12', '1.1.1.44', '5.4.2.2', '5.1.3.1', '5.3.1.6', '1.1.99.5', '1.1.1.49', '2.7.2.1', '1.2.1.12', '4.2.1.11', '4.1.2.13', '1.1.1.17', '4.1.1.49', '1.1.1.40', '1.1.1.21', '3.2.1.108', '5.1.3.14', '4.2.1.42', '4.2.1.40', '4.1.1.2', '1.1.3.15', '5.4.2.10', 
'3.5.1.25', '1.1.1.267', '3.5.99.6', '3.2.1.55', '4.1.2.13', '4.2.1.7', '5.3.1.12', '1.1.1.57', '4.2.1.8', '1.1.1.58', '5.3.1.12', '2.7.1.2', '5.3.1.17', '1.1.1.127', '2.7.1.45', '4.1.2.14', '3.2.1.20', '3.2.1.48', '2.7.1.15', '1.1.1.21', '4.1.1.47', '1.1.1.60', '1.1.1.83', '4.1.1.73', '1.1.1.93', '1.1.1.93', '1.2.1.3', '5.1.3.2', '1.3.99.1', '6.2.1.5', '4.1.3.31', '4.1.3.30', '4.2.1.3', '4.1.3.7', '4.2.1.2', '1.1.1.42', '1.1.1.37', '3.2.1.21', '3.2.1.21', '3.2.1.26', '3.2.1.1', '3.2.1.86', '1.2.1.19', '3.6.1.13', '1.2.1.46', '1.2.1.3', '1.2.1.3', '1.2.1.3', '5.3.1.4', '3.2.1.21', '2.7.2.7', '4.2.1.41', '2.7.1.56', '3.1.3.11', '2.7.1.4', '1.1.1.118', '2.7.1.41', '5.3.1.9', '2.7.1.6', '3.2.1.22', '3.2.1.22', '3.2.1.22', '3.2.1.22', '2.7.7.10', '2.4.1.18', '1.2.1.21', '2.4.1.1', '2.4.1.21', '2.7.7.27', '2.7.1.30', '4.1.3.5', '3.2.1.26', '3.2.1.122', '2.3.1.79', '5.3.1.8', '4.2.3.3', '2.8.3.5', '2.3.1.19', '2.7.1.11', '3.2.1.86', '3.1.3.18', '5.4.2.6', '3.1.3.8', '5.4.2.8', '6.4.1.3', '2.7.9.2', '2.7.1.47', '2.7.1.16', '5.1.3.4', '2.7.1.5', '3.2.1.86', '2.2.1.2', '2.2.1.1', '2.2.1.1', '3.2.1.93', '5.1.3.14', '5.3.1.5', '5.3.1.5', '2.7.1.17', '2.3.1.8', '1.2.1.51', '2.7.1.11', '5.3.1.9', '5.4.2.1', '2.7.1.40', '5.3.1.1', '2.7.2.3', '5.3.1.14', '1.1.1.18', '4.1.3.19', '6.2.1.1', '5.3.1.3', '4.3.1.7', '3.5.1.49', '4.1.2.20', '2.7.1.31', '5.4.99.11', '1.2.1.22', '3.1.3.25', '2.4.1.8', '3.1.1.31', '4.1.2.19', '1.1.99.21', '4.1.2.40', '5.1.3.7', '3.5.3.19', '3.2.1.37', '2.7.8.13', '2.7.7.9', '2.7.7.39', '1.1.1.22', '2.7.7.23', '6.3.2.4', '3.5.1.28', '3.2.1.52', '6.3.2.13', '2.5.1.7', '6.3.2.9', '6.3.2.8', '1.1.1.158', '6.3.2.15', '2.3.1.157', '3.6.1.27', '1.5.1.5', '1.7.99.5', '2.7.7.1', '2.7.7.3', '2.7.1.24', '1.1.1.169', '2.5.1.30', '5.4.99.6', '2.1.2.11', '2.4.2.11', '3.5.1.19', '2.7.7.18', '4.1.3.36', '6.3.2.1', '2.7.1.34', '6.2.1.26', '2.7.7.58', '2.3.1.47', '1.1.1.193', '1.5.1.3', '6.3.2.12', '2.5.1.15', '2.5.1.9', '3.5.4.16', '3.5.4.9', '4.1.2.25', '2.6.1.62', 
'2.8.1.6', '6.2.1.14', '6.3.3.3', '3.5.4.26', '2.7.1.50', '2.7.6.3', '2.7.4.7', '2.5.1.9', '2.7.4.16', '2.5.1.3', '4.1.1.11', '1.4.3.16', '1.3.1.28', '1.5.1.3', '2.7.7.2', '3.5.1.10', '3.5.4.25', '3.3.2.1', '4.1.1.71', '2.7.1.33', '1.3.3.4', '4.2.3.12', '2.7.1.26', '2.4.2.19', '6.3.1.5', '3.5.99.2', '1.3.3.3', '4.99.1.1', '5.4.3.8', '4.3.1.8', '2.1.1.107', '4.2.1.24', '4.2.1.75', '4.1.1.37', '2.7.1.49', '3.1.3.5', '4.1.1.36', '6.3.2.5', '2.3.1.15', '3.1.3.27', '2.7.7.41', '4.1.1.65', '6.4.1.2', '1.2.1.25', '1.2.1.25', '1.2.1.25', '2.5.1.1', '5.3.3.2', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.3.1.41', '2.7.8.7', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '6.2.1.3', '2.5.1.10', '2.7.8.8', '2.3.1.51', '2.3.1.16', '2.3.1.9', '2.3.1.16', '2.3.1.16', '2.3.1.16', '2.3.1.16', '2.3.1.16', '2.3.1.16', '1.3.99.2', '1.3.99.2', '1.3.99.3', '1.3.99.2', '1.3.99.3', '1.3.99.3', '1.3.99.3', '1.3.99.3', '1.3.99.3', '3.1.4.14', '2.7.1.107', '4.2.1.17', '4.2.1.17', '4.2.1.17', '4.2.1.17', '4.2.1.17', '4.2.1.17', '4.2.1.17', '4.2.1.17', '4.2.1.17', '6.2.1.3', '3.1.4.46', '3.1.4.46', '3.1.4.46', '1.1.1.35', '1.1.1.35', '1.1.1.35', '1.1.1.35', '1.1.1.35', '1.1.1.35', '1.1.1.35', '1.1.1.35', '1.1.1.35', '4.1.3.4', '2.5.1.31', '2.5.1.29', '2.5.1.33', '1.1.1.27', '1.6.5.3', '1.7.99.4', '3.6.3.14', '1.2.1.2', '1.5.1.30', '1.8.1.9', '1.9.3.1', '1.10.2.2', '1.5.1.29', '1.1.1.27', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.3.5', '3.1.4.16', '3.1.4.16', '3.1.4.16', '3.1.4.16', '3.1.3.6', '3.1.3.6', '3.1.3.6', '3.1.3.6', '2.7.1.23', '2.7.1.23', '2.7.1.23', '2.7.1.23', '2.7.1.23', '2.7.1.23', '3.2.2.9', '3.5.4.12', '3.5.4.12', '1.17.4.1', '1.17.4.1', '1.17.4.1', '1.17.4.1', '2.7.6.1', '3.5.4.3', '3.5.4.5', '2.4.2.2', '5.4.2.7', 
'5.4.2.7', '2.4.2.4', '2.4.2.2', '3.5.4.14', '2.7.1.74', '3.5.4.2', '2.7.1.20', '3.5.2.5', '2.7.4.14', '2.7.4.14', '2.7.4.11', '2.7.1.76', '2.7.1.113', '4.1.2.4', '2.7.4.9', '3.6.1.23', '2.7.4.8', '1.7.1.7', '2.7.6.5', '2.4.2.8', '2.7.4.6', '2.7.4.6', '2.7.4.6', '2.7.4.6', '2.7.4.6', '2.7.4.6', '2.7.4.6', '6.3.2.6', '2.4.2.1', '2.4.2.1', '2.4.2.1', '2.4.2.1', '2.4.2.1', '2.4.2.1', '2.4.2.1', '2.4.2.1', '2.4.2.2', '2.4.2.1', '2.7.1.21', '2.1.1.45', '1.7.3.3', '2.7.1.48', '2.7.1.48', '2.7.1.48', '1.1.1.204', '2.4.2.22', '6.3.4.13', '6.3.3.1', '6.3.5.3', '6.3.4.2', '2.7.4.3', '2.4.2.7', '4.3.2.2', '4.3.2.2', '6.3.4.4', '2.1.2.3', '2.1.3.2', '6.3.4.2', '1.3.3.1', '3.5.2.3', '2.1.2.2', '2.7.4.8', '2.4.2.14', '6.3.5.2', '3.5.4.10', '1.1.1.205', '2.7.4.6', '2.4.2.10', '2.4.2.9', '2.7.4.3', '3.5.3.4', '3.5.4.1', '2.7.4.6', '4.1.1.23', '2.7.4.4', '1.1.1.204', '1.2.1.8', '1.1.99.1', '3.5.2.6', '2.1.2.9', '1.11.1.9', '6.1.1.10', '3.5.1.11', '1.15.1.1', '6.1.1.17', '1.11.1.6', '3.6.1.15', '4.2.1.1', '2.7.7.4', '1.8.1.2', '3.1.3.1', '3.6.1.1', '3.1.3.1', '3.6.1.2']
theirECnumbers = set()
for string in theirECnumberStrings:
theirECnumbers.add( EcNumber(string) )
#- remove EC numbers with wildcards
theirECnumbers = EcNumber.removeWildcards(theirECnumbers)
taxonomy = NCBI.getTaxonomy()
#- get group of organisms 'Bacillus subtilis'
organisms = taxonomy.getOrganismAbbreviationsByPath('Bacillus subtilis', oneOrganismPerSpecies=False)
group = Organism.Group( organisms )
#- REPEAT for varying majority-percentages:
for percentage in [100, 90, 80, 70, 60, 50, 40, 30, 20, 10 , 1]:
#- calculate EC numbers occuring in group's core metabolism
ourECnumbers = group.majorityEcGraph(majorityPercentage = percentage, noMultifunctional = False).getECs()
#- remove EC numbers with wildcards
ourECnumbers = EcNumber.removeWildcards(ourECnumbers)
#- overlap Oh's set with ours and print amount of EC numbers inside the intersection and falling off either side
onlyInTheirs = theirECnumbers.difference( ourECnumbers )
inBoth = theirECnumbers.intersection( ourECnumbers )
onlyInOurs = ourECnumbers.difference( theirECnumbers )
output.append(str(percentage) + '%:\t' + str(len(onlyInTheirs)) + '\t' + str(len(inBoth)) + '\t' + str(len(onlyInOurs)) )
for line in output:
print(line)
| 117.677778
| 7,057
| 0.486262
| 2,882
| 10,591
| 1.783137
| 0.087786
| 0.061101
| 0.030356
| 0.013621
| 0.349874
| 0.244211
| 0.221833
| 0.184083
| 0.166959
| 0.144775
| 0
| 0.328914
| 0.145406
| 10,591
| 90
| 7,058
| 117.677778
| 0.238869
| 0.209895
| 0
| 0
| 0
| 0
| 0.55944
| 0.002514
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0.045455
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5a58c06139315fb12a904e5516fba77c7da228fa
| 138
|
py
|
Python
|
BLSeg/blseg/model/__init__.py
|
ForrestPi/semanticSegmentation
|
1e5519279e2a9574f09eaf91439138b74b0f860c
|
[
"MIT"
] | 7
|
2020-04-06T10:25:30.000Z
|
2021-02-24T14:51:22.000Z
|
BLSeg/blseg/model/__init__.py
|
ForrestPi/semanticSegmentation
|
1e5519279e2a9574f09eaf91439138b74b0f860c
|
[
"MIT"
] | null | null | null |
BLSeg/blseg/model/__init__.py
|
ForrestPi/semanticSegmentation
|
1e5519279e2a9574f09eaf91439138b74b0f860c
|
[
"MIT"
] | 2
|
2020-04-08T14:43:21.000Z
|
2020-12-11T03:03:37.000Z
|
from .fcn import FCN
from .unet import UNet, ModernUNet
from .pspnet import PSPNet
from .deeplab import DeepLabV3Plus
from .gcn import GCN
| 27.6
| 34
| 0.811594
| 21
| 138
| 5.333333
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.144928
| 138
| 5
| 35
| 27.6
| 0.940678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce6bbec82dd3c37561efcee45ca789e7196e6680
| 120
|
py
|
Python
|
programmers/string_handling_basics.py
|
schio/algorithm_test
|
c240faca428a9adb2970591338d4792b2f4fb7f3
|
[
"MIT"
] | null | null | null |
programmers/string_handling_basics.py
|
schio/algorithm_test
|
c240faca428a9adb2970591338d4792b2f4fb7f3
|
[
"MIT"
] | null | null | null |
programmers/string_handling_basics.py
|
schio/algorithm_test
|
c240faca428a9adb2970591338d4792b2f4fb7f3
|
[
"MIT"
] | null | null | null |
# https://programmers.co.kr/learn/courses/30/lessons/12918
def solution(s):
return s.isdigit() and len(s) in [4, 6]
| 30
| 58
| 0.691667
| 21
| 120
| 3.952381
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.125
| 120
| 3
| 59
| 40
| 0.704762
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
cea559d06b51777f8d4f46ef0f1e20a08756575c
| 146
|
py
|
Python
|
data_collection/influxdb_management/influx_setting_example.py
|
Mishuni/Collect_Environ_Data
|
ae8aedc07ca28d0d6fbf5f68b8d574a599cbaf48
|
[
"MIT"
] | null | null | null |
data_collection/influxdb_management/influx_setting_example.py
|
Mishuni/Collect_Environ_Data
|
ae8aedc07ca28d0d6fbf5f68b8d574a599cbaf48
|
[
"MIT"
] | null | null | null |
data_collection/influxdb_management/influx_setting_example.py
|
Mishuni/Collect_Environ_Data
|
ae8aedc07ca28d0d6fbf5f68b8d574a599cbaf48
|
[
"MIT"
] | null | null | null |
host_='127.0.0.1'
port_=8086
user_='user name'
pass_ ='user password'
protocol='line'
air_api_key="air api key"
weather_api_key="weather_api_key"
| 18.25
| 33
| 0.767123
| 27
| 146
| 3.777778
| 0.592593
| 0.235294
| 0.176471
| 0.313725
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 0.082192
| 146
| 8
| 33
| 18.25
| 0.686567
| 0
| 0
| 0
| 0
| 0
| 0.414966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.142857
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0c6ac749fceabf651c083a76bfcadcb7d51d526a
| 590
|
py
|
Python
|
sickbeard/lib/hachoir_parser/audio/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/hachoir_parser/audio/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/hachoir_parser/audio/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
from lib.hachoir_parser.audio.aiff import AiffFile
from lib.hachoir_parser.audio.au import AuFile
from lib.hachoir_parser.audio.itunesdb import ITunesDBFile
from lib.hachoir_parser.audio.midi import MidiFile
from lib.hachoir_parser.audio.mpeg_audio import MpegAudioFile
from lib.hachoir_parser.audio.real_audio import RealAudioFile
from lib.hachoir_parser.audio.xm import XMModule
from lib.hachoir_parser.audio.s3m import S3MModule
from lib.hachoir_parser.audio.s3m import PTMModule
from lib.hachoir_parser.audio.mod import AmigaModule
from lib.hachoir_parser.audio.flac import FlacParser
| 45.384615
| 61
| 0.867797
| 90
| 590
| 5.544444
| 0.3
| 0.154309
| 0.308617
| 0.440882
| 0.587174
| 0.136273
| 0.136273
| 0
| 0
| 0
| 0
| 0.005505
| 0.076271
| 590
| 12
| 62
| 49.166667
| 0.910092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0c9bfb096c85e093a24256c20749ab55e71d644d
| 1,006
|
py
|
Python
|
test/rps_test.py
|
Ekram49/ekram-lambdata-13f
|
09f85f30e08903ad5e6b322148141cc26a2a3eb9
|
[
"MIT"
] | null | null | null |
test/rps_test.py
|
Ekram49/ekram-lambdata-13f
|
09f85f30e08903ad5e6b322148141cc26a2a3eb9
|
[
"MIT"
] | null | null | null |
test/rps_test.py
|
Ekram49/ekram-lambdata-13f
|
09f85f30e08903ad5e6b322148141cc26a2a3eb9
|
[
"MIT"
] | null | null | null |
'''# from app.game import determine_winner
# FYI normally we'd have this application code (determine_winner()) in another file,
# ... but for this exercise we'll keep it here
def determine_winner(user_choice, computer_choice):
return "rock" # todo: write logic here to make the tests pass!
if user_choice == computer_choice:
return None
elif user_choice == "rock" and computer_choice == "scissors"
winner = "rock"
def test_determine_winner():
assert determine_winner("rock", "rock") == None
assert determine_winner("rock", "paper") == "paper"
assert determine_winner("rock", "scissors") == "rock"
assert determine_winner("paper", "rock") == "paper"
assert determine_winner("paper", "paper") == None
assert determine_winner("paper", "scissors") == "scissors"
assert determine_winner("scissors", "rock") == "rock"
assert determine_winner("scissors", "paper") == "scissors"
assert determine_winner("scissors", "scissors") == None
'''
| 41.916667
| 84
| 0.680915
| 121
| 1,006
| 5.495868
| 0.380165
| 0.293233
| 0.284211
| 0.112782
| 0.201504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180915
| 1,006
| 24
| 85
| 41.916667
| 0.807039
| 0.993042
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.041667
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0c9d964e4bae7a1a5faad2c8efb0534f1cfb8df4
| 417
|
py
|
Python
|
KafNafParserPy/__init__.py
|
opener-project/KafNafParserPy
|
1b82fda4d38c0bc626388be58f7dabca27b71b54
|
[
"Apache-2.0"
] | 3
|
2017-07-06T15:14:44.000Z
|
2019-06-06T20:05:54.000Z
|
KafNafParserPy/__init__.py
|
opener-project/KafNafParserPy
|
1b82fda4d38c0bc626388be58f7dabca27b71b54
|
[
"Apache-2.0"
] | null | null | null |
KafNafParserPy/__init__.py
|
opener-project/KafNafParserPy
|
1b82fda4d38c0bc626388be58f7dabca27b71b54
|
[
"Apache-2.0"
] | null | null | null |
from KafNafParserMod import KafNafParser
from header_data import *
from external_references_data import *
from span_data import *
from term_data import *
from term_sentiment_data import *
from text_data import *
from entity_data import *
from features_data import *
from opinion_data import *
from dependency_data import *
from constituency_data import *
from references_data import *
from coreference_data import *
| 27.8
| 40
| 0.832134
| 58
| 417
| 5.724138
| 0.310345
| 0.391566
| 0.506024
| 0.144578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136691
| 417
| 14
| 41
| 29.785714
| 0.922222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0cac71217cfe4d6748784c01df197bc4c6f1247d
| 97
|
py
|
Python
|
pybam/defexceptions.py
|
benchiverton/Proteomics
|
006ac5877a5256ee60abdfff35ad81c4a1afa157
|
[
"MIT"
] | 2
|
2020-09-26T14:33:21.000Z
|
2021-01-19T19:22:54.000Z
|
pybam/defexceptions.py
|
benchiverton/Proteomics
|
006ac5877a5256ee60abdfff35ad81c4a1afa157
|
[
"MIT"
] | 2
|
2020-09-28T12:39:04.000Z
|
2022-02-13T15:02:38.000Z
|
pybam/defexceptions.py
|
benchiverton/Proteomics
|
006ac5877a5256ee60abdfff35ad81c4a1afa157
|
[
"MIT"
] | null | null | null |
# custom exceptions
class PybamWarn(Exception):
pass
class PybamError(Exception):
pass
| 12.125
| 28
| 0.731959
| 10
| 97
| 7.1
| 0.7
| 0.366197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195876
| 97
| 7
| 29
| 13.857143
| 0.910256
| 0.175258
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0cf8bca598e995099f2fc401f8fd1ee2652ab281
| 202
|
py
|
Python
|
06-oops-advanced/animal.py
|
mkumar-552/learn-programming-with-python-
|
cb0aa11c741019959ce3db84552a7e012486092e
|
[
"MIT"
] | 64
|
2018-05-25T01:26:31.000Z
|
2022-03-03T20:42:20.000Z
|
06-oops-advanced/animal.py
|
mkumar-552/learn-programming-with-python-
|
cb0aa11c741019959ce3db84552a7e012486092e
|
[
"MIT"
] | null | null | null |
06-oops-advanced/animal.py
|
mkumar-552/learn-programming-with-python-
|
cb0aa11c741019959ce3db84552a7e012486092e
|
[
"MIT"
] | 72
|
2018-05-24T15:04:46.000Z
|
2022-03-08T04:19:18.000Z
|
from abc import ABC, abstractmethod
class AbstractAnimal(ABC):
@abstractmethod
def bark(self): pass
class Dog(AbstractAnimal):
def bark(self):
print("Bow Bow")
print(Dog().bark())
| 18.363636
| 35
| 0.678218
| 25
| 202
| 5.48
| 0.52
| 0.248175
| 0.160584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19802
| 202
| 11
| 36
| 18.363636
| 0.845679
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.125
| 0.125
| 0
| 0.625
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
0b3fecbd1873b235abe1fc017294d7cf07a596b9
| 23
|
py
|
Python
|
webbreaker/__init__.py
|
one3chens/webbreaker
|
2e3e661122bc82f0bc8c9bb105affe700d859035
|
[
"MIT"
] | null | null | null |
webbreaker/__init__.py
|
one3chens/webbreaker
|
2e3e661122bc82f0bc8c9bb105affe700d859035
|
[
"MIT"
] | null | null | null |
webbreaker/__init__.py
|
one3chens/webbreaker
|
2e3e661122bc82f0bc8c9bb105affe700d859035
|
[
"MIT"
] | null | null | null |
__version__ = '1.99.28'
| 23
| 23
| 0.695652
| 4
| 23
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 0.086957
| 23
| 1
| 23
| 23
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b82644666b28629b33bf5bf39310de702ca814e
| 3,141
|
py
|
Python
|
tests/test_find_missing_element.py
|
ssichynskyi/algorithms_data_structures
|
59af17529e2d3849ff00d15d2998c5d67f823f12
|
[
"MIT"
] | null | null | null |
tests/test_find_missing_element.py
|
ssichynskyi/algorithms_data_structures
|
59af17529e2d3849ff00d15d2998c5d67f823f12
|
[
"MIT"
] | null | null | null |
tests/test_find_missing_element.py
|
ssichynskyi/algorithms_data_structures
|
59af17529e2d3849ff00d15d2998c5d67f823f12
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from find_missing_element import (
find_missing_element_with_summing,
find_missing_element_with_xor,
find_missing_element_with_sort,
find_missing_element_with_hash
)
class Test(TestCase):
def setUp(self):
self.test_set_pos_ints = {
1: ([1], []),
8: ([1, 3, 5, 7, 9, 0, 2, 4, 6, 8], [2, 9, 6, 3, 4, 5, 1, 0, 7]),
0: ([1, 3, 5, 7, 9, 0, 2, 4, 6, 8], [2, 9, 6, 3, 4, 5, 1, 7, 8])
}
self.test_set_neg_ints = {
8: ([1, -3, 5, -7, 9, 0, -2, 4, 6, 8], [-2, 9, 6, -3, 4, 5, 1, 0, -7]),
0: ([1, -3, 5, -7, 9, 0, -2, 4, 6, 8], [-2, 9, 6, -3, 4, 5, 1, -7, 8]),
-3: ([1, -3, 5, -7, 9, 0, -2, 4, 6, 8], [-2, 9, 6, 4, 5, 1, -7, 8, 0])
}
self.test_set_floats = {
1.1: ([1.1, 2.5], [2.5]),
0.0: ([-1.6, 0.0, 5.4], [-1.6, 5.4]),
5.4: ([-1.6, 0.0, 5.4], [0.0, -1.6])
}
self.test_set_chars = {
'z': (['a', 'z', '0'], ['0', 'a'])
}
# keys below have no value, implemented for data conformance
self.test_raises_value_error = {
'x': ([], []),
'y': ([0], [0])
}
def test_find_missing_element_with_summing(self):
self.generic_test(self.test_set_pos_ints, find_missing_element_with_summing)
self.generic_test(self.test_set_neg_ints, find_missing_element_with_summing)
self.generic_test(self.test_set_floats, find_missing_element_with_summing)
self.generic_negative_test(self.test_raises_value_error, find_missing_element_with_summing)
def test_find_missing_element_with_sort(self):
self.generic_test(self.test_set_pos_ints, find_missing_element_with_sort)
self.generic_test(self.test_set_neg_ints, find_missing_element_with_sort)
self.generic_test(self.test_set_floats, find_missing_element_with_sort)
self.generic_test(self.test_set_chars, find_missing_element_with_sort)
def test_find_missing_element_with_hash(self):
self.generic_test(self.test_set_pos_ints, find_missing_element_with_hash)
self.generic_test(self.test_set_neg_ints, find_missing_element_with_hash)
self.generic_test(self.test_set_floats, find_missing_element_with_hash)
self.generic_test(self.test_set_chars, find_missing_element_with_hash)
self.generic_negative_test(self.test_raises_value_error, find_missing_element_with_hash)
def test_find_missing_element_with_xor(self):
self.generic_test(self.test_set_pos_ints, find_missing_element_with_xor)
self.generic_test(self.test_set_neg_ints, find_missing_element_with_xor)
self.generic_negative_test(self.test_raises_value_error, find_missing_element_with_xor)
def generic_test(self, test_set: dict, func) -> None:
for answer in test_set.keys():
self.assertEqual(answer, func(*test_set[answer]))
def generic_negative_test(self, test_set: dict, func) -> None:
for answer in test_set.keys():
with self.assertRaises(ValueError):
func(*test_set[answer])
| 45.521739
| 99
| 0.639924
| 495
| 3,141
| 3.684848
| 0.117172
| 0.150768
| 0.246711
| 0.289474
| 0.841009
| 0.725329
| 0.645285
| 0.639254
| 0.635965
| 0.635965
| 0
| 0.056814
| 0.226679
| 3,141
| 68
| 100
| 46.191176
| 0.694113
| 0.018465
| 0
| 0.034483
| 0
| 0
| 0.002597
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.12069
| false
| 0
| 0.034483
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0bb0e746f0b8d744af39b59be041c5ff327d88d6
| 193
|
py
|
Python
|
myMath/arithmatic.py
|
BothellRobotics/PythonClass
|
19adcd53d1c213003a73ff73b9dadc063b19b898
|
[
"MIT"
] | null | null | null |
myMath/arithmatic.py
|
BothellRobotics/PythonClass
|
19adcd53d1c213003a73ff73b9dadc063b19b898
|
[
"MIT"
] | null | null | null |
myMath/arithmatic.py
|
BothellRobotics/PythonClass
|
19adcd53d1c213003a73ff73b9dadc063b19b898
|
[
"MIT"
] | 6
|
2019-02-22T03:50:58.000Z
|
2019-04-05T02:47:45.000Z
|
class Arithmatic:
#This function does adding to integer
def Add(self, x, y):
return x + y
#This fnction does su
def Subtraction(self, x, y):
return x - y
| 12.866667
| 41
| 0.57513
| 28
| 193
| 3.964286
| 0.607143
| 0.072072
| 0.108108
| 0.216216
| 0.252252
| 0.252252
| 0
| 0
| 0
| 0
| 0
| 0
| 0.34715
| 193
| 14
| 42
| 13.785714
| 0.880952
| 0.290155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0bc0a23f388f60d20cd0142eb338a1ab4b5127d0
| 329
|
py
|
Python
|
server/bridge/pos/sentence_count_question.py
|
jainkuniya/Question-Generator
|
a13279076367e4c61a416d3c663016e525e4a0a6
|
[
"Apache-2.0"
] | null | null | null |
server/bridge/pos/sentence_count_question.py
|
jainkuniya/Question-Generator
|
a13279076367e4c61a416d3c663016e525e4a0a6
|
[
"Apache-2.0"
] | null | null | null |
server/bridge/pos/sentence_count_question.py
|
jainkuniya/Question-Generator
|
a13279076367e4c61a416d3c663016e525e4a0a6
|
[
"Apache-2.0"
] | null | null | null |
from bridge.template.one_word_answer import generate_one_word_question
def get_sentence_count_qustion(sentence_count, period_count):
    """Return a one-word question about the sentence count, or ``None``.

    A question is only generated when the sentence count disagrees with the
    period count and the paragraph is short (at most 5 sentences).
    """
    counts_disagree = sentence_count != period_count
    if not (counts_disagree and sentence_count <= 5):
        return None
    return generate_one_word_question(
        'How many sentences are there in this paragraph?', sentence_count)
| 47
| 114
| 0.793313
| 46
| 329
| 5.304348
| 0.608696
| 0.266393
| 0.122951
| 0.188525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003571
| 0.148936
| 329
| 6
| 115
| 54.833333
| 0.867857
| 0
| 0
| 0
| 1
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
e7f03d4f8a14d392e0e9ded7e10dae6ec5a25eee
| 110
|
py
|
Python
|
samples/src/main/resources/datasets/python/153.py
|
sritchie/kotlingrad
|
8165ed1cd77220a5347c58cded4c6f2bcf22ee30
|
[
"Apache-2.0"
] | 11
|
2020-12-19T01:19:44.000Z
|
2021-12-25T20:43:33.000Z
|
src/main/resources/datasets/python/153.py
|
breandan/katholic
|
081c39f3acc73ff41f5865563debe78a36e1038f
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/datasets/python/153.py
|
breandan/katholic
|
081c39f3acc73ff41f5865563debe78a36e1038f
|
[
"Apache-2.0"
] | 2
|
2021-01-25T07:59:20.000Z
|
2021-08-07T07:13:49.000Z
|
# Demonstrates nested if/else resolution: the outer else fires on import,
# the inner else fires when run as a script (since 1 != 1 is always False).
if __name__ == '__main__':
    if 1 != 1:
        print("IF")
    else:
        print("InnerElse")
else:
    print("OuterElse")
| 22
| 29
| 0.590909
| 14
| 110
| 4.071429
| 0.571429
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 0.190909
| 110
| 4
| 30
| 27.5
| 0.617978
| 0
| 0
| 0
| 0
| 0
| 0.254545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f02bb824b690b266a66a98e97e4662082baaf306
| 221
|
py
|
Python
|
MeliusSoft/credit/admin.py
|
AntonBalmakov/Meliys
|
47c13fb8059d52b874fe98ffcbf8653690b95de9
|
[
"MIT"
] | null | null | null |
MeliusSoft/credit/admin.py
|
AntonBalmakov/Meliys
|
47c13fb8059d52b874fe98ffcbf8653690b95de9
|
[
"MIT"
] | null | null | null |
MeliusSoft/credit/admin.py
|
AntonBalmakov/Meliys
|
47c13fb8059d52b874fe98ffcbf8653690b95de9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import CreditBit, Contract, Product, Manufacturer
# Make the credit app's models manageable through the Django admin site.
for _model in (CreditBit, Contract, Product, Manufacturer):
    admin.site.register(_model)
del _model  # avoid leaking the loop variable as a module attribute
| 27.625
| 62
| 0.832579
| 28
| 221
| 6.571429
| 0.428571
| 0.195652
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072398
| 221
| 7
| 63
| 31.571429
| 0.897561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f04d367505bd7c78ab3d8dafe7740355264967be
| 150
|
py
|
Python
|
Exercise_3_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_3_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_3_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
# Guest list: print an invitation for each guest (Python Crash Course, ex. 3-4).
names = ['tony', 'steve', 'thor']
message = ", You are invited!"
for guest in names:
    print(guest + message)
| 15
| 31
| 0.673333
| 21
| 150
| 4.809524
| 0.619048
| 0.29703
| 0.336634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022388
| 0.106667
| 150
| 9
| 32
| 16.666667
| 0.731343
| 0.066667
| 0
| 0
| 0
| 0
| 0.223022
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f07f0f49ddcd519ea1fe173a43445f352b54106d
| 36
|
py
|
Python
|
concurrency/__init__.py
|
heyglen/Concurrency
|
b09050884b2b7373f787a5718adbf83d46aad6aa
|
[
"MIT"
] | null | null | null |
concurrency/__init__.py
|
heyglen/Concurrency
|
b09050884b2b7373f787a5718adbf83d46aad6aa
|
[
"MIT"
] | null | null | null |
concurrency/__init__.py
|
heyglen/Concurrency
|
b09050884b2b7373f787a5718adbf83d46aad6aa
|
[
"MIT"
] | null | null | null |
from concurrency import Concurrency
| 18
| 35
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b2c874c60f56f706292f04e3ded7ec5b5def5221
| 22
|
py
|
Python
|
Inclass/python/lec8.py
|
chenchuw/EC602-Design-by-Software
|
c233c9d08a67abc47235282fedd866d67ccaf4ce
|
[
"MIT"
] | null | null | null |
Inclass/python/lec8.py
|
chenchuw/EC602-Design-by-Software
|
c233c9d08a67abc47235282fedd866d67ccaf4ce
|
[
"MIT"
] | null | null | null |
Inclass/python/lec8.py
|
chenchuw/EC602-Design-by-Software
|
c233c9d08a67abc47235282fedd866d67ccaf4ce
|
[
"MIT"
] | 1
|
2022-01-11T20:23:47.000Z
|
2022-01-11T20:23:47.000Z
|
# Oct 6th. Chuwei Chen
| 22
| 22
| 0.727273
| 4
| 22
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.181818
| 22
| 1
| 22
| 22
| 0.833333
| 0.909091
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b2cf6d6b365c60584f89b6e2b331657224c3056d
| 211
|
py
|
Python
|
tests/.tests/.test2.py
|
greyblue9/justuse
|
44a5fe2192aa64aac43ed4886979d9d0cbb9fb87
|
[
"MIT"
] | null | null | null |
tests/.tests/.test2.py
|
greyblue9/justuse
|
44a5fe2192aa64aac43ed4886979d9d0cbb9fb87
|
[
"MIT"
] | 1
|
2021-09-11T15:33:40.000Z
|
2021-09-11T22:59:58.000Z
|
tests/.tests/.test2.py
|
thirteenpylons/justuse
|
02ad8811bd3c993b72cdc7acbf31d62d8accaf52
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
import webbrowser
def test():
    """Return True when foobar() yields 43 while webbrowser.open is patched out."""
    with patch("webbrowser.open"):
        outcome = foobar()
    return outcome == 43
def foobar():
    """Attempt to open google.com in the default browser, then return 43."""
    target = "google.com"
    webbrowser.open(target)
    return 43
# Run the self-check at import time and print the boolean outcome.
print(test())
| 15.071429
| 34
| 0.64455
| 26
| 211
| 5.230769
| 0.615385
| 0.205882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02454
| 0.227488
| 211
| 14
| 35
| 15.071429
| 0.809816
| 0
| 0
| 0
| 0
| 0
| 0.117925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| true
| 0
| 0.222222
| 0
| 0.666667
| 0.111111
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b2cf8cdb00964af76860b61ef70052b1cb6e28b6
| 74
|
py
|
Python
|
src/searchset/__init__.py
|
makarchuk/django_lucy
|
ea9eed4e7a1d4572b85c879f5bf43028f999dac7
|
[
"MIT"
] | 5
|
2019-09-02T12:15:17.000Z
|
2020-03-07T12:54:36.000Z
|
src/searchset/__init__.py
|
makarchuk/django_lucy
|
ea9eed4e7a1d4572b85c879f5bf43028f999dac7
|
[
"MIT"
] | 19
|
2019-08-12T12:00:13.000Z
|
2019-11-11T14:14:38.000Z
|
src/searchset/__init__.py
|
makarchuk/django_lucy
|
ea9eed4e7a1d4572b85c879f5bf43028f999dac7
|
[
"MIT"
] | 3
|
2019-10-24T16:45:27.000Z
|
2020-03-05T09:48:45.000Z
|
from .django import DjangoSearchSet
from .elastic import ElasticSearchSet
| 24.666667
| 37
| 0.864865
| 8
| 74
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 38
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b2f4402cb23d8d277c47fb66521c8af3e5049582
| 183
|
py
|
Python
|
asab/web/tenant/__init__.py
|
TeskaLabs/asab
|
f28894b62bad192d8d30df01a8ad1b842ee2a2fb
|
[
"BSD-3-Clause"
] | 23
|
2018-03-07T18:58:13.000Z
|
2022-03-29T17:11:47.000Z
|
asab/web/tenant/__init__.py
|
TeskaLabs/asab
|
f28894b62bad192d8d30df01a8ad1b842ee2a2fb
|
[
"BSD-3-Clause"
] | 87
|
2018-04-04T19:44:13.000Z
|
2022-03-31T11:18:00.000Z
|
asab/web/tenant/__init__.py
|
TeskaLabs/asab
|
f28894b62bad192d8d30df01a8ad1b842ee2a2fb
|
[
"BSD-3-Clause"
] | 10
|
2018-04-30T16:40:25.000Z
|
2022-03-09T10:55:24.000Z
|
from .service import TenantService
from .midleware import tenant_middleware_factory, tenant_handler
# Explicit public API of the tenant sub-package: the names re-exported above.
__all__ = (
	'TenantService',
	'tenant_middleware_factory',
	'tenant_handler',
)
| 18.3
| 64
| 0.803279
| 19
| 183
| 7.210526
| 0.526316
| 0.233577
| 0.335766
| 0.423358
| 0.525547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 183
| 9
| 65
| 20.333333
| 0.845679
| 0
| 0
| 0
| 0
| 0
| 0.284153
| 0.136612
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
650942b1f06a56e77a4a297694847c93ad310bbd
| 37
|
py
|
Python
|
backblazeb2/__init__.py
|
acidburn23/backblaze-b2
|
c23ac39c5099f7b26728c50784a086f9871ccf8b
|
[
"MIT"
] | 39
|
2015-12-18T19:34:47.000Z
|
2022-02-19T20:50:23.000Z
|
backblazeb2/__init__.py
|
acidburn23/backblaze-b2
|
c23ac39c5099f7b26728c50784a086f9871ccf8b
|
[
"MIT"
] | 5
|
2016-02-03T00:37:47.000Z
|
2018-08-20T18:10:24.000Z
|
backblazeb2/__init__.py
|
acidburn23/backblaze-b2
|
c23ac39c5099f7b26728c50784a086f9871ccf8b
|
[
"MIT"
] | 32
|
2015-12-30T12:30:27.000Z
|
2021-03-01T04:24:12.000Z
|
from .backblazeb2 import BackBlazeB2
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
653a877c3414c203e396fce795ddb8722882407e
| 8,214
|
py
|
Python
|
activity_recognition/visualization.py
|
linw7/Activity-Recognition
|
f76a327268c48f6e3cbe5ff25576f49d8c4927cf
|
[
"MIT"
] | 49
|
2017-12-08T07:24:43.000Z
|
2022-03-18T08:16:56.000Z
|
activity_recognition/visualization.py
|
linw7/Activity-Recognition
|
f76a327268c48f6e3cbe5ff25576f49d8c4927cf
|
[
"MIT"
] | 1
|
2019-02-17T14:33:21.000Z
|
2019-02-17T14:33:21.000Z
|
activity_recognition/visualization.py
|
linw7/Activity-Recognition
|
f76a327268c48f6e3cbe5ff25576f49d8c4927cf
|
[
"MIT"
] | 17
|
2018-04-10T08:45:45.000Z
|
2021-11-12T12:29:02.000Z
|
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.font_manager as fm
# CJK-capable font used for Chinese axis labels below.
# NOTE(review): hard-coded absolute path — confirm the file exists on the target machine.
myfont = fm.FontProperties(fname='/home/tk/Desktop/msyh.ttf')
def plot_learning_curve_default(X, y, clf):
    """Plot a learning curve for ``clf`` on (X, y) using a fixed 5-split shuffle CV.

    Shows mean training and cross-validation scores versus training-set size,
    with +/- one standard deviation bands. Blocks on ``plt.show()``.
    """
    plt.title("Learning Curve")
    plt.xlabel("Training Instance")
    plt.ylabel("Score")
    # Fixed CV scheme: 5 shuffled splits, 20% held out, deterministic seed.
    part = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    train_sizes, train_scores, test_scores = learning_curve(clf, X, y, cv=part, n_jobs=1)
    # Aggregate the per-split score matrices (rows = sizes, cols = splits).
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # Shaded +/- 1 std bands around each curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()
    plt.close()
def plot_learning_curve_cv(X, y, clf, cv):
    """Plot a learning curve for ``clf`` on (X, y) using a caller-supplied CV splitter.

    Same visualization as :func:`plot_learning_curve_default`, but the
    cross-validation strategy ``cv`` is chosen by the caller.
    """
    plt.title("Learning Curve")
    plt.xlabel("Training Instance")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(clf, X, y, cv=cv, n_jobs=1)
    # Aggregate scores across CV splits (axis 1 = splits).
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # Shaded +/- 1 std bands around each curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, "o-",
             color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, "o-",
             color="g", label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()
    plt.close()
def plot_paramter_curve_default(X, y, clf, param_name, param_range):
    """Plot a validation curve for ``clf`` over ``param_range`` with fixed shuffle CV.

    Shows mean training and cross-validation scores for each value of the
    hyper-parameter ``param_name``, with +/- one standard deviation bands.

    Parameters
    ----------
    X, y : training data and labels passed through to ``validation_curve``.
    clf : estimator to evaluate.
    param_name : name of the hyper-parameter to sweep.
    param_range : sequence of values to evaluate ``param_name`` at.
    """
    plt.title('Validation Curve')
    plt.xlabel('Max_Depth')
    plt.ylabel('Score')
    # Fixed CV scheme: 5 shuffled splits, 20% held out, deterministic seed.
    part = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    train_scores, test_scores = validation_curve(clf, X, y, param_name=param_name, param_range=param_range, cv=part, n_jobs=1)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # BUG FIX: the original plotted against an undefined name ``p_range``,
    # which raised NameError at runtime; the parameter is ``param_range``.
    plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(param_range, train_scores_mean, "o-",
             color="r", label='Training Score')
    plt.plot(param_range, test_scores_mean, "o-",
             color="g", label='Cross-Validation Score')
    plt.legend(loc='best')
    plt.show()
    plt.close()
def plot_paramter_curve_cv(X, y, clf, cv, param_name, param_range):
    """Plot a validation curve for ``clf`` over ``param_range`` with caller-supplied CV.

    Same visualization as :func:`plot_paramter_curve_default`, but the
    cross-validation strategy ``cv`` is chosen by the caller.

    Parameters
    ----------
    X, y : training data and labels passed through to ``validation_curve``.
    clf : estimator to evaluate.
    cv : cross-validation splitter or fold count.
    param_name : name of the hyper-parameter to sweep.
    param_range : sequence of values to evaluate ``param_name`` at.
    """
    plt.title('Validation Curve')
    plt.xlabel('Max_Depth')
    plt.ylabel('Score')
    # BUG FIX: the original passed and plotted an undefined name ``p_range``
    # (NameError at runtime); the parameter is ``param_range``.
    train_scores, test_scores = validation_curve(
        clf, X, y, param_name=param_name, param_range=param_range, cv=cv, n_jobs=1)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.fill_between(param_range, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(param_range, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(param_range, train_scores_mean, "o-",
             color="r", label='Training Score')
    plt.plot(param_range, test_scores_mean, "o-",
             color="g", label='Cross-Validation Score')
    plt.legend(loc='best')
    plt.show()
    plt.close()
def plot_confusion_matrix(confusion_matrix):
    """Render ``confusion_matrix`` as a percentage heat map with activity labels.

    Parameters
    ----------
    confusion_matrix : array-like of shape (6, 6)
        Raw confusion counts for the six activity classes, in the order of
        ``LABELS`` below.
    """
    plt.title("Confusion Matrix")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # Tick labels for the six activity classes.
    # FIX: corrected display-label typo "Doenstairs" -> "Downstairs".
    LABELS = ["Sitting", "Standing", "Upstairs", "Downstairs", "Walking", "Jogging"]
    # Normalise counts to percentages of the grand total so colours are comparable.
    normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32) / np.sum(confusion_matrix) * 100
    plt.imshow(normalised_confusion_matrix, interpolation='nearest', cmap=plt.cm.rainbow)
    plt.colorbar()
    tick_marks = np.arange(6)
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.show()
    plt.close()
def plot_gridsearch(clf, X, y):
    """Grid-search max_depth x min_samples_leaf for ``clf`` and heat-map the scores.

    Runs a 5x5 grid search (depths 1..5, min-leaf fractions 0.01..0.05) with
    2-fold CV on accuracy, then plots mean scores as a colour matrix.

    NOTE(review): uses the deprecated ``sklearn.grid_search.GridSearchCV`` and
    its ``grid_scores_`` attribute — only works on old scikit-learn versions.
    """
    plt.title("Parameters Matrix")
    plt.xlabel('Max_Depth')
    plt.ylabel('Min_Samples_Leaf')
    # 5 candidate values for each of the two hyper-parameters.
    depth_range = np.linspace(1, 5, 5)
    min_leaf_range = np.linspace(0.01, 0.05, 5)
    param = dict(max_depth=depth_range, min_samples_leaf=min_leaf_range)
    grid_search = GridSearchCV(clf, param_grid=param, cv=2, scoring='accuracy')
    grid_search.fit(X, y)
    # x[1] is the mean validation score of each parameter combination.
    scores = [x[1] for x in grid_search.grid_scores_]
    scores = np.array(scores).reshape(len(depth_range), len(min_leaf_range))
    plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
    plt.imshow(scores, interpolation='nearest', cmap=plt.cm.rainbow)
    plt.colorbar()
    plt.xticks(np.arange(len(depth_range)), depth_range, rotation=45)
    plt.yticks(np.arange(len(min_leaf_range)), min_leaf_range)
    plt.show()
    plt.close()
def plot_learning_curve_cv_compare(X, y, X_S, y_S, clf, cv):
    """Plot side-by-side learning curves before/after class balancing.

    Left subplot: learning curve of ``clf`` on the original data (X, y).
    Right subplot: learning curve on the balanced data (X_S, y_S).
    Titles/labels are Chinese ("before/after balancing decision-tree learning
    curve", "training examples", "model accuracy", "training-set accuracy",
    "10-fold CV accuracy") rendered with the module-level ``myfont``.
    """
    # --- Left: original (unbalanced) data ---
    plt.subplot(121)
    plt.title("未均衡处理前决策树学习曲线", fontproperties=myfont, fontsize=12)
    plt.xlabel("训练样例数", fontproperties=myfont, fontsize=12)
    plt.ylabel("模型精度", fontproperties=myfont, fontsize=12)
    train_sizes, train_scores, test_scores = learning_curve(
        clf, X, y, cv=cv, n_jobs=1)
    # Aggregate scores across CV splits.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # Shaded +/- 1 std bands around each curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, "o-",
             color="r", label="训练集精度")
    plt.plot(train_sizes, test_scores_mean, "o-",
             color="g", label="十折交叉精度")
    plt.legend(loc="best", prop=myfont, fontsize=12)
    # --- Right: balanced data ---
    plt.subplot(122)
    plt.title("均衡处理后决策树学习曲线", fontproperties=myfont, fontsize=12)
    plt.xlabel("训练样例数", fontproperties=myfont, fontsize=12)
    plt.ylabel("模型精度", fontproperties=myfont, fontsize=12)
    train_sizes, train_scores, test_scores = learning_curve(
        clf, X_S, y_S, cv=cv, n_jobs=1)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, "o-",
             color="r", label="训练集精度")
    plt.plot(train_sizes, test_scores_mean, "o-",
             color="g", label="十折交叉精度")
    plt.legend(loc="best", prop=myfont, fontsize=12)
    plt.show()
    plt.close()
| 38.745283
| 131
| 0.697224
| 1,240
| 8,214
| 4.341129
| 0.124194
| 0.122608
| 0.066877
| 0.046814
| 0.777819
| 0.766673
| 0.741408
| 0.741408
| 0.723946
| 0.714286
| 0
| 0.017102
| 0.174215
| 8,214
| 211
| 132
| 38.92891
| 0.7765
| 0
| 0
| 0.622642
| 0
| 0
| 0.071829
| 0.003044
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044025
| false
| 0
| 0.056604
| 0
| 0.100629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
33381f3fd9b9178c88e3f503b4660a2596953f19
| 193
|
py
|
Python
|
registraion_and_login/home/views.py
|
n3k0fi5t/Django_Tutorial
|
e3953335ca88fe22c68268fd76afb7c4f9bbb55f
|
[
"MIT"
] | null | null | null |
registraion_and_login/home/views.py
|
n3k0fi5t/Django_Tutorial
|
e3953335ca88fe22c68268fd76afb7c4f9bbb55f
|
[
"MIT"
] | null | null | null |
registraion_and_login/home/views.py
|
n3k0fi5t/Django_Tutorial
|
e3953335ca88fe22c68268fd76afb7c4f9bbb55f
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.views import View
# Create your views here.
class Home(View):
    """Class-based view that renders the site home page."""

    def get(self, request):
        """Handle GET by rendering the ``home.html`` template."""
        template_name = 'home.html'
        return render(request, template_name)
| 24.125
| 45
| 0.725389
| 27
| 193
| 5.185185
| 0.703704
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181347
| 193
| 7
| 46
| 27.571429
| 0.886076
| 0.119171
| 0
| 0
| 0
| 0
| 0.053571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
334063f658716fd608f319805d5064f01bd6cdc2
| 157
|
py
|
Python
|
tasks/custom_fields.py
|
sh4rpy/todo_api
|
6395da91099c2c6b46acb850bc30880db7c6800d
|
[
"BSD-3-Clause"
] | null | null | null |
tasks/custom_fields.py
|
sh4rpy/todo_api
|
6395da91099c2c6b46acb850bc30880db7c6800d
|
[
"BSD-3-Clause"
] | null | null | null |
tasks/custom_fields.py
|
sh4rpy/todo_api
|
6395da91099c2c6b46acb850bc30880db7c6800d
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import fields
class CustomChoiceField(fields.ChoiceField):
    """ChoiceField that serializes a stored value to its display label."""

    def to_representation(self, value):
        """Return the human-readable label mapped to ``value`` in the choices."""
        choice_labels = self._choices
        return choice_labels[value]
| 22.428571
| 44
| 0.770701
| 18
| 157
| 6.555556
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159236
| 157
| 6
| 45
| 26.166667
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
335c9b7b72bc5d95d0af505d11f3a4b9b1e2e7c1
| 6,005
|
py
|
Python
|
test/neo4j_graphdatabaseservice_match.py
|
fabsx00/py2neo
|
80f6605499ee4cec4b338f15453e8f509a09468a
|
[
"Apache-2.0"
] | null | null | null |
test/neo4j_graphdatabaseservice_match.py
|
fabsx00/py2neo
|
80f6605499ee4cec4b338f15453e8f509a09468a
|
[
"Apache-2.0"
] | null | null | null |
test/neo4j_graphdatabaseservice_match.py
|
fabsx00/py2neo
|
80f6605499ee4cec4b338f15453e8f509a09468a
|
[
"Apache-2.0"
] | 1
|
2021-10-08T03:41:54.000Z
|
2021-10-08T03:41:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from py2neo import neo4j
class MatchTestCase(unittest.TestCase):
    """Integration tests for GraphDatabaseService.match()/match_one().

    Requires a running Neo4j server reachable by py2neo; each test starts
    from a cleared database populated with three people and six
    relationships (Alice<->Bob LOVES+KNOWS, Bob<->Carol KNOWS).
    """
    def setUp(self):
        # Connect and wipe the database, then create the fixture graph.
        self.graph_db = neo4j.GraphDatabaseService()
        self.graph_db.clear()
        # Nodes are given as property dicts; relationships reference the
        # nodes by their positional index in this same call.
        stuff = self.graph_db.create(
            {"name": "Alice"},
            {"name": "Bob"},
            {"name": "Carol"},
            (0, "LOVES", 1),
            (1, "LOVES", 0),
            (0, "KNOWS", 1),
            (1, "KNOWS", 0),
            (1, "KNOWS", 2),
            (2, "KNOWS", 1),
        )
        self.alice, self.bob, self.carol = stuff[0:3]
    def test_can_match_all(self):
        # No filter: all six fixture relationships match.
        rels = self.graph_db.match()
        assert len(rels) == 6
    def test_will_return_empty_list_on_no_match(self):
        rels = self.graph_db.match(rel_type="HATES")
        assert len(rels) == 0
    def test_can_match_start_node(self):
        # Outgoing relationships from Alice: LOVES and KNOWS, both to Bob.
        rels = self.graph_db.match(start_node=self.alice)
        assert len(rels) == 2
        assert "KNOWS" in [rel.type for rel in rels]
        assert "LOVES" in [rel.type for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_match_type_only(self):
        rels = self.graph_db.match(rel_type="LOVES")
        assert len(rels) == 2
        assert self.alice in [rel.start_node for rel in rels]
        assert self.bob in [rel.start_node for rel in rels]
        assert self.alice in [rel.end_node for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_match_start_node_and_type(self):
        rels = self.graph_db.match(start_node=self.alice, rel_type="KNOWS")
        assert len(rels) == 1
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_match_start_node_and_end_node(self):
        rels = self.graph_db.match(start_node=self.alice, end_node=self.bob)
        assert len(rels) == 2
        assert "KNOWS" in [rel.type for rel in rels]
        assert "LOVES" in [rel.type for rel in rels]
    def test_can_match_type_and_end_node(self):
        rels = self.graph_db.match(rel_type="KNOWS", end_node=self.bob)
        assert len(rels) == 2
        assert self.alice in [rel.start_node for rel in rels]
        assert self.carol in [rel.start_node for rel in rels]
    def test_can_bidi_match_start_node(self):
        # bidirectional=True matches relationships in both directions,
        # so all six fixture relationships touch Bob.
        rels = self.graph_db.match(start_node=self.bob, bidirectional=True)
        assert len(rels) == 6
        assert "KNOWS" in [rel.type for rel in rels]
        assert "LOVES" in [rel.type for rel in rels]
        assert self.alice in [rel.start_node for rel in rels]
        assert self.bob in [rel.start_node for rel in rels]
        assert self.carol in [rel.start_node for rel in rels]
        assert self.alice in [rel.end_node for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
        assert self.carol in [rel.end_node for rel in rels]
    def test_can_bidi_match_start_node_and_type(self):
        rels = self.graph_db.match(start_node=self.bob, rel_type="KNOWS", bidirectional=True)
        assert len(rels) == 4
        assert self.alice in [rel.start_node for rel in rels]
        assert self.bob in [rel.start_node for rel in rels]
        assert self.carol in [rel.start_node for rel in rels]
        assert self.alice in [rel.end_node for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
        assert self.carol in [rel.end_node for rel in rels]
    def test_can_bidi_match_start_node_and_end_node(self):
        rels = self.graph_db.match(start_node=self.alice, end_node=self.bob, bidirectional=True)
        assert len(rels) == 4
        assert "KNOWS" in [rel.type for rel in rels]
        assert "LOVES" in [rel.type for rel in rels]
        assert self.alice in [rel.start_node for rel in rels]
        assert self.bob in [rel.start_node for rel in rels]
        assert self.alice in [rel.end_node for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_bidi_match_type_and_end_node(self):
        rels = self.graph_db.match(rel_type="KNOWS", end_node=self.bob, bidirectional=True)
        assert len(rels) == 4
        assert self.alice in [rel.start_node for rel in rels]
        assert self.carol in [rel.start_node for rel in rels]
        # NOTE(review): the next assert duplicates the alice check above —
        # harmless, but likely an editing slip in the original.
        assert self.alice in [rel.start_node for rel in rels]
        assert self.bob in [rel.start_node for rel in rels]
        assert self.carol in [rel.start_node for rel in rels]
        assert self.alice in [rel.end_node for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
        assert self.carol in [rel.end_node for rel in rels]
    def test_can_match_with_limit(self):
        rels = self.graph_db.match(limit=3)
        assert len(rels) == 3
    def test_can_match_one(self):
        rel = self.graph_db.match_one()
        assert isinstance(rel, neo4j.Relationship)
    def test_can_match_none(self):
        rels = self.graph_db.match(rel_type="HATES", limit=1)
        assert len(rels) == 0
    def test_can_match_multiple_types(self):
        # rel_type accepts a tuple of alternatives.
        rels = self.graph_db.match(rel_type=("LOVES", "KNOWS"))
        assert len(rels) == 6
    def test_can_match_start_node_and_multiple_types(self):
        rels = self.graph_db.match(start_node=self.alice, rel_type=("LOVES",
            "KNOWS"))
        assert len(rels) == 2
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 40.033333
| 96
| 0.64746
| 951
| 6,005
| 3.914827
| 0.135647
| 0.05372
| 0.085952
| 0.128928
| 0.75047
| 0.741875
| 0.723073
| 0.704808
| 0.67553
| 0.633092
| 0
| 0.010712
| 0.253789
| 6,005
| 149
| 97
| 40.302013
| 0.820129
| 0.099251
| 0
| 0.486239
| 0
| 0
| 0.029297
| 0
| 0
| 0
| 0
| 0
| 0.513761
| 1
| 0.155963
| false
| 0
| 0.018349
| 0
| 0.183486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
684294df45321952340bc52f17c2d1eae0ca8384
| 17
|
py
|
Python
|
tests/PyroTests/__init__.py
|
gabehack/Pyro4
|
88f88bf1ccdfaff8c2bbbda8fc032a145d07d44b
|
[
"MIT"
] | 638
|
2015-01-04T14:59:55.000Z
|
2022-03-29T02:28:39.000Z
|
tests/PyroTests/__init__.py
|
gabehack/Pyro4
|
88f88bf1ccdfaff8c2bbbda8fc032a145d07d44b
|
[
"MIT"
] | 173
|
2015-01-05T17:29:19.000Z
|
2021-12-25T01:47:07.000Z
|
tests/PyroTests/__init__.py
|
gabehack/Pyro4
|
88f88bf1ccdfaff8c2bbbda8fc032a145d07d44b
|
[
"MIT"
] | 103
|
2015-01-10T10:00:08.000Z
|
2022-03-06T14:19:20.000Z
|
# just a package
| 8.5
| 16
| 0.705882
| 3
| 17
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 17
| 1
| 17
| 17
| 0.923077
| 0.823529
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6878d123701cd7cc382ad174061d03e2a02c6fc6
| 212
|
py
|
Python
|
potemkin/__init__.py
|
stelligent/potemkin-decorator
|
2d30cf07a580f5aad67d7d595e3bcd622bc0e232
|
[
"MIT"
] | 9
|
2020-03-25T02:20:54.000Z
|
2021-12-29T08:09:17.000Z
|
potemkin/__init__.py
|
stelligent/potemkin-decorator
|
2d30cf07a580f5aad67d7d595e3bcd622bc0e232
|
[
"MIT"
] | 12
|
2020-03-24T17:42:45.000Z
|
2020-05-08T21:46:59.000Z
|
potemkin/__init__.py
|
stelligent/potemkin-decorator
|
2d30cf07a580f5aad67d7d595e3bcd622bc0e232
|
[
"MIT"
] | 1
|
2020-08-25T13:47:30.000Z
|
2020-08-25T13:47:30.000Z
|
"""
Potemkin decorator that allows using CloudFormation to set up initial conditions for boto integration testing
"""
from .cloudformationstack import CloudFormationStack
from .terraformresources import TerraformResources
| 35.333333
| 99
| 0.853774
| 21
| 212
| 8.619048
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108491
| 212
| 5
| 100
| 42.4
| 0.957672
| 0.466981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d79f8fdcba43f2728c90f120f4678d0c779cd4b6
| 161
|
py
|
Python
|
Cable/VoiceLeader.py
|
jameswenzel/Cable-
|
c8876bfd6c0272d0e03cbd41c9d3f278ada9884f
|
[
"MIT"
] | 1
|
2019-07-30T10:25:42.000Z
|
2019-07-30T10:25:42.000Z
|
Cable/VoiceLeader.py
|
jameswenzel/Cable
|
c8876bfd6c0272d0e03cbd41c9d3f278ada9884f
|
[
"MIT"
] | 2
|
2018-04-22T05:25:20.000Z
|
2018-04-24T17:31:53.000Z
|
Cable/VoiceLeader.py
|
jameswenzel/Cable
|
c8876bfd6c0272d0e03cbd41c9d3f278ada9884f
|
[
"MIT"
] | null | null | null |
from .Cable import Cable, Chord
def lead(cable, *chords):
    """Print how many chord fingering lists ``cable`` generates.

    Each chord is passed to ``cable.generate`` and its result is
    materialised into a list; only the count is printed.
    """
    chord_fingerings = [list(cable.generate(chord)) for chord in chords]
    print(len(chord_fingerings))
| 23
| 67
| 0.720497
| 22
| 161
| 5.181818
| 0.590909
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149068
| 161
| 6
| 68
| 26.833333
| 0.832117
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d7d43af2582a9808759e7cc338f6140684cb96a4
| 225
|
py
|
Python
|
Tests/test_class.py
|
AlexWaygood/Pyjion
|
974bd3cf434fad23fbfa1ea9acf43e3387a5c21f
|
[
"MIT"
] | null | null | null |
Tests/test_class.py
|
AlexWaygood/Pyjion
|
974bd3cf434fad23fbfa1ea9acf43e3387a5c21f
|
[
"MIT"
] | null | null | null |
Tests/test_class.py
|
AlexWaygood/Pyjion
|
974bd3cf434fad23fbfa1ea9acf43e3387a5c21f
|
[
"MIT"
] | null | null | null |
def test_add():
    """Verify __add__/__radd__ dispatch for int + instance in both orders."""
    class Four:
        def __add__(self, other):
            return 4 + other

        def __radd__(self, other):
            return other + 4

    value = Four()
    assert 3 + value == 7
    assert value + 3 == 7
| 17.307692
| 34
| 0.488889
| 29
| 225
| 3.482759
| 0.482759
| 0.178218
| 0.29703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045113
| 0.408889
| 225
| 12
| 35
| 18.75
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.333333
| false
| 0
| 0
| 0.222222
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
d7eb83ba95f2f0229ae5aa62c5fdf20dcca45e02
| 123
|
py
|
Python
|
groups/admin.py
|
weareua/MarkIT
|
ef45c2814785911ca5cc2e5dc4ff7beea2f49a7f
|
[
"MIT"
] | null | null | null |
groups/admin.py
|
weareua/MarkIT
|
ef45c2814785911ca5cc2e5dc4ff7beea2f49a7f
|
[
"MIT"
] | null | null | null |
groups/admin.py
|
weareua/MarkIT
|
ef45c2814785911ca5cc2e5dc4ff7beea2f49a7f
|
[
"MIT"
] | 1
|
2019-03-11T14:55:03.000Z
|
2019-03-11T14:55:03.000Z
|
from django.contrib import admin
from groups.models import Group
# Register your models here.
admin.site.register(Group)
| 17.571429
| 32
| 0.804878
| 18
| 123
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130081
| 123
| 6
| 33
| 20.5
| 0.925234
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d7fdf158c99c9320e3d06e459cd913fc8587a735
| 45
|
py
|
Python
|
restwice/rest_exception.py
|
zugaldia/restwice
|
9f5fe20adf13a6af0cdb158bc414fd2391f2c51c
|
[
"MIT"
] | 7
|
2017-03-07T17:43:20.000Z
|
2021-02-11T08:36:51.000Z
|
restwice/rest_exception.py
|
worldbank/cv4ag
|
012bbd3024b4a069ed5d0a1c6c12354fbca4aa66
|
[
"MIT"
] | 6
|
2017-02-14T08:15:16.000Z
|
2017-03-07T17:42:11.000Z
|
restwice/rest_exception.py
|
zugaldia/restwice
|
9f5fe20adf13a6af0cdb158bc414fd2391f2c51c
|
[
"MIT"
] | 4
|
2017-08-15T12:02:54.000Z
|
2020-02-10T08:04:31.000Z
|
class RestwiceException(Exception):
pass
| 15
| 35
| 0.777778
| 4
| 45
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 36
| 22.5
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d7ff4d9126d08697b44c9faa503366402657e854
| 46
|
py
|
Python
|
xmatters/util/__init__.py
|
matthewhenry1/pyxmatters
|
9c376e370b30f0b8b2e589ba05fed26dad2814ae
|
[
"MIT"
] | 1
|
2021-08-09T17:33:38.000Z
|
2021-08-09T17:33:38.000Z
|
xmatters/util/__init__.py
|
xmatters/pyxmatters
|
ebe4a88558b7eb3408b59fbf9cabed3a748601a0
|
[
"MIT"
] | null | null | null |
xmatters/util/__init__.py
|
xmatters/pyxmatters
|
ebe4a88558b7eb3408b59fbf9cabed3a748601a0
|
[
"MIT"
] | 2
|
2020-04-03T22:30:32.000Z
|
2020-04-20T13:44:18.000Z
|
from .column import *
from .timecalc import *
| 15.333333
| 23
| 0.73913
| 6
| 46
| 5.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 24
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0bdeed06e8fae820321cc0ed2e6f0a2b0c30294d
| 71
|
py
|
Python
|
enthought/pyface/workbench/action/action_controller.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/workbench/action/action_controller.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/workbench/action/action_controller.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.workbench.action.action_controller import *
| 23.666667
| 55
| 0.830986
| 9
| 71
| 6.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098592
| 71
| 2
| 56
| 35.5
| 0.90625
| 0.169014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
045c71bb3325dc8f5d6d21f58e474e4f6a3219d8
| 173
|
py
|
Python
|
server/src/resources/__init__.py
|
marinewigniolle/centrale
|
a4ff37e0de9737250dd4885671054797d103d5cf
|
[
"MIT"
] | null | null | null |
server/src/resources/__init__.py
|
marinewigniolle/centrale
|
a4ff37e0de9737250dd4885671054797d103d5cf
|
[
"MIT"
] | null | null | null |
server/src/resources/__init__.py
|
marinewigniolle/centrale
|
a4ff37e0de9737250dd4885671054797d103d5cf
|
[
"MIT"
] | null | null | null |
from .user import UserResource
from .movie import MovieResource
from .movie import MovieALLResource
from .marks import MarksResource
from .marks import MarksAverageResource
| 28.833333
| 39
| 0.855491
| 20
| 173
| 7.4
| 0.5
| 0.121622
| 0.202703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115607
| 173
| 5
| 40
| 34.6
| 0.96732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f08a4aceed89dd3bb2dd908ef00a72e1bf19cf8d
| 63
|
py
|
Python
|
tests/unicode/unicode_iter.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 13,648
|
2015-01-01T01:34:51.000Z
|
2022-03-31T16:19:53.000Z
|
tests/unicode/unicode_iter.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 7,092
|
2015-01-01T07:59:11.000Z
|
2022-03-31T23:52:18.000Z
|
tests/unicode/unicode_iter.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 4,942
|
2015-01-02T11:48:50.000Z
|
2022-03-31T19:57:10.000Z
|
for c in "Hello":
print(c)
for c in "Привет":
print(c)
| 12.6
| 18
| 0.555556
| 12
| 63
| 2.916667
| 0.5
| 0.228571
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 63
| 4
| 19
| 15.75
| 0.777778
| 0
| 0
| 0.5
| 0
| 0
| 0.174603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f09437d6ae096a8ef64b68e1f6a88a1b15d428f1
| 77
|
py
|
Python
|
app/DjangoTestFramework/models/generator.py
|
Vallher/django-test
|
821d9bd51f5d6b23be82ecc609624315549d568c
|
[
"MIT"
] | null | null | null |
app/DjangoTestFramework/models/generator.py
|
Vallher/django-test
|
821d9bd51f5d6b23be82ecc609624315549d568c
|
[
"MIT"
] | null | null | null |
app/DjangoTestFramework/models/generator.py
|
Vallher/django-test
|
821d9bd51f5d6b23be82ecc609624315549d568c
|
[
"MIT"
] | null | null | null |
class BaseGenerator:
pass
class CharGenerator(BaseGenerator):
pass
| 11
| 35
| 0.74026
| 7
| 77
| 8.142857
| 0.571429
| 0.596491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207792
| 77
| 6
| 36
| 12.833333
| 0.934426
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.