hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7216293508f30856c09ec8f6cc0f0a4c59f840b | 400 | py | Python | test/distributed/test_ddp_under_dist_autograd.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | test/distributed/test_ddp_under_dist_autograd.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | test/distributed/test_ddp_under_dist_autograd.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | #!/usr/bin/env python3
from torch.testing._internal.distributed import ddp_under_dist_autograd_test
from torch.testing._internal.common_utils import (
run_tests,
)
# Re-export the shared DDP-under-dist-autograd suite so it runs in this file.
class TestDdpUnderDistAutogradWrapper(ddp_under_dist_autograd_test.TestDdpUnderDistAutograd):
    pass
# Re-export the shared DDP comparison suite so it runs in this file.
class TestDdpComparison(ddp_under_dist_autograd_test.TestDdpComparison):
    pass
if __name__ == "__main__":
    # Run the wrapped suites through torch's common test entry point.
    run_tests()
| 25 | 93 | 0.82 |
from torch.testing._internal.distributed import ddp_under_dist_autograd_test
from torch.testing._internal.common_utils import (
run_tests,
)
class TestDdpUnderDistAutogradWrapper(ddp_under_dist_autograd_test.TestDdpUnderDistAutograd):
pass
class TestDdpComparison(ddp_under_dist_autograd_test.TestDdpComparison):
pass
if __name__ == "__main__":
run_tests()
| true | true |
f7216360a3f39f268083811c68d247e2aa9fdaad | 5,037 | py | Python | models/pointnet_seg.py | 3D-semantic-Sgmentation/pointnet | 029c0217143e6b69e685ab57cf243e322d47860f | [
"MIT"
] | null | null | null | models/pointnet_seg.py | 3D-semantic-Sgmentation/pointnet | 029c0217143e6b69e685ab57cf243e322d47860f | [
"MIT"
] | null | null | null | models/pointnet_seg.py | 3D-semantic-Sgmentation/pointnet | 029c0217143e6b69e685ab57cf243e322d47860f | [
"MIT"
] | null | null | null | # import tensorflow as tf
import numpy as np
import math
import sys
import os
import tensorflow.compat.v1 as tf
import tensorflow as tf2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
    """Build graph-mode placeholders for one batch of point clouds.

    Returns a pair (pointclouds_pl, labels_pl): a float32 placeholder of
    shape (batch_size, num_point, 3) and an int32 placeholder of shape
    (batch_size, num_point).
    """
    # Placeholders only exist in graph mode; TF2 defaults to eager.
    tf.compat.v1.disable_eager_execution()
    points_shape = (batch_size, num_point, 3)
    labels_shape = (batch_size, num_point)
    pointclouds_pl = tf.placeholder(tf.float32, shape=points_shape)
    labels_pl = tf.placeholder(tf.int32, shape=labels_shape)
    return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    # NOTE(review): the final conv ('conv10') emits 9 channels, so this
    # segmentation variant actually outputs BxNx9 — the "BxNx50" in the
    # docstring appears stale; confirm against the training code.
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]
    end_points = {}

    # Learn a 3x3 input transform and align the raw points with it.
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    # Add a trailing channel dim (BxNx3x1) so 2D convs can consume the points.
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    # Per-point MLP (64, 64) implemented as 1xK convolutions.
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    # Learn a 64x64 feature-space transform; kept in end_points so the loss
    # can regularize it towards orthogonality.
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    # Per-point 64-d features, kept for later concatenation with the global feature.
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    # Per-point MLP (64, 128, 1024).
    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # Symmetric max pooling over all points -> global shape feature.
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    # Broadcast the global feature back to every point and concatenate with
    # the per-point features along the channel axis.
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)

    # Per-point segmentation head (512, 256, 128, 128 -> 9 logits, no
    # activation on the final layer).
    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 9, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC
    return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: BxNxC,
        label: BxN,

    Returns the mean per-point cross-entropy plus a weighted orthogonality
    penalty on the learned feature transform.
    """
    # Per-point softmax cross-entropy, averaged over batch and points.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf2.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix: penalize
    # ||T * T^t - I||^2 on the KxK feature transform from get_model.
    transform = end_points['transform'] # BxKxK
    K = transform.get_shape()[1]
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf2.summary.scalar('mat_loss', mat_diff_loss)
    return classify_loss + mat_diff_loss * reg_weight
if __name__=='__main__':
    # Smoke test: build the model and loss graph on dummy tensors.
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,3))
        labels = tf.zeros((32,1024))
        print(labels.shape.rank)
        pred, end_points = get_model(inputs, tf.constant(True))
        loss = get_loss(pred, labels, end_points)
        # Bug fix: `outputs` was never defined (NameError on every run);
        # print the tensors that were actually built instead.
        print(pred, loss)
| 40.95122 | 84 | 0.592416 |
import numpy as np
import math
import sys
import os
import tensorflow.compat.v1 as tf
import tensorflow as tf2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
tf.compat.v1.disable_eager_execution()
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32,
shape=(batch_size, num_point))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
batch_size = point_cloud.get_shape()[0]
num_point = point_cloud.get_shape()[1]
end_points = {}
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
point_feat = tf.expand_dims(net_transformed, [2])
print(point_feat)
net = tf_util.conv2d(point_feat, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
global_feat = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
print(global_feat)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
print(concat_feat)
net = tf_util.conv2d(concat_feat, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9', bn_decay=bn_decay)
net = tf_util.conv2d(net, 9, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10')
net = tf.squeeze(net, [2])
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf2.summary.scalar('classify loss', classify_loss)
transform = end_points['transform']
K = transform.get_shape()[1]
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf2.summary.scalar('mat_loss', mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
if __name__=='__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,3))
        labels = tf.zeros((32,1024))
        print(labels.shape.rank)
        pred, end_points = get_model(inputs, tf.constant(True))
        loss = get_loss(pred, labels, end_points)
        # Bug fix: `outputs` was never defined (NameError); print the built tensors.
        print(pred, loss)
| true | true |
f721636de9ed88c4501fc4920a1f38058472b148 | 8,344 | py | Python | tests/adapters/test_dataframe_input.py | vedashree29296/BentoML | 79f94d543a0684e04551207d102a2d254b770ad3 | [
"Apache-2.0"
] | null | null | null | tests/adapters/test_dataframe_input.py | vedashree29296/BentoML | 79f94d543a0684e04551207d102a2d254b770ad3 | [
"Apache-2.0"
] | null | null | null | tests/adapters/test_dataframe_input.py | vedashree29296/BentoML | 79f94d543a0684e04551207d102a2d254b770ad3 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=redefined-outer-name
import itertools
import json
import math
import time
import flask
import numpy as np
import pandas as pd
import psutil # noqa # pylint: disable=unused-import
import pytest
from bentoml.adapters import DataframeInput
from bentoml.adapters.dataframe_input import read_dataframes_from_json_n_csv
from bentoml.utils.csv import csv_splitlines
from bentoml.utils.dataframe_util import guess_orient
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_dataframe_request_schema():
    # DataframeInput with an explicit dtype map should expose a JSON request
    # schema containing one array-typed property per column, with OpenAPI
    # item types (int -> integer, float -> number, string -> string).
    input_adapter = DataframeInput(
        dtype={"col1": "int", "col2": "float", "col3": "string"}
    )

    schema = input_adapter.request_schema["application/json"]["schema"]
    assert "object" == schema["type"]
    assert 3 == len(schema["properties"])
    assert "array" == schema["properties"]["col1"]["type"]
    assert "integer" == schema["properties"]["col1"]["items"]["type"]
    assert "number" == schema["properties"]["col2"]["items"]["type"]
    assert "string" == schema["properties"]["col3"]["items"]["type"]
def test_dataframe_handle_cli(capsys, make_api, tmpdir):
    # handle_cli should read a JSON file given via --input-file and print the
    # API function's result ("name" column here) to stdout.
    def test_func(df):
        return df["name"]

    input_adapter = DataframeInput()
    api = make_api(input_adapter, test_func)

    json_file = tmpdir.join("test.json")
    with open(str(json_file), "w") as f:
        f.write('[{"name": "john","game": "mario","city": "sf"}]')

    test_args = ["--input-file", str(json_file)]
    api.handle_cli(test_args)
    out, _ = capsys.readouterr()
    assert "john" in out
def test_dataframe_handle_aws_lambda_event(make_api):
    # A JSON body in an AWS Lambda event must be accepted with or without a
    # Content-Type header; an unparseable body must yield HTTP 400.
    test_content = '[{"name": "john","game": "mario","city": "sf"}]'

    def test_func(df):
        return df["name"]

    input_adapter = DataframeInput()
    api = make_api(input_adapter, test_func)
    event = {
        "headers": {"Content-Type": "application/json"},
        "body": test_content,
    }
    response = api.handle_aws_lambda_event(event)
    assert response["statusCode"] == 200
    assert response["body"] == '[{"name":"john"}]'

    # Same payload, no Content-Type header: still expected to parse as JSON.
    event_without_content_type_header = {
        "headers": {},
        "body": test_content,
    }
    response = api.handle_aws_lambda_event(event_without_content_type_header)
    assert response["statusCode"] == 200
    assert response["body"] == '[{"name":"john"}]'

    # Garbage body -> client error.
    event_with_bad_input = {
        "headers": {},
        "body": "bad_input_content",
    }
    response = api.handle_aws_lambda_event(event_with_bad_input)
    assert response["statusCode"] == 400
def test_dataframe_handle_request_csv(make_api):
    # A flask request carrying text/csv content should be parsed into a
    # DataFrame and the selected column serialized back as JSON.
    def test_func(df):
        return df["name"]

    input_adapter = DataframeInput()
    api = make_api(input_adapter, test_func)

    csv_data = b'name,game,city\njohn,mario,sf'
    request = MagicMock(spec=flask.Request)
    request.headers = {'Content-Type': 'text/csv'}
    request.get_data.return_value = csv_data

    result = api.handle_request(request)
    assert result.get_data().decode('utf-8') == '[{"name":"john"}]'
def assert_df_equal(left: pd.DataFrame, right: pd.DataFrame):
    '''
    Compare two instances of pandas.DataFrame ignoring index and columns.

    Floating-point frames are compared approximately; everything else is
    compared exactly. Raises AssertionError with both frames rendered when
    they differ.
    '''
    try:
        left_array = left.values
        right_array = right.values
        # Bug fix: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24, so the old `dtype == np.float` check crashes on modern NumPy
        # (and only matched float64 anyway). Check for any floating dtype.
        if np.issubdtype(right_array.dtype, np.floating):
            np.testing.assert_array_almost_equal(left_array, right_array)
        else:
            np.testing.assert_array_equal(left_array, right_array)
    except AssertionError:
        raise AssertionError(
            f"\n{left.to_string()}\n is not equal to \n{right.to_string()}\n"
        )
# Frames used to parametrize the `df` fixture: numeric frames of several
# shapes, a string frame, and special values (NaN, quotes, commas, newlines)
# that stress CSV/JSON round-tripping.
DF_CASES = (
    pd.DataFrame(np.random.rand(1, 3)),
    pd.DataFrame(np.random.rand(2, 3)),
    pd.DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C']),
    pd.DataFrame(["str1", "str2", "str3"]),  # single dim sting array
    pd.DataFrame([np.nan]),  # special values
    pd.DataFrame([math.nan]),  # special values
    pd.DataFrame([" ", 'a"b', "a,b", "a\nb"]),  # special values
    pd.DataFrame({"test": [" ", 'a"b', "a,b", "a\nb"]}),  # special values
    # pd.Series(np.random.rand(2)),  # TODO: Series support
    # pd.DataFrame([""]),  # TODO: -> NaN
)


@pytest.fixture(params=DF_CASES)
def df(request):
    # Parametrized sample DataFrame (one test run per DF_CASES entry).
    return request.param


@pytest.fixture(params=pytest.DF_ORIENTS)
def orient(request):
    # Parametrized pandas JSON orient string. NOTE(review): pytest.DF_ORIENTS
    # is injected elsewhere (presumably conftest) — not visible in this file.
    return request.param
def test_batch_read_dataframes_from_mixed_json_n_csv(df):
    # Feed the same frame serialized as JSON (every orient that pandas can
    # round-trip) and as CSV into one batched call; each returned slice must
    # equal the original frame.
    test_datas = []
    test_types = []

    # test content_type=application/json with various orients
    for orient in pytest.DF_ORIENTS:
        try:
            assert_df_equal(df, pd.read_json(df.to_json(orient=orient)))
        except (AssertionError, ValueError):
            # skip cases not supported by official pandas
            continue
        test_datas.extend([df.to_json(orient=orient).encode()] * 3)
        test_types.extend(['json'] * 3)

    test_datas.extend([df.to_csv(index=False).encode()] * 3)
    test_types.extend(['csv'] * 3)

    df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types)
    # counts[i] is the row count contributed by input i; walk the merged
    # frame slice by slice and compare each against the original.
    i = 0
    for count in counts:
        assert_df_equal(df_merged[i : i + count], df)
        i += count
def test_batch_read_dataframes_from_csv_other_CRLF(df):
    # The CSV reader must accept either line-ending convention, so flip
    # whichever convention to_csv produced on this platform before parsing.
    csv_str = df.to_csv(index=False)
    if '\r\n' in csv_str:
        csv_str = '\n'.join(csv_splitlines(csv_str)).encode()
    else:
        csv_str = '\r\n'.join(csv_splitlines(csv_str)).encode()
    df_merged, _ = read_dataframes_from_json_n_csv([csv_str], ['csv'])
    assert_df_equal(df_merged, df)
def test_batch_read_dataframes_from_json_of_orients(df, orient):
    # With the orient passed explicitly, three identical JSON payloads should
    # parse into three identical slices of the merged frame.
    test_datas = [df.to_json(orient=orient).encode()] * 3
    test_types = ['json'] * 3
    df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
    i = 0
    for count in counts:
        assert_df_equal(df_merged[i : i + count], df)
        i += count
def test_batch_read_dataframes_from_json_with_wrong_orients(df, orient):
    # Payloads serialized with orient='table' but declared with a different
    # orient should be rejected: nothing merged, every per-input count falsy.
    test_datas = [df.to_json(orient='table').encode()] * 3
    test_types = ['json'] * 3
    df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
    assert not df_merged
    for count in counts:
        assert not count
def test_batch_read_dataframes_from_json_in_mixed_order():
    # different column order when orient=records
    df_json = b'[{"A": 1, "B": 2, "C": 3}, {"C": 6, "A": 2, "B": 4}]'
    df_merged, counts = read_dataframes_from_json_n_csv([df_json], ['json'])
    i = 0
    for count in counts:
        assert_df_equal(df_merged[i : i + count], pd.read_json(df_json))
        i += count

    # different row/column order when orient=columns; all three payloads
    # describe the same frame, so every merged slice must match the first.
    df_json1 = b'{"A": {"1": 1, "2": 2}, "B": {"1": 2, "2": 4}, "C": {"1": 3, "2": 6}}'
    df_json2 = b'{"B": {"1": 2, "2": 4}, "A": {"1": 1, "2": 2}, "C": {"1": 3, "2": 6}}'
    df_json3 = b'{"A": {"1": 1, "2": 2}, "B": {"2": 4, "1": 2}, "C": {"1": 3, "2": 6}}'
    df_merged, counts = read_dataframes_from_json_n_csv(
        [df_json1, df_json2, df_json3], ['json'] * 3
    )
    # Compare on a fixed column selection so column ordering is normalized.
    i = 0
    for count in counts:
        assert_df_equal(
            df_merged[i : i + count][["A", "B", "C"]],
            pd.read_json(df_json1)[["A", "B", "C"]],
        )
        i += count
def test_guess_orient(df, orient):
    # guess_orient should recover the orient used for serialization; it may
    # return several candidates, in which case the true orient must be
    # among them.
    json_str = df.to_json(orient=orient)
    guessed_orient = guess_orient(json.loads(json_str), strict=True)
    assert orient == guessed_orient or orient in guessed_orient
@pytest.mark.skipif('not psutil.POSIX')
def test_benchmark_load_dataframes():
    '''
    read_dataframes_from_json_n_csv should be 30x faster than pd.read_json + pd.concat
    '''
    test_count = 50

    dfs = [pd.DataFrame(np.random.rand(10, 100)) for _ in range(test_count)]
    inputs = [df.to_json().encode() for df in dfs]

    # Baseline: parse each payload individually and concatenate.
    time_st = time.time()
    dfs = [pd.read_json(i) for i in inputs]
    result1 = pd.concat(dfs)
    time1 = time.time() - time_st

    # Batched implementation under test must produce the same frame, faster.
    time_st = time.time()
    result2, _ = read_dataframes_from_json_n_csv(
        inputs, itertools.repeat('json'), 'columns'
    )
    time2 = time.time() - time_st
    assert_df_equal(result1, result2)

    # 5 is just an estimate on the smaller end, which should be true for most
    # development machines and Github actions CI environment, the actual ratio depends
    # on the hardware and available computing resource
    assert time1 / time2 > 5
| 32.341085 | 87 | 0.647651 |
import itertools
import json
import math
import time
import flask
import numpy as np
import pandas as pd
import psutil apters import DataframeInput
from bentoml.adapters.dataframe_input import read_dataframes_from_json_n_csv
from bentoml.utils.csv import csv_splitlines
from bentoml.utils.dataframe_util import guess_orient
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_dataframe_request_schema():
input_adapter = DataframeInput(
dtype={"col1": "int", "col2": "float", "col3": "string"}
)
schema = input_adapter.request_schema["application/json"]["schema"]
assert "object" == schema["type"]
assert 3 == len(schema["properties"])
assert "array" == schema["properties"]["col1"]["type"]
assert "integer" == schema["properties"]["col1"]["items"]["type"]
assert "number" == schema["properties"]["col2"]["items"]["type"]
assert "string" == schema["properties"]["col3"]["items"]["type"]
def test_dataframe_handle_cli(capsys, make_api, tmpdir):
def test_func(df):
return df["name"]
input_adapter = DataframeInput()
api = make_api(input_adapter, test_func)
json_file = tmpdir.join("test.json")
with open(str(json_file), "w") as f:
f.write('[{"name": "john","game": "mario","city": "sf"}]')
test_args = ["--input-file", str(json_file)]
api.handle_cli(test_args)
out, _ = capsys.readouterr()
assert "john" in out
def test_dataframe_handle_aws_lambda_event(make_api):
test_content = '[{"name": "john","game": "mario","city": "sf"}]'
def test_func(df):
return df["name"]
input_adapter = DataframeInput()
api = make_api(input_adapter, test_func)
event = {
"headers": {"Content-Type": "application/json"},
"body": test_content,
}
response = api.handle_aws_lambda_event(event)
assert response["statusCode"] == 200
assert response["body"] == '[{"name":"john"}]'
event_without_content_type_header = {
"headers": {},
"body": test_content,
}
response = api.handle_aws_lambda_event(event_without_content_type_header)
assert response["statusCode"] == 200
assert response["body"] == '[{"name":"john"}]'
event_with_bad_input = {
"headers": {},
"body": "bad_input_content",
}
response = api.handle_aws_lambda_event(event_with_bad_input)
assert response["statusCode"] == 400
def test_dataframe_handle_request_csv(make_api):
def test_func(df):
return df["name"]
input_adapter = DataframeInput()
api = make_api(input_adapter, test_func)
csv_data = b'name,game,city\njohn,mario,sf'
request = MagicMock(spec=flask.Request)
request.headers = {'Content-Type': 'text/csv'}
request.get_data.return_value = csv_data
result = api.handle_request(request)
assert result.get_data().decode('utf-8') == '[{"name":"john"}]'
def assert_df_equal(left: pd.DataFrame, right: pd.DataFrame):
    # Compare two DataFrames ignoring index/columns; float frames compared
    # approximately, everything else exactly.
    try:
        left_array = left.values
        right_array = right.values
        # Bug fix: `np.float` was removed in NumPy 1.24; match any floating dtype.
        if np.issubdtype(right_array.dtype, np.floating):
            np.testing.assert_array_almost_equal(left_array, right_array)
        else:
            np.testing.assert_array_equal(left_array, right_array)
    except AssertionError:
        raise AssertionError(
            f"\n{left.to_string()}\n is not equal to \n{right.to_string()}\n"
        )
DF_CASES = (
pd.DataFrame(np.random.rand(1, 3)),
pd.DataFrame(np.random.rand(2, 3)),
pd.DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C']),
pd.DataFrame(["str1", "str2", "str3"]),
pd.DataFrame([np.nan]),
pd.DataFrame([math.nan]),
pd.DataFrame([" ", 'a"b', "a,b", "a\nb"]), # special values
pd.DataFrame({"test": [" ", 'a"b', "a,b", "a\nb"]}),
CASES)
def df(request):
return request.param
@pytest.fixture(params=pytest.DF_ORIENTS)
def orient(request):
return request.param
def test_batch_read_dataframes_from_mixed_json_n_csv(df):
test_datas = []
test_types = []
for orient in pytest.DF_ORIENTS:
try:
assert_df_equal(df, pd.read_json(df.to_json(orient=orient)))
except (AssertionError, ValueError):
continue
test_datas.extend([df.to_json(orient=orient).encode()] * 3)
test_types.extend(['json'] * 3)
test_datas.extend([df.to_csv(index=False).encode()] * 3)
test_types.extend(['csv'] * 3)
df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types)
i = 0
for count in counts:
assert_df_equal(df_merged[i : i + count], df)
i += count
def test_batch_read_dataframes_from_csv_other_CRLF(df):
csv_str = df.to_csv(index=False)
if '\r\n' in csv_str:
csv_str = '\n'.join(csv_splitlines(csv_str)).encode()
else:
csv_str = '\r\n'.join(csv_splitlines(csv_str)).encode()
df_merged, _ = read_dataframes_from_json_n_csv([csv_str], ['csv'])
assert_df_equal(df_merged, df)
def test_batch_read_dataframes_from_json_of_orients(df, orient):
test_datas = [df.to_json(orient=orient).encode()] * 3
test_types = ['json'] * 3
df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
i = 0
for count in counts:
assert_df_equal(df_merged[i : i + count], df)
i += count
def test_batch_read_dataframes_from_json_with_wrong_orients(df, orient):
test_datas = [df.to_json(orient='table').encode()] * 3
test_types = ['json'] * 3
df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
assert not df_merged
for count in counts:
assert not count
def test_batch_read_dataframes_from_json_in_mixed_order():
df_json = b'[{"A": 1, "B": 2, "C": 3}, {"C": 6, "A": 2, "B": 4}]'
df_merged, counts = read_dataframes_from_json_n_csv([df_json], ['json'])
i = 0
for count in counts:
assert_df_equal(df_merged[i : i + count], pd.read_json(df_json))
i += count
df_json1 = b'{"A": {"1": 1, "2": 2}, "B": {"1": 2, "2": 4}, "C": {"1": 3, "2": 6}}'
df_json2 = b'{"B": {"1": 2, "2": 4}, "A": {"1": 1, "2": 2}, "C": {"1": 3, "2": 6}}'
df_json3 = b'{"A": {"1": 1, "2": 2}, "B": {"2": 4, "1": 2}, "C": {"1": 3, "2": 6}}'
df_merged, counts = read_dataframes_from_json_n_csv(
[df_json1, df_json2, df_json3], ['json'] * 3
)
i = 0
for count in counts:
assert_df_equal(
df_merged[i : i + count][["A", "B", "C"]],
pd.read_json(df_json1)[["A", "B", "C"]],
)
i += count
def test_guess_orient(df, orient):
json_str = df.to_json(orient=orient)
guessed_orient = guess_orient(json.loads(json_str), strict=True)
assert orient == guessed_orient or orient in guessed_orient
@pytest.mark.skipif('not psutil.POSIX')
def test_benchmark_load_dataframes():
test_count = 50
dfs = [pd.DataFrame(np.random.rand(10, 100)) for _ in range(test_count)]
inputs = [df.to_json().encode() for df in dfs]
time_st = time.time()
dfs = [pd.read_json(i) for i in inputs]
result1 = pd.concat(dfs)
time1 = time.time() - time_st
time_st = time.time()
result2, _ = read_dataframes_from_json_n_csv(
inputs, itertools.repeat('json'), 'columns'
)
time2 = time.time() - time_st
assert_df_equal(result1, result2)
assert time1 / time2 > 5
| true | true |
f721649ced49c4e8a9613dfffcb798078e8b305e | 383 | py | Python | vespa-cloud/cord-19-search/scripts/convert-to-feed.py | kuipertan/sample-apps | d52b942ea228336435d29a7ed007e72113aec827 | [
"Apache-2.0"
] | null | null | null | vespa-cloud/cord-19-search/scripts/convert-to-feed.py | kuipertan/sample-apps | d52b942ea228336435d29a7ed007e72113aec827 | [
"Apache-2.0"
] | null | null | null | vespa-cloud/cord-19-search/scripts/convert-to-feed.py | kuipertan/sample-apps | d52b942ea228336435d29a7ed007e72113aec827 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import sys
import json
# Read the exported JSON array and emit one Vespa feed operation per document.
json_file = sys.argv[1]

with open(json_file, 'r') as f:
    documents = json.load(f)

for document in documents:
    feed_operation = {
        'put': 'id:covid-19:doc::%s' % document['id'],
        'fields': document,
    }
    print(json.dumps(feed_operation))
| 23.9375 | 111 | 0.64752 |
import sys
import json
json_file = sys.argv[1]
with open(json_file, 'r') as f:
data = json.load(f)
for doc in data:
vespa_doc = {
'put': 'id:covid-19:doc::%s' % doc['id'],
'fields': doc
}
print(json.dumps(vespa_doc))
| true | true |
f72164ba62f9af6d6912ac1fc695a0949c138d93 | 1,051 | py | Python | webservice/search/zeroconf_factory.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | [
"Apache-2.0"
] | null | null | null | webservice/search/zeroconf_factory.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | [
"Apache-2.0"
] | 42 | 2016-07-04T11:17:54.000Z | 2018-03-18T18:36:09.000Z | webservice/search/zeroconf_factory.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
from webservice.search.pybonjour_service import PybonjourService
from webservice.search.zeroconf_service import ZeroconfService
class ZeroconfFactory(object):
    """Selects an available zeroconf backend for announcing a service."""

    @staticmethod
    def generate(name, port):
        """Return a service announcer for (name, port).

        Prefers pybonjour, then python-zeroconf; when neither backend
        reports support on this host, returns an inert MagicMock so callers
        can proceed without announcement.
        """
        if PybonjourService.has_support():
            return PybonjourService(name, port)
        if ZeroconfService.has_support():
            return ZeroconfService(name, port)
        return MagicMock()
| 33.903226 | 74 | 0.744053 |
from unittest.mock import MagicMock
from webservice.search.pybonjour_service import PybonjourService
from webservice.search.zeroconf_service import ZeroconfService
class ZeroconfFactory(object):
@staticmethod
def generate(name, port):
if PybonjourService.has_support():
return PybonjourService(name, port)
elif ZeroconfService.has_support():
return ZeroconfService(name, port)
else:
return MagicMock()
| true | true |
f72164bc7374018f80baa8ffb8176085266dae60 | 397 | py | Python | CodingTest_Study1/week11/ex9095.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | null | null | null | CodingTest_Study1/week11/ex9095.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | null | null | null | CodingTest_Study1/week11/ex9095.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | 2 | 2020-12-27T15:03:46.000Z | 2021-03-06T14:13:34.000Z | # BOJ 1,2,3 더하기 9095
T = int(input()) # 테스트 케이스의 개수 T가 주어짐
sum_list = []
for i in range(T):
n = int(input())
sum_list.append(n)
def oneTwoThreeSum(n):
if n == 1:
return 1
if n == 2:
return 2
if n == 3:
return 4
else:
return oneTwoThreeSum(n-3) + oneTwoThreeSum(n-2) + oneTwoThreeSum(n-1)
for k in sum_list:
print(oneTwoThreeSum(k))
| 18.045455 | 78 | 0.561713 |
T = int(input())
sum_list = []
for i in range(T):
n = int(input())
sum_list.append(n)
def oneTwoThreeSum(n):
if n == 1:
return 1
if n == 2:
return 2
if n == 3:
return 4
else:
return oneTwoThreeSum(n-3) + oneTwoThreeSum(n-2) + oneTwoThreeSum(n-1)
for k in sum_list:
print(oneTwoThreeSum(k))
| true | true |
f7216512710c309d4a2ab0b0e09080660ee5e81b | 1,794 | py | Python | src/features/utils.py | iamhuy/rumour-veracity-verification | e7e7f0c100545c2758584719e9f20f20cb6d0a85 | [
"MIT"
] | null | null | null | src/features/utils.py | iamhuy/rumour-veracity-verification | e7e7f0c100545c2758584719e9f20f20cb6d0a85 | [
"MIT"
] | 7 | 2020-03-24T15:24:51.000Z | 2021-06-01T21:43:16.000Z | src/features/utils.py | iamhuy/rumour-veracity-verification | e7e7f0c100545c2758584719e9f20f20cb6d0a85 | [
"MIT"
] | null | null | null | from dateutil import parser
import preprocessor as p
def timestamp_to_date(timestamp):
    """Parse a Twitter timestamp string into a ``datetime``.

    :param timestamp: timestamp string as found in tweet metadata
    :return: the parsed ``datetime`` instance
    """
    parsed = parser.parse(timestamp)
    return parsed
def day_diff(timestamp1, timestamp2):
    """Whole days elapsed between two timestamps (first minus second).

    :param timestamp1: first timestamp string
    :param timestamp2: second timestamp string
    :return: integer number of days in the difference
    """
    delta = timestamp_to_date(timestamp1) - timestamp_to_date(timestamp2)
    return delta.days
def read_brown_cluster_file(brown_cluster_text_file):
    """
    Read brown cluster text file and save into a dict.

    Each line is expected to be "<cluster_bitstring>\\t<token>[\\t...]".
    Cluster bitstrings are assigned dense integer ids in order of first
    appearance.

    :param brown_cluster_text_file: an open file-like object
    :return: dict mapping each token to its integer cluster id
    """
    brown_cluster_dict = dict()
    cluster_id_dict = dict()
    cluster_count = 0
    for line in brown_cluster_text_file.read().splitlines():
        arr = line.split('\t')
        cluster_str = arr[0]
        token = arr[1]
        # Bug fix: dict.has_key() was removed in Python 3 (AttributeError at
        # runtime); membership is tested with the `in` operator.
        if cluster_str not in cluster_id_dict:
            cluster_id_dict[cluster_str] = cluster_count
            cluster_count += 1
        brown_cluster_dict[token] = cluster_id_dict[cluster_str]
    return brown_cluster_dict
def preprocess_tweet(tweet):
    """
    Clean the tweet before feeding to other functions
    :param tweet: a raw tweet
    :return: tweet with URL, MENTIONS, EMOJI, HASHTAGS removed
    """
    cleaned_tweet = tweet.lower()  # lowercase the tweet
    # Configure the `preprocessor` module to strip URLs, emoji, @mentions
    # and #hashtags in a single cleaning pass.
    p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION, p.OPT.HASHTAG)  # set options for the preprocessor
    # NOTE(review): encode("ascii", "ignore") yields bytes on Python 3, so
    # p.clean() receives bytes here — confirm the preprocessor accepts that.
    cleaned_tweet = p.clean(cleaned_tweet.encode("ascii", "ignore"))
    return cleaned_tweet;
| 29.9 | 107 | 0.696767 | from dateutil import parser
import preprocessor as p
def timestamp_to_date(timestamp):
return parser.parse(timestamp)
def day_diff(timestamp1, timestamp2):
return (timestamp_to_date(timestamp1) - timestamp_to_date(timestamp2)).days
def read_brown_cluster_file(brown_cluster_text_file):
brown_cluster_dict = dict()
cluster_id_dict = dict()
cluster_count = 0
for line in brown_cluster_text_file.read().splitlines():
arr = line.split('\t')
cluster_str = arr[0]
token = arr[1]
if not cluster_id_dict.has_key(cluster_str):
cluster_id_dict[cluster_str] = cluster_count
cluster_count+=1
brown_cluster_dict[token] = cluster_id_dict[cluster_str]
return brown_cluster_dict
def preprocess_tweet(tweet):
cleaned_tweet = tweet.lower()
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION, p.OPT.HASHTAG)
cleaned_tweet = p.clean(cleaned_tweet.encode("ascii", "ignore"))
return cleaned_tweet;
| true | true |
f72165731dd934a6ef471e84e61e6bbeae4d50c9 | 2,651 | py | Python | vtpl_api/models/destination_type.py | vtpl1/videonetics_api | bef179df12f449db0c50c3910daca50b7d40ac49 | [
"RSA-MD"
] | null | null | null | vtpl_api/models/destination_type.py | vtpl1/videonetics_api | bef179df12f449db0c50c3910daca50b7d40ac49 | [
"RSA-MD"
] | 1 | 2021-02-26T07:31:37.000Z | 2021-02-26T07:31:37.000Z | vtpl_api/models/destination_type.py | vtpl1/videonetics_api | bef179df12f449db0c50c3910daca50b7d40ac49 | [
"RSA-MD"
] | 2 | 2020-11-04T02:52:55.000Z | 2020-11-05T08:09:50.000Z | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
OpenAPI spec version: 1.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DestinationType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
NONE = "none"
RTSP = "rtsp"
HTTP = "http"
FILE = "file"
FTP = "ftp"
VMS = "vms"
MQTT = "mqtt"
AMQP = "amqp"
S3 = "S3"
VS3 = "VS3"
BASEURL = "BaseUrl"
RELATIVEURL = "RelativeUrl"
ZEROMQ = "ZeroMQ"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""DestinationType - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DestinationType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DestinationType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.990196 | 80 | 0.536024 |
import pprint
import re
import six
class DestinationType(object):
NONE = "none"
RTSP = "rtsp"
HTTP = "http"
FILE = "file"
FTP = "ftp"
VMS = "vms"
MQTT = "mqtt"
AMQP = "amqp"
S3 = "S3"
VS3 = "VS3"
BASEURL = "BaseUrl"
RELATIVEURL = "RelativeUrl"
ZEROMQ = "ZeroMQ"
swagger_types = {
}
attribute_map = {
}
def __init__(self):
self.discriminator = None
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DestinationType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, DestinationType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f721659832fd95400b106db9d00e562f8df54211 | 183 | py | Python | shopee_crawler/toolkit/__init__.py | ptrkhh/shopee-crawler | 6d85748daa802ad9bb2f42ba56695b31d692f4b4 | [
"MIT"
] | 5 | 2021-09-09T18:32:49.000Z | 2022-01-10T10:31:17.000Z | shopee_crawler/toolkit/__init__.py | ptrkhh/shopee-crawler | 6d85748daa802ad9bb2f42ba56695b31d692f4b4 | [
"MIT"
] | 2 | 2021-09-10T14:28:52.000Z | 2021-09-12T14:57:41.000Z | shopee_crawler/toolkit/__init__.py | ptrkhh/shopee-crawler | 6d85748daa802ad9bb2f42ba56695b31d692f4b4 | [
"MIT"
] | 6 | 2021-09-25T14:03:57.000Z | 2022-03-19T14:44:04.000Z | from .crawl_by_cat_url import crawl_by_cat_url
from .crawl_by_search import crawl_by_search
from .crawl_by_shop_url import crawl_by_shop_url
from .crawl_cat_list import crawl_cat_list | 45.75 | 48 | 0.896175 | from .crawl_by_cat_url import crawl_by_cat_url
from .crawl_by_search import crawl_by_search
from .crawl_by_shop_url import crawl_by_shop_url
from .crawl_cat_list import crawl_cat_list | true | true |
f72166b67f4730956f03af23668fb17b0bfb75ba | 170 | py | Python | old/dronekit-python/dronekit/util.py | sirmammingtonham/droneee | 1c0e1921a902b26958d298f3a0204465bf3e960d | [
"Unlicense"
] | null | null | null | old/dronekit-python/dronekit/util.py | sirmammingtonham/droneee | 1c0e1921a902b26958d298f3a0204465bf3e960d | [
"Unlicense"
] | null | null | null | old/dronekit-python/dronekit/util.py | sirmammingtonham/droneee | 1c0e1921a902b26958d298f3a0204465bf3e960d | [
"Unlicense"
] | null | null | null | from __future__ import print_function
import sys
def errprinter(*args):
logger(*args)
def logger(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
| 14.166667 | 37 | 0.7 | from __future__ import print_function
import sys
def errprinter(*args):
logger(*args)
def logger(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
| true | true |
f72168144f40c3dc94f255559a486ee91e85c71f | 10,646 | py | Python | userbot/plugins/chatinfo.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | 3 | 2020-09-04T09:34:51.000Z | 2020-09-04T09:39:26.000Z | userbot/plugins/chatinfo.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | userbot/plugins/chatinfo.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetFullChatRequest, GetHistoryRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except BaseException:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(
chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[
0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[
0].first_name is not None else "Deleted Account"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and isinstance(
msg_info.messages[0].action,
MessageActionChannelMigrateFrom) and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
str(e)
# this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(
chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(
chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(
chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(
chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(
chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(
chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(
chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(
chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(
chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info,
"megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(
chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info,
"restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info,
"verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(
creator_username) if creator_username else None
# end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None,
# works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
chat_level = int((1 + sqrt(1 + 7 * exp_count / 14)) / 2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"Restricted users: <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(
chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
}) | 46.286957 | 138 | 0.656021 |
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetFullChatRequest, GetHistoryRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except BaseException:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(
chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[
0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[
0].first_name is not None else "Deleted Account"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and isinstance(
msg_info.messages[0].action,
MessageActionChannelMigrateFrom) and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
str(e)
# this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(
chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(
chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(
chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(
chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(
chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(
chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(
chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(
chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(
chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info,
"megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(
chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info,
"restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info,
"verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(
creator_username) if creator_username else None
# end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None,
# works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
chat_level = int((1 + sqrt(1 + 7 * exp_count / 14)) / 2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"Restricted users: <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(
chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
}) | true | true |
f72168324e6096dddf572876cab151217254f430 | 3,592 | py | Python | examples/resume_train_segm.py | dani-lbnl/msdnet | 20f503322524ceb340379448f1778a58bb1f9a18 | [
"MIT"
] | 24 | 2019-08-24T06:42:51.000Z | 2021-10-09T14:27:51.000Z | examples/resume_train_segm.py | dani-lbnl/msdnet | 20f503322524ceb340379448f1778a58bb1f9a18 | [
"MIT"
] | 12 | 2019-07-31T06:56:19.000Z | 2020-12-05T18:08:54.000Z | examples/resume_train_segm.py | dani-lbnl/msdnet | 20f503322524ceb340379448f1778a58bb1f9a18 | [
"MIT"
] | 11 | 2019-09-17T02:39:24.000Z | 2022-03-30T21:28:35.000Z | #-----------------------------------------------------------------------
#Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
#
#Author: Daniel M. Pelt
#Contact: D.M.Pelt@cwi.nl
#Website: http://dmpelt.github.io/msdnet/
#License: MIT
#
#This file is part of MSDNet, a Python implementation of the
#Mixed-Scale Dense Convolutional Neural Network.
#-----------------------------------------------------------------------
"""
Example 09: Resume training a network for segmentation
======================================================
This script resumes an earlier training of a MS-D network for
segmentation (i.e. labeling)
Run generatedata.py first to generate required training data, and
train_segm.py to generate a partially trained network.
"""
# Import code
import msdnet
from pathlib import Path
# Define training data
# First, create lists of input files (noisy) and target files (labels)
flsin = sorted((Path('train') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('train') / 'label').glob('*.tiff'))
# Create list of datapoints (i.e. input/target pairs)
dats = []
for i in range(len(flsin)):
# Create datapoint with file names
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
# Convert datapoint to one-hot, using labels 0, 1, 2, 3, and 4,
# which are the labels given in each label TIFF file.
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
# Augment data by rotating and flipping
d_augm = msdnet.data.RotateAndFlipDataPoint(d_oh)
# Add augmented datapoint to list
dats.append(d_augm)
# Note: The above can also be achieved using a utility function for such 'simple' cases:
# dats = msdnet.utils.load_simple_data('train/noisy/*.tiff', 'train/label/*.tiff', augment=True, labels=[0,1,2,3,4])
# Use image batches of a single image
bprov = msdnet.data.BatchProvider(dats,1)
# Define validation data (not using augmentation)
flsin = sorted((Path('val') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('val') / 'label').glob('*.tiff'))
datsv = []
for i in range(len(flsin)):
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
datsv.append(d_oh)
# Note: The above can also be achieved using a utility function for such 'simple' cases:
# datsv = msdnet.utils.load_simple_data('train/noisy/*.tiff', 'train/label/*.tiff', augment=False, labels=[0,1,2,3,4])
# Load network, training algorithm, and validation object from checkpoint of previous training
n, t, val = msdnet.train.restore_training('segm_params.checkpoint', msdnet.network.SegmentationMSDNet, msdnet.train.AdamAlgorithm, msdnet.validate.MSEValidation, datsv, gpu=True)
# Select loss function
celoss = msdnet.loss.CrossEntropyLoss()
val.loss = celoss
t.loss = celoss
# Log error metrics to console
consolelog = msdnet.loggers.ConsoleLogger()
# Log error metrics to file
filelog = msdnet.loggers.FileLogger('log_segm.txt')
# Log typical, worst, and best images to image files
imagelog = msdnet.loggers.ImageLabelLogger('log_segm', onlyifbetter=True)
# Log typical, worst, and best images to image files
# Output probability map for a single channel (in this case, channel 3)
singlechannellog = msdnet.loggers.ImageLogger('log_segm_singlechannel', chan_out=3, onlyifbetter=True)
# Train network until program is stopped manually
# Network parameters are saved in segm_params.h5
# Validation is run after every len(datsv) (=25)
# training steps.
msdnet.train.train(n, t, val, bprov, 'segm_params_resumed.h5',loggers=[consolelog,filelog,imagelog,singlechannellog], val_every=len(datsv))
| 43.277108 | 178 | 0.700724 |
import msdnet
from pathlib import Path
flsin = sorted((Path('train') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('train') / 'label').glob('*.tiff'))
dats = []
for i in range(len(flsin)):
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
d_augm = msdnet.data.RotateAndFlipDataPoint(d_oh)
dats.append(d_augm)
bprov = msdnet.data.BatchProvider(dats,1)
flsin = sorted((Path('val') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('val') / 'label').glob('*.tiff'))
datsv = []
for i in range(len(flsin)):
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
datsv.append(d_oh)
n, t, val = msdnet.train.restore_training('segm_params.checkpoint', msdnet.network.SegmentationMSDNet, msdnet.train.AdamAlgorithm, msdnet.validate.MSEValidation, datsv, gpu=True)
celoss = msdnet.loss.CrossEntropyLoss()
val.loss = celoss
t.loss = celoss
consolelog = msdnet.loggers.ConsoleLogger()
filelog = msdnet.loggers.FileLogger('log_segm.txt')
imagelog = msdnet.loggers.ImageLabelLogger('log_segm', onlyifbetter=True)
singlechannellog = msdnet.loggers.ImageLogger('log_segm_singlechannel', chan_out=3, onlyifbetter=True)
msdnet.train.train(n, t, val, bprov, 'segm_params_resumed.h5',loggers=[consolelog,filelog,imagelog,singlechannellog], val_every=len(datsv))
| true | true |
f721694c28a049e466ab20f52517ffcffb2f736f | 1,578 | py | Python | github.py | anoadragon453/msc-chatbot | ae8bc4b900df500e4f31b85041de2ebfbedd8dd9 | [
"Apache-2.0"
] | 2 | 2019-10-06T18:13:46.000Z | 2019-12-07T22:02:40.000Z | github.py | anoadragon453/msc-chatbot | ae8bc4b900df500e4f31b85041de2ebfbedd8dd9 | [
"Apache-2.0"
] | null | null | null | github.py | anoadragon453/msc-chatbot | ae8bc4b900df500e4f31b85041de2ebfbedd8dd9 | [
"Apache-2.0"
] | null | null | null | import requests
import json
from errors import BotException
import logging
logger = logging.getLogger(__name__)
class Github(object):
def __init__(self, repo_slug: str):
"""
Args:
repo_slug: The slug (user/repo_name) of the github repository
"""
# TODO: Add support for custom token
self.repo_slug = repo_slug
self.api_base = "https://api.github.com"
def get_info_for_issue_pr(self, num: int) -> dict:
"""Get the metadata of a github issue/PR
Args:
num: The issue/PR number
Returns:
dict[str, str]: Metadata about the issue/PR
Raises:
FileNotFoundError: The issue/PR was not found
"""
# Assume it's a PR. Query github's API
resp = requests.get(self.api_base + f"/repos/{self.repo_slug}/pulls/{num}")
if resp.status_code == 404 or not resp.content:
raise FileNotFoundError
# Load JSON
body = json.loads(resp.content)
if resp.status_code == 403:
# Check if this is a rate limit hit or an invalid token
if "message" in body:
logger.error(f"Rate-limit hit on {resp.url}. Consider using your own Github token.")
raise PermissionError("rate-limit hit")
logger.error(f"Forbidden on contacting {resp.url}. Check your access token.")
raise PermissionError("forbidden")
if resp.status_code != 200:
raise BotException(f"HTTP error ({resp.status_code})")
return body
| 30.941176 | 100 | 0.603295 | import requests
import json
from errors import BotException
import logging
logger = logging.getLogger(__name__)
class Github(object):
def __init__(self, repo_slug: str):
self.repo_slug = repo_slug
self.api_base = "https://api.github.com"
def get_info_for_issue_pr(self, num: int) -> dict:
resp = requests.get(self.api_base + f"/repos/{self.repo_slug}/pulls/{num}")
if resp.status_code == 404 or not resp.content:
raise FileNotFoundError
body = json.loads(resp.content)
if resp.status_code == 403:
if "message" in body:
logger.error(f"Rate-limit hit on {resp.url}. Consider using your own Github token.")
raise PermissionError("rate-limit hit")
logger.error(f"Forbidden on contacting {resp.url}. Check your access token.")
raise PermissionError("forbidden")
if resp.status_code != 200:
raise BotException(f"HTTP error ({resp.status_code})")
return body
| true | true |
f721696ba4b25105e5eb43dca6f3445e9352f0a4 | 265 | py | Python | ctfweb/admin.py | pdogg/ctfmanager | d8f0ac7d7e12d7973b7eb39cd30a0bc81e4cb770 | [
"BSD-3-Clause"
] | 10 | 2015-01-27T23:01:03.000Z | 2016-12-14T01:00:49.000Z | ctfweb/admin.py | pdogg/ctfmanager | d8f0ac7d7e12d7973b7eb39cd30a0bc81e4cb770 | [
"BSD-3-Clause"
] | null | null | null | ctfweb/admin.py | pdogg/ctfmanager | d8f0ac7d7e12d7973b7eb39cd30a0bc81e4cb770 | [
"BSD-3-Clause"
] | 8 | 2015-03-01T16:57:05.000Z | 2022-02-20T03:48:04.000Z | from django.contrib import admin
from ctfweb.models import *
# Expose every CTF model through the Django admin site, in the same order
# as before (Game, Category, Challenge, Hint, Competitor, Solved, RegCodes).
for _model in (Game, Category, Challenge, Hint, Competitor, Solved, RegCodes):
    admin.site.register(_model)
| 24.090909 | 32 | 0.822642 | from django.contrib import admin
from ctfweb.models import *
admin.site.register(Game)
admin.site.register(Category)
admin.site.register(Challenge)
admin.site.register(Hint)
admin.site.register(Competitor)
admin.site.register(Solved)
admin.site.register(RegCodes)
| true | true |
f7216b0dc1766301347181cd7059ad601ead0155 | 11,484 | py | Python | components/app_update/otatool.py | thomasonw/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 6 | 2018-12-28T04:00:22.000Z | 2021-05-17T08:01:41.000Z | components/app_update/otatool.py | Wangrenai/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2019-02-15T06:43:13.000Z | 2019-02-15T06:43:13.000Z | components/app_update/otatool.py | Wangrenai/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2019-05-01T14:00:23.000Z | 2019-05-01T14:00:23.000Z | #!/usr/bin/env python
#
# otatool is used to perform ota-level operations - flashing ota partition
# erasing ota partition and switching ota partition
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import sys
import binascii
import subprocess
import tempfile
import collections
import struct
__version__ = '1.0'
# Paths into the ESP-IDF source tree; requires the IDF_PATH environment
# variable to be set before running.
IDF_COMPONENTS_PATH = os.path.expandvars(os.path.join("$IDF_PATH", "components"))
PARTTOOL_PY = os.path.join(IDF_COMPONENTS_PATH, "partition_table", "parttool.py")
# Size used to locate the two otadata records (each at a multiple of
# SPI_FLASH_SEC_SIZE >> 1 = 0x1000).  NOTE(review): named "sector size" but
# 0x2000 is twice the ESP32 flash sector size of 0x1000 -- confirm intent.
SPI_FLASH_SEC_SIZE = 0x2000
# Global verbosity flag; set from the --quiet command-line option in main().
quiet = False
def status(msg):
    """Print a progress message unless global quiet mode is enabled."""
    if quiet:
        return
    print(msg)
def _invoke_parttool(parttool_args, args, output=False, partition=None):
    """Run parttool.py with common flags derived from the parsed *args*.

    parttool_args -- operation-specific arguments, appended last.
    output        -- when True, capture and return the command's stdout;
                     otherwise just run it and return the exit status.
    partition     -- explicit partition-selection flags; defaults to the
                     data/ota (otadata) partition.
    """
    selector = partition if partition else ["--partition-type", "data", "--partition-subtype", "ota"]
    cmd = [sys.executable, PARTTOOL_PY] + selector

    if quiet:
        cmd.append("-q")

    if args.port != "":
        cmd.extend(["--port", args.port])

    if args.partition_table_file:
        cmd.extend(["--partition-table-file", args.partition_table_file])

    if args.partition_table_offset:
        cmd.extend(["--partition-table-offset", args.partition_table_offset])

    cmd.extend(parttool_args)

    runner = subprocess.check_output if output else subprocess.check_call
    return runner(cmd)
def _get_otadata_contents(args, check=True):
    """Read and return the raw bytes of the ota_data partition.

    When *check* is True, first probe via parttool that an otadata
    partition exists on the target; raises RuntimeError otherwise.
    """
    global quiet
    if check:
        check_args = ["get_partition_info", "--info", "offset", "size"]
        # Temporarily force quiet so the probe's output is not printed;
        # restore the user's setting immediately afterwards.
        quiet = True
        output = _invoke_parttool(check_args, args, True).split(b" ")
        quiet = args.quiet
        if not output:
            raise RuntimeError("No ota_data partition found")
    with tempfile.NamedTemporaryFile() as otadata_file:
        # parttool (a child process) writes the partition contents to this
        # file by name; we then read it back through our own handle.
        # NOTE(review): re-opening a NamedTemporaryFile by name works on
        # POSIX; confirm behaviour on Windows.
        invoke_args = ["read_partition", "--output", otadata_file.name]
        _invoke_parttool(invoke_args, args)
        return otadata_file.read()
def _get_otadata_status(otadata_contents):
    """Parse both otadata records from the raw partition contents.

    The otadata partition holds two redundant records, one per half of the
    partition.  Each record stores a 32-bit OTA sequence number at offset 0
    and its CRC32 at offset 28.

    Fix: values are stored little-endian on flash (ESP32 is a little-endian
    target, and the rest of this tool packs them with native/little-endian
    "I"); the previous big-endian '>I' unpack byte-swapped both fields.

    Returns:
        list of two namedtuples (seq, crc), one per record.
    """
    status = []
    otadata_status = collections.namedtuple("otadata_status", "seq crc")
    for i in range(2):
        # Each record starts at a multiple of half the otadata size.
        start = i * (SPI_FLASH_SEC_SIZE >> 1)
        seq = struct.unpack("<I", otadata_contents[start:start + 4])[0]
        crc = struct.unpack("<I", otadata_contents[start + 28:start + 32])[0]
        status.append(otadata_status(seq, crc))
    return status
def read_otadata(args):
    """Read the otadata partition and print both records' seq/CRC values.

    Fix: removed a stray debug ``print(otadata_info)`` that dumped the raw
    namedtuple list immediately before the formatted table of the same data.
    """
    status("Reading ota_data partition contents...")
    otadata_info = _get_otadata_status(_get_otadata_contents(args))
    # Two records are kept side by side; the valid one with the higher
    # sequence number selects the booted app partition.
    print("\t\t{:11}\t{:8s}|\t{:8s}\t{:8s}".format("OTA_SEQ", "CRC", "OTA_SEQ", "CRC"))
    print("Firmware: 0x{:8x} \t 0x{:8x} |\t0x{:8x} \t 0x{:8x}".format(otadata_info[0].seq, otadata_info[0].crc,
                                                                      otadata_info[1].seq, otadata_info[1].crc))
def erase_otadata(args):
    """Erase the whole ota_data partition, resetting OTA boot selection."""
    status("Erasing ota_data partition contents...")
    _invoke_parttool(["erase_partition"], args)
    status("Erased ota_data partition contents")
def switch_otadata(args):
    """Make the ota app partition selected by --name/--slot boot next.

    Computes the next OTA sequence number that maps to the target partition
    and writes it (plus its CRC) into the otadata record NOT used as the
    computation base.
    """
    sys.path.append(os.path.join(IDF_COMPONENTS_PATH, "partition_table"))
    import gen_esp32part as gen

    def is_otadata_status_valid(status):
        # A record is valid when its sequence number is not the erased-flash
        # value and its stored CRC matches a CRC32 (seed 0xFFFFFFFF) of the
        # packed sequence number.
        seq = status.seq % (1 << 32)
        crc = binascii.crc32(struct.pack("I", seq), 0xFFFFFFFF) % (1 << 32)
        # Fix: the computed CRC was previously wrapped in hex(), producing a
        # string that could never equal the stored integer CRC, so this
        # check always failed.
        return seq < 0xFFFFFFFF and status.crc == crc

    status("Looking for ota app partitions...")

    # In order to get the number of ota app partitions, we need the partition table
    partition_table = None
    with tempfile.NamedTemporaryFile() as partition_table_file:
        invoke_args = ["get_partition_info", "--table", partition_table_file.name]
        _invoke_parttool(invoke_args, args)
        partition_table = partition_table_file.read()
        partition_table = gen.PartitionTable.from_binary(partition_table)

    # Collect consecutively numbered ota app partitions (ota_0, ota_1, ...);
    # stop at the first gap.
    ota_partitions = list()
    for i in range(gen.NUM_PARTITION_SUBTYPE_APP_OTA):
        ota_partition = filter(lambda p: p.subtype == (gen.MIN_PARTITION_SUBTYPE_APP_OTA + i), partition_table)
        try:
            ota_partitions.append(list(ota_partition)[0])
        except IndexError:
            break

    ota_partitions = sorted(ota_partitions, key=lambda p: p.subtype)
    if not ota_partitions:
        raise RuntimeError("No ota app partitions found")

    status("Verifying partition to switch to exists...")
    # Look for the app partition to switch to
    ota_partition_next = None
    try:
        if args.name:
            ota_partition_next = filter(lambda p: p.name == args.name, ota_partitions)
        else:
            ota_partition_next = filter(lambda p: p.subtype - gen.MIN_PARTITION_SUBTYPE_APP_OTA == args.slot, ota_partitions)
        ota_partition_next = list(ota_partition_next)[0]
    except IndexError:
        raise RuntimeError("Partition to switch to not found")

    otadata_contents = _get_otadata_contents(args)
    otadata_status = _get_otadata_status(otadata_contents)

    # Find the copy to base the computation for ota sequence number on
    otadata_compute_base = -1

    # Both are valid, take the max as computation base
    if is_otadata_status_valid(otadata_status[0]) and is_otadata_status_valid(otadata_status[1]):
        if otadata_status[0].seq >= otadata_status[1].seq:
            otadata_compute_base = 0
        else:
            otadata_compute_base = 1
    # Only one copy is valid, use that
    elif is_otadata_status_valid(otadata_status[0]):
        otadata_compute_base = 0
    elif is_otadata_status_valid(otadata_status[1]):
        otadata_compute_base = 1
    # Both are invalid (could be initial state - all 0xFF's); keep -1 so the
    # target sequence is used directly below.
    else:
        pass

    ota_seq_next = 0
    ota_partitions_num = len(ota_partitions)

    # Sequence value that, modulo the partition count, selects the target slot.
    target_seq = (ota_partition_next.subtype & 0x0F) + 1

    # Find the smallest sequence number >= the current base sequence that
    # still maps to the target partition.
    if otadata_compute_base == 0 or otadata_compute_base == 1:
        base_seq = otadata_status[otadata_compute_base].seq % (1 << 32)

        i = 0
        while base_seq > target_seq % ota_partitions_num + i * ota_partitions_num:
            i += 1

        ota_seq_next = target_seq % ota_partitions_num + i * ota_partitions_num
    else:
        ota_seq_next = target_seq

    # Create binary data from computed values
    ota_seq_next = struct.pack("I", ota_seq_next)
    ota_seq_crc_next = binascii.crc32(ota_seq_next, 0xFFFFFFFF) % (1 << 32)
    ota_seq_crc_next = struct.pack("I", ota_seq_crc_next)

    with tempfile.NamedTemporaryFile() as otadata_next_file:
        # Write into the record that was NOT the computation base.
        start = (1 if otadata_compute_base == 0 else 0) * (SPI_FLASH_SEC_SIZE >> 1)

        otadata_next_file.write(otadata_contents)

        otadata_next_file.seek(start)
        otadata_next_file.write(ota_seq_next)

        otadata_next_file.seek(start + 28)
        otadata_next_file.write(ota_seq_crc_next)

        otadata_next_file.flush()

        _invoke_parttool(["write_partition", "--input", otadata_next_file.name], args)

    status("Updated ota_data partition")
def _get_partition_specifier(args):
if args.name:
return ["--partition-name", args.name]
else:
return ["--partition-type", "app", "--partition-subtype", "ota_" + str(args.slot)]
def read_ota_partition(args):
    """Dump the selected ota app partition into the file named by --output."""
    spec = _get_partition_specifier(args)
    _invoke_parttool(["read_partition", "--output", args.output], args, partition=spec)
    status("Read ota partition contents to file {}".format(args.output))
def write_ota_partition(args):
    """Flash the file named by --input into the selected ota app partition."""
    spec = _get_partition_specifier(args)
    _invoke_parttool(["write_partition", "--input", args.input], args, partition=spec)
    status("Written contents of file {} to ota partition".format(args.input))
def erase_ota_partition(args):
    """Erase the contents of the selected ota app partition."""
    _invoke_parttool(["erase_partition"], args,
                     partition=_get_partition_specifier(args))
    status("Erased contents of ota partition")
def main():
    """Command-line entry point: parse arguments and dispatch to the
    module-level function whose name matches the chosen subcommand."""
    global quiet

    parser = argparse.ArgumentParser("ESP-IDF OTA Partitions Tool")
    parser.add_argument("--quiet", "-q", help="suppress stderr messages", action="store_true")

    # There are two possible sources for the partition table: a device attached to the host
    # or a partition table CSV/binary file. These sources are mutually exclusive.
    partition_table_info_source_args = parser.add_mutually_exclusive_group()
    partition_table_info_source_args.add_argument("--port", "-p", help="port where the device to read the partition table from is attached", default="")
    partition_table_info_source_args.add_argument("--partition-table-file", "-f", help="file (CSV/binary) to read the partition table from", default="")

    parser.add_argument("--partition-table-offset", "-o", help="offset to read the partition table from", default="0x8000")

    subparsers = parser.add_subparsers(dest="operation", help="run otatool -h for additional help")

    # Specify the supported operations
    subparsers.add_parser("read_otadata", help="read otadata partition")
    subparsers.add_parser("erase_otadata", help="erase otadata partition")

    # Shared --slot/--name option group, reused by every per-partition command.
    slot_or_name_parser = argparse.ArgumentParser(add_help=False)
    slot_or_name_parser_args = slot_or_name_parser.add_mutually_exclusive_group()
    slot_or_name_parser_args.add_argument("--slot", help="slot number of the ota partition", type=int)
    slot_or_name_parser_args.add_argument("--name", help="name of the ota partition")

    subparsers.add_parser("switch_otadata", help="switch otadata partition", parents=[slot_or_name_parser])

    read_ota_partition_subparser = subparsers.add_parser("read_ota_partition", help="read contents of an ota partition", parents=[slot_or_name_parser])
    read_ota_partition_subparser.add_argument("--output", help="file to write the contents of the ota partition to")

    write_ota_partition_subparser = subparsers.add_parser("write_ota_partition", help="write contents to an ota partition", parents=[slot_or_name_parser])
    write_ota_partition_subparser.add_argument("--input", help="file whose contents to write to the ota partition")

    subparsers.add_parser("erase_ota_partition", help="erase contents of an ota partition", parents=[slot_or_name_parser])

    args = parser.parse_args()
    quiet = args.quiet

    # No operation specified, display help and exit
    if args.operation is None:
        if not quiet:
            parser.print_help()
        sys.exit(1)

    # Else execute the operation
    # (subcommand names deliberately match module-level function names)
    operation_func = globals()[args.operation]

    if quiet:
        # If exceptions occur, suppress and exit quietly
        try:
            operation_func(args)
        except Exception:
            sys.exit(2)
    else:
        operation_func(args)


if __name__ == '__main__':
    main()
| 35.012195 | 154 | 0.698015 |
function, division
import argparse
import os
import sys
import binascii
import subprocess
import tempfile
import collections
import struct
__version__ = '1.0'
IDF_COMPONENTS_PATH = os.path.expandvars(os.path.join("$IDF_PATH", "components"))
PARTTOOL_PY = os.path.join(IDF_COMPONENTS_PATH, "partition_table", "parttool.py")
SPI_FLASH_SEC_SIZE = 0x2000
quiet = False
def status(msg):
if not quiet:
print(msg)
def _invoke_parttool(parttool_args, args, output=False, partition=None):
invoke_args = []
if partition:
invoke_args += [sys.executable, PARTTOOL_PY] + partition
else:
invoke_args += [sys.executable, PARTTOOL_PY, "--partition-type", "data", "--partition-subtype", "ota"]
if quiet:
invoke_args += ["-q"]
if args.port != "":
invoke_args += ["--port", args.port]
if args.partition_table_file:
invoke_args += ["--partition-table-file", args.partition_table_file]
if args.partition_table_offset:
invoke_args += ["--partition-table-offset", args.partition_table_offset]
invoke_args += parttool_args
if output:
return subprocess.check_output(invoke_args)
else:
return subprocess.check_call(invoke_args)
def _get_otadata_contents(args, check=True):
global quiet
if check:
check_args = ["get_partition_info", "--info", "offset", "size"]
quiet = True
output = _invoke_parttool(check_args, args, True).split(b" ")
quiet = args.quiet
if not output:
raise RuntimeError("No ota_data partition found")
with tempfile.NamedTemporaryFile() as otadata_file:
invoke_args = ["read_partition", "--output", otadata_file.name]
_invoke_parttool(invoke_args, args)
return otadata_file.read()
def _get_otadata_status(otadata_contents):
status = []
otadata_status = collections.namedtuple("otadata_status", "seq crc")
for i in range(2):
start = i * (SPI_FLASH_SEC_SIZE >> 1)
seq = bytearray(otadata_contents[start:start + 4])
crc = bytearray(otadata_contents[start + 28:start + 32])
seq = struct.unpack('>I', seq)
crc = struct.unpack('>I', crc)
status.append(otadata_status(seq[0], crc[0]))
return status
def read_otadata(args):
status("Reading ota_data partition contents...")
otadata_info = _get_otadata_contents(args)
otadata_info = _get_otadata_status(otadata_info)
print(otadata_info)
print("\t\t{:11}\t{:8s}|\t{:8s}\t{:8s}".format("OTA_SEQ", "CRC", "OTA_SEQ", "CRC"))
print("Firmware: 0x{:8x} \t 0x{:8x} |\t0x{:8x} \t 0x{:8x}".format(otadata_info[0].seq, otadata_info[0].crc,
otadata_info[1].seq, otadata_info[1].crc))
def erase_otadata(args):
status("Erasing ota_data partition contents...")
_invoke_parttool(["erase_partition"], args)
status("Erased ota_data partition contents")
def switch_otadata(args):
sys.path.append(os.path.join(IDF_COMPONENTS_PATH, "partition_table"))
import gen_esp32part as gen
def is_otadata_status_valid(status):
seq = status.seq % (1 << 32)
crc = hex(binascii.crc32(struct.pack("I", seq), 0xFFFFFFFF) % (1 << 32))
return seq < (int('0xFFFFFFFF', 16) % (1 << 32)) and status.crc == crc
status("Looking for ota app partitions...")
partition_table = None
with tempfile.NamedTemporaryFile() as partition_table_file:
invoke_args = ["get_partition_info", "--table", partition_table_file.name]
_invoke_parttool(invoke_args, args)
partition_table = partition_table_file.read()
partition_table = gen.PartitionTable.from_binary(partition_table)
ota_partitions = list()
for i in range(gen.NUM_PARTITION_SUBTYPE_APP_OTA):
ota_partition = filter(lambda p: p.subtype == (gen.MIN_PARTITION_SUBTYPE_APP_OTA + i), partition_table)
try:
ota_partitions.append(list(ota_partition)[0])
except IndexError:
break
ota_partitions = sorted(ota_partitions, key=lambda p: p.subtype)
if not ota_partitions:
raise RuntimeError("No ota app partitions found")
status("Verifying partition to switch to exists...")
ota_partition_next = None
try:
if args.name:
ota_partition_next = filter(lambda p: p.name == args.name, ota_partitions)
else:
ota_partition_next = filter(lambda p: p.subtype - gen.MIN_PARTITION_SUBTYPE_APP_OTA == args.slot, ota_partitions)
ota_partition_next = list(ota_partition_next)[0]
except IndexError:
raise RuntimeError("Partition to switch to not found")
otadata_contents = _get_otadata_contents(args)
otadata_status = _get_otadata_status(otadata_contents)
otadata_compute_base = -1
if is_otadata_status_valid(otadata_status[0]) and is_otadata_status_valid(otadata_status[1]):
if otadata_status[0].seq >= otadata_status[1].seq:
otadata_compute_base = 0
else:
otadata_compute_base = 1
elif is_otadata_status_valid(otadata_status[0]):
otadata_compute_base = 0
elif is_otadata_status_valid(otadata_status[1]):
otadata_compute_base = 1
else:
pass
ota_seq_next = 0
ota_partitions_num = len(ota_partitions)
target_seq = (ota_partition_next.subtype & 0x0F) + 1
# Find the next ota sequence number
if otadata_compute_base == 0 or otadata_compute_base == 1:
base_seq = otadata_status[otadata_compute_base].seq % (1 << 32)
i = 0
while base_seq > target_seq % ota_partitions_num + i * ota_partitions_num:
i += 1
ota_seq_next = target_seq % ota_partitions_num + i * ota_partitions_num
else:
ota_seq_next = target_seq
# Create binary data from computed values
ota_seq_next = struct.pack("I", ota_seq_next)
ota_seq_crc_next = binascii.crc32(ota_seq_next, 0xFFFFFFFF) % (1 << 32)
ota_seq_crc_next = struct.pack("I", ota_seq_crc_next)
with tempfile.NamedTemporaryFile() as otadata_next_file:
start = (1 if otadata_compute_base == 0 else 0) * (SPI_FLASH_SEC_SIZE >> 1)
otadata_next_file.write(otadata_contents)
otadata_next_file.seek(start)
otadata_next_file.write(ota_seq_next)
otadata_next_file.seek(start + 28)
otadata_next_file.write(ota_seq_crc_next)
otadata_next_file.flush()
_invoke_parttool(["write_partition", "--input", otadata_next_file.name], args)
status("Updated ota_data partition")
def _get_partition_specifier(args):
if args.name:
return ["--partition-name", args.name]
else:
return ["--partition-type", "app", "--partition-subtype", "ota_" + str(args.slot)]
def read_ota_partition(args):
invoke_args = ["read_partition", "--output", args.output]
_invoke_parttool(invoke_args, args, partition=_get_partition_specifier(args))
status("Read ota partition contents to file {}".format(args.output))
def write_ota_partition(args):
invoke_args = ["write_partition", "--input", args.input]
_invoke_parttool(invoke_args, args, partition=_get_partition_specifier(args))
status("Written contents of file {} to ota partition".format(args.input))
def erase_ota_partition(args):
invoke_args = ["erase_partition"]
_invoke_parttool(invoke_args, args, partition=_get_partition_specifier(args))
status("Erased contents of ota partition")
def main():
global quiet
parser = argparse.ArgumentParser("ESP-IDF OTA Partitions Tool")
parser.add_argument("--quiet", "-q", help="suppress stderr messages", action="store_true")
# There are two possible sources for the partition table: a device attached to the host
# or a partition table CSV/binary file. These sources are mutually exclusive.
partition_table_info_source_args = parser.add_mutually_exclusive_group()
partition_table_info_source_args.add_argument("--port", "-p", help="port where the device to read the partition table from is attached", default="")
partition_table_info_source_args.add_argument("--partition-table-file", "-f", help="file (CSV/binary) to read the partition table from", default="")
parser.add_argument("--partition-table-offset", "-o", help="offset to read the partition table from", default="0x8000")
subparsers = parser.add_subparsers(dest="operation", help="run otatool -h for additional help")
# Specify the supported operations
subparsers.add_parser("read_otadata", help="read otadata partition")
subparsers.add_parser("erase_otadata", help="erase otadata partition")
slot_or_name_parser = argparse.ArgumentParser(add_help=False)
slot_or_name_parser_args = slot_or_name_parser.add_mutually_exclusive_group()
slot_or_name_parser_args.add_argument("--slot", help="slot number of the ota partition", type=int)
slot_or_name_parser_args.add_argument("--name", help="name of the ota partition")
subparsers.add_parser("switch_otadata", help="switch otadata partition", parents=[slot_or_name_parser])
read_ota_partition_subparser = subparsers.add_parser("read_ota_partition", help="read contents of an ota partition", parents=[slot_or_name_parser])
read_ota_partition_subparser.add_argument("--output", help="file to write the contents of the ota partition to")
write_ota_partition_subparser = subparsers.add_parser("write_ota_partition", help="write contents to an ota partition", parents=[slot_or_name_parser])
write_ota_partition_subparser.add_argument("--input", help="file whose contents to write to the ota partition")
subparsers.add_parser("erase_ota_partition", help="erase contents of an ota partition", parents=[slot_or_name_parser])
args = parser.parse_args()
quiet = args.quiet
# No operation specified, display help and exit
if args.operation is None:
if not quiet:
parser.print_help()
sys.exit(1)
# Else execute the operation
operation_func = globals()[args.operation]
if quiet:
# If exceptions occur, suppress and exit quietly
try:
operation_func(args)
except Exception:
sys.exit(2)
else:
operation_func(args)
if __name__ == '__main__':
main()
| true | true |
f7216b59c00f5f5b82ec9c7b9bf5292699ace5fe | 82 | py | Python | akshare/fx/__init__.py | ghmole/akshare | eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6 | [
"MIT"
] | 12 | 2020-12-30T02:50:01.000Z | 2021-11-08T11:32:51.000Z | akshare/fx/__init__.py | ghmole/akshare | eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6 | [
"MIT"
] | 3 | 2021-01-26T09:31:43.000Z | 2021-12-08T08:31:54.000Z | akshare/fx/__init__.py | ghmole/akshare | eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6 | [
"MIT"
] | 13 | 2020-07-08T08:48:33.000Z | 2022-03-23T08:37:11.000Z | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/20 10:57
Desc:
"""
| 11.714286 | 22 | 0.54878 | true | true | |
f7216c4cb45aea88f34bb4f84f11c15334366e5e | 6,588 | py | Python | tools/Polygraphy/tests/comparator/test_comparator.py | SsisyphusTao/TensorRT | 69f5a5093a39184e137a55c908d5c4d1340b009a | [
"Apache-2.0"
] | 5,249 | 2019-06-17T17:20:34.000Z | 2022-03-31T17:56:05.000Z | tools/Polygraphy/tests/comparator/test_comparator.py | SsisyphusTao/TensorRT | 69f5a5093a39184e137a55c908d5c4d1340b009a | [
"Apache-2.0"
] | 1,721 | 2019-06-17T18:13:29.000Z | 2022-03-31T16:09:53.000Z | tools/Polygraphy/tests/comparator/test_comparator.py | SsisyphusTao/TensorRT | 69f5a5093a39184e137a55c908d5c4d1340b009a | [
"Apache-2.0"
] | 1,414 | 2019-06-18T04:01:17.000Z | 2022-03-31T09:16:53.000Z | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess as sp
import numpy as np
import pytest
import tensorrt as trt
from polygraphy.backend.onnx import BytesFromOnnx, OnnxFromTfGraph, GsFromOnnx
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.pluginref import PluginRefRunner
from polygraphy.backend.tf import SessionFromGraph, TfRunner
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxBytes, TrtRunner
from polygraphy.exception import PolygraphyException
from polygraphy.comparator import Comparator, CompareFunc, DataLoader, IterationResult, PostprocessFunc, RunResults
from polygraphy import mod
from tests.models.meta import ONNX_MODELS, TF_MODELS
class TestComparator(object):
    """Integration tests for polygraphy.comparator.Comparator across the
    TensorFlow, ONNX-Runtime, PluginRef and TensorRT backends."""

    def test_warmup_runs(self):
        """Warm-up iterations must not be counted in the recorded results."""
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader))
        run_results = Comparator.run([runner], warm_up=2)
        assert len(run_results[runner.name]) == 1

    def test_list_as_data_loader(self):
        """A plain list of feed_dicts works as a data loader (one iteration each)."""
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
        data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
        run_results = Comparator.run([runner], data_loader=data)
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        # Identity model: output "y" must equal input "x" for every iteration.
        for actual, expected in zip(iter_results, data):
            assert np.all(actual["y"] == expected["x"])

    def test_generator_as_data_loader(self):
        """A generator of feed_dicts works as a data loader too."""
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")

        def data():
            for feed_dict in [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2:
                yield feed_dict

        run_results = Comparator.run([runner], data_loader=data())
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        for actual, expected in zip(iter_results, data()):
            assert np.all(actual["y"] == expected["x"])

    def test_multiple_runners(self):
        """All four backends must agree on the identity model's outputs."""
        load_tf = TF_MODELS["identity"].loader
        build_tf_session = SessionFromGraph(load_tf)
        onnx_model = OnnxFromTfGraph(load_tf)
        load_serialized_onnx = BytesFromOnnx(onnx_model)
        build_onnxrt_session = SessionFromOnnx(load_serialized_onnx)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))
        gs_graph = GsFromOnnx(onnx_model)

        runners = [
            TfRunner(build_tf_session),
            OnnxrtRunner(build_onnxrt_session),
            PluginRefRunner(gs_graph),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        # Shape checks are only reliable with TRT >= 7.0.
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations

    def test_postprocess(self):
        """Top-k postprocessing reduces the last axis to k elements."""
        onnx_loader = ONNX_MODELS["identity"].loader
        run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx(onnx_loader))], use_subprocess=True)
        # Output shape is (1, 1, 2, 2)
        postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
        for _, results in postprocessed.items():
            for result in results:
                for _, output in result.items():
                    assert output.shape == (1, 1, 2, 1)

    def test_errors_do_not_hang(self):
        """A broken runner in a subprocess must raise, not deadlock."""
        # Should error because interface is not implemented correctly.
        class FakeRunner(object):
            def __init__(self):
                self.name = "fake"

        runners = [FakeRunner()]
        with pytest.raises(PolygraphyException):
            Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)

    def test_segfault_does_not_hang(self):
        """A simulated segfault (CalledProcessError -11) must raise, not hang."""
        def raise_called_process_error():
            class FakeSegfault(sp.CalledProcessError):
                pass

            raise FakeSegfault(-11, ["simulate", "segfault"])

        runners = [TrtRunner(EngineFromNetwork(raise_called_process_error))]
        with pytest.raises(PolygraphyException):
            Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)

    def test_multirun_outputs_are_different(self):
        """The default DataLoader must feed different data each iteration."""
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(onnx_loader)))
        run_results = Comparator.run([runner], data_loader=DataLoader(iterations=2))

        iteration0 = run_results[runner.name][0]
        iteration1 = run_results[runner.name][1]
        for name in iteration0.keys():
            assert np.any(iteration0[name] != iteration1[name])

    def test_validate_nan(self):
        """validate() must reject outputs containing NaN by default."""
        run_results = RunResults()
        run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.nan)})]
        assert not Comparator.validate(run_results)

    def test_validate_inf(self):
        """validate() must reject Inf outputs when check_inf is enabled."""
        run_results = RunResults()
        run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.inf)})]
        assert not Comparator.validate(run_results, check_inf=True)

    def test_dim_param_trt_onnxrt(self):
        """Models with symbolic (dim_param) shapes must compare cleanly
        between ONNX-Runtime and TensorRT."""
        load_onnx_bytes = ONNX_MODELS["dim_param"].loader
        build_onnxrt_session = SessionFromOnnx(load_onnx_bytes)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_onnx_bytes))

        runners = [
            OnnxrtRunner(build_onnxrt_session),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations
| 43.92 | 117 | 0.696266 |
import subprocess as sp
import numpy as np
import pytest
import tensorrt as trt
from polygraphy.backend.onnx import BytesFromOnnx, OnnxFromTfGraph, GsFromOnnx
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.pluginref import PluginRefRunner
from polygraphy.backend.tf import SessionFromGraph, TfRunner
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxBytes, TrtRunner
from polygraphy.exception import PolygraphyException
from polygraphy.comparator import Comparator, CompareFunc, DataLoader, IterationResult, PostprocessFunc, RunResults
from polygraphy import mod
from tests.models.meta import ONNX_MODELS, TF_MODELS
class TestComparator(object):
def test_warmup_runs(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader))
run_results = Comparator.run([runner], warm_up=2)
assert len(run_results[runner.name]) == 1
def test_list_as_data_loader(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
run_results = Comparator.run([runner], data_loader=data)
iter_results = run_results["onnx_runner"]
assert len(iter_results) == 2
for actual, expected in zip(iter_results, data):
assert np.all(actual["y"] == expected["x"])
def test_generator_as_data_loader(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
def data():
for feed_dict in [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2:
yield feed_dict
run_results = Comparator.run([runner], data_loader=data())
iter_results = run_results["onnx_runner"]
assert len(iter_results) == 2
for actual, expected in zip(iter_results, data()):
assert np.all(actual["y"] == expected["x"])
def test_multiple_runners(self):
load_tf = TF_MODELS["identity"].loader
build_tf_session = SessionFromGraph(load_tf)
onnx_model = OnnxFromTfGraph(load_tf)
load_serialized_onnx = BytesFromOnnx(onnx_model)
build_onnxrt_session = SessionFromOnnx(load_serialized_onnx)
load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))
gs_graph = GsFromOnnx(onnx_model)
runners = [
TfRunner(build_tf_session),
OnnxrtRunner(build_onnxrt_session),
PluginRefRunner(gs_graph),
TrtRunner(load_engine),
]
run_results = Comparator.run(runners)
compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
assert len(list(run_results.values())[0]) == 1
def test_postprocess(self):
onnx_loader = ONNX_MODELS["identity"].loader
run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx(onnx_loader))], use_subprocess=True)
postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
for _, results in postprocessed.items():
for result in results:
for _, output in result.items():
assert output.shape == (1, 1, 2, 1)
def test_errors_do_not_hang(self):
    """A runner lacking the runner interface must fail fast, not deadlock the subprocess."""
    class BrokenRunner(object):
        def __init__(self):
            self.name = "fake"

    with pytest.raises(PolygraphyException):
        Comparator.run([BrokenRunner()], use_subprocess=True,
                       subprocess_polling_interval=1)
def test_segfault_does_not_hang(self):
    """A simulated segfault inside the subprocess must surface as an exception."""
    def simulate_segfault():
        class FakeSegfault(sp.CalledProcessError):
            pass
        # Return code -11 mimics a process killed by SIGSEGV.
        raise FakeSegfault(-11, ["simulate", "segfault"])

    with pytest.raises(PolygraphyException):
        Comparator.run([TrtRunner(EngineFromNetwork(simulate_segfault))],
                       use_subprocess=True, subprocess_polling_interval=1)
def test_multirun_outputs_are_different(self):
    """Each iteration should be fed fresh (random) input data, so outputs differ."""
    loader = ONNX_MODELS["identity"].loader
    runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(loader)))
    results = Comparator.run([runner], data_loader=DataLoader(iterations=2))
    first = results[runner.name][0]
    second = results[runner.name][1]
    for name in first.keys():
        assert np.any(first[name] != second[name])
def test_validate_nan(self):
    """Comparator.validate must reject results that contain NaN values."""
    results = RunResults()
    results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.nan)})]
    assert not Comparator.validate(results)
def test_validate_inf(self):
    """With check_inf=True, Comparator.validate must reject infinite values."""
    results = RunResults()
    results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.inf)})]
    assert not Comparator.validate(results, check_inf=True)
def test_dim_param_trt_onnxrt(self):
    """TRT and ONNX-Runtime should agree on a model with dynamic (dim_param) shapes."""
    onnx_bytes = ONNX_MODELS["dim_param"].loader
    runners = [
        OnnxrtRunner(SessionFromOnnx(onnx_bytes)),
        TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(onnx_bytes))),
    ]
    results = Comparator.run(runners)
    # Shape checking is only reliable on TensorRT >= 7.0.
    check_shapes = mod.version(trt.__version__) >= mod.version("7.0")
    compare_func = CompareFunc.simple(check_shapes=check_shapes)
    assert bool(Comparator.compare_accuracy(results, compare_func=compare_func))
    assert len(list(results.values())[0]) == 1
| true | true |
f7216d1ac89a7301575efb5070db47b073f062f7 | 1,614 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/sub_protection_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/sub_protection_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/sub_protection_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubProtectionPolicy(Model):
    """Sub-protection policy which includes schedule and retention.

    NOTE: AutoRest-generated model — manual edits are lost on regeneration.

    :param policy_type: Type of backup policy type
    :type policy_type: str
    :param schedule_policy: Backup schedule specified as part of backup
     policy.
    :type schedule_policy:
     ~azure.mgmt.recoveryservicesbackup.models.SchedulePolicy
    :param retention_policy: Retention policy with the details on backup copy
     retention ranges.
    :type retention_policy:
     ~azure.mgmt.recoveryservicesbackup.models.RetentionPolicy
    """

    # Maps attribute names to their wire (JSON) keys and msrest type names
    # for serialization/deserialization.
    _attribute_map = {
        'policy_type': {'key': 'policyType', 'type': 'str'},
        'schedule_policy': {'key': 'schedulePolicy', 'type': 'SchedulePolicy'},
        'retention_policy': {'key': 'retentionPolicy', 'type': 'RetentionPolicy'},
    }

    def __init__(self, **kwargs):
        super(SubProtectionPolicy, self).__init__(**kwargs)
        # All attributes are optional; missing kwargs default to None.
        self.policy_type = kwargs.get('policy_type', None)
        self.schedule_policy = kwargs.get('schedule_policy', None)
        self.retention_policy = kwargs.get('retention_policy', None)
| 39.365854 | 82 | 0.64746 |
from msrest.serialization import Model
class SubProtectionPolicy(Model):
_attribute_map = {
'policy_type': {'key': 'policyType', 'type': 'str'},
'schedule_policy': {'key': 'schedulePolicy', 'type': 'SchedulePolicy'},
'retention_policy': {'key': 'retentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(self, **kwargs):
super(SubProtectionPolicy, self).__init__(**kwargs)
self.policy_type = kwargs.get('policy_type', None)
self.schedule_policy = kwargs.get('schedule_policy', None)
self.retention_policy = kwargs.get('retention_policy', None)
| true | true |
f7216ef57361718e2a601232dbdfdcdcad313aad | 640 | py | Python | backend/colaboradores/schema.py | leonunesbs/medico | 384796f346b001d028e1bec2676ae7242749a79a | [
"MIT"
] | 1 | 2021-12-26T03:27:26.000Z | 2021-12-26T03:27:26.000Z | backend/colaboradores/schema.py | leonunesbs/medico | 384796f346b001d028e1bec2676ae7242749a79a | [
"MIT"
] | 6 | 2021-09-01T19:52:46.000Z | 2022-02-15T20:48:27.000Z | backend/colaboradores/schema.py | leonunesbs/medico | 384796f346b001d028e1bec2676ae7242749a79a | [
"MIT"
] | null | null | null | from graphene import relay, ObjectType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Colaborador
class ColaboradorNode(DjangoObjectType):
    """Relay node type exposing the Colaborador model over GraphQL."""

    class Meta:
        model = Colaborador
        # Expose every model field as a filterable connection argument.
        filter_fields = '__all__'
        interfaces = (relay.Node, )

    def resolve_id(self, info):
        # Pure pass-through to the default Relay global-ID resolver.
        return super().resolve_id(info)
class Query(ObjectType):
    """Root query: a single Colaborador by global ID, or a filtered list."""
    colaborador = relay.Node.Field(ColaboradorNode)
    all_colaboradores = DjangoFilterConnectionField(ColaboradorNode)
class Mutation(ObjectType):
    """Root mutation type — placeholder, no operations defined yet."""
    pass
class Subscription(ObjectType):
    """Root subscription type — placeholder, no operations defined yet."""
    pass
| 22.068966 | 68 | 0.753125 | from graphene import relay, ObjectType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Colaborador
class ColaboradorNode(DjangoObjectType):
class Meta:
model = Colaborador
filter_fields = '__all__'
interfaces = (relay.Node, )
def resolve_id(self, info):
return super().resolve_id(info)
class Query(ObjectType):
colaborador = relay.Node.Field(ColaboradorNode)
all_colaboradores = DjangoFilterConnectionField(ColaboradorNode)
class Mutation(ObjectType):
pass
class Subscription(ObjectType):
pass
| true | true |
f721704148332e77abcaafead1bc2fa7b96d4007 | 1,233 | py | Python | src/server/services/mp/settings/save.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | null | null | null | src/server/services/mp/settings/save.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | 5 | 2021-01-28T21:18:27.000Z | 2022-03-25T19:10:01.000Z | src/server/services/mp/settings/save.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs.exception import Error
from server.domain.models import WechatshopUser
import re
import time
import base64
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
    """保存用户信息 — validate and persist a user's name and mobile number.

    Reads ``name``, ``mobile`` and ``userId`` from the request body,
    validates the mobile number, and updates the matching WechatshopUser
    row. Raises :class:`Error` with a Chinese user-facing message on any
    validation failure.
    """

    def dispatch(self, session):
        req_body = self.context.request.body
        resp_body = self.context.response.body
        name = req_body.name
        mobile = req_body.mobile
        user_id = req_body.userId
        # Check presence first so len()/regex never operate on None/empty.
        if not mobile or not name:
            raise Error(-1, '手机或名字不可为空')
        if len(mobile) < 11:
            raise Error(-1, '长度不对')
        # BUG FIX: the original character classes [5|7] / [3|6|7] also
        # matched a literal '|'; and re.search() was given int(mobile),
        # which raises TypeError (regex targets must be str/bytes).
        mobile_re = re.compile(r'^(13\d|14[57]|15\d|166|17[367]|18\d)\d{8}$')
        res = re.search(mobile_re, str(mobile))
        if not res:
            raise Error(-1, '请输入正确手机号')
        data = {
            'name': name,
            'mobile': mobile,
            'name_mobile': 1
        }
        session.query(WechatshopUser).filter(WechatshopUser.id == user_id).update(data)
        session.flush()
| 25.163265 | 87 | 0.633414 |
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs.exception import Error
from server.domain.models import WechatshopUser
import re
import time
import base64
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
def dispatch(self, session):
req_body = self.context.request.body
resp_body = self.context.response.body
name = req_body.name
mobile = req_body.mobile
user_id = req_body.userId
if len(mobile) < 11:
raise Error(-1, '长度不对')
if not mobile or not name:
raise Error(-1, '手机或名字不可为空')
mobile_re = re.compile('^(13\d|14[5|7]|15\d|166|17[3|6|7]|18\d)\d{8}$')
res = re.search(mobile_re, int(mobile))
if not res:
raise Error(-1, '请输入正确手机号')
data = {
'name': name,
'mobile': mobile,
'name_mobile': 1
}
session.query(WechatshopUser).filter(WechatshopUser.id == user_id).update(data)
session.flush()
| true | true |
f7217194f4c19697a8e59fe9babfa90a23edf214 | 2,031 | py | Python | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | null | null | null | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | 4 | 2022-02-04T15:18:31.000Z | 2022-02-07T15:07:43.000Z | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | null | null | null | import pytest
import pathlib
import sys
import requests
import io
import zipfile
import tempfile
import pandas as pd
import os
HERE = pathlib.Path(__file__).resolve().parent
# insert at 1, 0 is the script path (or '' in REPL)
# temporary hack until package is published and we can inherit from there:
sys.path.insert(1, '%s/thin_wrappers' % HERE.parent)
import db_utils as db # NOQA: E402
def headers():
    """Return browser-like HTTP headers so the download is not blocked."""
    user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) '
                  'AppleWebKit/537.75.14 (KHTML, like Gecko) '
                  'Version/7.0.3 Safari/7046A194A')
    return {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'DNT': '1',
        'Pragma': 'no-cache',
        'User-Agent': user_agent,
    }
def download_data():
    """Download the sample credit-card records and return them as a DataFrame.

    Fetches a zip archive over HTTP, extracts the CSV member entirely in
    memory, and parses it with pandas (the file is cp1252-encoded).

    :return: pandas.DataFrame with the 100 sample records.
    """
    url = 'https://eforexcel.com/wp/wp-content/uploads/2017/07/100-CC-Records.zip'
    res = requests.get(url, headers=headers())
    archive = zipfile.ZipFile(io.BytesIO(res.content))
    # FIX: parse straight from memory. The original wrote a
    # NamedTemporaryFile(delete=False) and never removed it, leaking one
    # temp file per call.
    csv_bytes = archive.read('100 CC Records.csv')
    return pd.read_csv(io.BytesIO(csv_bytes), encoding='cp1252')
def test_database():
    """Test that writing data to an sqlite db and reading it back works.

    Verifies both 'replace' and 'append' write modes, and cleans up the
    database file afterwards so reruns start fresh.
    """
    df = download_data()
    db_path = 'test_db.sqlite'
    try:
        db.write_db_table('dummy', df, 'replace', db_path)
        assert os.path.exists(db_path), "Did not find database?!"
        n_records = len(df)
        from_db = db.read_sql_table('dummy', db_path)
        assert len(
            from_db) == n_records, "Number of records does not match between database and data!"
        # Appending the same frame again should double the row count.
        db.write_db_table('dummy', df, 'append', db_path)
        from_db = db.read_sql_table('dummy', db_path)
        assert len(from_db) == (
            2 * n_records), "Number of records does not match between database and data!"
    finally:
        # FIX: remove the artifact; the original left test_db.sqlite behind,
        # which also made repeated runs append to stale state.
        if os.path.exists(db_path):
            os.remove(db_path)
# Allow running this test module directly: python test_db_utils.py
if __name__ == '__main__':
    pytest.main([__file__])
| 30.313433 | 148 | 0.65485 | import pytest
import pathlib
import sys
import requests
import io
import zipfile
import tempfile
import pandas as pd
import os
HERE = pathlib.Path(__file__).resolve().parent
sys.path.insert(1, '%s/thin_wrappers' % HERE.parent)
import db_utils as db
def headers():
return {'Accept': 'application/json, text/plain, */*',
'Accept-Language': 'en-US,en;q=0.5',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'DNT': '1',
'Pragma': 'no-cache',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
}
def download_data():
url = 'https://eforexcel.com/wp/wp-content/uploads/2017/07/100-CC-Records.zip'
res = requests.get(url, headers=headers())
filebytes = io.BytesIO(res.content)
tmp = zipfile.ZipFile(filebytes)
temp = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
with open(temp.name, 'wb') as fp:
fp.write(tmp.read('100 CC Records.csv'))
datum = pd.read_csv(temp.name, encoding='cp1252')
return datum
def test_database():
df = download_data()
db.write_db_table('dummy', df, 'replace', 'test_db.sqlite')
assert os.path.exists('test_db.sqlite'), "Did not find database?!"
n_records = len(df)
from_db = db.read_sql_table('dummy', 'test_db.sqlite')
assert len(
from_db) == n_records, "Number of records does not match between database and data!"
db.write_db_table('dummy', df, 'append', 'test_db.sqlite')
from_db = db.read_sql_table('dummy', 'test_db.sqlite')
assert len(from_db) == (
2 * n_records), "Number of records does not match between database and data!"
if __name__ == '__main__':
pytest.main([__file__])
| true | true |
f721726ac088dd61876dfef95afdd66374bad3ee | 9,061 | py | Python | cinder/tests/unit/image/fake.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/image/fake.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/image/fake.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import mock
import uuid
from cinder import exception
import cinder.image.glance
class _FakeImageService(object):
    """Mock (fake) image service for unit testing.

    Keeps image metadata in an in-memory dict keyed by image id (str) and
    is pre-populated with seven fixture images.
    """

    def __init__(self):
        self.images = {}
        # NOTE(justinsb): The OpenStack API can't upload an image?
        # So, make sure we've got one..
        timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
        # Fields shared by every fixture image.
        common = {'created_at': timestamp,
                  'updated_at': timestamp,
                  'deleted_at': None,
                  'deleted': False,
                  'status': 'active'}
        fixtures = [
            {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
             'name': 'fakeimage123456',
             'visibility': 'private',
             'protected': False,
             'container_format': 'raw',
             'disk_format': 'raw',
             'properties': {'kernel_id': 'nokernel',
                            'ramdisk_id': 'nokernel',
                            'architecture': 'x86_64'}},
            {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
             'name': 'fakeimage123456',
             'visibility': 'public',
             'protected': True,
             'container_format': 'ami',
             'disk_format': 'ami',
             'properties': {'kernel_id': 'nokernel',
                            'ramdisk_id': 'nokernel'}},
            {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
             'name': 'fakeimage123456',
             'visibility': 'public',
             'protected': True,
             'container_format': None,
             'disk_format': None,
             'properties': {'kernel_id': 'nokernel',
                            'ramdisk_id': 'nokernel'}},
            {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
             'name': 'fakeimage123456',
             'visibility': 'public',
             'protected': True,
             'container_format': 'ami',
             'disk_format': 'ami',
             'properties': {'kernel_id': 'nokernel',
                            'ramdisk_id': 'nokernel'}},
            {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
             'name': 'fakeimage123456',
             'size': 1024,
             'visibility': 'public',
             'protected': True,
             'container_format': 'ami',
             'disk_format': 'ami',
             'properties': {
                 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                 'ramdisk_id': None}},
            {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
             'name': 'fakeimage6',
             'visibility': 'public',
             'protected': False,
             'container_format': 'ova',
             'disk_format': 'vhd',
             'properties': {'kernel_id': 'nokernel',
                            'ramdisk_id': 'nokernel',
                            'architecture': 'x86_64',
                            'auto_disk_config': 'False'}},
            {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
             'name': 'fakeimage7',
             'visibility': 'public',
             'protected': False,
             'container_format': 'ova',
             'disk_format': 'vhd',
             'properties': {'kernel_id': 'nokernel',
                            'ramdisk_id': 'nokernel',
                            'architecture': 'x86_64',
                            'auto_disk_config': 'True'}},
        ]
        # Merge the shared defaults into each fixture and register it.
        for fixture in fixtures:
            image = dict(common)
            image.update(fixture)
            self.create(None, image)
        self._imagedata = {}
        self.temp_images = mock.MagicMock()
        super(_FakeImageService, self).__init__()

    # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
    def detail(self, context, **kwargs):
        """Return list of detailed image information."""
        # BUG FIX: materialize the view first — copy.deepcopy() of a
        # dict_values view raises TypeError on Python 3.
        return copy.deepcopy(list(self.images.values()))

    def download(self, context, image_id, data):
        """Write the stored image bytes for `image_id` into `data`.

        Raises ImageNotFound (via show) if the image does not exist.
        """
        self.show(context, image_id)
        data.write(self._imagedata.get(image_id, ''))

    def show(self, context, image_id):
        """Get data about specified image.

        Returns a dict containing image data for the given opaque image id.
        """
        image = self.images.get(str(image_id))
        if image:
            # Deep-copy so callers cannot mutate the stored metadata.
            return copy.deepcopy(image)
        raise exception.ImageNotFound(image_id=image_id)

    def create(self, context, metadata, data=None):
        """Store the image data and return the new image id.

        :raises: Duplicate if the image already exist.
        """
        image_id = str(metadata.get('id', uuid.uuid4()))
        metadata['id'] = image_id
        if image_id in self.images:
            raise exception.Duplicate()
        self.images[image_id] = copy.deepcopy(metadata)
        if data:
            self._imagedata[image_id] = data.read()
        return self.images[image_id]

    def update(self, context, image_id, metadata, data=None,
               purge_props=False):
        """Replace the contents of the given image with the new data.

        With purge_props=False, 'properties' is merged rather than replaced.

        :raises: ImageNotFound if the image does not exist.
        """
        if not self.images.get(image_id):
            raise exception.ImageNotFound(image_id=image_id)
        if purge_props:
            self.images[image_id] = copy.deepcopy(metadata)
        else:
            image = self.images[image_id]
            try:
                # Merge nested properties; ignore when metadata has none.
                image['properties'].update(metadata.pop('properties'))
            except Exception:
                pass
            image.update(metadata)
        return self.images[image_id]

    def delete(self, context, image_id):
        """Delete the given image.

        :raises: ImageNotFound if the image does not exist.
        """
        removed = self.images.pop(image_id, None)
        if not removed:
            raise exception.ImageNotFound(image_id=image_id)

    def get_location(self, context, image_id):
        """Return a fake location string for known images, else None."""
        if image_id in self.images:
            return 'fake_location'
        return None

    def add_location(self, context, image_id, url, metadata):
        """Attach a location (url + metadata) to the given image."""
        self.update(context, image_id, {'locations': [{'url': url,
                                                       'metadata': metadata}]})
        return True
_fakeImageService = _FakeImageService()
def FakeImageService():
    """Return the shared fake image service singleton."""
    return _fakeImageService
def FakeImageService_reset():
    """Replace the singleton with a fresh instance (for test isolation)."""
    global _fakeImageService
    _fakeImageService = _FakeImageService()
def mock_image_service(testcase):
    """Patch cinder's glance helpers on `testcase` to use the fake service."""
    testcase.mock_object(cinder.image.glance, 'get_remote_image_service',
                         lambda x, y: (FakeImageService(), y))
    testcase.mock_object(cinder.image.glance, 'get_default_image_service',
                         mock.Mock(side_effect=FakeImageService))
| 36.833333 | 79 | 0.522238 |
import copy
import datetime
import mock
import uuid
from cinder import exception
import cinder.image.glance
class _FakeImageService(object):
def __init__(self):
self.images = {}
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'private',
'protected': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'size': 1024,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
self.temp_images = mock.MagicMock()
super(_FakeImageService, self).__init__()
def detail(self, context, **kwargs):
return copy.deepcopy(self.images.values())
def download(self, context, image_id, data):
self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except Exception:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
def add_location(self, context, image_id, url, metadata):
self.update(context, image_id, {'locations': [{'url': url,
'metadata': metadata}]})
return True
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def mock_image_service(testcase):
testcase.mock_object(cinder.image.glance, 'get_remote_image_service',
lambda x, y: (FakeImageService(), y))
testcase.mock_object(cinder.image.glance, 'get_default_image_service',
mock.Mock(side_effect=FakeImageService))
| true | true |
f721754672bebac235baff6704cad30073fc6e3a | 2,231 | py | Python | recipes/python/template/template/trainingdataloader.py | tumulurik/acp-data-services-dsw-reference | 4ec0a161203a1097069bb5c0044eb6df137c5f6d | [
"Apache-2.0"
] | null | null | null | recipes/python/template/template/trainingdataloader.py | tumulurik/acp-data-services-dsw-reference | 4ec0a161203a1097069bb5c0044eb6df137c5f6d | [
"Apache-2.0"
] | null | null | null | recipes/python/template/template/trainingdataloader.py | tumulurik/acp-data-services-dsw-reference | 4ec0a161203a1097069bb5c0044eb6df137c5f6d | [
"Apache-2.0"
] | 1 | 2018-11-15T19:15:50.000Z | 2018-11-15T19:15:50.000Z | #####################################################################
# ADOBE CONFIDENTIAL
# ___________________
#
# Copyright 2017 Adobe
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Adobe and its suppliers, if any. The intellectual
# and technical concepts contained herein are proprietary to Adobe
# and its suppliers and are protected by all applicable intellectual
# property laws, including trade secret and copyright laws.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Adobe.
#####################################################################
import numpy as np
import pandas as pd
from data_access_sdk_python.reader import DataSetReader
def load(configProperties):
    """Load and return the training slice of the dataset.

    Template hook for the recipe: uncomment one of the loading strategies
    below (plain CSV or the Data Access SDK), add any feature engineering,
    and return the resulting training DataFrame.

    :param configProperties: recipe configuration dict (connection details,
        dataset ids, date ranges, ...).
    :return: training data; ``None`` until the template is filled in.
    """

    # This variable will hold the part of the data on which we train our model
    train = None

    print("Training Data Load Start")

    #########################################
    # Extract fields from configProperties
    #########################################
    # data = configProperties['data']
    # train_start = configProperties['train_start']
    # train_end = configProperties['train_end']

    #########################################
    # Load Data
    #########################################

    ### From CSV ###
    # df = pd.read_csv(data)

    ### - OR - From Data Access SDK ###
    # prodreader = DataSetReader(ims_url=configProperties['ims_url'],
    #                            catalog_url=configProperties['catalog_url'],
    #                            client_id=configProperties['client_id'],
    #                            client_secret=configProperties['client_secret'],
    #                            code=configProperties['code'])
    # df = prodreader.load(configProperties['data_set_id'], configProperties['ims_org'])

    #########################################
    # Data Preparation/Feature Engineering
    #########################################

    ### Add/Remove/Modify DataFrame below ###
    ### Then return the training data     ###
    # test = df[train_start:train_end]

    print("Training Data Load Finish")
    return train
| 34.859375 | 88 | 0.556253 | true | true | |
f72175eba1256181da7f1dbcf593e18eb8a344a6 | 7,472 | py | Python | neo/io/__init__.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | 1 | 2020-06-08T14:00:03.000Z | 2020-06-08T14:00:03.000Z | neo/io/__init__.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | 22 | 2016-09-13T13:31:25.000Z | 2019-05-14T17:07:16.000Z | neo/io/__init__.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | null | null | null | """
:mod:`neo.io` provides classes for reading and/or writing
electrophysiological data files.
Note that if the package dependency is not satisfied for one io, it does not
raise an error but a warning.
:attr:`neo.io.iolist` provides a list of successfully imported io classes.
Functions:
.. autofunction:: neo.io.get_io
Classes:
* :attr:`AlphaOmegaIO`
* :attr:`AsciiImageIO`
* :attr:`AsciiSignalIO`
* :attr:`AsciiSpikeTrainIO`
* :attr:`AxographIO`
* :attr:`AxonIO`
* :attr:`BCI2000IO`
* :attr:`BlackrockIO`
* :attr:`BlkIO`
* :attr:`BrainVisionIO`
* :attr:`BrainwareDamIO`
* :attr:`BrainwareF32IO`
* :attr:`BrainwareSrcIO`
* :attr:`ElanIO`
* :attr:`IgorIO`
* :attr:`IntanIO`
* :attr:`KlustaKwikIO`
* :attr:`KwikIO`
* :attr:`MicromedIO`
* :attr:`NeoHdf5IO`
* :attr:`NeoMatlabIO`
* :attr:`NestIO`
* :attr:`NeuralynxIO`
* :attr:`NeuroExplorerIO`
* :attr:`NeuroScopeIO`
* :attr:`NeuroshareIO`
* :attr:`NixIO`
* :attr:`NSDFIO`
* :attr:`OpenEphysIO`
* :attr:`PickleIO`
* :attr:`PlexonIO`
* :attr:`RawBinarySignalIO`
* :attr:`RawMCSIO`
* :attr:`Spike2IO`
* :attr:`StimfitIO`
* :attr:`TdtIO`
* :attr:`TiffIO`
* :attr:`WinEdrIO`
* :attr:`WinWcpIO`
.. autoclass:: neo.io.AlphaOmegaIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiImageIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiSignalIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiSpikeTrainIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxographIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxonIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BCI2000IO
.. autoattribute:: extensions
.. autoclass:: neo.io.BlackrockIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BlkIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainVisionIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareDamIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareF32IO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareSrcIO
.. autoattribute:: extensions
.. autoclass:: neo.io.ElanIO
.. autoattribute:: extensions
.. .. autoclass:: neo.io.ElphyIO
.. autoattribute:: extensions
.. autoclass:: neo.io.IgorIO
.. autoattribute:: extensions
.. autoclass:: neo.io.IntanIO
.. autoattribute:: extensions
.. autoclass:: neo.io.KlustaKwikIO
.. autoattribute:: extensions
.. autoclass:: neo.io.KwikIO
.. autoattribute:: extensions
.. autoclass:: neo.io.MicromedIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeoHdf5IO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeoMatlabIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NestIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuralynxIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroExplorerIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroScopeIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroshareIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NixIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NSDFIO
.. autoattribute:: extensions
.. autoclass:: neo.io.OpenEphysIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PickleIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PlexonIO
.. autoattribute:: extensions
.. autoclass:: neo.io.RawBinarySignalIO
.. autoattribute:: extensions
.. autoclass:: neo.io.RawMCSIO
.. autoattribute:: extensions
.. autoclass:: neo.io.Spike2IO
.. autoattribute:: extensions
.. autoclass:: neo.io.StimfitIO
.. autoattribute:: extensions
.. autoclass:: neo.io.TdtIO
.. autoattribute:: extensions
.. autoclass:: neo.io.TiffIO
.. autoattribute:: extensions
.. autoclass:: neo.io.WinEdrIO
.. autoattribute:: extensions
.. autoclass:: neo.io.WinWcpIO
.. autoattribute:: extensions
"""
import os.path
# try to import the neuroshare library.
# if it is present, use the neuroshareapiio to load neuroshare files
# if it is not present, use the neurosharectypesio to load files
try:
import neuroshare as ns
except ImportError as err:
from neo.io.neurosharectypesio import NeurosharectypesIO as NeuroshareIO
# print("\n neuroshare library not found, loading data with ctypes" )
# print("\n to use the API be sure to install the library found at:")
# print("\n www.http://pythonhosted.org/neuroshare/")
else:
from neo.io.neuroshareapiio import NeuroshareapiIO as NeuroshareIO
# print("neuroshare library successfully imported")
# print("\n loading with API...")
from neo.io.alphaomegaio import AlphaOmegaIO
from neo.io.asciiimageio import AsciiImageIO
from neo.io.asciisignalio import AsciiSignalIO
from neo.io.asciispiketrainio import AsciiSpikeTrainIO
from neo.io.axographio import AxographIO
from neo.io.axonio import AxonIO
from neo.io.blackrockio import BlackrockIO
from neo.io.blackrockio_v4 import BlackrockIO as OldBlackrockIO
from neo.io.blkio import BlkIO
from neo.io.bci2000io import BCI2000IO
from neo.io.brainvisionio import BrainVisionIO
from neo.io.brainwaredamio import BrainwareDamIO
from neo.io.brainwaref32io import BrainwareF32IO
from neo.io.brainwaresrcio import BrainwareSrcIO
from neo.io.elanio import ElanIO
# from neo.io.elphyio import ElphyIO
from neo.io.exampleio import ExampleIO
from neo.io.igorproio import IgorIO
from neo.io.intanio import IntanIO
from neo.io.klustakwikio import KlustaKwikIO
from neo.io.kwikio import KwikIO
from neo.io.micromedio import MicromedIO
from neo.io.hdf5io import NeoHdf5IO
from neo.io.neomatlabio import NeoMatlabIO
from neo.io.nestio import NestIO
from neo.io.neuralynxio import NeuralynxIO
from neo.io.neuralynxio_v1 import NeuralynxIO as OldNeuralynxIO
from neo.io.neuroexplorerio import NeuroExplorerIO
from neo.io.neuroscopeio import NeuroScopeIO
from neo.io.nixio import NixIO
from neo.io.nixio_fr import NixIO as NixIOFr
from neo.io.nsdfio import NSDFIO
from neo.io.openephysio import OpenEphysIO
from neo.io.pickleio import PickleIO
from neo.io.plexonio import PlexonIO
from neo.io.rawbinarysignalio import RawBinarySignalIO
from neo.io.rawmcsio import RawMCSIO
from neo.io.spike2io import Spike2IO
from neo.io.stimfitio import StimfitIO
from neo.io.tdtio import TdtIO
from neo.io.tiffio import TiffIO
from neo.io.winedrio import WinEdrIO
from neo.io.winwcpio import WinWcpIO
iolist = [
AlphaOmegaIO,
AsciiImageIO,
AsciiSignalIO,
AsciiSpikeTrainIO,
AxographIO,
AxonIO,
BCI2000IO,
BlackrockIO,
BlkIO,
BrainVisionIO,
BrainwareDamIO,
BrainwareF32IO,
BrainwareSrcIO,
ElanIO,
# ElphyIO,
ExampleIO,
IgorIO,
IntanIO,
KlustaKwikIO,
KwikIO,
MicromedIO,
NixIO, # place NixIO before NeoHdf5IO to make it the default for .h5 files
NeoHdf5IO,
NeoMatlabIO,
NestIO,
NeuralynxIO,
NeuroExplorerIO,
NeuroScopeIO,
NeuroshareIO,
NSDFIO,
OpenEphysIO,
PickleIO,
PlexonIO,
RawBinarySignalIO,
RawMCSIO,
Spike2IO,
StimfitIO,
TdtIO,
TiffIO,
WinEdrIO,
WinWcpIO
]
def get_io(filename, *args, **kwargs):
"""
Return a Neo IO instance, guessing the type based on the filename suffix.
"""
extension = os.path.splitext(filename)[1][1:]
for io in iolist:
if extension in io.extensions:
return io(filename, *args, **kwargs)
raise IOError("File extension %s not registered" % extension)
| 22.172107 | 79 | 0.720021 |
import os.path
try:
import neuroshare as ns
except ImportError as err:
from neo.io.neurosharectypesio import NeurosharectypesIO as NeuroshareIO
else:
from neo.io.neuroshareapiio import NeuroshareapiIO as NeuroshareIO
from neo.io.alphaomegaio import AlphaOmegaIO
from neo.io.asciiimageio import AsciiImageIO
from neo.io.asciisignalio import AsciiSignalIO
from neo.io.asciispiketrainio import AsciiSpikeTrainIO
from neo.io.axographio import AxographIO
from neo.io.axonio import AxonIO
from neo.io.blackrockio import BlackrockIO
from neo.io.blackrockio_v4 import BlackrockIO as OldBlackrockIO
from neo.io.blkio import BlkIO
from neo.io.bci2000io import BCI2000IO
from neo.io.brainvisionio import BrainVisionIO
from neo.io.brainwaredamio import BrainwareDamIO
from neo.io.brainwaref32io import BrainwareF32IO
from neo.io.brainwaresrcio import BrainwareSrcIO
from neo.io.elanio import ElanIO
from neo.io.exampleio import ExampleIO
from neo.io.igorproio import IgorIO
from neo.io.intanio import IntanIO
from neo.io.klustakwikio import KlustaKwikIO
from neo.io.kwikio import KwikIO
from neo.io.micromedio import MicromedIO
from neo.io.hdf5io import NeoHdf5IO
from neo.io.neomatlabio import NeoMatlabIO
from neo.io.nestio import NestIO
from neo.io.neuralynxio import NeuralynxIO
from neo.io.neuralynxio_v1 import NeuralynxIO as OldNeuralynxIO
from neo.io.neuroexplorerio import NeuroExplorerIO
from neo.io.neuroscopeio import NeuroScopeIO
from neo.io.nixio import NixIO
from neo.io.nixio_fr import NixIO as NixIOFr
from neo.io.nsdfio import NSDFIO
from neo.io.openephysio import OpenEphysIO
from neo.io.pickleio import PickleIO
from neo.io.plexonio import PlexonIO
from neo.io.rawbinarysignalio import RawBinarySignalIO
from neo.io.rawmcsio import RawMCSIO
from neo.io.spike2io import Spike2IO
from neo.io.stimfitio import StimfitIO
from neo.io.tdtio import TdtIO
from neo.io.tiffio import TiffIO
from neo.io.winedrio import WinEdrIO
from neo.io.winwcpio import WinWcpIO
iolist = [
AlphaOmegaIO,
AsciiImageIO,
AsciiSignalIO,
AsciiSpikeTrainIO,
AxographIO,
AxonIO,
BCI2000IO,
BlackrockIO,
BlkIO,
BrainVisionIO,
BrainwareDamIO,
BrainwareF32IO,
BrainwareSrcIO,
ElanIO,
ExampleIO,
IgorIO,
IntanIO,
KlustaKwikIO,
KwikIO,
MicromedIO,
NixIO,
NeoHdf5IO,
NeoMatlabIO,
NestIO,
NeuralynxIO,
NeuroExplorerIO,
NeuroScopeIO,
NeuroshareIO,
NSDFIO,
OpenEphysIO,
PickleIO,
PlexonIO,
RawBinarySignalIO,
RawMCSIO,
Spike2IO,
StimfitIO,
TdtIO,
TiffIO,
WinEdrIO,
WinWcpIO
]
def get_io(filename, *args, **kwargs):
extension = os.path.splitext(filename)[1][1:]
for io in iolist:
if extension in io.extensions:
return io(filename, *args, **kwargs)
raise IOError("File extension %s not registered" % extension)
| true | true |
f721766457ab8501938015654594e370b906deb0 | 3,890 | py | Python | workflow/scripts/combine_virsorter_virfinder.py | rdenise/virome_pipeline | 3c629aef75b184bf39f2d14043f94e8787e3ea14 | [
"MIT"
] | 1 | 2022-03-29T21:18:53.000Z | 2022-03-29T21:18:53.000Z | workflow/scripts/combine_virsorter_virfinder.py | rdenise/virome_pipeline | 3c629aef75b184bf39f2d14043f94e8787e3ea14 | [
"MIT"
] | null | null | null | workflow/scripts/combine_virsorter_virfinder.py | rdenise/virome_pipeline | 3c629aef75b184bf39f2d14043f94e8787e3ea14 | [
"MIT"
] | null | null | null | from Bio import SeqIO
import pandas as pd
import sys
import os
# Put error and out into the log file
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
###########################################################
###########################################################
# List that will contains all the contigs to filter
all_contig_ids = []
# Dataframe that contains all the informations about
output_df = pd.DataFrame(columns=["contig_id", "virsorter_cat", "deepvirfinder"])
# Get all the names from the virsorter keep2 list
ids_virsorter_keep2 = snakemake.input.ids_virsorter_keep2_checked
with open(ids_virsorter_keep2) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
rstrip_line = rstrip_line.split("||")[0]
all_contig_ids.append(rstrip_line)
output_df.at[rstrip_line, "contig_id"] = rstrip_line
output_df.at[rstrip_line, "virsorter_cat"] = "keep2_checked"
# Get all the names from the virsorter keep1 list and remove redondant name
ids_virsorter_keep1 = snakemake.input.ids_virsorter_keep1
with open(ids_virsorter_keep1) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
rstrip_line = rstrip_line.split("||")[0]
if rstrip_line not in all_contig_ids:
all_contig_ids.append(rstrip_line)
output_df.at[rstrip_line, "contig_id"] = rstrip_line
output_df.at[rstrip_line, "virsorter_cat"] = "keep1"
# Get all the names from the deepvirfinder list and remove redondant name
ids_virfinder = snakemake.input.ids_virfinder
with open(ids_virfinder) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
output_df.at[rstrip_line, "contig_id"] = rstrip_line
output_df.at[rstrip_line, "deepvirfinder"] = "Yes"
if rstrip_line not in all_contig_ids:
all_contig_ids.append(rstrip_line)
# Fill the informations missing now the list of contigs we keep is set
dict_map_virsorter = {}
files_with_info = {
snakemake.input.ids_virsorter_keep2_suspicious: "keep2_suspicious",
snakemake.input.ids_virsorter_manual_check: "to_manual_check",
snakemake.input.ids_virsorter_discarded: "discarded",
}
for file_ids in files_with_info:
with open(file_ids) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
rstrip_line = rstrip_line.split("||")[0]
if rstrip_line not in all_contig_ids:
dict_map_virsorter[rstrip_line] = files_with_info[file_ids]
# Fill the dataframe
list_contig2add_virsorter_cat = list(dict_map_virsorter.keys())
output_df.loc[
output_df.contig_id.isin(list_contig2add_virsorter_cat), "virsorter_cat"
] = output_df.loc[
output_df.contig_id.isin(list_contig2add_virsorter_cat), "contig_id"
].map(
dict_map_virsorter
)
output_df.fillna("No", inplace=True)
# Parse the fasta of the contig and create the new one
fasta_contigs = snakemake.input.contigs
with open(snakemake.output.fasta, "w") as w_file:
with open(snakemake.output.translation_table, "w") as tsv_file:
tsv_file.write("old_contig_name\tnew_contig_name\n")
parser = SeqIO.parse(fasta_contigs, "fasta")
for contig in parser:
if contig.id in all_contig_ids:
contig_id = f"{snakemake.wildcards.sample}-{contig.id}".replace(
"_", "-"
)
tsv_file.write(f"{contig.id}\t{contig_id}\n")
contig.id = contig_id
contig.name = ""
contig.description = ""
SeqIO.write(contig, w_file, "fasta")
output_df.to_csv(snakemake.output.tsv, sep="\t", index=False)
###########################################################
###########################################################
| 31.626016 | 81 | 0.648072 | from Bio import SeqIO
import pandas as pd
import sys
import os
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
| true | true |
f7217746e68b217cef673ded6405c62a5976ac18 | 5,365 | py | Python | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | 1 | 2021-04-02T15:33:12.000Z | 2021-04-02T15:33:12.000Z | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | null | null | null | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | 1 | 2021-08-14T09:07:22.000Z | 2021-08-14T09:07:22.000Z | import math
import os
from math import log10
# noinspection PyPackageRequirements
import cv2
import numpy as np
from scipy.ndimage import distance_transform_edt
import config_main
from Utils.log_handler import log_setup_info_to_console, log_error_to_console, log_benchmark_info_to_console
from Benchmarking.Util.image_parsing import find_img_extension
from Benchmarking.Config.create_benchmark_job import set_gt_location, set_image_set, set_input_location, job_set
def rde_calc(img, img_gt, k_value):
"""
Dubuisson, M.P.; Jain, A.K. A modified Hausdorff distance for object matching. IEEE ICPR 1994, 1, 566-568
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.1.8155&rep=rep1&type=pdf
:param img: edge map resulting of algorithm
:param img_gt: ground truth image
:return: psnr value for image
"""
# calculate distances
dist_gt = distance_transform_edt(np.invert(img_gt))
dist_dc = distance_transform_edt(np.invert(img))
# calculate sum(d^k(D))
sum_dc = 0.0
sum_gt = 0.0
left = 0.0
right = 0.0
for i in range(0, img_gt.shape[0]):
for j in range(0, img_gt.shape[1]):
if img_gt[i, j]:
sum_dc += dist_dc[i, j] ** k_value
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
if img[i, j]:
sum_gt += dist_gt[i, j] ** k_value
cn_cd = np.count_nonzero(img)
cn_gt = np.count_nonzero(img_gt)
if cn_cd != 0 :
left = math.pow(sum_gt / cn_cd, 1.0/k_value)
if cn_gt != 0:
right = math.pow(sum_dc / cn_gt, 1.0/k_value)
if cn_cd==0:
rde = 1000
else:
rde = left + right
return rde
# noinspection PyPep8Naming
def run_RDE_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list,
k: int):
"""
xxx
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_RDE(k)
def run_CM_benchmark_RDE(k_value):
"""
:return:
"""
log_setup_info_to_console("BENCHMARKING CM RDEK" + int(k_value).__str__())
idx = 0
for set in config_main.BENCHMARK_SETS:
log_benchmark_info_to_console('Current set: {number}\{total} : {set}'.format(number=idx, total=len(config_main.BENCHMARK_SETS), set=set))
idx += 1
# try:
if True:
# Write results to disk
results_path = os.path.join(os.getcwd(), config_main.BENCHMARK_RESULTS, "RDEK" + int(k_value).__str__())
if not os.path.exists(results_path):
os.makedirs(results_path)
csv = open(os.path.join(results_path, set + '.log'), "w+")
csv.write('Per image (#, RDEK' + int(k_value).__str__() + ':\n')
# log_benchmark_info_to_console('Per image (#, RDE):\n')
avg = 0
count = 0
for file in config_main.BENCHMARK_SAMPLE_NAMES:
# find extension of images and gt_images
if config_main.APPL_SAVE_JOB_NAME is True:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file))
else:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file))
gt_extension = find_img_extension(os.path.join(config_main.BENCHMARK_GT_LOCATION, file))
path_img_gt = os.path.join(config_main.BENCHMARK_GT_LOCATION, file + gt_extension)
if config_main.APPL_SAVE_JOB_NAME is True:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file + img_extension)
else:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file + img_extension)
img_gt = cv2.cvtColor(cv2.imread(path_img_gt), cv2.COLOR_BGR2GRAY)
img_al = cv2.cvtColor(cv2.imread(path_img_al), cv2.COLOR_BGR2GRAY)
try:
val = rde_calc(img_al, img_gt, k_value)
avg += val
count += 1
csv.write('{:<10s} {:<10.6f}\n'.format(file, val))
# log_benchmark_info_to_console('{:<10s} {:<10.6f}\n'.format(file, val))
except Exception as ex:
log_error_to_console("BENCHMARK CM RDEK{val}: {file}".format(val=int(k_value).__str__(), file=file), ex.__str__())
log_benchmark_info_to_console('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
csv.write('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
# except Exception as ex:
# log_error_to_console('BENCHMARK CM RDEK' + int(k_value).__str__() + 'NOK', ex.__str__())
if __name__ == "__main__":
pass
| 37.517483 | 146 | 0.608574 | import math
import os
from math import log10
import cv2
import numpy as np
from scipy.ndimage import distance_transform_edt
import config_main
from Utils.log_handler import log_setup_info_to_console, log_error_to_console, log_benchmark_info_to_console
from Benchmarking.Util.image_parsing import find_img_extension
from Benchmarking.Config.create_benchmark_job import set_gt_location, set_image_set, set_input_location, job_set
def rde_calc(img, img_gt, k_value):
dist_gt = distance_transform_edt(np.invert(img_gt))
dist_dc = distance_transform_edt(np.invert(img))
sum_dc = 0.0
sum_gt = 0.0
left = 0.0
right = 0.0
for i in range(0, img_gt.shape[0]):
for j in range(0, img_gt.shape[1]):
if img_gt[i, j]:
sum_dc += dist_dc[i, j] ** k_value
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
if img[i, j]:
sum_gt += dist_gt[i, j] ** k_value
cn_cd = np.count_nonzero(img)
cn_gt = np.count_nonzero(img_gt)
if cn_cd != 0 :
left = math.pow(sum_gt / cn_cd, 1.0/k_value)
if cn_gt != 0:
right = math.pow(sum_dc / cn_gt, 1.0/k_value)
if cn_cd==0:
rde = 1000
else:
rde = left + right
return rde
def run_RDE_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list,
k: int):
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_RDE(k)
def run_CM_benchmark_RDE(k_value):
log_setup_info_to_console("BENCHMARKING CM RDEK" + int(k_value).__str__())
idx = 0
for set in config_main.BENCHMARK_SETS:
log_benchmark_info_to_console('Current set: {number}\{total} : {set}'.format(number=idx, total=len(config_main.BENCHMARK_SETS), set=set))
idx += 1
if True:
results_path = os.path.join(os.getcwd(), config_main.BENCHMARK_RESULTS, "RDEK" + int(k_value).__str__())
if not os.path.exists(results_path):
os.makedirs(results_path)
csv = open(os.path.join(results_path, set + '.log'), "w+")
csv.write('Per image (#, RDEK' + int(k_value).__str__() + ':\n')
avg = 0
count = 0
for file in config_main.BENCHMARK_SAMPLE_NAMES:
if config_main.APPL_SAVE_JOB_NAME is True:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file))
else:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file))
gt_extension = find_img_extension(os.path.join(config_main.BENCHMARK_GT_LOCATION, file))
path_img_gt = os.path.join(config_main.BENCHMARK_GT_LOCATION, file + gt_extension)
if config_main.APPL_SAVE_JOB_NAME is True:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file + img_extension)
else:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file + img_extension)
img_gt = cv2.cvtColor(cv2.imread(path_img_gt), cv2.COLOR_BGR2GRAY)
img_al = cv2.cvtColor(cv2.imread(path_img_al), cv2.COLOR_BGR2GRAY)
try:
val = rde_calc(img_al, img_gt, k_value)
avg += val
count += 1
csv.write('{:<10s} {:<10.6f}\n'.format(file, val))
except Exception as ex:
log_error_to_console("BENCHMARK CM RDEK{val}: {file}".format(val=int(k_value).__str__(), file=file), ex.__str__())
log_benchmark_info_to_console('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
csv.write('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
if __name__ == "__main__":
pass
| true | true |
f7217797ff9948fe15504b1554d32d09382f057d | 3,899 | py | Python | PaddleCV/tracking/ltr/data/loader.py | zhousanfu/paddle-demo | 56860c5241874fe6111def46ea2f3f91e3ba80de | [
"Apache-2.0"
] | 1 | 2021-07-07T11:04:11.000Z | 2021-07-07T11:04:11.000Z | PaddleCV/tracking/ltr/data/loader.py | zhousanfu/paddle_demo | 56860c5241874fe6111def46ea2f3f91e3ba80de | [
"Apache-2.0"
] | null | null | null | PaddleCV/tracking/ltr/data/loader.py | zhousanfu/paddle_demo | 56860c5241874fe6111def46ea2f3f91e3ba80de | [
"Apache-2.0"
] | 1 | 2021-05-18T06:36:32.000Z | 2021-05-18T06:36:32.000Z | import os
import sys
import dataflow as df
import numpy as np
class LTRLoader(df.DataFlow):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Note: an additional option stack_dim is available to
select along which dimension the data should be stacked to form a batch.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a mini-batch.
stack_dim (int): Dimension along which to stack to form the batch. (default: 0)
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: False)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: 0)
worker_init_fn (callable, optional): If not None, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: None)
.. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
unpicklable object, e.g., a lambda function.
"""
__initialized = False
def __init__(self,
name,
dataset,
training=True,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
epoch_interval=1,
collate_fn=None,
stack_dim=0,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None):
super().__init__()
ds = df.RepeatedData(dataset, -1)
ds = df.MultiProcessRunnerZMQ(ds, num_proc=num_workers, hwm=300)
# ds = df.MultiThreadRunner(lambda: ds, num_prefetch=1024, num_thread=num_workers)
ds = df.BatchData(ds, batch_size)
self.ds = ds
self.name = name
self.training = training
self.epoch_interval = epoch_interval
self.stack_dim = stack_dim
self.batches_per_epoch = len(dataset) // batch_size
def __len__(self):
return self.batches_per_epoch
def __iter__(self):
if not self.__initialized:
self.reset_state()
self.__initialized = True
for d in self.ds:
if self.stack_dim > 0:
for k, v in d.items():
if len(v.shape) >= self.stack_dim + 1:
d[k] = np.swapaxes(v, 0, self.stack_dim)
yield d
def reset_state(self):
self.ds.reset_state()
| 39.383838 | 90 | 0.60118 | import os
import sys
import dataflow as df
import numpy as np
class LTRLoader(df.DataFlow):
__initialized = False
def __init__(self,
name,
dataset,
training=True,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
epoch_interval=1,
collate_fn=None,
stack_dim=0,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None):
super().__init__()
ds = df.RepeatedData(dataset, -1)
ds = df.MultiProcessRunnerZMQ(ds, num_proc=num_workers, hwm=300)
ds = df.BatchData(ds, batch_size)
self.ds = ds
self.name = name
self.training = training
self.epoch_interval = epoch_interval
self.stack_dim = stack_dim
self.batches_per_epoch = len(dataset) // batch_size
def __len__(self):
return self.batches_per_epoch
def __iter__(self):
if not self.__initialized:
self.reset_state()
self.__initialized = True
for d in self.ds:
if self.stack_dim > 0:
for k, v in d.items():
if len(v.shape) >= self.stack_dim + 1:
d[k] = np.swapaxes(v, 0, self.stack_dim)
yield d
def reset_state(self):
self.ds.reset_state()
| true | true |
f72177dda3702aa0aa6df33982088a3eb433c9ba | 13,260 | py | Python | Lib/test/test_module.py | ErikBjare/cpython | b68431fadb3150134ac6ccbf501cdfeaf4c75678 | [
"0BSD"
] | 5 | 2021-12-03T23:11:53.000Z | 2022-01-08T21:02:50.000Z | Lib/test/test_module.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | [
"0BSD"
] | 8 | 2022-01-07T11:31:11.000Z | 2022-03-04T00:07:16.000Z | Lib/test/test_module.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | [
"0BSD"
] | 1 | 2022-03-27T18:34:54.000Z | 2022-03-27T18:34:54.000Z | # Test the module type
import unittest
import weakref
from test.support import gc_collect
from test.support import import_helper
from test.support.script_helper import assert_python_ok
import sys
# The module type is not a builtin name; derive it from an existing module
# object (any already-imported module would do).
ModuleType = type(sys)
class FullLoader:
    """Loader stub that customizes module reprs via a module_repr() hook."""

    @classmethod
    def module_repr(cls, module):
        # Tag the repr so tests can tell this hook was consulted.
        return f"<module '{module.__name__}' (crafted)>"
class BareLoader:
    # Intentionally defines no loader hooks (in particular no module_repr());
    # used to exercise the fallback paths of module.__repr__.
    pass
class ModuleTests(unittest.TestCase):
def test_uninitialized(self):
# An uninitialized module has no __dict__ or __name__,
# and __doc__ is None
foo = ModuleType.__new__(ModuleType)
self.assertTrue(isinstance(foo.__dict__, dict))
self.assertEqual(dir(foo), [])
try:
s = foo.__name__
self.fail("__name__ = %s" % repr(s))
except AttributeError:
pass
self.assertEqual(foo.__doc__, ModuleType.__doc__)
def test_uninitialized_missing_getattr(self):
# Issue 8297
# test the text in the AttributeError of an uninitialized module
foo = ModuleType.__new__(ModuleType)
self.assertRaisesRegex(
AttributeError, "module has no attribute 'not_here'",
getattr, foo, "not_here")
def test_missing_getattr(self):
# Issue 8297
# test the text in the AttributeError
foo = ModuleType("foo")
self.assertRaisesRegex(
AttributeError, "module 'foo' has no attribute 'not_here'",
getattr, foo, "not_here")
def test_no_docstring(self):
# Regularly initialized module, no docstring
foo = ModuleType("foo")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, None)
self.assertIs(foo.__loader__, None)
self.assertIs(foo.__package__, None)
self.assertIs(foo.__spec__, None)
self.assertEqual(foo.__dict__, {"__name__": "foo", "__doc__": None,
"__loader__": None, "__package__": None,
"__spec__": None})
def test_ascii_docstring(self):
# ASCII docstring
foo = ModuleType("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_unicode_docstring(self):
# Unicode docstring
foo = ModuleType("foo", "foodoc\u1234")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc\u1234")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc\u1234",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_reinit(self):
# Reinitialization should not replace the __dict__
foo = ModuleType("foo", "foodoc\u1234")
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.bar, 42)
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc", "bar": 42,
"__loader__": None, "__package__": None, "__spec__": None})
self.assertTrue(foo.__dict__ is d)
    def test_dont_clear_dict(self):
        # See issue 7140: the module's __dict__ must not be cleared while
        # outside code can still reach it, even after the module object
        # itself becomes unreferenced.
        def f():
            foo = ModuleType("foo")
            foo.bar = 4
            return foo
        # gc_collect() gives any deferred cleanup a chance to run before we
        # check that the returned module's namespace is intact.
        gc_collect()
        self.assertEqual(f().__dict__["bar"], 4)
    def test_clear_dict_in_ref_cycle(self):
        # When a module that participates in a reference cycle is collected,
        # its __dict__ must still be usable while finalizers run: A.__del__
        # below appends to a list reachable through the module namespace.
        destroyed = []
        m = ModuleType("foo")
        m.destroyed = destroyed
        # The exec'd code creates a cycle: the module dict holds A and an
        # instance of A, and A's methods reference the module dict as their
        # globals.
        s = """class A:
    def __init__(self, l):
        self.l = l
    def __del__(self):
        self.l.append(1)
a = A(destroyed)"""
        exec(s, m.__dict__)
        del m
        gc_collect()
        # __del__ ran exactly once and could still reach the list.
        self.assertEqual(destroyed, [1])
def test_weakref(self):
m = ModuleType("foo")
wr = weakref.ref(m)
self.assertIs(wr(), m)
del m
gc_collect()
self.assertIs(wr(), None)
def test_module_getattr(self):
import test.good_getattr as gga
from test.good_getattr import test
self.assertEqual(test, "There is test")
self.assertEqual(gga.x, 1)
self.assertEqual(gga.y, 2)
with self.assertRaisesRegex(AttributeError,
"Deprecated, use whatever instead"):
gga.yolo
self.assertEqual(gga.whatever, "There is whatever")
del sys.modules['test.good_getattr']
def test_module_getattr_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
self.assertEqual(bga.x, 1)
self.assertEqual(bad_getattr2.x, 1)
with self.assertRaises(TypeError):
bga.nope
with self.assertRaises(TypeError):
bad_getattr2.nope
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_dir(self):
import test.good_getattr as gga
self.assertEqual(dir(gga), ['a', 'b', 'c'])
del sys.modules['test.good_getattr']
def test_module_dir_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
with self.assertRaises(TypeError):
dir(bga)
with self.assertRaises(TypeError):
dir(bad_getattr2)
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_getattr_tricky(self):
from test import bad_getattr3
# these lookups should not crash
with self.assertRaises(AttributeError):
bad_getattr3.one
with self.assertRaises(AttributeError):
bad_getattr3.delgetattr
if 'test.bad_getattr3' in sys.modules:
del sys.modules['test.bad_getattr3']
def test_module_repr_minimal(self):
# reprs when modules have no __file__, __name__, or __loader__
m = ModuleType('foo')
del m.__name__
self.assertEqual(repr(m), "<module '?'>")
def test_module_repr_with_name(self):
m = ModuleType('foo')
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_name_and_filename(self):
m = ModuleType('foo')
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_filename_only(self):
m = ModuleType('foo')
del m.__name__
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module '?' from '/tmp/foo.py'>")
def test_module_repr_with_loader_as_None(self):
m = ModuleType('foo')
assert m.__loader__ is None
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_bare_loader_but_no_name(self):
m = ModuleType('foo')
del m.__name__
# Yes, a class not an instance.
m.__loader__ = BareLoader
loader_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_full_loader_but_no_name(self):
# m.__loader__.module_repr() will fail because the module has no
# m.__name__. This exception will get suppressed and instead the
# loader's repr will be used.
m = ModuleType('foo')
del m.__name__
# Yes, a class not an instance.
m.__loader__ = FullLoader
loader_repr = repr(FullLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_bare_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
module_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module 'foo' ({})>".format(module_repr))
def test_module_repr_with_full_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
self.assertEqual(
repr(m), "<module 'foo' (crafted)>")
def test_module_repr_with_bare_loader_and_filename(self):
# Because the loader has no module_repr(), use the file name.
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_full_loader_and_filename(self):
# Even though the module has an __file__, use __loader__.module_repr()
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' (crafted)>")
def test_module_repr_builtin(self):
self.assertEqual(repr(sys), "<module 'sys' (built-in)>")
def test_module_repr_source(self):
r = repr(unittest)
starts_with = "<module 'unittest' from '"
ends_with = "__init__.py'>"
self.assertEqual(r[:len(starts_with)], starts_with,
'{!r} does not start with {!r}'.format(r, starts_with))
self.assertEqual(r[-len(ends_with):], ends_with,
'{!r} does not end with {!r}'.format(r, ends_with))
    def test_module_finalization_at_shutdown(self):
        # Module globals and builtins should still be available during
        # shutdown.  NOTE(review): the expected lines below come from
        # finalizer code in the test.final_a / test.final_b fixtures —
        # verify against those modules.
        rc, out, err = assert_python_ok("-c", "from test import final_a")
        self.assertFalse(err)
        lines = out.splitlines()
        # Output order during shutdown is not guaranteed, hence set equality.
        self.assertEqual(set(lines), {
            b"x = a",
            b"x = b",
            b"final_a.x = a",
            b"final_b.x = b",
            b"len = len",
            b"shutil.rmtree = rmtree"})
def test_descriptor_errors_propagate(self):
class Descr:
def __get__(self, o, t):
raise RuntimeError
class M(ModuleType):
melon = Descr()
self.assertRaises(RuntimeError, getattr, M("mymod"), "melon")
def test_lazy_create_annotations(self):
# module objects lazy create their __annotations__ dict on demand.
# the annotations dict is stored in module.__dict__.
# a freshly created module shouldn't have an annotations dict yet.
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = foo.__annotations__
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_setting_annotations(self):
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = {'a': int}
foo.__annotations__ = d
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_annotations_getset_raises(self):
# double delete
foo = ModuleType("foo")
foo.__annotations__ = {}
del foo.__annotations__
with self.assertRaises(AttributeError):
del foo.__annotations__
    def test_annotations_are_created_correctly(self):
        # A module with annotated globals gets __annotations__ in its __dict__,
        # and the attribute can be deleted like any other module attribute.
        ann_module4 = import_helper.import_fresh_module('test.ann_module4')
        self.assertTrue("__annotations__" in ann_module4.__dict__)
        del ann_module4.__annotations__
        self.assertFalse("__annotations__" in ann_module4.__dict__)
def test_repeated_attribute_pops(self):
# Repeated accesses to module attribute will be specialized
# Check that popping the attribute doesn't break it
m = ModuleType("test")
d = m.__dict__
count = 0
for _ in range(100):
m.attr = 1
count += m.attr # Might be specialized
d.pop("attr")
self.assertEqual(count, 100)
# frozen and namespace module reprs are tested in importlib.
def test_subclass_with_slots(self):
# In 3.11alpha this crashed, as the slots weren't NULLed.
class ModuleWithSlots(ModuleType):
__slots__ = ("a", "b")
def __init__(self, name):
super().__init__(name)
m = ModuleWithSlots("name")
with self.assertRaises(AttributeError):
m.a
with self.assertRaises(AttributeError):
m.b
m.a, m.b = 1, 2
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 35.74124 | 80 | 0.597511 |
import unittest
import weakref
from test.support import gc_collect
from test.support import import_helper
from test.support.script_helper import assert_python_ok
import sys
ModuleType = type(sys)
class FullLoader:
@classmethod
def module_repr(cls, m):
return "<module '{}' (crafted)>".format(m.__name__)
class BareLoader:
pass
class ModuleTests(unittest.TestCase):
def test_uninitialized(self):
foo = ModuleType.__new__(ModuleType)
self.assertTrue(isinstance(foo.__dict__, dict))
self.assertEqual(dir(foo), [])
try:
s = foo.__name__
self.fail("__name__ = %s" % repr(s))
except AttributeError:
pass
self.assertEqual(foo.__doc__, ModuleType.__doc__)
def test_uninitialized_missing_getattr(self):
foo = ModuleType.__new__(ModuleType)
self.assertRaisesRegex(
AttributeError, "module has no attribute 'not_here'",
getattr, foo, "not_here")
def test_missing_getattr(self):
foo = ModuleType("foo")
self.assertRaisesRegex(
AttributeError, "module 'foo' has no attribute 'not_here'",
getattr, foo, "not_here")
def test_no_docstring(self):
foo = ModuleType("foo")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, None)
self.assertIs(foo.__loader__, None)
self.assertIs(foo.__package__, None)
self.assertIs(foo.__spec__, None)
self.assertEqual(foo.__dict__, {"__name__": "foo", "__doc__": None,
"__loader__": None, "__package__": None,
"__spec__": None})
def test_ascii_docstring(self):
foo = ModuleType("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_unicode_docstring(self):
foo = ModuleType("foo", "foodoc\u1234")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc\u1234")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc\u1234",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_reinit(self):
foo = ModuleType("foo", "foodoc\u1234")
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.bar, 42)
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc", "bar": 42,
"__loader__": None, "__package__": None, "__spec__": None})
self.assertTrue(foo.__dict__ is d)
def test_dont_clear_dict(self):
def f():
foo = ModuleType("foo")
foo.bar = 4
return foo
gc_collect()
self.assertEqual(f().__dict__["bar"], 4)
def test_clear_dict_in_ref_cycle(self):
destroyed = []
m = ModuleType("foo")
m.destroyed = destroyed
s = """class A:
def __init__(self, l):
self.l = l
def __del__(self):
self.l.append(1)
a = A(destroyed)"""
exec(s, m.__dict__)
del m
gc_collect()
self.assertEqual(destroyed, [1])
def test_weakref(self):
m = ModuleType("foo")
wr = weakref.ref(m)
self.assertIs(wr(), m)
del m
gc_collect()
self.assertIs(wr(), None)
def test_module_getattr(self):
import test.good_getattr as gga
from test.good_getattr import test
self.assertEqual(test, "There is test")
self.assertEqual(gga.x, 1)
self.assertEqual(gga.y, 2)
with self.assertRaisesRegex(AttributeError,
"Deprecated, use whatever instead"):
gga.yolo
self.assertEqual(gga.whatever, "There is whatever")
del sys.modules['test.good_getattr']
def test_module_getattr_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
self.assertEqual(bga.x, 1)
self.assertEqual(bad_getattr2.x, 1)
with self.assertRaises(TypeError):
bga.nope
with self.assertRaises(TypeError):
bad_getattr2.nope
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_dir(self):
import test.good_getattr as gga
self.assertEqual(dir(gga), ['a', 'b', 'c'])
del sys.modules['test.good_getattr']
def test_module_dir_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
with self.assertRaises(TypeError):
dir(bga)
with self.assertRaises(TypeError):
dir(bad_getattr2)
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_getattr_tricky(self):
from test import bad_getattr3
with self.assertRaises(AttributeError):
bad_getattr3.one
with self.assertRaises(AttributeError):
bad_getattr3.delgetattr
if 'test.bad_getattr3' in sys.modules:
del sys.modules['test.bad_getattr3']
def test_module_repr_minimal(self):
m = ModuleType('foo')
del m.__name__
self.assertEqual(repr(m), "<module '?'>")
def test_module_repr_with_name(self):
m = ModuleType('foo')
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_name_and_filename(self):
m = ModuleType('foo')
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_filename_only(self):
m = ModuleType('foo')
del m.__name__
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module '?' from '/tmp/foo.py'>")
def test_module_repr_with_loader_as_None(self):
m = ModuleType('foo')
assert m.__loader__ is None
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_bare_loader_but_no_name(self):
m = ModuleType('foo')
del m.__name__
m.__loader__ = BareLoader
loader_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_full_loader_but_no_name(self):
m = ModuleType('foo')
del m.__name__
# Yes, a class not an instance.
m.__loader__ = FullLoader
loader_repr = repr(FullLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_bare_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
module_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module 'foo' ({})>".format(module_repr))
def test_module_repr_with_full_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
self.assertEqual(
repr(m), "<module 'foo' (crafted)>")
def test_module_repr_with_bare_loader_and_filename(self):
# Because the loader has no module_repr(), use the file name.
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_full_loader_and_filename(self):
# Even though the module has an __file__, use __loader__.module_repr()
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' (crafted)>")
def test_module_repr_builtin(self):
self.assertEqual(repr(sys), "<module 'sys' (built-in)>")
def test_module_repr_source(self):
r = repr(unittest)
starts_with = "<module 'unittest' from '"
ends_with = "__init__.py'>"
self.assertEqual(r[:len(starts_with)], starts_with,
'{!r} does not start with {!r}'.format(r, starts_with))
self.assertEqual(r[-len(ends_with):], ends_with,
'{!r} does not end with {!r}'.format(r, ends_with))
def test_module_finalization_at_shutdown(self):
# Module globals and builtins should still be available during shutdown
rc, out, err = assert_python_ok("-c", "from test import final_a")
self.assertFalse(err)
lines = out.splitlines()
self.assertEqual(set(lines), {
b"x = a",
b"x = b",
b"final_a.x = a",
b"final_b.x = b",
b"len = len",
b"shutil.rmtree = rmtree"})
def test_descriptor_errors_propagate(self):
class Descr:
def __get__(self, o, t):
raise RuntimeError
class M(ModuleType):
melon = Descr()
self.assertRaises(RuntimeError, getattr, M("mymod"), "melon")
def test_lazy_create_annotations(self):
# module objects lazy create their __annotations__ dict on demand.
# the annotations dict is stored in module.__dict__.
# a freshly created module shouldn't have an annotations dict yet.
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = foo.__annotations__
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_setting_annotations(self):
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = {'a': int}
foo.__annotations__ = d
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_annotations_getset_raises(self):
foo = ModuleType("foo")
foo.__annotations__ = {}
del foo.__annotations__
with self.assertRaises(AttributeError):
del foo.__annotations__
def test_annotations_are_created_correctly(self):
ann_module4 = import_helper.import_fresh_module('test.ann_module4')
self.assertTrue("__annotations__" in ann_module4.__dict__)
del ann_module4.__annotations__
self.assertFalse("__annotations__" in ann_module4.__dict__)
def test_repeated_attribute_pops(self):
m = ModuleType("test")
d = m.__dict__
count = 0
for _ in range(100):
m.attr = 1
count += m.attr # Might be specialized
d.pop("attr")
self.assertEqual(count, 100)
# frozen and namespace module reprs are tested in importlib.
def test_subclass_with_slots(self):
# In 3.11alpha this crashed, as the slots weren't NULLed.
class ModuleWithSlots(ModuleType):
__slots__ = ("a", "b")
def __init__(self, name):
super().__init__(name)
m = ModuleWithSlots("name")
with self.assertRaises(AttributeError):
m.a
with self.assertRaises(AttributeError):
m.b
m.a, m.b = 1, 2
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
if __name__ == '__main__':
unittest.main()
| true | true |
f7217a596eab242de146ed6262830949ee89e841 | 3,214 | py | Python | tsa/links/crawl.py | chbrown/topic-sentiment-authorship | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | [
"MIT"
] | null | null | null | tsa/links/crawl.py | chbrown/topic-sentiment-authorship | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | [
"MIT"
] | null | null | null | tsa/links/crawl.py | chbrown/topic-sentiment-authorship | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import socket
import urllib.parse
from datetime import datetime
import requests
import requests.exceptions as reqexc
import sqlalchemy.exc as sqlexc
from tsa import stdoutn
from tsa.lib import html
from tsa.models import Endpoint, create_session
from tsa import logging
logger = logging.getLogger(__name__)
whitespace_translations = dict((ord(whitespace), ' ') for whitespace in '\t\n\r')
def add_url(url, parent_id=None):
    """Queue `url` for crawling, linked to an optional parent endpoint.

    URLs that violate the table's uniqueness constraint are silently skipped
    (the IntegrityError is printed and the transaction rolled back).
    """
    session = create_session()
    session.add(Endpoint(url=url, parent_id=parent_id))
    try:
        session.commit()
    except sqlexc.IntegrityError as exc:
        # A unique-constraint hit means the URL is already queued: ignore it.
        session.rollback()
        print(exc)
def process_untried_endpoints():
    """Fetch every endpoint with no recorded outcome yet.

    For each pending endpoint exactly one of three outcomes is persisted:
    a status_code (plus redirect target or extracted text content), a
    `timeout` timestamp, or an `error` timestamp. Loops until the query
    returns no more rows; each commit removes the row from the filter.
    """
    DBSession = create_session()
    # id, parent_id, url, status_code, redirect, html, content, created, accessed, timeout
    # find endpoints that aren't already fetched
    # NOTE: '== None' is intentional -- SQLAlchemy renders it as IS NULL.
    query = DBSession.query(Endpoint).\
        filter(Endpoint.status_code == None).\
        filter(Endpoint.timeout == None).\
        filter(Endpoint.error == None).\
        order_by(Endpoint.id)
    logger.info('Processing %d untried endpoints', query.count())
    while True:
        # Re-run the query each pass: committed rows drop out of the filter.
        endpoint = query.first()
        if not endpoint:
            break
        print(endpoint.id, endpoint.url)
        # one of three things happens:
        try:
            # 1. set status_code
            get = requests.get(endpoint.url, allow_redirects=False, timeout=10)
            endpoint.status_code = get.status_code
            endpoint.accessed = datetime.utcnow()
            if get.status_code in [301, 302, 303]:
                endpoint.redirect = get.headers['location']
                # and add the result to the queue:
                add_url(endpoint.redirect, endpoint.id)
            else:
                endpoint.html = get.text
                # remove boilerplate from html
                endpoint.content = html.to_text(endpoint.html)
        except (socket.timeout, reqexc.Timeout):
            # 2. set endpoint.timeout
            endpoint.timeout = datetime.utcnow()
        except (reqexc.ConnectionError, reqexc.SSLError, reqexc.MissingSchema,
                reqexc.InvalidURL, reqexc.URLRequired):
            # 3. set endpoint.error
            endpoint.error = datetime.utcnow()
        except Exception:
            # Unexpected failure: surface the offending URL, then re-raise.
            print(endpoint.url)
            raise
        DBSession.commit()
def tabulate(endpoints):
    """Print one TSV row per endpoint: id, redirect trail, domain, cleaned text.

    Parameters:
    ----------
    endpoints : iterable of Endpoint
        Endpoints whose content has already been fetched.
    """
    stdoutn('endpoint_id\turls\tdomain\ttext')
    # Cap the text column. Must be an int: 65536/2 is a float in Python 3 and
    # a float slice index raises TypeError.
    max_len = 65536 // 2 - 10
    for endpoint in endpoints:
        trail = ' -> '.join(endpoint.trail())
        domain = urllib.parse.urlparse(endpoint.url).netloc
        # Strip only a literal 'www.' prefix. str.lstrip('www.') strips a
        # *character set*, mangling e.g. 'web.example.com' -> 'eb.example.com'.
        if domain.startswith('www.'):
            domain = domain[4:]
        text = endpoint.content.translate(whitespace_translations)
        line = '\t'.join([str(endpoint.id), trail, domain, text[:max_len]])
        stdoutn(line)
def analyze_content_length(endpoints):
    """Log the mean and median length of endpoint content.

    Parameters:
    ----------
    endpoints : iterable of Endpoint
        Endpoints whose `content` has been populated.
    """
    lengths = [len(endpoint.content) for endpoint in endpoints]
    if not lengths:
        # Avoid ZeroDivisionError / IndexError on an empty input.
        logger.info('endpoint content length: no endpoints')
        return
    mean = float(sum(lengths)) / float(len(lengths))
    # Integer division: `len(lengths) / 2` is a float in Python 3 and float
    # list indices raise TypeError.
    median = sorted(lengths)[len(lengths) // 2]
    logger.info('endpoint content length: mean=%0.3f median=%0.1f', mean, median)
| 32.14 | 90 | 0.641257 |
import socket
import urllib.parse
from datetime import datetime
import requests
import requests.exceptions as reqexc
import sqlalchemy.exc as sqlexc
from tsa import stdoutn
from tsa.lib import html
from tsa.models import Endpoint, create_session
from tsa import logging
logger = logging.getLogger(__name__)
whitespace_translations = dict((ord(whitespace), ' ') for whitespace in '\t\n\r')
def add_url(url, parent_id=None):
DBSession = create_session()
endpoint = Endpoint(url=url, parent_id=parent_id)
DBSession.add(endpoint)
try:
DBSession.commit()
except sqlexc.IntegrityError as exc:
DBSession.rollback()
print(exc)
def process_untried_endpoints():
DBSession = create_session()
query = DBSession.query(Endpoint).\
filter(Endpoint.status_code == None).\
filter(Endpoint.timeout == None).\
filter(Endpoint.error == None).\
order_by(Endpoint.id)
logger.info('Processing %d untried endpoints', query.count())
while True:
endpoint = query.first()
if not endpoint:
break
print(endpoint.id, endpoint.url)
# one of three things happens:
try:
# 1. set status_code
get = requests.get(endpoint.url, allow_redirects=False, timeout=10)
endpoint.status_code = get.status_code
endpoint.accessed = datetime.utcnow()
if get.status_code in [301, 302, 303]:
endpoint.redirect = get.headers['location']
# and add the result to the queue:
add_url(endpoint.redirect, endpoint.id)
else:
endpoint.html = get.text
# remove boilerplate from html
endpoint.content = html.to_text(endpoint.html)
except (socket.timeout, reqexc.Timeout):
# 2. set endpoint.timeout
endpoint.timeout = datetime.utcnow()
except (reqexc.ConnectionError, reqexc.SSLError, reqexc.MissingSchema,
reqexc.InvalidURL, reqexc.URLRequired):
# 3. set endpoint.error
endpoint.error = datetime.utcnow()
except Exception:
print(endpoint.url)
raise
DBSession.commit()
def tabulate(endpoints):
stdoutn('endpoint_id\turls\tdomain\ttext')
max_len = 65536/2 - 10
for endpoint in endpoints:
trail = ' -> '.join(endpoint.trail())
domain = urllib.parse.urlparse(endpoint.url).netloc.lstrip('www.')
text = endpoint.content.translate(whitespace_translations)
line = '\t'.join([str(endpoint.id), trail, domain, text[:max_len]])
stdoutn(line)
def analyze_content_length(endpoints):
lengths = []
for endpoint in endpoints:
lengths += [len(endpoint.content)]
# for percentile in range(
mean = float(sum(lengths)) / float(len(lengths))
median = sorted(lengths)[len(lengths) / 2]
logger.info('endpoint content length: mean=%0.3f median=%0.1f', mean, median)
| true | true |
f7217b021c92c57203280273bd959699cf6039c7 | 46,777 | py | Python | learningTolearn/backbone/common.py | ximingxing/Learning-To-Learn | 0135cb41521a61d1f3248cf3fe409e51f824fe25 | [
"MIT"
] | 5 | 2019-12-01T02:52:39.000Z | 2020-10-20T01:51:40.000Z | learningTolearn/backbone/common.py | ximingxing/DeepLearningWithPytorch | 0135cb41521a61d1f3248cf3fe409e51f824fe25 | [
"MIT"
] | 1 | 2019-11-18T13:26:50.000Z | 2019-11-18T13:26:50.000Z | learningTolearn/backbone/common.py | ximingxing/Learning-To-Learn | 0135cb41521a61d1f3248cf3fe409e51f824fe25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Description : Common routines for models in PyTorch.
Author : xxm
"""
# Public API of this module. Note: 'conv5x5_block' and 'MetaConvBlock' are
# defined below and were previously missing from the export list, breaking
# `from ... import *` access to them.
__all__ = ['round_channels', 'Identity', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3',
           'depthwise_conv3x3', 'ConvBlock', 'MetaConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block',
           'conv7x7_block', 'dwconv_block',
           'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block',
           'pre_conv3x3_block', 'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN',
           'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricSequential', 'ParametricConcurrent',
           'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchmeta.modules import MetaModule, MetaSequential, MetaConv2d, MetaBatchNorm2d
def round_channels(channels,
                   divisor=8):
    """
    Round weighted channel number (make divisible operation).

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns
    -------
    int
        Weighted number of channels.
    """
    # Round to the nearest multiple of `divisor`, never dropping below it.
    rounded = max(divisor, int(channels + divisor / 2.0) // divisor * divisor)
    # Never round down by more than 10%.
    if rounded < 0.9 * channels:
        rounded += divisor
    return rounded
class Identity(nn.Module):
    """
    Identity block: passes its input through unchanged.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Deliberate no-op; useful as a placeholder layer.
        return x
class Swish(nn.Module):
    """
    Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
    """

    def forward(self, x):
        # swish(x) = x * sigmoid(x)
        return torch.mul(x, torch.sigmoid(x))
class HSigmoid(nn.Module):
    """
    Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    """

    def forward(self, x):
        # hsigmoid(x) = relu6(x + 3) / 6 -- piecewise-linear sigmoid surrogate.
        shifted = F.relu6(x + 3.0, inplace=True)
        return shifted / 6.0
class HSwish(nn.Module):
    """
    H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    inplace : bool
        Whether to use inplace version of the module.
    """

    def __init__(self, inplace=False):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        # hswish(x) = x * relu6(x + 3) / 6
        gate = F.relu6(x + 3.0, inplace=self.inplace)
        return x * gate / 6.0
def get_activation_layer(activation):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    activation : function, or str, or nn.Module
        Activation function or name of activation function.

    Returns
    -------
    nn.Module
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        # Factory callable: invoke it to construct the layer.
        return activation()
    if not isinstance(activation, str):
        # Anything else must already be a constructed module.
        assert (isinstance(activation, nn.Module))
        return activation
    # Map a well-known name to a freshly constructed layer.
    if activation == "relu":
        return nn.ReLU(inplace=True)
    if activation == "relu6":
        return nn.ReLU6(inplace=True)
    if activation == "swish":
        return Swish()
    if activation == "hswish":
        return HSwish(inplace=True)
    if activation == "sigmoid":
        return nn.Sigmoid()
    if activation == "hsigmoid":
        return HSigmoid()
    if activation == "identity":
        return Identity()
    raise NotImplementedError()
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """
    Convolution 1x1 (pointwise) layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1,
                     stride=stride,
                     groups=groups,
                     bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """
    Convolution 3x3 layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=3,
                     stride=stride,
                     padding=padding,
                     dilation=dilation,
                     groups=groups,
                     bias=bias)
def depthwise_conv3x3(channels,
                      stride):
    """
    Depthwise convolution 3x3 layer.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    # groups == channels gives one filter per channel, i.e. depthwise conv.
    return nn.Conv2d(channels, channels,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     groups=channels,
                     bias=False)
class ConvBlock(nn.Module):
    """
    Standard convolution block: Conv2d, optional BatchNorm, optional activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables it.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super().__init__()
        self.activate = activation is not None
        self.use_bn = use_bn
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            # Resolve the string/factory/module into a concrete layer.
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.activate:
            out = self.activ(out)
        return out
class MetaConvBlock(MetaModule):
    """
    Meta convolution block with Batch normalization and activation.

    Mirrors ConvBlock, but is built from torchmeta Meta* layers so that an
    external parameter OrderedDict ("fast weights", MAML-style) can be routed
    through `forward` instead of the module's own parameters.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(MetaConvBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn

        self.conv = MetaConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = MetaBatchNorm2d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x, params=None):
        # `params` is an optional OrderedDict of fast weights; get_subdict
        # selects the entries belonging to each child module. With
        # params=None the modules fall back to their own parameters.
        x = self.conv(x, params=self.get_subdict(params, 'conv'))
        if self.use_bn:
            x = self.bn(x, params=self.get_subdict(params, 'bn'))
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """
    1x1 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    mode : str, default ''
        'maml' selects the torchmeta-based MetaConvBlock; anything else
        selects the plain ConvBlock.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """
    3x3 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    mode : str, default ''
        'maml' selects the torchmeta-based MetaConvBlock; anything else
        selects the plain ConvBlock.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='',
                  use_bn=True):
    """
    5x5 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    mode : str, default ''
        'maml' selects the torchmeta-based MetaConvBlock; anything else
        selects the plain ConvBlock.
    use_bn : bool, default True
        Whether to use BatchNorm layer. Added for parity with
        conv1x1_block/conv3x3_block; appended at the end of the signature so
        existing positional callers keep working.
    """
    # Matches the sibling factories: pick the block class once, call it once.
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  bias=False,
                  use_bn=True,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='maml'):
    """
    7x7 version of the standard convolution block.

    NOTE(review): unlike the sibling factories (conv1x1_block/conv3x3_block,
    default mode=''), the default here is mode='maml', and the 'maml' branch
    wraps the block in MetaSequential rather than returning it directly --
    presumably deliberate for the meta-learning network stem, but worth
    confirming against callers. Also lacks the bn_eps parameter the siblings
    expose.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    mode : str, default 'maml'
        'maml' builds a MetaSequential-wrapped MetaConvBlock; anything else
        a plain ConvBlock.
    """
    if mode == 'maml':
        return MetaSequential(MetaConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=stride,
            padding=padding,
            bias=bias,
            use_bn=use_bn,
            activation=activation))
    else:
        return ConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=stride,
            padding=padding,
            bias=bias,
            use_bn=use_bn,
            activation=activation)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
    """
    Depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    # groups == out_channels makes the convolution depthwise (one filter per
    # channel); requires in_channels == out_channels in practice.
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv3x3_block(in_channels, out_channels, stride=1, padding=1,
                    dilation=1, bias=False, bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise version of the standard convolution block.

    Thin wrapper around ``dwconv_block`` with ``kernel_size`` fixed to 3;
    see ``dwconv_block`` for the parameter descriptions.
    """
    return dwconv_block(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def dwconv5x5_block(in_channels, out_channels, stride=1, padding=2,
                    dilation=1, bias=False, bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 depthwise version of the standard convolution block.

    Thin wrapper around ``dwconv_block`` with ``kernel_size`` fixed to 5 and a
    default padding of 2; see ``dwconv_block`` for the parameter descriptions.
    """
    return dwconv_block(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
class DwsConvBlock(nn.Module):
    """
    Depthwise separable convolution block: a depthwise convolution followed by
    a pointwise (1x1) convolution, each with its own BatchNorm and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the depthwise stage.
    stride : int or tuple/list of 2 int
        Strides of the depthwise convolution.
    padding : int or tuple/list of 2 int
        Padding of the depthwise convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation of the depthwise convolution.
    bias : bool, default False
        Whether both convolutions use a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the pointwise convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: nn.ReLU(inplace=True)),
                 pw_activation=(lambda: nn.ReLU(inplace=True))):
        super(DwsConvBlock, self).__init__()
        # Depthwise stage keeps the channel count; pointwise stage mixes channels.
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=dw_activation)
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=pw_activation)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
def dwsconv3x3_block(in_channels, out_channels, stride=1, padding=1,
                     dilation=1, bias=False, bn_eps=1e-5,
                     dw_activation=(lambda: nn.ReLU(inplace=True)),
                     pw_activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise separable version of the standard convolution block.

    Thin wrapper around ``DwsConvBlock`` with ``kernel_size`` fixed to 3;
    see ``DwsConvBlock`` for the parameter descriptions.
    """
    return DwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        dw_activation=dw_activation,
        pw_activation=pw_activation)
class PreConvBlock(nn.Module):
    """
    Convolution block with Batch normalization and ReLU pre-activation
    (BN -> ReLU -> Conv), as used by PreResNet.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor. It's used by PreResNet.
    activate : bool, default True
        Whether to apply the ReLU after BatchNorm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        preact = x
        x = self.conv(x)
        return (x, preact) if self.return_preact else x
def pre_conv1x1_block(in_channels, out_channels, stride=1, bias=False,
                      return_preact=False, activate=True):
    """
    1x1 version of the pre-activated convolution block.

    Thin wrapper around ``PreConvBlock`` with ``kernel_size`` fixed to 1 and
    ``padding`` to 0; see ``PreConvBlock`` for the parameter descriptions.
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      return_preact=False,
                      activate=True,
                      bias=False):
    """
    3x3 version of the pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    bias : bool, default False
        Whether the layer uses a bias vector. Appended at the end of the
        signature (for positional backward compatibility) so this helper can
        configure bias like ``pre_conv1x1_block`` already does.
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        return_preact=return_preact,
        activate=activate)
class InterpolationBlock(nn.Module):
    """
    Interpolation upsampling block.

    Parameters:
    ----------
    scale_factor : float
        Multiplier for spatial size.
    mode : str, default 'bilinear'
        Algorithm used for upsampling.
    align_corners : bool, default True
        Whether to align the corner pixels of the input and output tensors.
        Only forwarded for modes that support it (passed as None otherwise).
    """
    def __init__(self,
                 scale_factor,
                 mode="bilinear",
                 align_corners=True):
        super(InterpolationBlock, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        # F.interpolate raises an error when align_corners is given for
        # 'nearest'/'area' modes, so only forward it for the modes that
        # actually support corner alignment.
        if self.mode in ("linear", "bilinear", "bicubic", "trilinear"):
            align_corners = self.align_corners
        else:
            align_corners = None
        return F.interpolate(
            input=x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=align_corners)

    def __repr__(self):
        s = '{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})'
        return s.format(
            name=self.__class__.__name__,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)

    def calc_flops(self, x):
        """Rough FLOPs/MACs estimate for one interpolation of batch-1 input x."""
        assert (x.shape[0] == 1)
        if self.mode == "bilinear":
            num_flops = 9 * x.numel()
        else:
            num_flops = 4 * x.numel()
        num_macs = 0
        return num_flops, num_macs
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Tensor
        Input tensor of shape (batch, channels, height, width); ``channels``
        must be divisible by ``groups``.
    groups : int
        Number of groups.

    Returns
    -------
    Tensor
        Tensor with channels interleaved across groups.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    # Split channels into (groups, per_group), swap the two axes, re-flatten.
    shuffled = x.view(batch, groups, per_group, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle(nn.Module):
    """
    Channel shuffle layer. This is a wrapper over ``channel_shuffle`` that
    stores the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by ``groups``.
    groups : int
        Number of groups.

    Raises
    ------
    ValueError
        If ``channels`` is not divisible by ``groups``.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083. The alternative version: the sub-channel
    axis comes first, so the permutation is the inverse of ``channel_shuffle``.

    Parameters:
    ----------
    x : Tensor
        Input tensor of shape (batch, channels, height, width); ``channels``
        must be divisible by ``groups``.
    groups : int
        Number of groups.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    shuffled = x.view(batch, per_group, groups, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle2(nn.Module):
    """
    Channel shuffle layer (alternative version). This is a wrapper over
    ``channel_shuffle2`` that stores the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by ``groups``.
    groups : int
        Number of groups.

    Raises
    ------
    ValueError
        If ``channels`` is not divisible by ``groups``.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    mid_activation : function, or str, or nn.Module, default nn.ReLU(inplace=True)
        Activation function after the first convolution.
    out_activation : function, or str, or nn.Module, default nn.Sigmoid()
        Activation function after the last convolution (the channel gate).
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=(lambda: nn.ReLU(inplace=True)),
                 out_activation=(lambda: nn.Sigmoid())):
        super(SEBlock, self).__init__()
        # Bottleneck width of the excitation MLP, optionally rounded to a multiple of 8.
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.activ = get_activation_layer(mid_activation)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=channels,
            bias=True)
        self.sigmoid = get_activation_layer(out_activation)
    def forward(self, x):
        # Squeeze: global average pool to 1x1; excite: per-channel gate applied to x.
        w = self.pool(x)
        w = self.conv1(w)
        w = self.activ(w)
        w = self.conv2(w)
        w = self.sigmoid(w)
        x = x * w
        return x
class IBN(nn.Module):
    """
    Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    channels : int
        Number of channels.
    first_fraction : float, default 0.5
        The first fraction of channels for normalization.
    inst_first : bool, default True
        Whether instance normalization be on the first part of channels.
    """
    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        # Split the channel axis into two contiguous halves of sizes h1/h2.
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        if self.inst_first:
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h1_channels,
                affine=True)
            self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
        else:
            self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h2_channels,
                affine=True)
    def forward(self, x):
        # Normalize each half with its own scheme, then re-concatenate.
        x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
        if self.inst_first:
            x1 = self.inst_norm(x1.contiguous())
            x2 = self.batch_norm(x2.contiguous())
        else:
            x1 = self.batch_norm(x1.contiguous())
            x2 = self.inst_norm(x2.contiguous())
        x = torch.cat((x1, x2), dim=1)
        return x
class DualPathSequential(nn.Sequential):
    """
    A sequential container for modules with dual inputs/outputs.
    Modules will be executed in the order they are added.
    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first modules with single input/output.
    last_ordinals : int, default 0
        Number of the final modules with single input/output.
    dual_path_scheme : function
        Scheme of dual path response for a module.
    dual_path_scheme_ordinal : function
        Scheme of dual path response for an ordinal module.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal
    def forward(self, x1, x2=None):
        length = len(self._modules.values())
        for i, module in enumerate(self._modules.values()):
            # The first `first_ordinals` and last `last_ordinals` children are
            # ordinary single-input modules: x2 passes through unchanged.
            if (i < self.first_ordinals) or (i >= length - self.last_ordinals):
                x1, x2 = self.dual_path_scheme_ordinal(module, x1, x2)
            else:
                x1, x2 = self.dual_path_scheme(module, x1, x2)
        if self.return_two:
            return x1, x2
        else:
            return x1
class Concurrent(nn.Sequential):
    """
    A container that feeds the same input to every child module and
    concatenates (or stacks) their outputs.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    """
    def __init__(self,
                 axis=1,
                 stack=False):
        super(Concurrent, self).__init__()
        self.axis = axis
        self.stack = stack

    def forward(self, x):
        branch_outs = tuple(branch(x) for branch in self._modules.values())
        combine = torch.stack if self.stack else torch.cat
        return combine(branch_outs, dim=self.axis)
class SequentialConcurrent(nn.Sequential):
    """
    A sequential container with concatenated outputs: children run in order,
    and every intermediate result (optionally preceded by the input itself)
    is concatenated or stacked into the final output.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    cat_input : bool, default True
        Whether to concatenate input tensor.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True):
        super(SequentialConcurrent, self).__init__()
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def forward(self, x):
        collected = [x] if self.cat_input else []
        for stage in self._modules.values():
            x = stage(x)
            collected.append(x)
        combine = torch.stack if self.stack else torch.cat
        return combine(tuple(collected), dim=self.axis)
class ParametricSequential(nn.Sequential):
    """
    A sequential container for modules that accept extra keyword arguments.
    Modules will be executed in the order they are added, each receiving the
    same **kwargs alongside the running tensor.
    """
    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        for stage in self._modules.values():
            x = stage(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """
    A container that feeds the same input (and **kwargs) to every child module
    and concatenates their outputs.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        branch_outs = tuple(branch(x, **kwargs) for branch in self._modules.values())
        return torch.cat(branch_outs, dim=self.axis)
class Hourglass(nn.Module):
    """
    A hourglass block.
    Parameters:
    ----------
    down_seq : nn.Sequential
        Down modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip_seq : nn.Sequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs.
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        assert (len(up_seq) == len(down_seq))
        assert (len(skip_seq) == len(down_seq))
        assert (merge_type in ["add"])
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.depth = len(down_seq)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq
    def forward(self, x, **kwargs):
        y = None
        # Encoder pass: remember the input plus every downsampled feature map.
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        # Decoder pass: on each step, merge the matching encoder output
        # (transformed by its skip module) into the upsampled stream.
        for i in range(len(down_outs)):
            if i != 0:
                # down_outs[self.depth - i] is the encoder feature at the
                # resolution the decoder has just reached.
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
                # A skip module may yield None to disable the merge for a level.
                if (y is not None) and (self.merge_type == "add"):
                    x = x + y
            if i != len(down_outs) - 1:
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """
    A sesquialteral hourglass block.
    Parameters:
    ----------
    down1_seq : nn.Sequential
        The first down modules as sequential.
    skip1_seq : nn.Sequential
        The first skip connection modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip2_seq : nn.Sequential
        The second skip connection modules as sequential.
    down2_seq : nn.Sequential
        The second down modules as sequential.
    merge_type : str, default 'cat'
        Type of concatenation of up and skip outputs.
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq
    def _merge(self, x, y):
        # Combine an upsampled/downsampled stream with a stored skip output.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def forward(self, x, **kwargs):
        # First descent: store a skip output at every resolution (depth+1 of them).
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        x = skip1_outs[self.depth]
        # Ascent: merge first-pass skips while recording second-pass skips.
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Second descent: merge second-pass skips on the way back down.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """
    A sequential container that, besides the final output, also returns the
    intermediate outputs of children flagged with a truthy ``do_output``
    attribute. The result is ``[final_output, flagged_output_1, ...]``.
    """
    def __init__(self):
        super(MultiOutputSequential, self).__init__()

    def forward(self, x):
        flagged = []
        for stage in self._modules.values():
            x = stage(x)
            if getattr(stage, "do_output", False):
                flagged.append(x)
        return [x] + flagged
class Flatten(nn.Module):
    """
    Flatten all dimensions of the input except the batch dimension.
    """
    def forward(self, x):
        # reshape handles non-contiguous inputs (where view would raise) and
        # still returns a view whenever possible, so the contiguous path is
        # unchanged.
        return x.reshape(x.size(0), -1)
| 31.122422 | 120 | 0.579195 |
# Public API of this common-building-blocks module.
__all__ = ['round_channels', 'Identity', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3',
           'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv7x7_block', 'dwconv_block',
           'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block',
           'pre_conv3x3_block', 'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN',
           'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricSequential', 'ParametricConcurrent',
           'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchmeta.modules import MetaModule, MetaSequential, MetaConv2d, MetaBatchNorm2d
def round_channels(channels,
                   divisor=8):
    """
    Round a channel count to the nearest multiple of ``divisor`` (at least
    ``divisor``), bumping up one step if rounding would lose more than 10%.
    """
    quotient = int(channels + divisor / 2.0) // divisor
    rounded = max(quotient * divisor, divisor)
    # Never round down by more than 10% of the requested width.
    if float(rounded) < 0.9 * channels:
        rounded += divisor
    return rounded
class Identity(nn.Module):
    """
    Pass-through module: returns its input unchanged.
    """
    def forward(self, x):
        return x
class Swish(nn.Module):
    """
    Swish activation: ``x * sigmoid(x)`` (https://arxiv.org/abs/1710.05941).
    """
    def forward(self, x):
        return torch.sigmoid(x) * x
class HSigmoid(nn.Module):
    """
    Approximated (hard) sigmoid: ``relu6(x + 3) / 6``.
    """
    def forward(self, x):
        shifted = x + 3.0
        return F.relu6(shifted, inplace=True) / 6.0
class HSwish(nn.Module):
    """
    Hard-swish activation: ``x * relu6(x + 3) / 6``
    (https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    inplace : bool, default False
        Whether relu6 operates in place on the shifted tensor.
    """
    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3.0, inplace=self.inplace) / 6.0
        return x * gate
def get_activation_layer(activation):
    """
    Instantiate an activation layer from a factory function, a string name, or
    an already-constructed module.

    Parameters:
    ----------
    activation : function or str or nn.Module
        Activation factory, activation name, or activation module instance.

    Returns
    -------
    nn.Module
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        return activation()
    if isinstance(activation, str):
        # Lazy factories: only the requested class is instantiated, and an
        # unknown name now fails with a message naming the bad input instead
        # of a bare NotImplementedError.
        factories = {
            "relu": lambda: nn.ReLU(inplace=True),
            "relu6": lambda: nn.ReLU6(inplace=True),
            "swish": lambda: Swish(),
            "hswish": lambda: HSwish(inplace=True),
            "sigmoid": lambda: nn.Sigmoid(),
            "hsigmoid": lambda: HSigmoid(),
            "identity": lambda: Identity(),
        }
        if activation not in factories:
            raise NotImplementedError("Unsupported activation: {}".format(activation))
        return factories[activation]()
    assert (isinstance(activation, nn.Module))
    return activation
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """
    1x1 (pointwise) version of the standard convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size=1,
                     stride=stride, groups=groups, bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """
    3x3 version of the standard convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding, dilation=dilation,
                     groups=groups, bias=bias)
def depthwise_conv3x3(channels,
                      stride,
                      padding=1,
                      dilation=1,
                      bias=False):
    """
    Depthwise 3x3 convolution layer (one filter per channel, groups == channels).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer (previously hard-coded to 1).
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector (previously hard-coded to False).
    """
    return nn.Conv2d(
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=channels,
        bias=bias)
class ConvBlock(nn.Module):
    """
    Standard convolution block: Conv2d, optional BatchNorm, optional activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables it.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
    def forward(self, x):
        # Conv -> (BN) -> (activation)
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
class MetaConvBlock(MetaModule):
    """
    Meta-learning-aware mirror of ConvBlock built from torchmeta modules, so
    per-call parameter overrides can be passed through ``params`` (MAML-style).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables it.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(MetaConvBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = MetaConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = MetaBatchNorm2d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
    def forward(self, x, params=None):
        # Each submodule receives its own slice of the (optional) params dict.
        x = self.conv(x, params=self.get_subdict(params, 'conv'))
        if self.use_bn:
            x = self.bn(x, params=self.get_subdict(params, 'bn'))
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """
    1x1 version of the standard convolution block.

    ``mode == 'maml'`` selects the torchmeta-based MetaConvBlock; any other
    value selects the plain ConvBlock. See ``ConvBlock`` for the remaining
    parameter descriptions.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """
    3x3 version of the standard convolution block.

    ``mode == 'maml'`` selects the torchmeta-based MetaConvBlock; any other
    value selects the plain ConvBlock. See ``ConvBlock`` for the remaining
    parameter descriptions.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='',
                  use_bn=True):
    """
    5x5 version of the standard convolution block.

    ``mode == 'maml'`` selects the torchmeta-based MetaConvBlock; any other
    value selects the plain ConvBlock. ``use_bn`` (appended at the end of the
    signature for positional backward compatibility) was previously missing
    here, unlike the 1x1/3x3 variants; its default True matches the old
    behavior. See ``ConvBlock`` for the remaining parameter descriptions.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  bias=False,
                  use_bn=True,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='maml',
                  bn_eps=1e-5):
    """
    7x7 version of the standard convolution block.

    NOTE(review): unlike the other conv*_block helpers here, the default mode
    is 'maml' and the meta branch is wrapped in MetaSequential — kept as-is
    for backward compatibility; confirm with callers before unifying.
    ``bn_eps`` (appended for positional backward compatibility) lets callers
    configure the BatchNorm epsilon like the other variants.
    """
    if mode == 'maml':
        return MetaSequential(MetaConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=stride,
            padding=padding,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation))
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=stride,
        padding=padding,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
    """
    Depthwise version of the standard convolution block: a ConvBlock with
    one group per output channel (groups == out_channels).
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise version of the standard convolution block; see
    ``dwconv_block`` for the parameter descriptions.
    """
    return dwconv_block(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=2,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 depthwise version of the standard convolution block; see
    ``dwconv_block`` for the parameter descriptions.
    """
    return dwconv_block(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
class DwsConvBlock(nn.Module):
    """
    Depthwise separable convolution block: a depthwise convolution followed by
    a pointwise (1x1) convolution, each with its own BatchNorm and activation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: nn.ReLU(inplace=True)),
                 pw_activation=(lambda: nn.ReLU(inplace=True))):
        super(DwsConvBlock, self).__init__()
        # Depthwise stage keeps the channel count; pointwise stage mixes channels.
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=dw_activation)
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=pw_activation)
    def forward(self, x):
        x = self.dw_conv(x)
        x = self.pw_conv(x)
        return x
def dwsconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=1,
                     dilation=1,
                     bias=False,
                     bn_eps=1e-5,
                     dw_activation=(lambda: nn.ReLU(inplace=True)),
                     pw_activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise separable version of the standard convolution block; see
    ``DwsConvBlock`` for the parameter descriptions.
    """
    return DwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        dw_activation=dw_activation,
        pw_activation=pw_activation)
class PreConvBlock(nn.Module):
    """
    Convolution block with Batch normalization and ReLU pre-activation
    (BN -> ReLU -> Conv), as used by PreResNet. When ``return_preact`` is
    True, ``forward`` also returns the pre-activation tensor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Keep the pre-convolution tensor for the caller's residual path.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      bias=False,
                      return_preact=False,
                      activate=True):
    """
    1x1 version of the pre-activated convolution block; see ``PreConvBlock``
    for the parameter descriptions.
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      return_preact=False,
                      activate=True):
    """
    3x3 version of the pre-activated convolution block; see ``PreConvBlock``
    for the parameter descriptions. Bias is always disabled here (PreConvBlock
    default).
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        return_preact=return_preact,
        activate=activate)
class InterpolationBlock(nn.Module):
    """
    Interpolation upsampling block.

    Parameters:
    ----------
    scale_factor : float
        Multiplier for spatial size.
    mode : str, default 'bilinear'
        Algorithm used for upsampling.
    align_corners : bool, default True
        Whether to align the corner pixels of the input and output tensors.
        Only forwarded for modes that support it (passed as None otherwise).
    """
    def __init__(self,
                 scale_factor,
                 mode="bilinear",
                 align_corners=True):
        super(InterpolationBlock, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        # F.interpolate raises an error when align_corners is given for
        # 'nearest'/'area' modes, so only forward it for the modes that
        # actually support corner alignment.
        if self.mode in ("linear", "bilinear", "bicubic", "trilinear"):
            align_corners = self.align_corners
        else:
            align_corners = None
        return F.interpolate(
            input=x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=align_corners)

    def __repr__(self):
        s = '{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})'
        return s.format(
            name=self.__class__.__name__,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)

    def calc_flops(self, x):
        """Rough FLOPs/MACs estimate for one interpolation of batch-1 input x."""
        assert (x.shape[0] == 1)
        if self.mode == "bilinear":
            num_flops = 9 * x.numel()
        else:
            num_flops = 4 * x.numel()
        num_macs = 0
        return num_flops, num_macs
def channel_shuffle(x,
                    groups):
    """Shuffle the channel dimension of NCHW tensor ``x`` across ``groups``.

    Channels are reshaped to (groups, channels // groups), the two axes are
    swapped, and the result is flattened back, interleaving the groups.
    """
    batch, channels, height, width = x.size()
    x = x.view(batch, groups, channels // groups, height, width)
    x = x.transpose(1, 2).contiguous()
    return x.view(batch, channels, height, width)
class ChannelShuffle(nn.Module):
    """Module form of :func:`channel_shuffle`.

    Validates divisibility of ``channels`` by ``groups`` at construction.
    """

    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """Variant of :func:`channel_shuffle` with the reshape axes swapped.

    Reshapes channels to (channels // groups, groups) before the transpose,
    which applies the inverse permutation of :func:`channel_shuffle`.
    """
    batch, channels, height, width = x.size()
    x = x.view(batch, channels // groups, groups, height, width)
    x = x.transpose(1, 2).contiguous()
    return x.view(batch, channels, height, width)
class ChannelShuffle2(nn.Module):
    """Module form of :func:`channel_shuffle2`.

    Validates divisibility of ``channels`` by ``groups`` at construction.
    """

    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel-attention block.

    Global-average-pools the input, runs the pooled vector through a
    1x1-conv bottleneck (channels -> mid -> channels), and rescales the
    input channel-wise by the resulting gate.

    Parameters
    ----------
    channels : int
        Number of input/output channels.
    reduction : int
        Bottleneck reduction factor for the mid channel count.
    round_mid : bool
        If True, the mid channel count is computed by `round_channels`
        (a file-level helper; presumably rounds to a friendly multiple --
        confirm at its definition) instead of integer division.
    mid_activation, out_activation
        Specs accepted by the file-level `get_activation_layer` helper
        (zero-arg factories by default) for the two activations.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=(lambda: nn.ReLU(inplace=True)),
                 out_activation=(lambda: nn.Sigmoid())):
        super(SEBlock, self).__init__()
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        # Squeeze: collapse each channel's spatial extent to a single value.
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        # `conv1x1` is a file-level helper; bias=True since these convs act
        # on 1x1 pooled features (i.e. as fully-connected layers).
        self.conv1 = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.activ = get_activation_layer(mid_activation)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=channels,
            bias=True)
        self.sigmoid = get_activation_layer(out_activation)
    def forward(self, x):
        # Squeeze: (N, C, H, W) -> (N, C, 1, 1).
        w = self.pool(x)
        # Excite: bottleneck implemented with 1x1 convolutions.
        w = self.conv1(w)
        w = self.activ(w)
        w = self.conv2(w)
        w = self.sigmoid(w)
        # Scale: channel-wise gating, broadcast over H and W.
        x = x * w
        return x
class IBN(nn.Module):
    """Instance-Batch Normalization layer (IBN-Net style).

    Splits the channels into two parts and normalizes one part with
    ``InstanceNorm2d`` (affine) and the other with ``BatchNorm2d``.

    Parameters
    ----------
    channels : int
        Total number of input channels.
    first_fraction : float
        Fraction of channels assigned to the first split.
    inst_first : bool
        If True, instance norm handles the first split and batch norm the
        second; otherwise the roles are swapped.
    """

    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        first_count = int(math.floor(channels * first_fraction))
        second_count = channels - first_count
        self.split_sections = [first_count, second_count]
        # Submodules are created in the same order as the splits they serve,
        # keeping parameter registration order unchanged.
        if self.inst_first:
            self.inst_norm = nn.InstanceNorm2d(
                num_features=first_count,
                affine=True)
            self.batch_norm = nn.BatchNorm2d(num_features=second_count)
        else:
            self.batch_norm = nn.BatchNorm2d(num_features=first_count)
            self.inst_norm = nn.InstanceNorm2d(
                num_features=second_count,
                affine=True)

    def forward(self, x):
        first_part, second_part = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
        if self.inst_first:
            first_part = self.inst_norm(first_part.contiguous())
            second_part = self.batch_norm(second_part.contiguous())
        else:
            first_part = self.batch_norm(first_part.contiguous())
            second_part = self.inst_norm(second_part.contiguous())
        return torch.cat((first_part, second_part), dim=1)
class DualPathSequential(nn.Sequential):
    """Sequential container whose modules operate on a pair of inputs.

    Modules at ordinal positions (the first ``first_ordinals`` and the last
    ``last_ordinals``) are applied through ``dual_path_scheme_ordinal``
    (default: transform only the first input); all others go through
    ``dual_path_scheme`` (default: the module consumes both inputs).
    ``forward`` returns both values, or only the first when
    ``return_two`` is False.
    """

    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def forward(self, x1, x2=None):
        children = list(self._modules.values())
        total = len(children)
        for index, child in enumerate(children):
            is_ordinal = (index < self.first_ordinals
                          or index >= total - self.last_ordinals)
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(child, x1, x2)
        return (x1, x2) if self.return_two else x1
class Concurrent(nn.Sequential):
    """Parallel container: feeds the same input to every child and merges
    the branch outputs along ``axis`` by concatenation (or stacking).
    """

    def __init__(self,
                 axis=1,
                 stack=False):
        super(Concurrent, self).__init__()
        self.axis = axis
        self.stack = stack

    def forward(self, x):
        branch_outputs = tuple(child(x) for child in self._modules.values())
        combine = torch.stack if self.stack else torch.cat
        return combine(branch_outputs, dim=self.axis)
class SequentialConcurrent(nn.Sequential):
    """Sequential chain that collects every intermediate output and merges
    them along ``axis`` (concatenation or stacking); when ``cat_input`` is
    True the original input is included as the first entry.
    """

    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True):
        super(SequentialConcurrent, self).__init__()
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def forward(self, x):
        collected = [x] if self.cat_input else []
        for child in self._modules.values():
            x = child(x)
            collected.append(x)
        combine = torch.stack if self.stack else torch.cat
        return combine(tuple(collected), dim=self.axis)
class ParametricSequential(nn.Sequential):
    """``nn.Sequential`` variant that forwards extra keyword arguments to
    every child module's call.
    """

    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        for child in self._modules.values():
            x = child(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """Parallel container that forwards keyword arguments to every branch
    and concatenates the branch outputs along ``axis``.
    """

    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        branch_outputs = [branch(x, **kwargs) for branch in self._modules.values()]
        return torch.cat(tuple(branch_outputs), dim=self.axis)
class Hourglass(nn.Module):
    """Hourglass encoder/decoder block with additive skip connections.

    ``down_seq``, ``up_seq`` and ``skip_seq`` must all have the same length
    (the depth). The input is pushed through every down module while each
    intermediate (including the raw input) is recorded; on the way back up,
    the recorded tensor of the matching level is passed through its skip
    module and added to the running output before the next up module.

    Parameters
    ----------
    down_seq, up_seq, skip_seq : sequential containers of equal length.
    merge_type : str
        Only "add" is supported.
    return_first_skip : bool
        If True, ``forward`` also returns the final skip value ``y``
        (the skip branch computed from the original input).
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        assert (len(up_seq) == len(down_seq))
        assert (len(skip_seq) == len(down_seq))
        assert (merge_type in ["add"])
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.depth = len(down_seq)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq
    def forward(self, x, **kwargs):
        y = None
        # Encoder pass: record the input and every down-sampled intermediate.
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        # Decoder pass: walk levels from deepest back to the original input.
        for i in range(len(down_outs)):
            if i != 0:
                # Merge the skip branch of the matching encoder level.
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
                if (y is not None) and (self.merge_type == "add"):
                    x = x + y
            if i != len(down_outs) - 1:
                # Up-sample at every step except after the final merge.
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """'One-and-a-half' hourglass: down pass, up pass, then a second down pass.

    The up pass merges the skip outputs recorded during the first down pass;
    the second down pass merges the skip outputs recorded during the up
    pass. ``skip1_seq``/``skip2_seq`` have one more entry than the three
    depth-length sequences (one skip per level boundary).

    Parameters
    ----------
    down1_seq, up_seq, down2_seq : sequential containers of length ``depth``.
    skip1_seq, skip2_seq : sequential containers of length ``depth + 1``.
    merge_type : str
        "cat" (channel concatenation) or "add".
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq
    def _merge(self, x, y):
        # Combine a skip tensor into the main path; no-op when y is None.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def forward(self, x, **kwargs):
        # First down pass: record a skip output at every level boundary.
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        # Up pass starts from the deepest first-pass skip output.
        x = skip1_outs[self.depth]
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            # Merge the first-pass skip of the matching (shallower) level.
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Second down pass, merging the up-pass skips in reverse order.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """Sequential chain that, besides the final output, also returns the
    output of every child carrying a truthy ``do_output`` attribute.
    The result list starts with the final output.
    """

    def __init__(self):
        super(MultiOutputSequential, self).__init__()

    def forward(self, x):
        side_outputs = []
        for child in self._modules.values():
            x = child(x)
            if getattr(child, "do_output", False):
                side_outputs.append(x)
        return [x] + side_outputs
class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
| true | true |
f7217b1eb67a285016b2a98bb8fdd6162553f11b | 1,418 | py | Python | crds/jwst/__init__.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | crds/jwst/__init__.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | crds/jwst/__init__.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os.path
from crds import reftypes
# Directory containing this package (falls back to "." when __file__ is bare).
HERE = os.path.dirname(__file__) or "."
# Reference-type metadata loaded from this package's spec files.
TYPES = reftypes.from_package_file(__file__)
INSTRUMENTS = TYPES.instruments
EXTENSIONS = TYPES.extensions
TEXT_DESCR = TYPES.text_descr
FILEKINDS = TYPES.filekinds
# Placeholder substituted for matching parameters with no defined value.
UNDEFINED_PARKEY_SUBST_VALUE = "UNDEFINED"
# No instrument-name or type-name corrections are needed for this mission.
INSTRUMENT_FIXERS = {
}
TYPE_FIXERS = {
}
# Reference-file provenance keywords, in data-model (META.*) notation.
PROVENANCE_KEYWORDS = ("META.REFFILE.DESCRIPTION", "META.REFFILE.PEDIGREE", "META.REFFILE.USEAFTER","META.REFFILE.HISTORY", "META.REFFILE.AUTHOR")
# PROVENANCE_KEYWORDS = ("DESCRIP", "PEDIGREE", "USEAFTER","HISTORY", "AUTHOR")
USEAFTER_KEYWORDS = ("META.OBSERVATION.DATE", "META.OBSERVATION.TIME") # Dataset keywords matching in UseAfter selectors
DEFAULT_SELECTORS = ("Match", "UseAfter") # Normal selector hierarchy in rmap
# When loading headers, make sure each keyword in a tuple is represented with
# the same value enabling any form to be used. Case insensitive.
CROSS_STRAPPED_KEYWORDS = {
    "META.INSTRUMENT.NAME" : ["INSTRUME", "INSTRUMENT", "META.INSTRUMENT.TYPE"],
    "META.TELESCOPE" : ["TELESCOP","TELESCOPE"],
    "META.REFFILE.AUTHOR" : ["AUTHOR"],
    "META.REFFILE.PEDIGREE" : ["PEDIGREE"],
    "META.REFFILE.USEAFTER" : ["USEAFTER"],
    "META.REFFILE.DESCRIPTION" : ["DESCRIP","DESCRIPTION"],
    "META.REFFILE.HISTORY" : ["HISTORY"],
}
| 33.761905 | 146 | 0.738364 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os.path
from crds import reftypes
HERE = os.path.dirname(__file__) or "."
TYPES = reftypes.from_package_file(__file__)
INSTRUMENTS = TYPES.instruments
EXTENSIONS = TYPES.extensions
TEXT_DESCR = TYPES.text_descr
FILEKINDS = TYPES.filekinds
UNDEFINED_PARKEY_SUBST_VALUE = "UNDEFINED"
INSTRUMENT_FIXERS = {
}
TYPE_FIXERS = {
}
PROVENANCE_KEYWORDS = ("META.REFFILE.DESCRIPTION", "META.REFFILE.PEDIGREE", "META.REFFILE.USEAFTER","META.REFFILE.HISTORY", "META.REFFILE.AUTHOR")
USEAFTER_KEYWORDS = ("META.OBSERVATION.DATE", "META.OBSERVATION.TIME")
DEFAULT_SELECTORS = ("Match", "UseAfter")
CROSS_STRAPPED_KEYWORDS = {
"META.INSTRUMENT.NAME" : ["INSTRUME", "INSTRUMENT", "META.INSTRUMENT.TYPE"],
"META.TELESCOPE" : ["TELESCOP","TELESCOPE"],
"META.REFFILE.AUTHOR" : ["AUTHOR"],
"META.REFFILE.PEDIGREE" : ["PEDIGREE"],
"META.REFFILE.USEAFTER" : ["USEAFTER"],
"META.REFFILE.DESCRIPTION" : ["DESCRIP","DESCRIPTION"],
"META.REFFILE.HISTORY" : ["HISTORY"],
}
| true | true |
f7217bf8d6fabaf470f63ef2822e2cba3024153c | 7,292 | py | Python | tensorflow_datasets/audio/fuss.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 2 | 2022-02-14T09:51:39.000Z | 2022-02-14T13:27:49.000Z | tensorflow_datasets/audio/fuss.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/audio/fuss.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 1 | 2020-12-13T22:11:33.000Z | 2020-12-13T22:11:33.000Z | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FUSS dataset."""
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""\
@inproceedings{wisdom2020fuss,
title = {What's All the {FUSS} About Free Universal Sound Separation Data?},
author = {Scott Wisdom and Hakan Erdogan and Daniel P. W. Ellis and Romain Serizel and Nicolas Turpault and Eduardo Fonseca and Justin Salamon and Prem Seetharaman and John R. Hershey},
year = {2020},
url = {https://arxiv.org/abs/2011.00803},
}
@inproceedings{fonseca2020fsd50k,
author = {Eduardo Fonseca and Xavier Favory and Jordi Pons and Frederic Font Corbera and Xavier Serra},
title = {{FSD}50k: an open dataset of human-labeled sound events},
year = {2020},
url = {https://arxiv.org/abs/2010.00475},
}
"""
_DESCRIPTION = """\
The Free Universal Sound Separation (FUSS) Dataset is a database of arbitrary
sound mixtures and source-level references, for use in experiments on arbitrary
sound separation.
This is the official sound separation data for the DCASE2020 Challenge Task 4:
Sound Event Detection and Separation in Domestic Environments.
Overview: FUSS audio data is sourced from a pre-release of Freesound dataset
known as (FSD50k), a sound event dataset composed of Freesound content annotated
with labels from the AudioSet Ontology. Using the FSD50K labels, these source
files have been screened such that they likely only contain a single type of
sound. Labels are not provided for these source files, and are not considered
part of the challenge. For the purpose of the DCASE Task4 Sound Separation and
Event Detection challenge, systems should not use FSD50K labels, even though
they may become available upon FSD50K release.
To create mixtures, 10 second clips of sources are convolved with simulated room
impulse responses and added together. Each 10 second mixture contains between
1 and 4 sources. Source files longer than 10 seconds are considered "background"
sources. Every mixture contains one background source, which is active for the
entire duration. We provide: a software recipe to create the dataset, the room
impulse responses, and the original source audio.
"""
_URL = "https://github.com/google-research/sound-separation/blob/master/datasets/fuss/FUSS_license_doc/README.md"
_DL_METADATA = {
"reverberant":
("https://zenodo.org/record/3743844/files/FUSS_ssdata_reverb.tar.gz",
"ssdata_reverb"),
"unprocessed":
("https://zenodo.org/record/3743844/files/FUSS_ssdata.tar.gz", "ssdata"
),
}
class Fuss(tfds.core.GeneratorBasedBuilder):
  """FUSS: Free Universal Sound Separation dataset."""
  BUILDER_CONFIGS = [
      tfds.core.BuilderConfig(
          name="reverberant",
          description="Default reverberated audio.",
          version=tfds.core.Version("1.2.0")),
      tfds.core.BuilderConfig(
          name="unprocessed",
          description="Unprocessed audio without additional reverberation.",
          version=tfds.core.Version("1.2.0")),
  ]
  def _info(self):
    """Returns the dataset metadata (features, supervised keys, citation)."""
    source_labels = ["background0", "foreground0", "foreground1", "foreground2"]
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            # 10 s of 16 kHz int16 audio -> fixed length of 160000 samples.
            "mixture_audio":
                tfds.features.Audio(
                    file_format="wav",
                    shape=(160000,),
                    sample_rate=16000,
                    dtype=tf.int16),
            "sources":
                tfds.features.Sequence({
                    "audio":
                        tfds.features.Audio(
                            file_format="wav",
                            shape=(160000,),
                            sample_rate=16000,
                            dtype=tf.int16),
                    "label":
                        tfds.features.ClassLabel(names=source_labels),
                }),
            "segments":
                tfds.features.Sequence({
                    "start_time_seconds": tf.float32,
                    "end_time_seconds": tf.float32,
                    "label": tf.string
                }),
            "jams":
                tf.string,
            "id":
                tf.string,
        }),
        supervised_keys=("mixture_audio", "sources"),
        homepage=_URL,
        citation=_CITATION,
    )
  def _split_generators(self, dl_manager):
    """Downloads the archive for this config and defines the three splits."""
    url, extracted_dirname = _DL_METADATA[self.builder_config.name]
    base_dir = dl_manager.download_and_extract(url)
    splits = []
    # The on-disk "eval" directory backs the TEST split.
    for split_name, split_dir in [(tfds.Split.TRAIN, "train"),
                                  (tfds.Split.VALIDATION, "validation"),
                                  (tfds.Split.TEST, "eval")]:
      splits.append(
          tfds.core.SplitGenerator(
              name=split_name,
              gen_kwargs={
                  "base_dir": os.path.join(base_dir, extracted_dirname),
                  "split": split_dir,
              }))
    return splits
  def _parse_segments(self, path):
    """Parses a "start end label" segments file; returns a list of dicts.

    Returns an empty list when the file is missing, and silently skips
    lines that do not have exactly three whitespace-separated fields.
    """
    segments = []
    if not tf.io.gfile.exists(path):
      # Some segments files are missing in the "unprocessed" set.
      logging.info("Missing segments file: %s", path)
      return segments
    with tf.io.gfile.GFile(path) as f:
      for l in f:
        try:
          start, end, label = l.split()
        except ValueError:
          continue
        segments.append({
            "start_time_seconds": float(start),
            "end_time_seconds": float(end),
            "label": label
        })
    return segments
  def _generate_examples(self, base_dir, split):
    """Generates examples for the given split."""
    # Each line of the example list is: <mixture_path> <source_path>...
    path = os.path.join(base_dir, "%s_example_list.txt" % split)
    split_dir = os.path.join(base_dir, split)
    with tf.io.gfile.GFile(path) as example_list:
      for line in example_list:
        paths = line.split()
        key = _basename_without_ext(paths[0])
        sources = []
        for p in paths[1:]:
          sources.append({
              "audio": os.path.join(base_dir, p),
              # Source file names start with their class label, e.g.
              # "background0_...".
              "label": _basename_without_ext(p).split("_")[0],
          })
        segments = self._parse_segments(os.path.join(split_dir, "%s.txt" % key))
        jams = tf.io.gfile.GFile(os.path.join(split_dir,
                                              "%s.jams" % key)).read()
        example = {
            "mixture_audio": os.path.join(base_dir, paths[0]),
            "sources": sources,
            "segments": segments,
            "jams": jams,
            "id": key,
        }
        yield key, example
def _basename_without_ext(p):
basename, _ = os.path.splitext(os.path.basename(p))
return basename
| 37.782383 | 187 | 0.62932 |
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""\
@inproceedings{wisdom2020fuss,
title = {What's All the {FUSS} About Free Universal Sound Separation Data?},
author = {Scott Wisdom and Hakan Erdogan and Daniel P. W. Ellis and Romain Serizel and Nicolas Turpault and Eduardo Fonseca and Justin Salamon and Prem Seetharaman and John R. Hershey},
year = {2020},
url = {https://arxiv.org/abs/2011.00803},
}
@inproceedings{fonseca2020fsd50k,
author = {Eduardo Fonseca and Xavier Favory and Jordi Pons and Frederic Font Corbera and Xavier Serra},
title = {{FSD}50k: an open dataset of human-labeled sound events},
year = {2020},
url = {https://arxiv.org/abs/2010.00475},
}
"""
_DESCRIPTION = """\
The Free Universal Sound Separation (FUSS) Dataset is a database of arbitrary
sound mixtures and source-level references, for use in experiments on arbitrary
sound separation.
This is the official sound separation data for the DCASE2020 Challenge Task 4:
Sound Event Detection and Separation in Domestic Environments.
Overview: FUSS audio data is sourced from a pre-release of Freesound dataset
known as (FSD50k), a sound event dataset composed of Freesound content annotated
with labels from the AudioSet Ontology. Using the FSD50K labels, these source
files have been screened such that they likely only contain a single type of
sound. Labels are not provided for these source files, and are not considered
part of the challenge. For the purpose of the DCASE Task4 Sound Separation and
Event Detection challenge, systems should not use FSD50K labels, even though
they may become available upon FSD50K release.
To create mixtures, 10 second clips of sources are convolved with simulated room
impulse responses and added together. Each 10 second mixture contains between
1 and 4 sources. Source files longer than 10 seconds are considered "background"
sources. Every mixture contains one background source, which is active for the
entire duration. We provide: a software recipe to create the dataset, the room
impulse responses, and the original source audio.
"""
_URL = "https://github.com/google-research/sound-separation/blob/master/datasets/fuss/FUSS_license_doc/README.md"
_DL_METADATA = {
"reverberant":
("https://zenodo.org/record/3743844/files/FUSS_ssdata_reverb.tar.gz",
"ssdata_reverb"),
"unprocessed":
("https://zenodo.org/record/3743844/files/FUSS_ssdata.tar.gz", "ssdata"
),
}
class Fuss(tfds.core.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(
name="reverberant",
description="Default reverberated audio.",
version=tfds.core.Version("1.2.0")),
tfds.core.BuilderConfig(
name="unprocessed",
description="Unprocessed audio without additional reverberation.",
version=tfds.core.Version("1.2.0")),
]
def _info(self):
source_labels = ["background0", "foreground0", "foreground1", "foreground2"]
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"mixture_audio":
tfds.features.Audio(
file_format="wav",
shape=(160000,),
sample_rate=16000,
dtype=tf.int16),
"sources":
tfds.features.Sequence({
"audio":
tfds.features.Audio(
file_format="wav",
shape=(160000,),
sample_rate=16000,
dtype=tf.int16),
"label":
tfds.features.ClassLabel(names=source_labels),
}),
"segments":
tfds.features.Sequence({
"start_time_seconds": tf.float32,
"end_time_seconds": tf.float32,
"label": tf.string
}),
"jams":
tf.string,
"id":
tf.string,
}),
supervised_keys=("mixture_audio", "sources"),
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
url, extracted_dirname = _DL_METADATA[self.builder_config.name]
base_dir = dl_manager.download_and_extract(url)
splits = []
for split_name, split_dir in [(tfds.Split.TRAIN, "train"),
(tfds.Split.VALIDATION, "validation"),
(tfds.Split.TEST, "eval")]:
splits.append(
tfds.core.SplitGenerator(
name=split_name,
gen_kwargs={
"base_dir": os.path.join(base_dir, extracted_dirname),
"split": split_dir,
}))
return splits
def _parse_segments(self, path):
segments = []
if not tf.io.gfile.exists(path):
# Some segments files are missing in the "unprocessed" set.
logging.info("Missing segments file: %s", path)
return segments
with tf.io.gfile.GFile(path) as f:
for l in f:
try:
start, end, label = l.split()
except ValueError:
continue
segments.append({
"start_time_seconds": float(start),
"end_time_seconds": float(end),
"label": label
})
return segments
def _generate_examples(self, base_dir, split):
path = os.path.join(base_dir, "%s_example_list.txt" % split)
split_dir = os.path.join(base_dir, split)
with tf.io.gfile.GFile(path) as example_list:
for line in example_list:
paths = line.split()
key = _basename_without_ext(paths[0])
sources = []
for p in paths[1:]:
sources.append({
"audio": os.path.join(base_dir, p),
"label": _basename_without_ext(p).split("_")[0],
})
segments = self._parse_segments(os.path.join(split_dir, "%s.txt" % key))
jams = tf.io.gfile.GFile(os.path.join(split_dir,
"%s.jams" % key)).read()
example = {
"mixture_audio": os.path.join(base_dir, paths[0]),
"sources": sources,
"segments": segments,
"jams": jams,
"id": key,
}
yield key, example
def _basename_without_ext(p):
basename, _ = os.path.splitext(os.path.basename(p))
return basename
| true | true |
f7217c7974021f0ec405e5dff2a600a77498317d | 538 | py | Python | src/svm/get_vocab_dict.py | dimart10/machine-learning | 0f33bef65a9335c0f7fed680f1112419bae8fabc | [
"MIT"
] | null | null | null | src/svm/get_vocab_dict.py | dimart10/machine-learning | 0f33bef65a9335c0f7fed680f1112419bae8fabc | [
"MIT"
] | null | null | null | src/svm/get_vocab_dict.py | dimart10/machine-learning | 0f33bef65a9335c0f7fed680f1112419bae8fabc | [
"MIT"
] | null | null | null | def getVocabDict(reverse=False):
    """
    Function to read in the supplied vocab list text file into a dictionary.
    Dictionary key is the stemmed word, value is the index in the text file
    If "reverse", the keys and values are switched.
    """
    vocab_dict = {}
    # Each line is "<index> <word>"; the path is relative to the working
    # directory, so this presumably runs from inside src/ -- confirm.
    with open("../data/emails/vocab.txt") as f:
        for line in f:
            (val, key) = line.split()
            if not reverse:
                vocab_dict[key] = int(val)
            else:
                vocab_dict[int(val)] = key
    return vocab_dict
| 31.647059 | 76 | 0.581784 | def getVocabDict(reverse=False):
vocab_dict = {}
with open("../data/emails/vocab.txt") as f:
for line in f:
(val, key) = line.split()
if not reverse:
vocab_dict[key] = int(val)
else:
vocab_dict[int(val)] = key
return vocab_dict
| true | true |
f7217cb6c5888d602826730dbf6b55ce8ad59ff8 | 1,125 | py | Python | clients/python/marquez_client/models.py | aridwiprayogo/marquez | b15e44fb7c2a0efcbe8ee8ce412144ac5ee68e0e | [
"Apache-2.0"
] | 999 | 2018-07-07T01:36:21.000Z | 2022-03-31T18:25:18.000Z | clients/python/marquez_client/models.py | aridwiprayogo/marquez | b15e44fb7c2a0efcbe8ee8ce412144ac5ee68e0e | [
"Apache-2.0"
] | 1,681 | 2018-07-19T23:45:31.000Z | 2022-03-31T22:21:07.000Z | clients/python/marquez_client/models.py | aridwiprayogo/marquez | b15e44fb7c2a0efcbe8ee8ce412144ac5ee68e0e | [
"Apache-2.0"
] | 182 | 2018-08-02T11:35:45.000Z | 2022-03-31T07:02:14.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class DatasetId:
    """Identifier for a dataset: a (namespace, name) pair.

    Value object: two ids with equal fields compare equal and share a hash,
    so they can be used as dict keys and in sets.
    """

    def __init__(self, namespace: str, name: str):
        self.namespace = namespace
        self.name = name

    def __eq__(self, other):
        return (isinstance(other, DatasetId)
                and (self.namespace, self.name) == (other.namespace, other.name))

    def __hash__(self):
        return hash((self.namespace, self.name))

    def __repr__(self):
        return f"DatasetId(namespace={self.namespace!r}, name={self.name!r})"
class JobId:
    """Identifier for a job: a (namespace, name) pair.

    Value object: two ids with equal fields compare equal and share a hash,
    so they can be used as dict keys and in sets.
    """

    def __init__(self, namespace: str, name: str):
        self.namespace = namespace
        self.name = name

    def __eq__(self, other):
        return (isinstance(other, JobId)
                and (self.namespace, self.name) == (other.namespace, other.name))

    def __hash__(self):
        return hash((self.namespace, self.name))

    def __repr__(self):
        return f"JobId(namespace={self.namespace!r}, name={self.name!r})"
class DatasetType(Enum):
    # Physical form of a dataset: a database table or a streaming source.
    DB_TABLE = "DB_TABLE"
    STREAM = "STREAM"
class JobType(Enum):
    # Execution model of a job: one-shot batch, continuous stream processor,
    # or a long-running service.
    BATCH = "BATCH"
    STREAM = "STREAM"
    SERVICE = "SERVICE"
class RunState(Enum):
    # Lifecycle states of a job run; COMPLETED/FAILED/ABORTED are terminal.
    NEW = 'NEW'
    RUNNING = 'RUNNING'
    COMPLETED = 'COMPLETED'
    FAILED = 'FAILED'
    ABORTED = 'ABORTED'
| 25 | 74 | 0.688889 |
from enum import Enum
class DatasetId:
def __init__(self, namespace: str, name: str):
self.namespace = namespace
self.name = name
class JobId:
def __init__(self, namespace: str, name: str):
self.namespace = namespace
self.name = name
class DatasetType(Enum):
DB_TABLE = "DB_TABLE"
STREAM = "STREAM"
class JobType(Enum):
BATCH = "BATCH"
STREAM = "STREAM"
SERVICE = "SERVICE"
class RunState(Enum):
NEW = 'NEW'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
ABORTED = 'ABORTED'
| true | true |
f7217ef251ef43a682f902818aa9a8aa8f1b0d93 | 2,145 | py | Python | app/s3_client/s3_csv_client.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | 33 | 2016-01-11T20:16:17.000Z | 2021-11-23T12:50:29.000Z | app/s3_client/s3_csv_client.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | 1,249 | 2015-11-30T16:43:21.000Z | 2022-03-24T13:04:55.000Z | app/s3_client/s3_csv_client.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | 36 | 2015-12-02T09:49:26.000Z | 2021-04-10T18:05:41.000Z | import uuid
import botocore
from flask import current_app
from notifications_utils.s3 import s3upload as utils_s3upload
from app.s3_client.s3_logo_client import get_s3_object
FILE_LOCATION_STRUCTURE = 'service-{}-notify/{}.csv'
def get_csv_location(service_id, upload_id, bucket=None):
    """Return the (bucket_name, object_key) pair for a service's CSV upload.

    Falls back to the configured CSV_UPLOAD_BUCKET_NAME when no bucket is
    given.
    """
    bucket_name = bucket or current_app.config['CSV_UPLOAD_BUCKET_NAME']
    object_key = FILE_LOCATION_STRUCTURE.format(service_id, upload_id)
    return bucket_name, object_key
def get_csv_upload(service_id, upload_id, bucket=None):
    # Thin wrapper: resolve (bucket, key) and fetch the S3 object handle.
    return get_s3_object(*get_csv_location(service_id, upload_id, bucket))
def s3upload(service_id, filedata, region, bucket=None):
    """Store a CSV upload in S3 under a freshly generated UUID.

    Returns the UUID string used as the upload id.
    """
    upload_id = str(uuid.uuid4())
    target_bucket, target_key = get_csv_location(service_id, upload_id, bucket)
    utils_s3upload(
        filedata=filedata['data'],
        region=region,
        bucket_name=target_bucket,
        file_location=target_key,
    )
    return upload_id
def s3download(service_id, upload_id, bucket=None):
    """Fetch the uploaded CSV from S3 and return its contents as text.

    Logs and re-raises botocore ClientError when the object cannot be read.
    """
    try:
        s3_object = get_csv_upload(service_id, upload_id, bucket)
        return s3_object.get()['Body'].read().decode('utf-8')
    except botocore.exceptions.ClientError as e:
        current_app.logger.error("Unable to download s3 file {}".format(
            FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
        raise e
def set_metadata_on_csv_upload(service_id, upload_id, bucket=None, **kwargs):
    """Replace the S3 user metadata on an uploaded CSV.

    S3 object metadata cannot be edited in place, so the object is copied
    onto itself with MetadataDirective='REPLACE'. All metadata values are
    stringified; encryption is re-applied (AES256) on the copy.
    """
    get_csv_upload(
        service_id, upload_id, bucket=bucket
    ).copy_from(
        CopySource='{}/{}'.format(*get_csv_location(service_id, upload_id, bucket=bucket)),
        ServerSideEncryption='AES256',
        Metadata={
            key: str(value) for key, value in kwargs.items()
        },
        MetadataDirective='REPLACE',
    )
def get_csv_metadata(service_id, upload_id, bucket=None):
    """Return the S3 user metadata dict stored on the uploaded CSV.

    Logs and re-raises botocore ClientError when the object cannot be read.
    """
    try:
        s3_object = get_csv_upload(service_id, upload_id, bucket)
        return s3_object.get()['Metadata']
    except botocore.exceptions.ClientError as e:
        current_app.logger.error("Unable to download s3 file {}".format(
            FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
        raise e
| 31.544118 | 91 | 0.699301 | import uuid
import botocore
from flask import current_app
from notifications_utils.s3 import s3upload as utils_s3upload
from app.s3_client.s3_logo_client import get_s3_object
FILE_LOCATION_STRUCTURE = 'service-{}-notify/{}.csv'
def get_csv_location(service_id, upload_id, bucket=None):
return (
bucket or current_app.config['CSV_UPLOAD_BUCKET_NAME'],
FILE_LOCATION_STRUCTURE.format(service_id, upload_id),
)
def get_csv_upload(service_id, upload_id, bucket=None):
return get_s3_object(*get_csv_location(service_id, upload_id, bucket))
def s3upload(service_id, filedata, region, bucket=None):
upload_id = str(uuid.uuid4())
bucket_name, file_location = get_csv_location(service_id, upload_id, bucket)
utils_s3upload(
filedata=filedata['data'],
region=region,
bucket_name=bucket_name,
file_location=file_location,
)
return upload_id
def s3download(service_id, upload_id, bucket=None):
contents = ''
try:
key = get_csv_upload(service_id, upload_id, bucket)
contents = key.get()['Body'].read().decode('utf-8')
except botocore.exceptions.ClientError as e:
current_app.logger.error("Unable to download s3 file {}".format(
FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
raise e
return contents
def set_metadata_on_csv_upload(service_id, upload_id, bucket=None, **kwargs):
get_csv_upload(
service_id, upload_id, bucket=bucket
).copy_from(
CopySource='{}/{}'.format(*get_csv_location(service_id, upload_id, bucket=bucket)),
ServerSideEncryption='AES256',
Metadata={
key: str(value) for key, value in kwargs.items()
},
MetadataDirective='REPLACE',
)
def get_csv_metadata(service_id, upload_id, bucket=None):
try:
key = get_csv_upload(service_id, upload_id, bucket)
return key.get()['Metadata']
except botocore.exceptions.ClientError as e:
current_app.logger.error("Unable to download s3 file {}".format(
FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
raise e
| true | true |
f7217f0a995fcc98786c4617f284dd074799a176 | 3,622 | py | Python | dfs_search.py | orionoiro/path_searcher | 198888a4570b40812a53e8485387e8cd59fe20ee | [
"MIT"
] | null | null | null | dfs_search.py | orionoiro/path_searcher | 198888a4570b40812a53e8485387e8cd59fe20ee | [
"MIT"
] | 1 | 2021-06-08T19:43:09.000Z | 2021-06-08T19:43:09.000Z | dfs_search.py | orionoiro/path_searcher | 198888a4570b40812a53e8485387e8cd59fe20ee | [
"MIT"
] | null | null | null | from graph import Digraph, Node, WeightedEdge
def load_map(map_filename):
    """
    Parses the map file and constructs a directed graph

    Assumes:
        Each entry in the map file consists of the following four positive
        integers, separated by a blank space:
            32 76 54 23
        This entry would become an edge from 32 to 76.

    Returns:
        a Digraph representing the map
    """
    g = Digraph()
    with open(map_filename, 'r') as file:
        # Split each non-empty line into its four fields. (The previous
        # version mutated the line list while iterating via list.index --
        # O(n^2) -- and removed exactly one [''] entry, which raised
        # ValueError for files without a trailing newline; skipping all
        # blank lines handles both cases.)
        entries = [line.split(' ') for line in file.read().split('\n') if line]
    for src_name, dest_name, total_dist, outdoor_dist in entries:
        start = Node(src_name)
        dest = Node(dest_name)
        for node in (start, dest):
            try:
                g.add_node(node)
            except ValueError:
                # Node was already added by an earlier entry.
                pass
        edge = WeightedEdge(start, dest, int(total_dist), int(outdoor_dist))
        try:
            g.add_edge(edge)
        except ValueError:
            # Duplicate edge.
            pass
    return g
def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,
                  best_path):
    """
    Finds the shortest path between buildings.

    `path` is a three-item list [visited node names, total distance so far,
    outdoor distance so far]; it is mutated while descending and restored
    when backtracking.

    Returns:
        A tuple with the shortest-path from start to end, represented by
        a list of building numbers and the distance of that path.

        If there exists no path that satisfies max_total_dist and
        max_dist_outdoors constraints, then return None.
    """
    start = Node(start)
    end = Node(end)
    path[0].append(start.get_name())
    if start not in digraph.nodes or end not in digraph.nodes:
        raise ValueError
    elif start == end:
        return tuple([path[0].copy(), path[1]])
    else:
        for edge in digraph.edges[start]:
            # Reset per edge. Previously next_path was only ever assigned
            # inside the nested ifs, so the `is not None` check below raised
            # NameError whenever the very first edge led to an
            # already-visited node.
            next_path = None
            if edge.get_destination().get_name() not in path[0]:
                if len(best_path) == 0 or len(path[0]) < len(best_path):
                    if path[2] + edge.get_outdoor_distance() <= max_dist_outdoors:
                        path[1] += edge.get_total_distance()
                        path[2] += edge.get_outdoor_distance()
                        next_path = get_best_path(digraph, edge.get_destination(), end, path,
                                                  max_dist_outdoors, best_dist, best_path)
                        # Backtrack: undo the mutations made for this edge.
                        path[0].remove(edge.get_destination().get_name())
                        path[1] -= edge.get_total_distance()
                        path[2] -= edge.get_outdoor_distance()
                    else:
                        continue
            if next_path is not None:
                if best_dist == 0 or next_path[1] < best_dist:
                    best_path = next_path[0]
                    best_dist = next_path[1]
        if best_dist == 0:
            return None
        return tuple([best_path, best_dist])
def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):
    """
    Finds the shortest path from start to end using a directed depth-first
    search.

    Returns:
        The shortest-path from start to end, represented by
        a list of building numbers (in strings).

        If there exists no path that satisfies max_total_dist and
        max_dist_outdoors constraints, then raises a ValueError.
    """
    result = get_best_path(digraph, start, end, [[], 0, 0],
                           max_dist_outdoors, 0, [])
    # get_best_path returns None when no path exists; indexing None raises
    # TypeError, which is translated into the documented ValueError.
    try:
        route, total_distance = result[0], result[-1]
    except TypeError:
        raise ValueError
    if total_distance > max_total_dist:
        raise ValueError
    return route
| 32.927273 | 93 | 0.570403 | from graph import Digraph, Node, WeightedEdge
def load_map(map_filename):
    """Parse a map file into a Digraph.

    Each non-blank line holds four space-separated fields:
    source node, destination node, total distance, outdoor distance.
    Duplicate nodes/edges are tolerated (add_* ValueErrors are swallowed).
    """
    g = Digraph()
    with open(map_filename, 'r') as file:
        read_data = file.read().split('\n')
    # Split each raw line in place. NOTE(review): list.index() re-scans the
    # list on every iteration (O(n^2)) and is fragile with duplicate lines.
    for elem in read_data:
        read_data[read_data.index(elem)] = elem.split(' ')
    # Drop the empty entry produced by the trailing newline.
    # NOTE(review): raises ValueError if the file does not end in a newline.
    read_data.remove([''])
    for elem in read_data:
        start = Node(elem[0])
        dest = Node(elem[1])
        try:
            g.add_node(start)
        except ValueError:
            pass  # node already in the graph
        try:
            g.add_node(dest)
        except ValueError:
            pass  # node already in the graph
        edge1 = WeightedEdge(start, dest, int(elem[2]), int(elem[3]))
        try:
            g.add_edge(edge1)
        except ValueError:
            pass  # edge already in the graph
    return g
def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,
                  best_path):
    """Recursive DFS helper returning (node-name list, total distance) or None.

    `path` is [visited names, total distance so far, outdoor distance so far];
    it is mutated while descending and restored when backtracking.
    Raises ValueError when start or end is not in the graph.
    """
    start = Node(start)
    end = Node(end)
    path[0].append(start.get_name())
    if start not in digraph.nodes or end not in digraph.nodes:
        raise ValueError
    elif start == end:
        return tuple([path[0].copy(), path[1]])
    else:
        for edge in digraph.edges[start]:
            # NOTE(review): next_path is only assigned inside the nested ifs
            # below; if the very first edge leads to an already-visited node,
            # the `is not None` check hits an unbound local (NameError) --
            # confirm and initialise next_path per iteration.
            if edge.get_destination().get_name() not in path[0]:
                if len(best_path) == 0 or len(path[0]) < len(best_path):
                    if path[2] + edge.get_outdoor_distance() <= max_dist_outdoors:
                        path[1] += edge.get_total_distance()
                        path[2] += edge.get_outdoor_distance()
                        next_path = get_best_path(digraph, edge.get_destination(), end, path,
                                                  max_dist_outdoors, best_dist, best_path)
                        # Backtrack: undo the mutations made for this edge.
                        path[0].remove(edge.get_destination().get_name())
                        path[1] -= edge.get_total_distance()
                        path[2] -= edge.get_outdoor_distance()
                    else:
                        continue
            if next_path is not None:
                if best_dist == 0 or next_path[1] < best_dist:
                    best_path = next_path[0]
                    best_dist = next_path[1]
        if best_dist == 0:
            return None
        return tuple([best_path, best_dist])
def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):
    """Shortest path from start to end via directed DFS.

    Returns the path as a list of building-number strings; raises ValueError
    when no path satisfies the max_total_dist / max_dist_outdoors constraints.
    """
    search_result = get_best_path(digraph, start, end, [[], 0, 0], max_dist_outdoors, 0, [])
    # get_best_path returns None when no path exists; indexing None raises
    # TypeError, which is translated into the documented ValueError.
    try:
        if search_result[-1] <= max_total_dist:
            return search_result[0]
        else:
            raise ValueError
    except TypeError:
        raise ValueError
| true | true |
f72180e784ecfee3622da10e4ca8c64c9fb89d32 | 3,450 | py | Python | tests/functional/test_cli.py | garnaat/aws-lambda-builders | 0ce436cacb7e5e756c65cb4fa4d78877ada307e5 | [
"Apache-2.0"
] | 2 | 2020-11-12T22:58:17.000Z | 2021-03-22T16:13:34.000Z | tests/functional/test_cli.py | awood45/aws-lambda-builders | 3744cea731403fc5d5aad36c4f60d9512231fd78 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_cli.py | awood45/aws-lambda-builders | 3744cea731403fc5d5aad36c4f60d9512231fd78 | [
"Apache-2.0"
] | null | null | null |
import json
import os
import shutil
import tempfile
import subprocess
import copy
from unittest import TestCase
from parameterized import parameterized
class TestCliWithHelloWorkflow(TestCase):
    """End-to-end tests for the ``lambda-builders`` CLI using the test-only
    Hello workflow, exercising both JSON-RPC input channels (stdin and a
    single CLI argument)."""

    HELLO_WORKFLOW_MODULE = "hello_workflow.write_hello"
    TEST_WORKFLOWS_FOLDER = os.path.join(os.path.dirname(__file__), "testdata", "workflows")

    def setUp(self):
        self.source_dir = tempfile.mkdtemp()
        self.artifacts_dir = tempfile.mkdtemp()

        # Capabilities supported by the Hello workflow
        self.language = "test"
        self.dependency_manager = "test"
        self.application_framework = "test"

        # The builder should write a file called hello.txt with contents "Hello World"
        self.expected_filename = os.path.join(self.artifacts_dir, 'hello.txt')
        self.expected_contents = "Hello World"

        self.command_name = "lambda-builders-dev" if os.environ.get("LAMBDA_BUILDERS_DEV") else "lambda-builders"

        # Make sure the test workflow is in PYTHONPATH to be automatically loaded
        self.python_path_list = os.environ.get("PYTHONPATH", '').split(os.pathsep) + [self.TEST_WORKFLOWS_FOLDER]
        self.python_path = os.pathsep.join(filter(bool, self.python_path_list))

    def tearDown(self):
        shutil.rmtree(self.source_dir)
        shutil.rmtree(self.artifacts_dir)

    @parameterized.expand([
        ("request_through_stdin"),
        ("request_through_argument")
    ])
    def test_run_hello_workflow(self, flavor):
        """Run the CLI end-to-end and verify the JSON-RPC response and the
        artifact the Hello workflow is expected to write."""
        request_json = json.dumps({
            "jsonschema": "2.0",
            "id": 1234,
            "method": "LambdaBuilder.build",
            "params": {
                "capability": {
                    "language": self.language,
                    "dependency_manager": self.dependency_manager,
                    "application_framework": self.application_framework
                },
                "supported_workflows": [self.HELLO_WORKFLOW_MODULE],
                "source_dir": self.source_dir,
                "artifacts_dir": self.artifacts_dir,
                "scratch_dir": "/ignored",
                "manifest_path": "/ignored",
                "runtime": "ignored",
                "optimizations": {},
                "options": {},
            }
        })

        env = copy.deepcopy(os.environ)
        env["PYTHONPATH"] = self.python_path

        stdout_data = None
        if flavor == "request_through_stdin":
            p = subprocess.Popen([self.command_name], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_data = p.communicate(input=request_json.encode('utf-8'))[0]
        elif flavor == "request_through_argument":
            p = subprocess.Popen([self.command_name, request_json], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_data = p.communicate()[0]
        else:
            raise ValueError("Invalid test flavor")

        # Validate the response object. It should be successful response.
        # assertEqual replaces the deprecated assertEquals alias (removed in
        # Python 3.12).
        response = json.loads(stdout_data)
        self.assertNotIn('error', response)
        self.assertIn('result', response)
        self.assertEqual(response['result']['artifacts_dir'], self.artifacts_dir)

        self.assertTrue(os.path.exists(self.expected_filename))
        contents = ''
        with open(self.expected_filename, 'r') as fp:
            contents = fp.read()

        self.assertEqual(contents, self.expected_contents)
| 35.9375 | 123 | 0.630145 |
import json
import os
import shutil
import tempfile
import subprocess
import copy
from unittest import TestCase
from parameterized import parameterized
class TestCliWithHelloWorkflow(TestCase):
    """End-to-end tests for the ``lambda-builders`` CLI using the test-only
    Hello workflow, exercising both JSON-RPC input channels (stdin and a
    single CLI argument)."""

    HELLO_WORKFLOW_MODULE = "hello_workflow.write_hello"
    TEST_WORKFLOWS_FOLDER = os.path.join(os.path.dirname(__file__), "testdata", "workflows")

    def setUp(self):
        self.source_dir = tempfile.mkdtemp()
        self.artifacts_dir = tempfile.mkdtemp()
        # Capabilities the Hello workflow declares support for.
        self.language = "test"
        self.dependency_manager = "test"
        self.application_framework = "test"
        # The builder is expected to write hello.txt containing "Hello World".
        self.expected_filename = os.path.join(self.artifacts_dir, 'hello.txt')
        self.expected_contents = "Hello World"
        self.command_name = "lambda-builders-dev" if os.environ.get("LAMBDA_BUILDERS_DEV") else "lambda-builders"
        # Prepend the test workflow folder to PYTHONPATH so it auto-loads.
        self.python_path_list = os.environ.get("PYTHONPATH", '').split(os.pathsep) + [self.TEST_WORKFLOWS_FOLDER]
        self.python_path = os.pathsep.join(filter(bool, self.python_path_list))

    def tearDown(self):
        shutil.rmtree(self.source_dir)
        shutil.rmtree(self.artifacts_dir)

    @parameterized.expand([
        ("request_through_stdin"),
        ("request_through_argument")
    ])
    def test_run_hello_workflow(self, flavor):
        """Run the CLI end-to-end and verify the JSON-RPC response and the
        artifact written by the Hello workflow."""
        request_json = json.dumps({
            "jsonschema": "2.0",
            "id": 1234,
            "method": "LambdaBuilder.build",
            "params": {
                "capability": {
                    "language": self.language,
                    "dependency_manager": self.dependency_manager,
                    "application_framework": self.application_framework
                },
                "supported_workflows": [self.HELLO_WORKFLOW_MODULE],
                "source_dir": self.source_dir,
                "artifacts_dir": self.artifacts_dir,
                "scratch_dir": "/ignored",
                "manifest_path": "/ignored",
                "runtime": "ignored",
                "optimizations": {},
                "options": {},
            }
        })
        env = copy.deepcopy(os.environ)
        env["PYTHONPATH"] = self.python_path
        stdout_data = None
        if flavor == "request_through_stdin":
            p = subprocess.Popen([self.command_name], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_data = p.communicate(input=request_json.encode('utf-8'))[0]
        elif flavor == "request_through_argument":
            p = subprocess.Popen([self.command_name, request_json], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_data = p.communicate()[0]
        else:
            raise ValueError("Invalid test flavor")
        # The response must be a success: no 'error' member, matching result.
        response = json.loads(stdout_data)
        self.assertNotIn('error', response)
        self.assertIn('result', response)
        self.assertEquals(response['result']['artifacts_dir'], self.artifacts_dir)
        self.assertTrue(os.path.exists(self.expected_filename))
        contents = ''
        with open(self.expected_filename, 'r') as fp:
            contents = fp.read()
        self.assertEquals(contents, self.expected_contents)
| true | true |
f721837c57c136970d438343cccd809cda08ff22 | 19,515 | py | Python | pype/vendor/capture_gui/accordion.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | 52 | 2017-03-28T02:44:25.000Z | 2021-08-13T08:32:56.000Z | pype/vendor/capture_gui/accordion.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | 51 | 2017-04-05T08:27:29.000Z | 2020-05-08T14:40:31.000Z | pype/vendor/capture_gui/accordion.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | 12 | 2016-09-19T11:55:03.000Z | 2021-10-15T09:21:31.000Z | from .vendor.Qt import QtCore, QtWidgets, QtGui
class AccordionItem(QtWidgets.QGroupBox):
    """One collapsible rollout inside an AccordionWidget.

    Draws its own header (title, expand/collapse triangle and, when drag &
    drop is enabled, a drag grip) in one of several rollout styles, and
    hosts the user-supplied widget beneath the header.
    """

    trigger = QtCore.Signal(bool)

    def __init__(self, accordion, title, widget):
        QtWidgets.QGroupBox.__init__(self, parent=accordion)

        # create the layout
        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(6, 12, 6, 6)
        layout.setSpacing(0)
        layout.addWidget(widget)

        self._accordianWidget = accordion
        self._rolloutStyle = 2  # AccordionWidget.Rounded
        self._dragDropMode = 0  # AccordionWidget.NoDragDrop

        self.setAcceptDrops(True)
        self.setLayout(layout)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.showMenu)

        # create custom properties
        self._widget = widget
        self._collapsed = False
        self._collapsible = True
        self._clicked = False
        self._customData = {}

        # set common properties
        self.setTitle(title)

    def accordionWidget(self):
        """
        \remarks grabs the parent item for the accordian widget
        \return <blurdev.gui.widgets.accordianwidget.AccordianWidget>
        """
        return self._accordianWidget

    def customData(self, key, default=None):
        """
        \remarks return a custom pointer to information stored with this item
        \param key <str>
        \param default <variant> default value to return if the key was not found
        \return <variant> data
        """
        return self._customData.get(str(key), default)

    def dragEnterEvent(self, event):
        # Accept only internal drags from sibling AccordionItems of the same
        # parent, and only when drag/drop is enabled.
        if not self._dragDropMode:
            return

        source = event.source()
        if source != self and source.parent() == self.parent() and isinstance(
                source, AccordionItem):
            event.acceptProposedAction()

    def dragDropRect(self):
        # Hit area of the drag grip drawn in the header.
        return QtCore.QRect(25, 7, 10, 6)

    def dragDropMode(self):
        return self._dragDropMode

    def dragMoveEvent(self, event):
        # Same acceptance rule as dragEnterEvent: internal sibling moves only.
        if not self._dragDropMode:
            return

        source = event.source()
        if source != self and source.parent() == self.parent() and isinstance(
                source, AccordionItem):
            event.acceptProposedAction()

    def dropEvent(self, event):
        # Re-insert the dragged sibling at this item's layout position and
        # notify the parent accordion of the reorder.
        widget = event.source()
        layout = self.parent().layout()
        layout.insertWidget(layout.indexOf(self), widget)
        self._accordianWidget.emitItemsReordered()

    def expandCollapseRect(self):
        # Full-width header strip; clicking it toggles the collapsed state.
        return QtCore.QRect(0, 0, self.width(), 20)

    def enterEvent(self, event):
        # Forward hover transitions to the accordion so it can manage its
        # scroll-drag cursor.
        self.accordionWidget().leaveEvent(event)
        event.accept()

    def leaveEvent(self, event):
        self.accordionWidget().enterEvent(event)
        event.accept()

    def mouseReleaseEvent(self, event):
        # Complete a click-to-collapse gesture started in mousePressEvent.
        if self._clicked and self.expandCollapseRect().contains(event.pos()):
            self.toggleCollapsed()
            event.accept()
        else:
            event.ignore()

        self._clicked = False

    def mouseMoveEvent(self, event):
        event.ignore()

    def mousePressEvent(self, event):
        # handle an internal move
        # start a drag event
        if event.button() == QtCore.Qt.LeftButton and self.dragDropRect().contains(
                event.pos()):
            # create the pixmap
            # NOTE(review): QPixmap.grabWidget() is a Qt4-era API; Qt5
            # bindings expose QWidget.grab() instead -- confirm which the
            # vendored Qt shim provides.
            pixmap = QtGui.QPixmap.grabWidget(self, self.rect())

            # create the mimedata
            mimeData = QtCore.QMimeData()
            mimeData.setText('ItemTitle::%s' % (self.title()))

            # create the drag
            drag = QtGui.QDrag(self)
            drag.setMimeData(mimeData)
            drag.setPixmap(pixmap)
            drag.setHotSpot(event.pos())

            if not drag.exec_():
                self._accordianWidget.emitItemDragFailed(self)

            event.accept()

        # determine if the expand/collapse should occur
        elif event.button() == QtCore.Qt.LeftButton and self.expandCollapseRect().contains(
                event.pos()):
            self._clicked = True
            event.accept()

        else:
            event.ignore()

    def isCollapsed(self):
        return self._collapsed

    def isCollapsible(self):
        return self._collapsible

    def __drawTriangle(self, painter, x, y):
        # Paint the expand/collapse indicator: pointing down when expanded,
        # pointing right when collapsed.
        # NOTE(review): QPoint with float arguments (13.0 / 16.0) may raise
        # TypeError under strict bindings -- confirm against the Qt shim.
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 160),
                             QtCore.Qt.SolidPattern)
        if not self.isCollapsed():
            tl, tr, tp = QtCore.QPoint(x + 9, y + 8), QtCore.QPoint(x + 19,
                                                                    y + 8), QtCore.QPoint(
                x + 14, y + 13.0)
            points = [tl, tr, tp]
            triangle = QtGui.QPolygon(points)
        else:
            tl, tr, tp = QtCore.QPoint(x + 11, y + 6), QtCore.QPoint(x + 16,
                                                                     y + 11), QtCore.QPoint(
                x + 11, y + 16.0)
            points = [tl, tr, tp]
            triangle = QtGui.QPolygon(points)

        currentBrush = painter.brush()
        painter.setBrush(brush)
        painter.drawPolygon(triangle)
        painter.setBrush(currentBrush)

    def paintEvent(self, event):
        # Custom-paint the header (title, triangle, borders, drag grip)
        # according to the selected rollout style.
        painter = QtGui.QPainter()
        painter.begin(self)
        painter.setRenderHint(painter.Antialiasing)
        font = painter.font()
        font.setBold(True)
        painter.setFont(font)

        x = self.rect().x()
        y = self.rect().y()
        w = self.rect().width() - 1
        h = self.rect().height() - 1
        r = 8

        # draw a rounded style
        if self._rolloutStyle == 2:
            # draw the text
            painter.drawText(x + 33, y + 3, w, 16,
                             QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
                             self.title())

            # draw the triangle
            self.__drawTriangle(painter, x, y)

            # draw the borders
            pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
            pen.setWidthF(0.6)
            painter.setPen(pen)

            painter.drawRoundedRect(x + 1, y + 1, w - 1, h - 1, r, r)

            pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
            painter.setPen(pen)

            painter.drawRoundedRect(x, y, w - 1, h - 1, r, r)

        # draw a square style
        if self._rolloutStyle == 3:
            # draw the text
            painter.drawText(x + 33, y + 3, w, 16,
                             QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
                             self.title())

            self.__drawTriangle(painter, x, y)

            # draw the borders
            pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
            pen.setWidthF(0.6)
            painter.setPen(pen)

            painter.drawRect(x + 1, y + 1, w - 1, h - 1)

            pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
            painter.setPen(pen)

            painter.drawRect(x, y, w - 1, h - 1)

        # draw a Maya style
        if self._rolloutStyle == 4:
            # draw the text
            painter.drawText(x + 33, y + 3, w, 16,
                             QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
                             self.title())

            painter.setRenderHint(QtGui.QPainter.Antialiasing, False)

            self.__drawTriangle(painter, x, y)

            # draw the borders - top
            headerHeight = 20

            headerRect = QtCore.QRect(x + 1, y + 1, w - 1, headerHeight)
            headerRectShadow = QtCore.QRect(x - 1, y - 1, w + 1,
                                            headerHeight + 2)

            # Highlight
            pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
            pen.setWidthF(0.4)
            painter.setPen(pen)

            painter.drawRect(headerRect)
            painter.fillRect(headerRect, QtGui.QColor(255, 255, 255, 18))

            # Shadow
            pen.setColor(self.palette().color(QtGui.QPalette.Dark))
            painter.setPen(pen)
            painter.drawRect(headerRectShadow)

            if not self.isCollapsed():
                # draw the lover border
                pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Dark))
                pen.setWidthF(0.8)
                painter.setPen(pen)

                offSet = headerHeight + 3
                bodyRect = QtCore.QRect(x, y + offSet, w, h - offSet)
                bodyRectShadow = QtCore.QRect(x + 1, y + offSet, w + 1,
                                              h - offSet + 1)
                painter.drawRect(bodyRect)

                pen.setColor(self.palette().color(QtGui.QPalette.Light))
                pen.setWidthF(0.4)
                painter.setPen(pen)

                painter.drawRect(bodyRectShadow)

        # draw a boxed style
        elif self._rolloutStyle == 1:
            if self.isCollapsed():
                arect = QtCore.QRect(x + 1, y + 9, w - 1, 4)
                brect = QtCore.QRect(x, y + 8, w - 1, 4)
                text = '+'
            else:
                arect = QtCore.QRect(x + 1, y + 9, w - 1, h - 9)
                brect = QtCore.QRect(x, y + 8, w - 1, h - 9)
                text = '-'

            # draw the borders
            pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
            pen.setWidthF(0.6)
            painter.setPen(pen)

            painter.drawRect(arect)

            pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
            painter.setPen(pen)

            painter.drawRect(brect)

            painter.setRenderHint(painter.Antialiasing, False)
            painter.setBrush(
                self.palette().color(QtGui.QPalette.Window).darker(120))
            painter.drawRect(x + 10, y + 1, w - 20, 16)
            painter.drawText(x + 16, y + 1,
                             w - 32, 16,
                             QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,
                             text)
            painter.drawText(x + 10, y + 1,
                             w - 20, 16,
                             QtCore.Qt.AlignCenter,
                             self.title())

        if self.dragDropMode():
            rect = self.dragDropRect()

            # draw the lines
            l = rect.left()
            r = rect.right()
            cy = rect.center().y()

            for y in (cy - 3, cy, cy + 3):
                painter.drawLine(l, y, r, y)

        painter.end()

    def setCollapsed(self, state=True):
        # Collapse/expand by clamping the item's height to the header height
        # and hiding/showing the hosted widget; notifies the accordion.
        if self.isCollapsible():
            accord = self.accordionWidget()
            accord.setUpdatesEnabled(False)

            self._collapsed = state

            if state:
                self.setMinimumHeight(22)
                self.setMaximumHeight(22)
                self.widget().setVisible(False)
            else:
                self.setMinimumHeight(0)
                self.setMaximumHeight(1000000)
                self.widget().setVisible(True)

            self._accordianWidget.emitItemCollapsed(self)
            accord.setUpdatesEnabled(True)

    def setCollapsible(self, state=True):
        self._collapsible = state

    def setCustomData(self, key, value):
        """
        \remarks set a custom pointer to information stored on this item
        \param key <str>
        \param value <variant>
        """
        self._customData[str(key)] = value

    def setDragDropMode(self, mode):
        self._dragDropMode = mode

    def setRolloutStyle(self, style):
        self._rolloutStyle = style

    def showMenu(self):
        # Only forward context-menu requests that originate in the header.
        if QtCore.QRect(0, 0, self.width(), 20).contains(
                self.mapFromGlobal(QtGui.QCursor.pos())):
            self._accordianWidget.emitItemMenuRequested(self)

    def rolloutStyle(self):
        return self._rolloutStyle

    def toggleCollapsed(self):
        # enable signaling here
        collapse_state = not self.isCollapsed()
        self.setCollapsed(collapse_state)
        return collapse_state

    def widget(self):
        return self._widget
class AccordionWidget(QtWidgets.QScrollArea):
    """Accordion style widget.

    A collapsible accordion widget like Maya's attribute editor.

    This is a modified version based on Blur's Accordion Widget to
    include a Maya style.
    """

    itemCollapsed = QtCore.Signal(AccordionItem)
    itemMenuRequested = QtCore.Signal(AccordionItem)
    itemDragFailed = QtCore.Signal(AccordionItem)
    itemsReordered = QtCore.Signal()

    # Rollout style constants (see AccordionItem.paintEvent).
    Boxed = 1
    Rounded = 2
    Square = 3
    Maya = 4

    # Drag/drop mode constants.
    NoDragDrop = 0
    InternalMove = 1

    def __init__(self, parent):
        QtWidgets.QScrollArea.__init__(self, parent)

        self.setFrameShape(QtWidgets.QScrollArea.NoFrame)
        self.setAutoFillBackground(False)
        self.setWidgetResizable(True)
        self.setMouseTracking(True)
        self.verticalScrollBar().setMaximumWidth(10)

        widget = QtWidgets.QWidget(self)

        # define custom properties
        self._rolloutStyle = AccordionWidget.Rounded
        self._dragDropMode = AccordionWidget.NoDragDrop
        self._scrolling = False      # True while a click-drag scroll is active
        self._scrollInitY = 0        # global Y where the drag started
        self._scrollInitVal = 0      # scrollbar value when the drag started
        self._itemClass = AccordionItem

        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(2, 2, 2, 6)
        layout.setSpacing(2)
        # Trailing stretch keeps items packed at the top; item slots are
        # always inserted before it (hence the count() - 1 arithmetic below).
        layout.addStretch(1)

        widget.setLayout(layout)

        self.setWidget(widget)

    def setSpacing(self, spaceInt):
        self.widget().layout().setSpacing(spaceInt)

    def addItem(self, title, widget, collapsed=False):
        # Wrap `widget` in a new rollout item and append it before the
        # trailing stretch. Returns the created item.
        self.setUpdatesEnabled(False)
        item = self._itemClass(self, title, widget)
        item.setRolloutStyle(self.rolloutStyle())
        item.setDragDropMode(self.dragDropMode())
        layout = self.widget().layout()
        layout.insertWidget(layout.count() - 1, item)
        layout.setStretchFactor(item, 0)

        if collapsed:
            item.setCollapsed(collapsed)

        self.setUpdatesEnabled(True)

        return item

    def clear(self):
        # Remove and delete every item, leaving only the trailing stretch.
        self.setUpdatesEnabled(False)
        layout = self.widget().layout()
        while layout.count() > 1:
            item = layout.itemAt(0)

            # remove the item from the layout
            w = item.widget()
            layout.removeItem(item)

            # close the widget and delete it
            w.close()
            w.deleteLater()

        self.setUpdatesEnabled(True)

    def eventFilter(self, object, event):
        # Route mouse events from filtered children into the click-drag
        # scrolling handlers below.
        if event.type() == QtCore.QEvent.MouseButtonPress:
            self.mousePressEvent(event)
            return True

        elif event.type() == QtCore.QEvent.MouseMove:
            self.mouseMoveEvent(event)
            return True

        elif event.type() == QtCore.QEvent.MouseButtonRelease:
            self.mouseReleaseEvent(event)
            return True

        return False

    def canScroll(self):
        return self.verticalScrollBar().maximum() > 0

    def count(self):
        # Exclude the trailing stretch from the item count.
        return self.widget().layout().count() - 1

    def dragDropMode(self):
        return self._dragDropMode

    def indexOf(self, widget):
        """
        \remarks Searches for widget(not including child layouts).
                 Returns the index of widget, or -1 if widget is not found
        \return <int>
        """
        layout = self.widget().layout()
        for index in range(layout.count()):
            if layout.itemAt(index).widget().widget() == widget:
                return index
        return -1

    def isBoxedMode(self):
        return self._rolloutStyle == AccordionWidget.Maya

    def itemClass(self):
        return self._itemClass

    def itemAt(self, index):
        # Return the AccordionItem at `index`, or None when out of range.
        layout = self.widget().layout()
        if 0 <= index and index < layout.count() - 1:
            return layout.itemAt(index).widget()
        return None

    def emitItemCollapsed(self, item):
        if not self.signalsBlocked():
            self.itemCollapsed.emit(item)

    def emitItemDragFailed(self, item):
        if not self.signalsBlocked():
            self.itemDragFailed.emit(item)

    def emitItemMenuRequested(self, item):
        if not self.signalsBlocked():
            self.itemMenuRequested.emit(item)

    def emitItemsReordered(self):
        if not self.signalsBlocked():
            self.itemsReordered.emit()

    def enterEvent(self, event):
        # Show the open-hand cursor to hint that click-drag scrolling works.
        if self.canScroll():
            QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.OpenHandCursor)

    def leaveEvent(self, event):
        if self.canScroll():
            QtWidgets.QApplication.restoreOverrideCursor()

    def mouseMoveEvent(self, event):
        if self._scrolling:
            sbar = self.verticalScrollBar()
            smax = sbar.maximum()

            # calculate the distance moved for the moust point
            dy = event.globalY() - self._scrollInitY

            # calculate the percentage that is of the scroll bar
            dval = smax * (dy / float(sbar.height()))

            # calculate the new value
            sbar.setValue(self._scrollInitVal - dval)

        event.accept()

    def mousePressEvent(self, event):
        # handle a scroll event
        if event.button() == QtCore.Qt.LeftButton and self.canScroll():
            self._scrolling = True
            self._scrollInitY = event.globalY()
            self._scrollInitVal = self.verticalScrollBar().value()

            QtWidgets.QApplication.setOverrideCursor(
                QtCore.Qt.ClosedHandCursor)

        event.accept()

    def mouseReleaseEvent(self, event):
        if self._scrolling:
            QtWidgets.QApplication.restoreOverrideCursor()

        self._scrolling = False
        self._scrollInitY = 0
        self._scrollInitVal = 0
        event.accept()

    def moveItemDown(self, index):
        layout = self.widget().layout()
        if (layout.count() - 1) > (index + 1):
            widget = layout.takeAt(index).widget()
            layout.insertWidget(index + 1, widget)

    def moveItemUp(self, index):
        if index > 0:
            layout = self.widget().layout()
            widget = layout.takeAt(index).widget()
            layout.insertWidget(index - 1, widget)

    def setBoxedMode(self, state):
        if state:
            self._rolloutStyle = AccordionWidget.Boxed
        else:
            self._rolloutStyle = AccordionWidget.Rounded

    def setDragDropMode(self, dragDropMode):
        # Propagate the mode to existing items as well as future ones.
        self._dragDropMode = dragDropMode

        for item in self.findChildren(AccordionItem):
            item.setDragDropMode(self._dragDropMode)

    def setItemClass(self, itemClass):
        self._itemClass = itemClass

    def setRolloutStyle(self, rolloutStyle):
        # Propagate the style to existing items as well as future ones.
        self._rolloutStyle = rolloutStyle

        for item in self.findChildren(AccordionItem):
            item.setRolloutStyle(self._rolloutStyle)

    def rolloutStyle(self):
        return self._rolloutStyle

    def takeAt(self, index):
        # Remove and return the item widget at `index` (None if out of range).
        self.setUpdatesEnabled(False)
        layout = self.widget().layout()
        widget = None
        if 0 <= index and index < layout.count() - 1:
            item = layout.itemAt(index)
            widget = item.widget()

            layout.removeItem(item)
            widget.close()
        self.setUpdatesEnabled(True)
        return widget

    def widgetAt(self, index):
        item = self.itemAt(index)
        if item:
            return item.widget()
        return None

    pyBoxedMode = QtCore.Property('bool', isBoxedMode, setBoxedMode)
| 31.224 | 92 | 0.564386 | from .vendor.Qt import QtCore, QtWidgets, QtGui
class AccordionItem(QtWidgets.QGroupBox):
trigger = QtCore.Signal(bool)
def __init__(self, accordion, title, widget):
QtWidgets.QGroupBox.__init__(self, parent=accordion)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(6, 12, 6, 6)
layout.setSpacing(0)
layout.addWidget(widget)
self._accordianWidget = accordion
self._rolloutStyle = 2
self._dragDropMode = 0
self.setAcceptDrops(True)
self.setLayout(layout)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showMenu)
self._widget = widget
self._collapsed = False
self._collapsible = True
self._clicked = False
self._customData = {}
self.setTitle(title)
def accordionWidget(self):
return self._accordianWidget
def customData(self, key, default=None):
return self._customData.get(str(key), default)
def dragEnterEvent(self, event):
if not self._dragDropMode:
return
source = event.source()
if source != self and source.parent() == self.parent() and isinstance(
source, AccordionItem):
event.acceptProposedAction()
def dragDropRect(self):
return QtCore.QRect(25, 7, 10, 6)
def dragDropMode(self):
return self._dragDropMode
def dragMoveEvent(self, event):
if not self._dragDropMode:
return
source = event.source()
if source != self and source.parent() == self.parent() and isinstance(
source, AccordionItem):
event.acceptProposedAction()
def dropEvent(self, event):
widget = event.source()
layout = self.parent().layout()
layout.insertWidget(layout.indexOf(self), widget)
self._accordianWidget.emitItemsReordered()
def expandCollapseRect(self):
return QtCore.QRect(0, 0, self.width(), 20)
def enterEvent(self, event):
self.accordionWidget().leaveEvent(event)
event.accept()
def leaveEvent(self, event):
self.accordionWidget().enterEvent(event)
event.accept()
def mouseReleaseEvent(self, event):
if self._clicked and self.expandCollapseRect().contains(event.pos()):
self.toggleCollapsed()
event.accept()
else:
event.ignore()
self._clicked = False
def mouseMoveEvent(self, event):
event.ignore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton and self.dragDropRect().contains(
event.pos()):
pixmap = QtGui.QPixmap.grabWidget(self, self.rect())
mimeData = QtCore.QMimeData()
mimeData.setText('ItemTitle::%s' % (self.title()))
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setPixmap(pixmap)
drag.setHotSpot(event.pos())
if not drag.exec_():
self._accordianWidget.emitItemDragFailed(self)
event.accept()
elif event.button() == QtCore.Qt.LeftButton and self.expandCollapseRect().contains(
event.pos()):
self._clicked = True
event.accept()
else:
event.ignore()
def isCollapsed(self):
return self._collapsed
def isCollapsible(self):
return self._collapsible
def __drawTriangle(self, painter, x, y):
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 160),
QtCore.Qt.SolidPattern)
if not self.isCollapsed():
tl, tr, tp = QtCore.QPoint(x + 9, y + 8), QtCore.QPoint(x + 19,
y + 8), QtCore.QPoint(
x + 14, y + 13.0)
points = [tl, tr, tp]
triangle = QtGui.QPolygon(points)
else:
tl, tr, tp = QtCore.QPoint(x + 11, y + 6), QtCore.QPoint(x + 16,
y + 11), QtCore.QPoint(
x + 11, y + 16.0)
points = [tl, tr, tp]
triangle = QtGui.QPolygon(points)
currentBrush = painter.brush()
painter.setBrush(brush)
painter.drawPolygon(triangle)
painter.setBrush(currentBrush)
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
painter.setRenderHint(painter.Antialiasing)
font = painter.font()
font.setBold(True)
painter.setFont(font)
x = self.rect().x()
y = self.rect().y()
w = self.rect().width() - 1
h = self.rect().height() - 1
r = 8
if self._rolloutStyle == 2:
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
self.__drawTriangle(painter, x, y)
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRoundedRect(x + 1, y + 1, w - 1, h - 1, r, r)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRoundedRect(x, y, w - 1, h - 1, r, r)
if self._rolloutStyle == 3:
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
self.__drawTriangle(painter, x, y)
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRect(x + 1, y + 1, w - 1, h - 1)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRect(x, y, w - 1, h - 1)
if self._rolloutStyle == 4:
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
painter.setRenderHint(QtGui.QPainter.Antialiasing, False)
self.__drawTriangle(painter, x, y)
headerHeight = 20
headerRect = QtCore.QRect(x + 1, y + 1, w - 1, headerHeight)
headerRectShadow = QtCore.QRect(x - 1, y - 1, w + 1,
headerHeight + 2)
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.4)
painter.setPen(pen)
painter.drawRect(headerRect)
painter.fillRect(headerRect, QtGui.QColor(255, 255, 255, 18))
pen.setColor(self.palette().color(QtGui.QPalette.Dark))
painter.setPen(pen)
painter.drawRect(headerRectShadow)
if not self.isCollapsed():
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Dark))
pen.setWidthF(0.8)
painter.setPen(pen)
offSet = headerHeight + 3
bodyRect = QtCore.QRect(x, y + offSet, w, h - offSet)
bodyRectShadow = QtCore.QRect(x + 1, y + offSet, w + 1,
h - offSet + 1)
painter.drawRect(bodyRect)
pen.setColor(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.4)
painter.setPen(pen)
painter.drawRect(bodyRectShadow)
elif self._rolloutStyle == 1:
if self.isCollapsed():
arect = QtCore.QRect(x + 1, y + 9, w - 1, 4)
brect = QtCore.QRect(x, y + 8, w - 1, 4)
text = '+'
else:
arect = QtCore.QRect(x + 1, y + 9, w - 1, h - 9)
brect = QtCore.QRect(x, y + 8, w - 1, h - 9)
text = '-'
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRect(arect)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRect(brect)
painter.setRenderHint(painter.Antialiasing, False)
painter.setBrush(
self.palette().color(QtGui.QPalette.Window).darker(120))
painter.drawRect(x + 10, y + 1, w - 20, 16)
painter.drawText(x + 16, y + 1,
w - 32, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,
text)
painter.drawText(x + 10, y + 1,
w - 20, 16,
QtCore.Qt.AlignCenter,
self.title())
if self.dragDropMode():
rect = self.dragDropRect()
l = rect.left()
r = rect.right()
cy = rect.center().y()
for y in (cy - 3, cy, cy + 3):
painter.drawLine(l, y, r, y)
painter.end()
def setCollapsed(self, state=True):
if self.isCollapsible():
accord = self.accordionWidget()
accord.setUpdatesEnabled(False)
self._collapsed = state
if state:
self.setMinimumHeight(22)
self.setMaximumHeight(22)
self.widget().setVisible(False)
else:
self.setMinimumHeight(0)
self.setMaximumHeight(1000000)
self.widget().setVisible(True)
self._accordianWidget.emitItemCollapsed(self)
accord.setUpdatesEnabled(True)
def setCollapsible(self, state=True):
self._collapsible = state
def setCustomData(self, key, value):
self._customData[str(key)] = value
def setDragDropMode(self, mode):
self._dragDropMode = mode
def setRolloutStyle(self, style):
self._rolloutStyle = style
    def showMenu(self):
        """Ask the accordion to emit a menu request, but only when the cursor
        currently sits over the 20px-high header strip of this item."""
        if QtCore.QRect(0, 0, self.width(), 20).contains(
                self.mapFromGlobal(QtGui.QCursor.pos())):
            self._accordianWidget.emitItemMenuRequested(self)
    def rolloutStyle(self):
        """Return the current rollout (painting) style."""
        return self._rolloutStyle
def toggleCollapsed(self):
collapse_state = not self.isCollapsed()
self.setCollapsed(collapse_state)
return collapse_state
    def widget(self):
        """Return the content widget wrapped by this accordion item."""
        return self._widget
class AccordionWidget(QtWidgets.QScrollArea):
    """Scroll area that stacks collapsible AccordionItem sections vertically.

    Items can be added, removed and reordered; the whole area can be
    click-drag scrolled.  Signals are emitted when an item collapses,
    fails a drag, requests a context menu, or when items are reordered.
    """

    itemCollapsed = QtCore.Signal(AccordionItem)
    itemMenuRequested = QtCore.Signal(AccordionItem)
    itemDragFailed = QtCore.Signal(AccordionItem)
    itemsReordered = QtCore.Signal()

    # rollout (painting) styles
    Boxed = 1
    Rounded = 2
    Square = 3
    Maya = 4

    # drag & drop modes
    NoDragDrop = 0
    InternalMove = 1

    def __init__(self, parent):
        QtWidgets.QScrollArea.__init__(self, parent)
        self.setFrameShape(QtWidgets.QScrollArea.NoFrame)
        self.setAutoFillBackground(False)
        self.setWidgetResizable(True)
        self.setMouseTracking(True)
        self.verticalScrollBar().setMaximumWidth(10)
        widget = QtWidgets.QWidget(self)
        # default appearance/behaviour
        self._rolloutStyle = AccordionWidget.Rounded
        self._dragDropMode = AccordionWidget.NoDragDrop
        # state for click-drag scrolling of the whole area
        self._scrolling = False
        self._scrollInitY = 0
        self._scrollInitVal = 0
        self._itemClass = AccordionItem
        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(2, 2, 2, 6)
        layout.setSpacing(2)
        layout.addStretch(1)  # trailing stretch keeps items packed at the top
        widget.setLayout(layout)
        self.setWidget(widget)

    def setSpacing(self, spaceInt):
        """Set the vertical spacing (pixels) between accordion items."""
        self.widget().layout().setSpacing(spaceInt)

    def addItem(self, title, widget, collapsed=False):
        """Wrap *widget* in a new accordion item, append it and return the item."""
        self.setUpdatesEnabled(False)
        item = self._itemClass(self, title, widget)
        item.setRolloutStyle(self.rolloutStyle())
        item.setDragDropMode(self.dragDropMode())
        layout = self.widget().layout()
        # insert before the trailing stretch item
        layout.insertWidget(layout.count() - 1, item)
        layout.setStretchFactor(item, 0)
        if collapsed:
            item.setCollapsed(collapsed)
        self.setUpdatesEnabled(True)
        return item

    def clear(self):
        """Remove and delete every accordion item (the stretch item is kept)."""
        self.setUpdatesEnabled(False)
        layout = self.widget().layout()
        while layout.count() > 1:
            item = layout.itemAt(0)
            w = item.widget()
            layout.removeItem(item)
            w.close()
            w.deleteLater()
        self.setUpdatesEnabled(True)

    def eventFilter(self, object, event):
        """Route mouse press/move/release events to this widget's handlers."""
        if event.type() == QtCore.QEvent.MouseButtonPress:
            self.mousePressEvent(event)
            return True
        elif event.type() == QtCore.QEvent.MouseMove:
            self.mouseMoveEvent(event)
            return True
        elif event.type() == QtCore.QEvent.MouseButtonRelease:
            self.mouseReleaseEvent(event)
            return True
        return False

    def canScroll(self):
        """Return True when the content exceeds the viewport height."""
        return self.verticalScrollBar().maximum() > 0

    def count(self):
        """Return the number of accordion items (excludes the stretch item)."""
        return self.widget().layout().count() - 1

    def dragDropMode(self):
        """Return the current drag & drop mode."""
        return self._dragDropMode

    def indexOf(self, widget):
        """Return the layout index of the item wrapping *widget*, or -1."""
        layout = self.widget().layout()
        for index in range(layout.count()):
            if layout.itemAt(index).widget().widget() == widget:
                return index
        return -1

    def isBoxedMode(self):
        """Return True when the Maya-style boxed rollout is active."""
        return self._rolloutStyle == AccordionWidget.Maya

    def itemClass(self):
        """Return the class used to create new accordion items."""
        return self._itemClass

    def itemAt(self, index):
        """Return the accordion item at *index*, or None when out of range."""
        layout = self.widget().layout()
        if 0 <= index and index < layout.count() - 1:
            return layout.itemAt(index).widget()
        return None

    def emitItemCollapsed(self, item):
        """Emit itemCollapsed unless signals are blocked."""
        if not self.signalsBlocked():
            self.itemCollapsed.emit(item)

    def emitItemDragFailed(self, item):
        """Emit itemDragFailed unless signals are blocked."""
        if not self.signalsBlocked():
            self.itemDragFailed.emit(item)

    def emitItemMenuRequested(self, item):
        """Emit itemMenuRequested unless signals are blocked."""
        if not self.signalsBlocked():
            self.itemMenuRequested.emit(item)

    def emitItemsReordered(self):
        """Emit itemsReordered unless signals are blocked."""
        if not self.signalsBlocked():
            self.itemsReordered.emit()

    def enterEvent(self, event):
        """Show an open-hand cursor when hover starts and scrolling is possible."""
        if self.canScroll():
            QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.OpenHandCursor)

    def leaveEvent(self, event):
        """Restore the cursor when the pointer leaves the widget."""
        if self.canScroll():
            QtWidgets.QApplication.restoreOverrideCursor()

    def mouseMoveEvent(self, event):
        """While drag-scrolling, move the scrollbar proportionally to the drag."""
        if self._scrolling:
            sbar = self.verticalScrollBar()
            smax = sbar.maximum()
            # scale pixel movement to the scrollbar's full value range
            dy = event.globalY() - self._scrollInitY
            dval = smax * (dy / float(sbar.height()))
            sbar.setValue(self._scrollInitVal - dval)
        event.accept()

    def mousePressEvent(self, event):
        """Begin drag-scrolling on a left press when scrolling is possible."""
        if event.button() == QtCore.Qt.LeftButton and self.canScroll():
            self._scrolling = True
            self._scrollInitY = event.globalY()
            self._scrollInitVal = self.verticalScrollBar().value()
            QtWidgets.QApplication.setOverrideCursor(
                QtCore.Qt.ClosedHandCursor)
        event.accept()

    def mouseReleaseEvent(self, event):
        """End drag-scrolling, restore the cursor and reset the drag state."""
        if self._scrolling:
            QtWidgets.QApplication.restoreOverrideCursor()
        self._scrolling = False
        self._scrollInitY = 0
        self._scrollInitVal = 0
        event.accept()

    def moveItemDown(self, index):
        """Swap the item at *index* with the one below it (if any)."""
        layout = self.widget().layout()
        if (layout.count() - 1) > (index + 1):
            widget = layout.takeAt(index).widget()
            layout.insertWidget(index + 1, widget)

    def moveItemUp(self, index):
        """Swap the item at *index* with the one above it (if any)."""
        if index > 0:
            layout = self.widget().layout()
            widget = layout.takeAt(index).widget()
            layout.insertWidget(index - 1, widget)

    def setBoxedMode(self, state):
        """Toggle between the Boxed style (True) and Rounded (False)."""
        if state:
            self._rolloutStyle = AccordionWidget.Boxed
        else:
            self._rolloutStyle = AccordionWidget.Rounded

    def setDragDropMode(self, dragDropMode):
        """Set the drag & drop mode here and on every existing item."""
        self._dragDropMode = dragDropMode
        for item in self.findChildren(AccordionItem):
            item.setDragDropMode(self._dragDropMode)

    def setItemClass(self, itemClass):
        """Set the class instantiated by addItem for new accordion items."""
        self._itemClass = itemClass

    def setRolloutStyle(self, rolloutStyle):
        """Set the rollout style here and on every existing item."""
        self._rolloutStyle = rolloutStyle
        for item in self.findChildren(AccordionItem):
            item.setRolloutStyle(self._rolloutStyle)

    def rolloutStyle(self):
        """Return the current rollout style."""
        return self._rolloutStyle

    def takeAt(self, index):
        """Remove and return the item widget at *index* (None if out of range)."""
        self.setUpdatesEnabled(False)
        layout = self.widget().layout()
        widget = None
        if 0 <= index and index < layout.count() - 1:
            item = layout.itemAt(index)
            widget = item.widget()
            layout.removeItem(item)
            widget.close()
        self.setUpdatesEnabled(True)
        return widget

    def widgetAt(self, index):
        """Return the wrapped content widget at *index*, or None."""
        item = self.itemAt(index)
        if item:
            return item.widget()
        return None

    # expose boxed mode as a Qt property
    pyBoxedMode = QtCore.Property('bool', isBoxedMode, setBoxedMode)
| true | true |
f721842d767265f7f548ee0d34b73c892bd60f1b | 183 | py | Python | pystrometry/example_subpkg/setup_package.py | Johannes-Sahlmann/pystrometry | 79dc67369be2ce46ddb0ebc73e5fe3570d20c025 | [
"BSD-3-Clause"
] | 9 | 2019-12-06T13:12:33.000Z | 2021-10-05T12:47:15.000Z | pystrometry/example_subpkg/setup_package.py | Johannes-Sahlmann/pystrometry | 79dc67369be2ce46ddb0ebc73e5fe3570d20c025 | [
"BSD-3-Clause"
] | 2 | 2019-11-28T17:20:27.000Z | 2019-12-09T18:44:35.000Z | pystrometry/example_subpkg/setup_package.py | Johannes-Sahlmann/pystrometry | 79dc67369be2ce46ddb0ebc73e5fe3570d20c025 | [
"BSD-3-Clause"
] | 3 | 2019-11-28T17:04:22.000Z | 2021-10-19T13:12:34.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import
def get_package_data():
    """Declare the data files bundled with pystrometry.example_subpkg."""
    package_data = {'pystrometry.example_subpkg': ['data/*']}
    return package_data
| 26.142857 | 63 | 0.754098 |
from __future__ import absolute_import
def get_package_data():
    """Return the mapping of package name to the data globs to install."""
    data_globs = ['data/*']
    return {'pystrometry.example_subpkg': data_globs}
| true | true |
f721854d6db9efb92a7df07e88cf428c0d746223 | 3,699 | py | Python | muti/glmu.py | invertedv/utilities | 42c331893b1beee73b2d21df6cb2bad73b872bb7 | [
"MIT"
] | null | null | null | muti/glmu.py | invertedv/utilities | 42c331893b1beee73b2d21df6cb2bad73b872bb7 | [
"MIT"
] | null | null | null | muti/glmu.py | invertedv/utilities | 42c331893b1beee73b2d21df6cb2bad73b872bb7 | [
"MIT"
] | null | null | null | from muti import genu
import clickhouse_driver
import pandas as pd
from modeling.glm import glm
import numpy as np
import math
def build_model_formula(features_dict: dict, target: str):
    """
    Builds the model formula for glm from modeling based on the features_dict specification.
    Does not include embedded features.

    Feature spec per entry: ('cts',) continuous, ('spl', knots) spline,
    ('cat', _, reference_level) categorical.  Entries with any other type
    marker are skipped.  (Bug fix: previously an unrecognized type still
    flipped the separator, yielding malformed formulas such as 'y~ + x'.)

    :param features_dict: features dictionary
    :param target: dependent variable
    :return: model formula
    :rtype: str
    """
    terms = []
    for feature in features_dict:
        spec = features_dict[feature]
        if spec[0] == 'cts':
            terms.append(feature)
        elif spec[0] == 'spl':
            terms.append('h(' + feature + ',' + spec[1] + ',0)')
        elif spec[0] == 'cat':
            terms.append('c(' + feature + ',' + spec[2] + ')')
    return target + '~' + ' + '.join(terms)
def incr_build(model: str, target_var: str, start_list: list, add_list: list, get_data_fn, sample_size: int,
               client: clickhouse_driver.Client, global_valid_df_in: pd.DataFrame, family='normal'):
    """
    Builds a sequence of GLM models over an expanding window of time periods.

    The first model is fit on the periods in start_list and scored on the first
    element of add_list; each subsequent step folds the previously scored
    period into the build data and scores the next element of add_list.

    :param model: model specification for glm
    :param target_var: response variable we're modeling
    :param start_list: list of (general) time periods for the first model build
    :param add_list: list of out-of-time periods to evaluate, in order
    :param get_data_fn: function returning a pandas DataFrame for a list of periods
    :param sample_size: size of pandas DataFrames to get
    :param client: db connector
    :param global_valid_df_in: pandas DataFrame covering all values of add_list for validation
    :param family: family of the model ('normal' or 'binomial')
    :return: (periods evaluated, out-of-sample RMSEs, out-of-sample r-squares,
              validation DataFrame with incremental predictions in 'model_glm_inc')
    """
    build_list = start_list  # NOTE: intentional alias — the caller's list grows as periods are folded in
    global_valid_df = global_valid_df_in.copy()
    global_valid_df['model_glm_inc'] = np.full((global_valid_df.shape[0]), 0.0)
    segs = []
    rmse_valid = []
    corr_valid = []
    for out_period in add_list:
        segs.append(out_period)
        model_df = get_data_fn(build_list, sample_size, client)
        valid_df = get_data_fn([out_period], sample_size, client)
        print('Data sizes for out-of-sample value {0}: build {1}, validate {2}'.format(
            out_period, model_df.shape[0], valid_df.shape[0]))
        glm_model = glm(model, model_df, family=family)
        build_list.append(out_period)
        # score the full validation frame but keep only this period's predictions
        global_preds = glm_model.predict(global_valid_df)
        mask = global_valid_df['vintage'] == out_period
        global_valid_df.loc[mask, 'model_glm_inc'] = global_preds[mask]
        # out-of-sample fit statistics for this period
        yh = glm_model.predict(valid_df)
        residuals = valid_df[target_var] - np.array(yh).flatten()
        rmse_valid.append(math.sqrt(np.square(residuals).mean()))
        valid_df['yh'] = yh
        corr_valid.append(genu.r_square(valid_df['yh'], valid_df[target_var]))
    return segs, rmse_valid, corr_valid, global_valid_df
| 41.561798 | 112 | 0.651798 | from muti import genu
import clickhouse_driver
import pandas as pd
from modeling.glm import glm
import numpy as np
import math
def build_model_formula(features_dict: dict, target: str):
    """Build the glm model formula string from the features_dict specification.

    Feature spec per entry: ('cts',) continuous, ('spl', knots) spline,
    ('cat', _, reference_level) categorical.

    :param features_dict: features dictionary
    :param target: dependent variable
    :return: model formula string, e.g. 'y~x1 + h(x2,10,0) + c(x3,base)'
    """
    ms = target + '~'
    extra = ''
    for feature in features_dict:
        if features_dict[feature][0] == 'cts':
            ms += extra + feature
        elif features_dict[feature][0] == 'spl':
            ms += extra + 'h(' + feature + ',' + features_dict[feature][1] + ',0)'
        elif features_dict[feature][0] == 'cat':
            ms += extra + 'c(' + feature + ',' + features_dict[feature][2] + ')'
        # NOTE(review): the separator flips even for unrecognized feature types,
        # which would yield 'y~ + x' — confirm feature types are always valid.
        extra = ' + '
    return ms
def incr_build(model: str, target_var: str, start_list: list, add_list: list, get_data_fn, sample_size: int,
               client: clickhouse_driver.Client, global_valid_df_in: pd.DataFrame, family='normal'):
    """Fit a GLM on an expanding window of periods and score each out-of-time period.

    :param model: glm model specification
    :param target_var: response variable
    :param start_list: periods used for the initial build (grows in place)
    :param add_list: out-of-time periods to evaluate, in order
    :param get_data_fn: returns a pandas DataFrame for a list of periods
    :param sample_size: number of rows requested from get_data_fn
    :param client: db connector
    :param global_valid_df_in: validation frame covering all add_list periods
    :param family: 'normal' or 'binomial'
    :return: (evaluated periods, RMSE list, r-square list, validation frame)
    """
    build_list = start_list  # alias on purpose: grows with each evaluated period
    global_valid_df = global_valid_df_in.copy()
    global_valid_df['model_glm_inc'] = np.full((global_valid_df.shape[0]), 0.0)
    evaluated = []
    rmse_list = []
    rsq_list = []
    for period in add_list:
        evaluated += [period]
        model_df = get_data_fn(build_list, sample_size, client)
        valid_df = get_data_fn([period], sample_size, client)
        print('Data sizes for out-of-sample value {0}: build {1}, validate {2}'.format(
            period, model_df.shape[0], valid_df.shape[0]))
        fitted = glm(model, model_df, family=family)
        build_list += [period]
        # keep only this period's predictions in the running validation frame
        whole_pred = fitted.predict(global_valid_df)
        sel = global_valid_df['vintage'] == period
        global_valid_df.loc[sel, 'model_glm_inc'] = whole_pred[sel]
        pred = fitted.predict(valid_df)
        err = valid_df[target_var] - np.array(pred).flatten()
        rmse_list += [math.sqrt(np.square(err).mean())]
        valid_df['yh'] = pred
        rsq_list += [genu.r_square(valid_df['yh'], valid_df[target_var])]
    return evaluated, rmse_list, rsq_list, global_valid_df
| true | true |
f7218599cb5a20deb178638895ef1d333f863936 | 4,015 | py | Python | scripts/fastRequests.py | Hitoshirenu/muchspace | e3db813b148941d6caf6e3b13e82c0fc48f454bf | [
"MIT"
] | null | null | null | scripts/fastRequests.py | Hitoshirenu/muchspace | e3db813b148941d6caf6e3b13e82c0fc48f454bf | [
"MIT"
] | null | null | null | scripts/fastRequests.py | Hitoshirenu/muchspace | e3db813b148941d6caf6e3b13e82c0fc48f454bf | [
"MIT"
] | null | null | null | # import threading
from pathlib import Path
from multiprocessing.dummy import Pool as ThreadPool
from more_itertools import unique_everseen
import requests, json, datetime
from scripts.byteSize import human_byte_size
# Initialization: module-wide counters shared (unsynchronised) by the worker
# threads spawned in thread_series_creator and mutated by url_info.
Total_Size = 0        # accumulated Content-Length over all successful URLs
Processed_URLs = 0    # number of URLs fetched successfully
Progress = 0          # percentage of the list processed so far
Total_URLs = 0        # set by main() after the link file is read
Rate = 0              # percentage contribution of a single URL
Report = False        # when True, url_info appends an entry to ReportJson
ReportJson = []       # list of per-link dicts dumped to a JSON report
""" Main fuction to gather info about URL """
def url_info(URL):
    """Collect size and HTTP status for one URL, updating the module totals.

    Network failures are printed; when the module-level ``Report`` flag is set,
    an entry is appended to ``ReportJson`` even for failed links (with status
    ``'error'``).

    :param URL: the link to probe; whitespace/empty entries are ignored
    """
    linkStatus = {}
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    if URL in (' ', ''):  # Ignore whitespace/empty entries within the list
        return
    File_Size = 0
    response = None  # Bug fix: was referenced unbound below when requests.head() raised
    try:
        response = requests.head(URL, stream=True)
        response.raise_for_status()  # Surface 404/500 early
        # HEAD responses may omit the body size, so fall back to a streamed GET
        # and read only the headers; the context manager releases the connection.
        with requests.get(URL, stream=True) as body:
            # Bug fix: .get() avoids an uncaught KeyError when the header is missing.
            File_Size = int(body.headers.get('Content-length', 0))
        Progress += Rate
        Processed_URLs = Processed_URLs + 1
        Total_Size += File_Size
        print('URLs Done:{0}/{1} File Size:{2} Total Size:{3} Progress:{4:.2f}%'.format(
            Processed_URLs, Total_URLs, human_byte_size(File_Size),
            human_byte_size(Total_Size), Progress))
    except requests.exceptions.HTTPError as errh:
        print("Http Error:", errh)
    except requests.exceptions.ConnectionError as errc:
        print("Error Connecting:", errc)
    except requests.exceptions.Timeout as errt:
        print("Timeout Error:", errt)
    except requests.exceptions.RequestException as err:
        print("Oops: Something Else", err)
    if Report is True:
        linkStatus['link'] = URL
        linkStatus['size'] = human_byte_size(File_Size)
        # Bug fix: previously this raised NameError when no response was obtained.
        linkStatus['status'] = response.status_code if response is not None else 'error'
        linkStatus['last-checked'] = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
        ReportJson.append(linkStatus)
def thread_series_creator(List_Of_URLs):
    """Check every URL concurrently on a pool of 100 worker threads.

    Blocks until all URLs are processed; results accumulate in the
    module-level counters mutated by url_info itself.
    """
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    worker_pool = ThreadPool(100)
    # map() blocks until every url_info call has finished.
    worker_pool.map(url_info, List_Of_URLs)
    worker_pool.close()
    worker_pool.join()
def main(file_path, report=False):
    """Read a file of links, probe each link concurrently and print a summary.

    :param file_path: path of a text file with one URL per line
    :param report: when True, dump a per-link JSON report into the working directory
    """
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    # If exist check if it is a file
    file_of_links = Path(file_path)
    if file_of_links.is_file():
        try:
            # Preprocessing
            with open(file_of_links,'r') as f: # Loading URLs into list for faster access
                List_of_URLs = list(unique_everseen(f.read().splitlines())) # Removing duplicates without changing order
            Total_URLs = len(List_of_URLs) # Total number of links
            # NOTE(review): an empty file makes this divide by zero — confirm inputs are non-empty.
            Rate = 100/Total_URLs # Calculate each link percentage
        except IOError:
            print("IO Error : Unable to read from file")
            print("Exiting...")
            return
    else:
        print("Error! Invalid file path!")
        print("Exiting...")
        return
    Report = report
    thread_series_creator(List_of_URLs)
    if Report is True: # Creating report
        Date = datetime.date.today().strftime('%d.%b.%Y')
        with open("muchspace.Report."+Date+".json", "w") as write_file:
            json.dump(ReportJson, write_file, indent=4)
    # Final Console Report
    print("******Final Diagnostic Report******")
    print("Total URLs: {0} Processed URLs: {1} Rate of completion: {2:.2f}%".format(Total_URLs, Processed_URLs, Progress))
    print("Total size of {}/{} links is: {}".format(Processed_URLs, Total_URLs, human_byte_size(Total_Size)))
| 43.641304 | 187 | 0.646077 |
from pathlib import Path
from multiprocessing.dummy import Pool as ThreadPool
from more_itertools import unique_everseen
import requests, json, datetime
from scripts.byteSize import human_byte_size
# Module-wide counters shared (unsynchronised) by the worker threads.
Total_Size = 0        # accumulated Content-Length over all successful URLs
Processed_URLs = 0    # number of URLs fetched successfully
Progress = 0          # percentage of the list processed so far
Total_URLs = 0        # set by main() after the link file is read
Rate = 0              # percentage contribution of a single URL
Report = False        # when True, url_info appends an entry to ReportJson
ReportJson = []       # list of per-link dicts dumped to a JSON report
def url_info(URL):
    """Probe one URL via HEAD/GET and update the module-level counters.

    :param URL: the link to probe; whitespace/empty entries are ignored
    """
    linkStatus = {}
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    if URL not in [' ','']:
        try:
            File_Size = 0
            fileLink = requests.head(URL, stream=True)
            fileLink.raise_for_status()
            # a streamed GET is issued because HEAD may not carry Content-length
            HEAD = requests.get(URL, stream=True).headers
            File_Size = int(HEAD['Content-length'])
            Progress += Rate
            Processed_URLs = Processed_URLs + 1
            Total_Size += File_Size
            print('URLs Done:{0}/{1} File Size:{2} Total Size:{3} Progress:{4:.2f}%'.format(Processed_URLs, Total_URLs, human_byte_size(File_Size), human_byte_size(Total_Size), Progress))
        except requests.exceptions.HTTPError as errh:
            print ("Http Error:",errh)
        except requests.exceptions.ConnectionError as errc:
            print ("Error Connecting:",errc)
        except requests.exceptions.Timeout as errt:
            print ("Timeout Error:",errt)
        except requests.exceptions.RequestException as err:
            print ("Oops: Something Else",err)
        if Report is True:
            linkStatus['link'] = URL
            linkStatus['size'] = human_byte_size(File_Size)
            # NOTE(review): fileLink is unbound here if requests.head() itself
            # raised — this line crashes with NameError when Report is True.
            linkStatus['status'] = fileLink.status_code
            linkStatus['last-checked'] = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
            ReportJson.append(linkStatus)
def thread_series_creator(List_Of_URLs):
    """Run url_info over the list on a pool of 100 worker threads and wait."""
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    pool = ThreadPool(100)
    # map() blocks until all workers are done; url_info returns nothing,
    # so `results` only holds Nones.
    results = pool.map(url_info, List_Of_URLs)
    pool.close()
    pool.join()
def main(file_path, report=False):
    """Read a file of links, probe each link concurrently and print a summary.

    :param file_path: path of a text file with one URL per line
    :param report: when True, dump a per-link JSON report into the working directory
    """
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    file_of_links = Path(file_path)
    if file_of_links.is_file():
        try:
            # de-duplicate while preserving the original order
            with open(file_of_links,'r') as f:
                List_of_URLs = list(unique_everseen(f.read().splitlines()))
            Total_URLs = len(List_of_URLs)
            # NOTE(review): an empty file makes this divide by zero — confirm inputs are non-empty.
            Rate = 100/Total_URLs
        except IOError:
            print("IO Error : Unable to read from file")
            print("Exiting...")
            return
    else:
        print("Error! Invalid file path!")
        print("Exiting...")
        return
    Report = report
    thread_series_creator(List_of_URLs)
    if Report is True:
        # date-stamped JSON report in the current working directory
        Date = datetime.date.today().strftime('%d.%b.%Y')
        with open("muchspace.Report."+Date+".json", "w") as write_file:
            json.dump(ReportJson, write_file, indent=4)
    print("******Final Diagnostic Report******")
    print("Total URLs: {0} Processed URLs: {1} Rate of completion: {2:.2f}%".format(Total_URLs, Processed_URLs, Progress))
    print("Total size of {}/{} links is: {}".format(Processed_URLs, Total_URLs, human_byte_size(Total_Size)))
| true | true |
f72186852716593e8409116793bd82e2b2526084 | 2,714 | py | Python | src/pipelines/epidemiology/nl_authority.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | src/pipelines/epidemiology/nl_authority.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | src/pipelines/epidemiology/nl_authority.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Any, Dict, List
from pandas import DataFrame, concat, merge
from lib.pipeline import DataSource
from lib.time import datetime_isoformat
from lib.utils import grouped_diff
class NetherlandsDataSource(DataSource):
    """Pipeline data source for the Dutch national COVID-19 dataset."""

    def parse_dataframes(
        self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """Convert the raw municipal table into daily records at three levels.

        :param dataframes: raw input tables; only the first one is used
        :param aux: auxiliary tables; ``aux["metadata"]`` maps subregion2_code to a key
        :return: concatenation of country-level, level-2 aggregate and per-key daily data
        """
        # Rename the appropriate columns
        data = dataframes[0].rename(
            columns={
                "Date_of_report": "date",
                "Municipality_code": "subregion2_code",
                "Municipality_name": "subregion2_name",
                "Province": "subregion1_name",
                "Total_reported": "confirmed",
                "Hospital_admission": "hospitalized",
                "Deceased": "deceased",
            }
        )
        # Drop data without a clear demarcation
        data = data[~data.subregion1_name.isna()]
        data = data[~data.subregion2_code.isna()]
        data = data[~data.subregion2_name.isna()]
        # Get date in ISO format
        data.date = data.date.apply(lambda x: datetime.fromisoformat(x).date().isoformat())
        # Make sure the region code is zero-padded and without prefix
        data["subregion2_code"] = data["subregion2_code"].apply(lambda x: x[2:])
        data = data.drop(columns=["subregion1_name", "subregion2_name"])
        data = data.merge(aux["metadata"], on="subregion2_code")
        # We only need to keep key-date pair for identification
        data = data[["date", "key", "confirmed", "deceased", "hospitalized"]]
        # Compute the daily counts
        data = grouped_diff(data, ["key", "date"])
        # Group by level 2 region, and add the parts
        l2 = data.copy()
        l2["key"] = l2.key.apply(lambda x: x[:5])
        l2 = l2.groupby(["key", "date"]).sum().reset_index()
        # Group by country level, and add the parts
        l1 = l2.copy().drop(columns=["key"])
        l1 = l1.groupby("date").sum().reset_index()
        l1["key"] = "NL"
        # Output the results
        return concat([l1, l2, data])
| 37.178082 | 91 | 0.637804 |
from datetime import datetime
from typing import Any, Dict, List
from pandas import DataFrame, concat, merge
from lib.pipeline import DataSource
from lib.time import datetime_isoformat
from lib.utils import grouped_diff
class NetherlandsDataSource(DataSource):
    """Pipeline data source for the Dutch national COVID-19 dataset."""

    def parse_dataframes(
        self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """Convert the raw municipal table into daily records at three levels.

        :param dataframes: raw input tables; only the first one is used
        :param aux: auxiliary tables; ``aux["metadata"]`` maps subregion2_code to a key
        :return: concatenation of country-level, level-2 aggregate and per-key daily data
        """
        # normalize the raw column names
        data = dataframes[0].rename(
            columns={
                "Date_of_report": "date",
                "Municipality_code": "subregion2_code",
                "Municipality_name": "subregion2_name",
                "Province": "subregion1_name",
                "Total_reported": "confirmed",
                "Hospital_admission": "hospitalized",
                "Deceased": "deceased",
            }
        )
        # drop rows missing a clear regional demarcation
        data = data[~data.subregion1_name.isna()]
        data = data[~data.subregion2_code.isna()]
        data = data[~data.subregion2_name.isna()]
        # ISO-format the date and strip the 2-char prefix from the region code
        data.date = data.date.apply(lambda x: datetime.fromisoformat(x).date().isoformat())
        data["subregion2_code"] = data["subregion2_code"].apply(lambda x: x[2:])
        data = data.drop(columns=["subregion1_name", "subregion2_name"])
        data = data.merge(aux["metadata"], on="subregion2_code")
        # keep only key-date identification plus the counted metrics
        data = data[["date", "key", "confirmed", "deceased", "hospitalized"]]
        # cumulative counts -> daily counts
        data = grouped_diff(data, ["key", "date"])
        # aggregate to the level-2 parent by truncating the key
        l2 = data.copy()
        l2["key"] = l2.key.apply(lambda x: x[:5])
        l2 = l2.groupby(["key", "date"]).sum().reset_index()
        # aggregate to the country level
        l1 = l2.copy().drop(columns=["key"])
        l1 = l1.groupby("date").sum().reset_index()
        l1["key"] = "NL"
        return concat([l1, l2, data])
| true | true |
f72187bfd6178c0257c0f81666097723e96f4c4d | 21,206 | py | Python | tests/controller_test.py | elmopl/homekit_python | bb2b07e66fce3c3034b012ef679695a3da77f787 | [
"Apache-2.0"
] | null | null | null | tests/controller_test.py | elmopl/homekit_python | bb2b07e66fce3c3034b012ef679695a3da77f787 | [
"Apache-2.0"
] | null | null | null | tests/controller_test.py | elmopl/homekit_python | bb2b07e66fce3c3034b012ef679695a3da77f787 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Joachim Lusiardi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import tempfile
import threading
import time
from homekit import Controller
from homekit import AccessoryServer
from homekit.exceptions import AccessoryNotFoundError, AlreadyPairedError, UnavailableError, FormatError, \
ConfigLoadingError, ConfigSavingError, MalformedPinError
from homekit.model import Accessory
from homekit.model.services import LightBulbService
from homekit.model import mixin as model_mixin
from homekit.tools import BLE_TRANSPORT_SUPPORTED, IP_TRANSPORT_SUPPORTED
if BLE_TRANSPORT_SUPPORTED:
from homekit.controller.ble_impl import BlePairing
if IP_TRANSPORT_SUPPORTED:
from homekit.controller.ip_implementation import IpPairing
class T(threading.Thread):
    """Background thread that publishes and serves a HomeKit accessory."""

    def __init__(self, accessoryServer):
        super().__init__()
        self.a_s = accessoryServer

    def run(self):
        # Announce the accessory via mDNS, then block serving requests.
        server = self.a_s
        server.publish_device()
        server.serve_forever()
# Module-level state mutated by the accessory callbacks below so the tests
# can observe what the served accessory did.
value = 0
identify = 0
def identify_callback():
    """Accessory 'identify' hook: records that identification was triggered."""
    global identify
    identify = 1
def set_value(new_value):
    """Light bulb 'on' characteristic hook: records the last written value."""
    global value
    value = new_value
class TestControllerIpUnpaired(unittest.TestCase):
    """End-to-end tests for discovery, identify and pairing against a freshly
    started, *unpaired* IP accessory served in a background thread."""

    @classmethod
    def setUpClass(cls):
        # prepare config file for unpaired accessory server
        cls.config_file = tempfile.NamedTemporaryFile()
        cls.config_file.write("""{
"accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
"accessory_pairing_id": "12:34:56:00:01:0B",
"accessory_pin": "010-22-020",
"c#": 0,
"category": "Lightbulb",
"host_ip": "127.0.0.1",
"host_port": 54321,
"name": "unittestLight",
"peers": {
},
"unsuccessful_tries": 0
}""".encode())
        cls.config_file.flush()
        # Make sure get_id() numbers are stable between tests
        model_mixin.id_counter = 0
        # build the demo accessory (a light bulb) and start serving it
        cls.httpd = AccessoryServer(cls.config_file.name, None)
        cls.httpd.set_identify_callback(identify_callback)
        accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
        accessory.set_identify_callback(identify_callback)
        lightBulbService = LightBulbService()
        lightBulbService.set_on_set_callback(set_value)
        accessory.services.append(lightBulbService)
        cls.httpd.add_accessory(accessory)
        t = T(cls.httpd)
        t.start()
        # give the server thread time to publish itself via mDNS
        time.sleep(10)
        cls.controller_file = tempfile.NamedTemporaryFile()

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)
        # per-instance scratch file for controller pairing data
        self.controller_file = tempfile.NamedTemporaryFile()

    @classmethod
    def tearDownClass(cls):
        cls.httpd.unpublish_device()
        cls.httpd.shutdown()
        cls.config_file.close()

    def setUp(self):
        self.controller = Controller()

    def test_01_1_discover(self):
        """Try to discover the test accessory"""
        result = self.controller.discover()
        found = False
        for device in result:
            if '12:34:56:00:01:0B' == device['id']:
                found = True
        self.assertTrue(found)

    def test_01_2_unpaired_identify(self):
        """Try to trigger the identification of the test accessory"""
        global identify
        self.controller.identify('12:34:56:00:01:0B')
        self.assertEqual(1, identify)
        # reset the flag so later tests start from a clean state
        identify = 0

    def test_01_3_unpaired_identify_not_found(self):
        """Try to identify a non existing accessory. This should result in AccessoryNotFoundError"""
        self.assertRaises(AccessoryNotFoundError, self.controller.identify, '12:34:56:00:01:0C')

    def test_02_pair(self):
        """Try to pair the test accessory"""
        self.controller.perform_pairing('alias', '12:34:56:00:01:0B', '010-22-020')
        pairings = self.controller.get_pairings()
        self.controller.save_data(self.controller_file.name)
        self.assertIn('alias', pairings)

    def test_02_pair_accessory_not_found(self):
        """Pairing an unknown accessory id must raise AccessoryNotFoundError."""
        self.assertRaises(AccessoryNotFoundError, self.controller.perform_pairing, 'alias1', '12:34:56:00:01:1B',
                          '010-22-020')

    def test_02_pair_wrong_pin(self):
        """Pairing with a wrong (but well formed) PIN must raise UnavailableError."""
        self.assertRaises(UnavailableError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
                          '010-22-021')

    def test_02_pair_malformed_pin(self):
        """Pairing with a malformed PIN must raise MalformedPinError."""
        self.assertRaises(MalformedPinError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
                          '01022021')
class TestControllerIpPaired(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config_file = tempfile.NamedTemporaryFile()
cls.config_file.write("""{
"accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
"accessory_pairing_id": "12:34:56:00:01:0A",
"accessory_pin": "031-45-154",
"c#": 1,
"category": "Lightbulb",
"host_ip": "127.0.0.1",
"host_port": 51842,
"name": "unittestLight",
"peers": {
"decc6fa3-de3e-41c9-adba-ef7409821bfc": {
"admin": true,
"key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
},
"ABCDEFfa3-de3e-41c9-adba-ef7409821bfc": {
"admin": false,
"key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
}
},
"unsuccessful_tries": 0
}""".encode())
cls.config_file.flush()
# Make sure get_id() numbers are stable between tests
model_mixin.id_counter = 0
cls.httpd = AccessoryServer(cls.config_file.name, None)
cls.httpd.set_identify_callback(identify_callback)
accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
accessory.set_identify_callback(identify_callback)
lightBulbService = LightBulbService()
lightBulbService.set_on_set_callback(set_value)
accessory.services.append(lightBulbService)
cls.httpd.add_accessory(accessory)
t = T(cls.httpd)
t.start()
time.sleep(5)
cls.controller_file = tempfile.NamedTemporaryFile()
cls.controller_file.write("""{
"alias": {
"Connection": "IP",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryPort": 51842,
"AccessoryIP": "127.0.0.1",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
}
}""".encode())
cls.controller_file.flush()
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
@classmethod
def tearDownClass(cls):
cls.httpd.unpublish_device()
cls.httpd.shutdown()
cls.config_file.close()
def setUp(self):
self.controller = Controller()
def tearDown(self):
self.controller.shutdown()
def test_01_1_discover(self):
result = self.controller.discover(5)
found = None
for device in result:
if '12:34:56:00:01:0A' == device['id']:
found = device
self.assertIsNotNone(found)
def test_02_pair_alias_exists(self):
"""Try to pair the test accessory"""
self.controller.load_data(self.controller_file.name)
self.assertRaises(AlreadyPairedError, self.controller.perform_pairing, 'alias', '12:34:56:00:01:0B',
'010-22-020')
def test_02_paired_identify_wrong_method(self):
"""Try to identify an already paired accessory via the controller's method for unpaired accessories."""
self.assertRaises(AlreadyPairedError, self.controller.identify, '12:34:56:00:01:0A')
def test_03_get_accessories(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.list_accessories_and_characteristics()
for characteristic in result[0]['services'][0]['characteristics']:
if characteristic['format'] == 'bool':
self.assertNotIn('maxDataLen', characteristic)
self.assertNotIn('maxLen', characteristic)
self.assertEqual(1, len(result))
result = result[0]
self.assertIn('aid', result)
self.assertIn('services', result)
def test_04_1_get_characteristic(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)])
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertEqual(['value'], list(result[(1, 4)].keys()))
def test_04_2_get_characteristics(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4), (1, 10)])
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn((1, 10), result)
self.assertIn('value', result[(1, 10)])
self.assertEqual(False, result[(1, 10)]['value'])
def test_04_3_get_characteristic_with_events(self):
"""This tests the include_events flag on get_characteristics"""
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_events=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('ev', result[(1, 4)])
def test_04_4_get_characteristic_with_type(self):
"""This tests the include_type flag on get_characteristics"""
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_type=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('type', result[(1, 4)])
self.assertEqual('20', result[(1, 4)]['type'])
def test_04_5_get_characteristic_with_perms(self):
"""This tests the include_perms flag on get_characteristics"""
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_perms=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('perms', result[(1, 4)])
self.assertEqual(['pr'], result[(1, 4)]['perms'])
result = pairing.get_characteristics([(1, 3)], include_perms=True)
self.assertEqual(['pw'], result[(1, 3)]['perms'])
def test_04_4_get_characteristic_with_meta(self):
"""This tests the include_meta flag on get_characteristics"""
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_meta=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('format', result[(1, 4)])
self.assertEqual('string', result[(1, 4)]['format'])
self.assertIn('maxLen', result[(1, 4)])
self.assertEqual(64, result[(1, 4)]['maxLen'])
def test_05_1_put_characteristic(self):
""""""
global value
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.put_characteristics([(1, 10, 'On')])
self.assertEqual(result, {})
self.assertEqual(1, value)
result = pairing.put_characteristics([(1, 10, 'Off')])
self.assertEqual(result, {})
self.assertEqual(0, value)
def test_05_2_put_characteristic_do_conversion(self):
""""""
global value
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.put_characteristics([(1, 10, 'On')], do_conversion=True)
self.assertEqual(result, {})
self.assertEqual(1, value)
result = pairing.put_characteristics([(1, 10, 'Off')], do_conversion=True)
self.assertEqual(result, {})
self.assertEqual(0, value)
def test_05_2_put_characteristic_do_conversion_wrong_value(self):
"""Tests that values that are not convertible to boolean cause a HomeKitTypeException"""
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
self.assertRaises(FormatError, pairing.put_characteristics, [(1, 10, 'Hallo Welt')], do_conversion=True)
def test_06_list_pairings(self):
"""Gets the listing of registered controllers of the device. Count must be 1."""
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
results = pairing.list_pairings()
self.assertEqual(2, len(results))
result = results[0]
self.assertIn('pairingId', result)
self.assertEqual('ABCDEFfa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
self.assertIn('controllerType', result)
self.assertEqual(result['controllerType'], 'regular')
self.assertIn('publicKey', result)
self.assertIn('permissions', result)
self.assertEqual(result['permissions'], 0)
self.assertIn('pairingId', result)
result = results[1]
self.assertEqual('decc6fa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
self.assertEqual(result['controllerType'], 'admin')
self.assertEqual(result['permissions'], 1)
def test_07_paired_identify(self):
"""Tests the paired variant of the identify method."""
global identify
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.identify()
self.assertTrue(result)
self.assertEqual(1, identify)
identify = 0
def test_99_remove_pairing(self):
"""Tests that a removed pairing is not present in the list of pairings anymore."""
self.controller.load_data(self.controller_file.name)
self.controller.remove_pairing('alias')
pairings = self.controller.get_pairings()
self.assertNotIn('alias', pairings)
class TestController(unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
def setUp(self):
self.controller = Controller()
@unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
def test_load_pairings_both_type(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_ip": {
"Connection": "IP",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryPort": 51842,
"AccessoryIP": "127.0.0.1",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
},
"alias_ble": {
"Connection": "BLE",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryMAC": "FD:3C:D4:13:02:59",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
}
}""".encode())
controller_file.flush()
self.controller.load_data(controller_file.name)
self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
self.assertEqual(self.controller.get_pairings()['alias_ip'].pairing_data['Connection'], 'IP')
self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
controller_file.close()
@unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
def test_load_pairings_missing_type(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_ip": {
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryPort": 51842,
"AccessoryIP": "127.0.0.1",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
},
"alias_ble": {
"Connection": "BLE",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryMAC": "FD:3C:D4:13:02:59",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
}
}""".encode())
controller_file.flush()
self.controller.load_data(controller_file.name)
self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
controller_file.close()
def test_load_pairings_unknown_type(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_unknown": {
"Connection": "UNKNOWN"
}
}""".encode())
controller_file.flush()
self.controller.load_data(controller_file.name)
self.assertEqual(0, len(self.controller.get_pairings()))
controller_file.close()
def test_load_pairings_invalid_json(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_unknown": {
"Connection": "UNKNOWN",
}
}""".encode())
controller_file.flush()
self.assertRaises(ConfigLoadingError, self.controller.load_data, controller_file.name)
controller_file.close()
def test_load_pairings_missing_file(self):
self.assertRaises(ConfigLoadingError, self.controller.load_data, 'test')
def test_load_pairings_permissions(self):
self.assertRaises(ConfigLoadingError, self.controller.load_data, '/etc/shadow')
def test_save_pairings_permissions(self):
self.assertRaises(ConfigSavingError, self.controller.save_data, '/root/shadow')
def test_save_pairings_missing_file(self):
self.assertRaises(ConfigSavingError, self.controller.save_data, '/tmp/shadow/foo')
| 42.927126 | 113 | 0.650712 |
import unittest
import tempfile
import threading
import time
from homekit import Controller
from homekit import AccessoryServer
from homekit.exceptions import AccessoryNotFoundError, AlreadyPairedError, UnavailableError, FormatError, \
ConfigLoadingError, ConfigSavingError, MalformedPinError
from homekit.model import Accessory
from homekit.model.services import LightBulbService
from homekit.model import mixin as model_mixin
from homekit.tools import BLE_TRANSPORT_SUPPORTED, IP_TRANSPORT_SUPPORTED
if BLE_TRANSPORT_SUPPORTED:
from homekit.controller.ble_impl import BlePairing
if IP_TRANSPORT_SUPPORTED:
from homekit.controller.ip_implementation import IpPairing
class T(threading.Thread):
def __init__(self, accessoryServer):
threading.Thread.__init__(self)
self.a_s = accessoryServer
def run(self):
self.a_s.publish_device()
self.a_s.serve_forever()
value = 0
identify = 0
def identify_callback():
global identify
identify = 1
def set_value(new_value):
global value
value = new_value
class TestControllerIpUnpaired(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config_file = tempfile.NamedTemporaryFile()
cls.config_file.write("""{
"accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
"accessory_pairing_id": "12:34:56:00:01:0B",
"accessory_pin": "010-22-020",
"c#": 0,
"category": "Lightbulb",
"host_ip": "127.0.0.1",
"host_port": 54321,
"name": "unittestLight",
"peers": {
},
"unsuccessful_tries": 0
}""".encode())
cls.config_file.flush()
model_mixin.id_counter = 0
cls.httpd = AccessoryServer(cls.config_file.name, None)
cls.httpd.set_identify_callback(identify_callback)
accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
accessory.set_identify_callback(identify_callback)
lightBulbService = LightBulbService()
lightBulbService.set_on_set_callback(set_value)
accessory.services.append(lightBulbService)
cls.httpd.add_accessory(accessory)
t = T(cls.httpd)
t.start()
time.sleep(10)
cls.controller_file = tempfile.NamedTemporaryFile()
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
self.controller_file = tempfile.NamedTemporaryFile()
@classmethod
def tearDownClass(cls):
cls.httpd.unpublish_device()
cls.httpd.shutdown()
cls.config_file.close()
def setUp(self):
self.controller = Controller()
def test_01_1_discover(self):
result = self.controller.discover()
found = False
for device in result:
if '12:34:56:00:01:0B' == device['id']:
found = True
self.assertTrue(found)
def test_01_2_unpaired_identify(self):
global identify
self.controller.identify('12:34:56:00:01:0B')
self.assertEqual(1, identify)
identify = 0
def test_01_3_unpaired_identify_not_found(self):
self.assertRaises(AccessoryNotFoundError, self.controller.identify, '12:34:56:00:01:0C')
def test_02_pair(self):
self.controller.perform_pairing('alias', '12:34:56:00:01:0B', '010-22-020')
pairings = self.controller.get_pairings()
self.controller.save_data(self.controller_file.name)
self.assertIn('alias', pairings)
def test_02_pair_accessory_not_found(self):
self.assertRaises(AccessoryNotFoundError, self.controller.perform_pairing, 'alias1', '12:34:56:00:01:1B',
'010-22-020')
def test_02_pair_wrong_pin(self):
self.assertRaises(UnavailableError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
'010-22-021')
def test_02_pair_malformed_pin(self):
self.assertRaises(MalformedPinError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
'01022021')
class TestControllerIpPaired(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config_file = tempfile.NamedTemporaryFile()
cls.config_file.write("""{
"accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
"accessory_pairing_id": "12:34:56:00:01:0A",
"accessory_pin": "031-45-154",
"c#": 1,
"category": "Lightbulb",
"host_ip": "127.0.0.1",
"host_port": 51842,
"name": "unittestLight",
"peers": {
"decc6fa3-de3e-41c9-adba-ef7409821bfc": {
"admin": true,
"key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
},
"ABCDEFfa3-de3e-41c9-adba-ef7409821bfc": {
"admin": false,
"key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
}
},
"unsuccessful_tries": 0
}""".encode())
cls.config_file.flush()
model_mixin.id_counter = 0
cls.httpd = AccessoryServer(cls.config_file.name, None)
cls.httpd.set_identify_callback(identify_callback)
accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
accessory.set_identify_callback(identify_callback)
lightBulbService = LightBulbService()
lightBulbService.set_on_set_callback(set_value)
accessory.services.append(lightBulbService)
cls.httpd.add_accessory(accessory)
t = T(cls.httpd)
t.start()
time.sleep(5)
cls.controller_file = tempfile.NamedTemporaryFile()
cls.controller_file.write("""{
"alias": {
"Connection": "IP",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryPort": 51842,
"AccessoryIP": "127.0.0.1",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
}
}""".encode())
cls.controller_file.flush()
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
@classmethod
def tearDownClass(cls):
cls.httpd.unpublish_device()
cls.httpd.shutdown()
cls.config_file.close()
def setUp(self):
self.controller = Controller()
def tearDown(self):
self.controller.shutdown()
def test_01_1_discover(self):
result = self.controller.discover(5)
found = None
for device in result:
if '12:34:56:00:01:0A' == device['id']:
found = device
self.assertIsNotNone(found)
def test_02_pair_alias_exists(self):
self.controller.load_data(self.controller_file.name)
self.assertRaises(AlreadyPairedError, self.controller.perform_pairing, 'alias', '12:34:56:00:01:0B',
'010-22-020')
def test_02_paired_identify_wrong_method(self):
self.assertRaises(AlreadyPairedError, self.controller.identify, '12:34:56:00:01:0A')
def test_03_get_accessories(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.list_accessories_and_characteristics()
for characteristic in result[0]['services'][0]['characteristics']:
if characteristic['format'] == 'bool':
self.assertNotIn('maxDataLen', characteristic)
self.assertNotIn('maxLen', characteristic)
self.assertEqual(1, len(result))
result = result[0]
self.assertIn('aid', result)
self.assertIn('services', result)
def test_04_1_get_characteristic(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)])
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertEqual(['value'], list(result[(1, 4)].keys()))
def test_04_2_get_characteristics(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4), (1, 10)])
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn((1, 10), result)
self.assertIn('value', result[(1, 10)])
self.assertEqual(False, result[(1, 10)]['value'])
def test_04_3_get_characteristic_with_events(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_events=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('ev', result[(1, 4)])
def test_04_4_get_characteristic_with_type(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_type=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('type', result[(1, 4)])
self.assertEqual('20', result[(1, 4)]['type'])
def test_04_5_get_characteristic_with_perms(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_perms=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('perms', result[(1, 4)])
self.assertEqual(['pr'], result[(1, 4)]['perms'])
result = pairing.get_characteristics([(1, 3)], include_perms=True)
self.assertEqual(['pw'], result[(1, 3)]['perms'])
def test_04_4_get_characteristic_with_meta(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.get_characteristics([(1, 4)], include_meta=True)
self.assertIn((1, 4), result)
self.assertIn('value', result[(1, 4)])
self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
self.assertIn('format', result[(1, 4)])
self.assertEqual('string', result[(1, 4)]['format'])
self.assertIn('maxLen', result[(1, 4)])
self.assertEqual(64, result[(1, 4)]['maxLen'])
def test_05_1_put_characteristic(self):
global value
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.put_characteristics([(1, 10, 'On')])
self.assertEqual(result, {})
self.assertEqual(1, value)
result = pairing.put_characteristics([(1, 10, 'Off')])
self.assertEqual(result, {})
self.assertEqual(0, value)
def test_05_2_put_characteristic_do_conversion(self):
global value
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.put_characteristics([(1, 10, 'On')], do_conversion=True)
self.assertEqual(result, {})
self.assertEqual(1, value)
result = pairing.put_characteristics([(1, 10, 'Off')], do_conversion=True)
self.assertEqual(result, {})
self.assertEqual(0, value)
def test_05_2_put_characteristic_do_conversion_wrong_value(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
self.assertRaises(FormatError, pairing.put_characteristics, [(1, 10, 'Hallo Welt')], do_conversion=True)
def test_06_list_pairings(self):
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
results = pairing.list_pairings()
self.assertEqual(2, len(results))
result = results[0]
self.assertIn('pairingId', result)
self.assertEqual('ABCDEFfa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
self.assertIn('controllerType', result)
self.assertEqual(result['controllerType'], 'regular')
self.assertIn('publicKey', result)
self.assertIn('permissions', result)
self.assertEqual(result['permissions'], 0)
self.assertIn('pairingId', result)
result = results[1]
self.assertEqual('decc6fa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
self.assertEqual(result['controllerType'], 'admin')
self.assertEqual(result['permissions'], 1)
def test_07_paired_identify(self):
global identify
self.controller.load_data(self.controller_file.name)
pairing = self.controller.get_pairings()['alias']
result = pairing.identify()
self.assertTrue(result)
self.assertEqual(1, identify)
identify = 0
def test_99_remove_pairing(self):
self.controller.load_data(self.controller_file.name)
self.controller.remove_pairing('alias')
pairings = self.controller.get_pairings()
self.assertNotIn('alias', pairings)
class TestController(unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
def setUp(self):
self.controller = Controller()
@unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
def test_load_pairings_both_type(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_ip": {
"Connection": "IP",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryPort": 51842,
"AccessoryIP": "127.0.0.1",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
},
"alias_ble": {
"Connection": "BLE",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryMAC": "FD:3C:D4:13:02:59",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
}
}""".encode())
controller_file.flush()
self.controller.load_data(controller_file.name)
self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
self.assertEqual(self.controller.get_pairings()['alias_ip'].pairing_data['Connection'], 'IP')
self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
controller_file.close()
@unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
def test_load_pairings_missing_type(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_ip": {
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryPort": 51842,
"AccessoryIP": "127.0.0.1",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
},
"alias_ble": {
"Connection": "BLE",
"iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
"iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
"AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
"AccessoryPairingID": "12:34:56:00:01:0A",
"AccessoryMAC": "FD:3C:D4:13:02:59",
"iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
}
}""".encode())
controller_file.flush()
self.controller.load_data(controller_file.name)
self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
controller_file.close()
def test_load_pairings_unknown_type(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_unknown": {
"Connection": "UNKNOWN"
}
}""".encode())
controller_file.flush()
self.controller.load_data(controller_file.name)
self.assertEqual(0, len(self.controller.get_pairings()))
controller_file.close()
def test_load_pairings_invalid_json(self):
controller_file = tempfile.NamedTemporaryFile()
controller_file.write("""{
"alias_unknown": {
"Connection": "UNKNOWN",
}
}""".encode())
controller_file.flush()
self.assertRaises(ConfigLoadingError, self.controller.load_data, controller_file.name)
controller_file.close()
def test_load_pairings_missing_file(self):
self.assertRaises(ConfigLoadingError, self.controller.load_data, 'test')
def test_load_pairings_permissions(self):
self.assertRaises(ConfigLoadingError, self.controller.load_data, '/etc/shadow')
def test_save_pairings_permissions(self):
self.assertRaises(ConfigSavingError, self.controller.save_data, '/root/shadow')
def test_save_pairings_missing_file(self):
self.assertRaises(ConfigSavingError, self.controller.save_data, '/tmp/shadow/foo')
| true | true |
f721880fe59e59ce9a574f51f4ac11921a0ea939 | 4,792 | py | Python | zendesk/endpoints.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 31 | 2015-01-02T01:44:18.000Z | 2021-06-10T16:29:54.000Z | zendesk/endpoints.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 1 | 2015-04-08T07:54:50.000Z | 2015-04-09T14:29:38.000Z | zendesk/endpoints.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 23 | 2015-01-12T23:42:34.000Z | 2021-09-08T11:20:12.000Z | """
API MAPPING
"""
mapping_table = {
# Rest API: Organizations
'list_organizations': {
'path': '/organizations.json',
'method': 'GET',
'status': 200,
},
'show_organization': {
'path': '/organizations/{{organization_id}}.json',
'method': 'GET',
'status': 200,
},
'create_organization': {
'path': '/organizations.json',
'method': 'POST',
'status': 201,
},
'update_organization': {
'path': '/organizations/{{organization_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_organization': {
'path': '/organizations/{{organization_id}}.json',
'method': 'DELETE',
'status': 200,
},
# Rest API: Groups
'list_groups': {
'path': '/groups.json',
'method': 'GET',
'status': 200,
},
'show_group': {
'path': '/groups/{{group_id}}.json',
'method': 'GET',
'status': 200,
},
'create_group': {
'path': '/groups.json',
'method': 'POST',
'status': 201,
},
'update_group': {
'path': '/groups/{{group_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_group': {
'path': '/groups/{{group_id}}.json',
'method': 'DELETE',
'status': 200,
},
# Rest API: Tickets
'list_tickets': {
'path': '/rules/{{view_id}}.json',
'valid_params': ('page', ),
'method': 'GET',
'status': 200,
},
'show_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'GET',
'status': 200,
},
'create_ticket': {
'path': '/tickets.json',
'method': 'POST',
'status': 201,
},
'update_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'PUT',
'status': 200,
},
'comment_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'DELETE',
'status': 200,
},
# Rest API: Attachment
'create_attachment': {
'path': '/uploads.json',
'valid_params': ('filename', 'token'),
'method': 'POST',
'status': 201,
},
# Rest API: Users
'list_users': {
'path': '/users.json',
'valid_params': ('page', ),
'method': 'GET',
'status': 200,
},
'search_users': {
'path': '/users.json',
'valid_params': ('query', 'role', 'page'),
'method': 'GET',
'status': 200,
},
'show_user': {
'path': '/users/{{user_id}}.json',
'method': 'GET',
'status': 200,
},
'create_user': {
'path': '/users.json',
'method': 'POST',
'status': 201,
},
'update_user': {
'path': '/users/{{user_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_user': {
'path': '/users/{{user_id}}.json',
'method': 'DELETE',
'status': 200,
},
'list_user_identities': {
'path': '/users/{{user_id}}/user_identities.json',
'method': 'GET',
'status': 200,
},
'add_user_email': {
'path': '/users/{{user_id}}/user_identities.json',
'method': 'POST',
'status': 201,
},
'add_twitter_handle': {
'path': '/users/{{user_id}}/user_identities.json',
'method': 'POST',
'status': 201,
},
'make_identity_primary': {
'path': '/users/{{user_id}}/user_identities/{{identity_id}}/make_primary',
'method': 'POST',
'status': 200,
},
'delete_identity': {
'path': '/users/{{user_id}}/user_identities/{{identity_id}}',
'method': 'DELETE',
'status': 200,
},
# Rest API: Tags
'list_tags': {
'path': '/tags.json',
'method': 'GET',
'status': 200,
},
'list_assets': {
'path': '/tags/{{tag_id}}.json',
'valid_params': ('asset_type', 'page'),
'method': 'GET',
'status': 200,
},
# Rest API: Ticket Fields
'list_ticket_fields': {
'path': '/ticket_fields.json',
'method': 'GET',
'status': 200,
},
# Rest API: Macros
'list_macros': {
'path': '/macros.json',
'method': 'GET',
'status': 200,
},
'evaluate_macro': {
'path': '/macros/{{macro_id}}/apply.json',
'valid_params': ('ticket_id', ),
'method': 'POST',
'status': 201,
},
# Rest API: Search
'search': {
'path': '/search.json',
'valid_params': ('query', 'page'),
'method': 'GET',
'status': 200,
},
}
| 24.701031 | 82 | 0.459098 |
mapping_table = {
'list_organizations': {
'path': '/organizations.json',
'method': 'GET',
'status': 200,
},
'show_organization': {
'path': '/organizations/{{organization_id}}.json',
'method': 'GET',
'status': 200,
},
'create_organization': {
'path': '/organizations.json',
'method': 'POST',
'status': 201,
},
'update_organization': {
'path': '/organizations/{{organization_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_organization': {
'path': '/organizations/{{organization_id}}.json',
'method': 'DELETE',
'status': 200,
},
'list_groups': {
'path': '/groups.json',
'method': 'GET',
'status': 200,
},
'show_group': {
'path': '/groups/{{group_id}}.json',
'method': 'GET',
'status': 200,
},
'create_group': {
'path': '/groups.json',
'method': 'POST',
'status': 201,
},
'update_group': {
'path': '/groups/{{group_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_group': {
'path': '/groups/{{group_id}}.json',
'method': 'DELETE',
'status': 200,
},
'list_tickets': {
'path': '/rules/{{view_id}}.json',
'valid_params': ('page', ),
'method': 'GET',
'status': 200,
},
'show_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'GET',
'status': 200,
},
'create_ticket': {
'path': '/tickets.json',
'method': 'POST',
'status': 201,
},
'update_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'PUT',
'status': 200,
},
'comment_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'DELETE',
'status': 200,
},
'create_attachment': {
'path': '/uploads.json',
'valid_params': ('filename', 'token'),
'method': 'POST',
'status': 201,
},
'list_users': {
'path': '/users.json',
'valid_params': ('page', ),
'method': 'GET',
'status': 200,
},
'search_users': {
'path': '/users.json',
'valid_params': ('query', 'role', 'page'),
'method': 'GET',
'status': 200,
},
'show_user': {
'path': '/users/{{user_id}}.json',
'method': 'GET',
'status': 200,
},
'create_user': {
'path': '/users.json',
'method': 'POST',
'status': 201,
},
'update_user': {
'path': '/users/{{user_id}}.json',
'method': 'PUT',
'status': 200,
},
'delete_user': {
'path': '/users/{{user_id}}.json',
'method': 'DELETE',
'status': 200,
},
'list_user_identities': {
'path': '/users/{{user_id}}/user_identities.json',
'method': 'GET',
'status': 200,
},
'add_user_email': {
'path': '/users/{{user_id}}/user_identities.json',
'method': 'POST',
'status': 201,
},
'add_twitter_handle': {
'path': '/users/{{user_id}}/user_identities.json',
'method': 'POST',
'status': 201,
},
'make_identity_primary': {
'path': '/users/{{user_id}}/user_identities/{{identity_id}}/make_primary',
'method': 'POST',
'status': 200,
},
'delete_identity': {
'path': '/users/{{user_id}}/user_identities/{{identity_id}}',
'method': 'DELETE',
'status': 200,
},
'list_tags': {
'path': '/tags.json',
'method': 'GET',
'status': 200,
},
'list_assets': {
'path': '/tags/{{tag_id}}.json',
'valid_params': ('asset_type', 'page'),
'method': 'GET',
'status': 200,
},
'list_ticket_fields': {
'path': '/ticket_fields.json',
'method': 'GET',
'status': 200,
},
'list_macros': {
'path': '/macros.json',
'method': 'GET',
'status': 200,
},
'evaluate_macro': {
'path': '/macros/{{macro_id}}/apply.json',
'valid_params': ('ticket_id', ),
'method': 'POST',
'status': 201,
},
'search': {
'path': '/search.json',
'valid_params': ('query', 'page'),
'method': 'GET',
'status': 200,
},
}
| true | true |
f721881eea115b79515a4c824cdd061fe585c80c | 6,885 | py | Python | logging/tests/unit/handlers/test__helpers.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | logging/tests/unit/handlers/test__helpers.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | logging/tests/unit/handlers/test__helpers.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
import six
# webapp2 is optional: importing it under Python 3 fails at parse time, so
# fall back to a plain base class, which lets the webapp2 test handler below
# still be defined (its tests are skipped separately under Python 3).
try:
    from webapp2 import RequestHandler
except SyntaxError:
    # webapp2 has not been ported to python3, so it will give a syntax
    # error if we try. We'll just skip the webapp2 tests in that case.
    RequestHandler = object
class Test_get_trace_id_from_flask(unittest.TestCase):
    """Exercise trace-ID extraction from a Flask request context."""

    @staticmethod
    def _call_fut():
        """Invoke the function-under-test and return its result."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id_from_flask()

    @staticmethod
    def create_app():
        """Build a minimal single-route Flask application."""
        import flask
        flask_app = flask.Flask(__name__)

        @flask_app.route('/')
        def index():
            return 'test flask trace'  # pragma: NO COVER

        return flask_app

    def test_no_context_header(self):
        """Without a trace header the helper reports None."""
        flask_app = self.create_app()
        with flask_app.test_request_context(path='/', headers={}):
            result = self._call_fut()
        self.assertIsNone(result)

    def test_valid_context_header(self):
        """The trace ID is the header portion before the span ID."""
        header_name = 'X_CLOUD_TRACE_CONTEXT'
        expected = 'testtraceidflask'
        header_value = expected + '/testspanid'
        flask_app = self.create_app()
        with flask_app.test_request_context(
                path='/', headers={header_name: header_value}):
            result = self._call_fut()
        self.assertEqual(result, expected)
class _GetTraceId(RequestHandler):
    """Test-only webapp2 handler that echoes the extracted trace ID."""
    def get(self):
        """Serve GET: write the request's trace ID as a JSON body."""
        from google.cloud.logging.handlers import _helpers
        trace_id = _helpers.get_trace_id_from_webapp2()
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(trace_id))
@unittest.skipIf(six.PY3, 'webapp2 is Python 2 only')
class Test_get_trace_id_from_webapp2(unittest.TestCase):
    """Exercise trace-ID extraction through a webapp2 WSGI application."""

    @staticmethod
    def create_app():
        """Return a one-route webapp2 application serving _GetTraceId."""
        import webapp2
        return webapp2.WSGIApplication([
            ('/', _GetTraceId),
        ])

    def test_no_context_header(self):
        """A request without the trace header serializes to None."""
        import webob
        request = webob.BaseRequest.blank('/')
        response = request.get_response(self.create_app())
        self.assertEqual(None, json.loads(response.body))

    def test_valid_context_header(self):
        """The trace ID is the header portion before the span ID."""
        import webob
        header_name = 'X-Cloud-Trace-Context'
        expected = 'testtraceidwebapp2'
        request = webob.BaseRequest.blank(
            '/', headers={header_name: expected + '/testspanid'})
        response = request.get_response(self.create_app())
        self.assertEqual(json.loads(response.body), expected)
class Test_get_trace_id_from_django(unittest.TestCase):
    """Tests for extracting the trace ID from a Django request."""
    @staticmethod
    def _call_fut():
        """Call the function-under-test and return its result."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id_from_django()
    def setUp(self):
        """Configure Django settings and set up the test environment."""
        from django.conf import settings
        from django.test.utils import setup_test_environment
        # Django raises if configure() is called twice, so guard it.
        if not settings.configured:
            settings.configure()
        setup_test_environment()
    def tearDown(self):
        """Tear down the test environment and clear thread-local state."""
        from django.test.utils import teardown_test_environment
        from google.cloud.logging.handlers.middleware import request
        teardown_test_environment()
        # The middleware stashes the current request in thread-locals;
        # clear it so one test cannot leak state into another.
        request._thread_locals.__dict__.clear()
    def test_no_context_header(self):
        """A request without the trace header yields None."""
        from django.test import RequestFactory
        from google.cloud.logging.handlers.middleware import request
        django_request = RequestFactory().get('/')
        middleware = request.RequestMiddleware()
        middleware.process_request(django_request)
        trace_id = self._call_fut()
        self.assertIsNone(trace_id)
    def test_valid_context_header(self):
        """The trace ID is the header portion before the span ID."""
        from django.test import RequestFactory
        from google.cloud.logging.handlers.middleware import request
        django_trace_header = 'HTTP_X_CLOUD_TRACE_CONTEXT'
        expected_trace_id = 'testtraceiddjango'
        django_trace_id = expected_trace_id + '/testspanid'
        django_request = RequestFactory().get(
            '/',
            **{django_trace_header: django_trace_id})
        middleware = request.RequestMiddleware()
        middleware.process_request(django_request)
        trace_id = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id(unittest.TestCase):
    """Exercise the framework-dispatching trace-ID helper."""

    @staticmethod
    def _call_fut():
        """Invoke the function-under-test and return its result."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id()

    def _helper(self, django_return, flask_return):
        """Call the helper with both framework hooks patched.

        Returns the two mocks and the trace ID the helper produced.
        """
        patched_django = mock.patch(
            'google.cloud.logging.handlers._helpers.get_trace_id_from_django',
            return_value=django_return)
        patched_flask = mock.patch(
            'google.cloud.logging.handlers._helpers.get_trace_id_from_flask',
            return_value=flask_return)
        with patched_django as mock_django, patched_flask as mock_flask:
            result = self._call_fut()
        return mock_django, mock_flask, result

    def test_from_django(self):
        mock_django, mock_flask, result = self._helper(
            'test-django-trace-id', None)
        self.assertEqual(result, mock_django.return_value)
        mock_django.assert_called_once_with()
        mock_flask.assert_not_called()

    def test_from_flask(self):
        mock_django, mock_flask, result = self._helper(
            None, 'test-flask-trace-id')
        self.assertEqual(result, mock_flask.return_value)
        mock_django.assert_called_once_with()
        mock_flask.assert_called_once_with()

    def test_from_django_and_flask(self):
        mock_django, mock_flask, result = self._helper(
            'test-django-trace-id', 'test-flask-trace-id')
        # Django takes precedence when both frameworks report a trace ID.
        self.assertEqual(result, mock_django.return_value)
        mock_django.assert_called_once_with()
        mock_flask.assert_not_called()

    def test_missing(self):
        mock_django, mock_flask, result = self._helper(None, None)
        self.assertIsNone(result)
        mock_django.assert_called_once_with()
        mock_flask.assert_called_once_with()
| 30.197368 | 78 | 0.679448 |
import json
import unittest
import mock
import six
try:
from webapp2 import RequestHandler
except SyntaxError:
RequestHandler = object
class Test_get_trace_id_from_flask(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id_from_flask()
@staticmethod
def create_app():
import flask
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'test flask trace' # pragma: NO COVER
return app
def test_no_context_header(self):
app = self.create_app()
with app.test_request_context(
path='/',
headers={}):
trace_id = self._call_fut()
self.assertIsNone(trace_id)
def test_valid_context_header(self):
flask_trace_header = 'X_CLOUD_TRACE_CONTEXT'
expected_trace_id = 'testtraceidflask'
flask_trace_id = expected_trace_id + '/testspanid'
app = self.create_app()
context = app.test_request_context(
path='/',
headers={flask_trace_header: flask_trace_id})
with context:
trace_id = self._call_fut()
self.assertEqual(trace_id, expected_trace_id)
class _GetTraceId(RequestHandler):
def get(self):
from google.cloud.logging.handlers import _helpers
trace_id = _helpers.get_trace_id_from_webapp2()
self.response.content_type = 'application/json'
self.response.out.write(json.dumps(trace_id))
@unittest.skipIf(six.PY3, 'webapp2 is Python 2 only')
class Test_get_trace_id_from_webapp2(unittest.TestCase):
@staticmethod
def create_app():
import webapp2
app = webapp2.WSGIApplication([
('/', _GetTraceId),
])
return app
def test_no_context_header(self):
import webob
req = webob.BaseRequest.blank('/')
response = req.get_response(self.create_app())
trace_id = json.loads(response.body)
self.assertEqual(None, trace_id)
def test_valid_context_header(self):
import webob
webapp2_trace_header = 'X-Cloud-Trace-Context'
expected_trace_id = 'testtraceidwebapp2'
webapp2_trace_id = expected_trace_id + '/testspanid'
req = webob.BaseRequest.blank(
'/',
headers={webapp2_trace_header: webapp2_trace_id})
response = req.get_response(self.create_app())
trace_id = json.loads(response.body)
self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id_from_django(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id_from_django()
def setUp(self):
from django.conf import settings
from django.test.utils import setup_test_environment
if not settings.configured:
settings.configure()
setup_test_environment()
def tearDown(self):
from django.test.utils import teardown_test_environment
from google.cloud.logging.handlers.middleware import request
teardown_test_environment()
request._thread_locals.__dict__.clear()
def test_no_context_header(self):
from django.test import RequestFactory
from google.cloud.logging.handlers.middleware import request
django_request = RequestFactory().get('/')
middleware = request.RequestMiddleware()
middleware.process_request(django_request)
trace_id = self._call_fut()
self.assertIsNone(trace_id)
def test_valid_context_header(self):
from django.test import RequestFactory
from google.cloud.logging.handlers.middleware import request
django_trace_header = 'HTTP_X_CLOUD_TRACE_CONTEXT'
expected_trace_id = 'testtraceiddjango'
django_trace_id = expected_trace_id + '/testspanid'
django_request = RequestFactory().get(
'/',
**{django_trace_header: django_trace_id})
middleware = request.RequestMiddleware()
middleware.process_request(django_request)
trace_id = self._call_fut()
self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id()
def _helper(self, django_return, flask_return):
django_patch = mock.patch(
'google.cloud.logging.handlers._helpers.get_trace_id_from_django',
return_value=django_return)
flask_patch = mock.patch(
'google.cloud.logging.handlers._helpers.get_trace_id_from_flask',
return_value=flask_return)
with django_patch as django_mock:
with flask_patch as flask_mock:
trace_id = self._call_fut()
return django_mock, flask_mock, trace_id
def test_from_django(self):
django_mock, flask_mock, trace_id = self._helper(
'test-django-trace-id', None)
self.assertEqual(trace_id, django_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_not_called()
def test_from_flask(self):
django_mock, flask_mock, trace_id = self._helper(
None, 'test-flask-trace-id')
self.assertEqual(trace_id, flask_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_called_once_with()
def test_from_django_and_flask(self):
django_mock, flask_mock, trace_id = self._helper(
'test-django-trace-id', 'test-flask-trace-id')
# Django wins.
self.assertEqual(trace_id, django_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_not_called()
def test_missing(self):
django_mock, flask_mock, trace_id = self._helper(None, None)
self.assertIsNone(trace_id)
django_mock.assert_called_once_with()
flask_mock.assert_called_once_with()
| true | true |
f7218951799b74c37930bbca42f5a8dabc271ee3 | 8,665 | py | Python | pattoo/ingest/files.py | palisadoes/pattoo | 57bd3e82e49d51e3426b13ad53ed8326a735ce29 | [
"Apache-2.0"
] | null | null | null | pattoo/ingest/files.py | palisadoes/pattoo | 57bd3e82e49d51e3426b13ad53ed8326a735ce29 | [
"Apache-2.0"
] | null | null | null | pattoo/ingest/files.py | palisadoes/pattoo | 57bd3e82e49d51e3426b13ad53ed8326a735ce29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Pattoo classes that manage various data."""
# Standard imports
import os
import time
# Import project libraries
from pattoo_shared import log, files, converter
from pattoo.configuration import ConfigIngester as Config
from pattoo.constants import PATTOO_API_AGENT_NAME, PATTOO_INGESTER_NAME
from .records import Records
class Cache():
    """Process ingest cache data."""

    def __init__(self, batch_size=500, age=0):
        """Initialize the class.

        Args:
            batch_size: Maximum number of cache files to read per batch
            age: Minimum age of files to be read per batch

        Returns:
            None

        """
        # Get cache directory
        config = Config()
        directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)
        # Millisecond timestamp used to correlate log entries for this batch
        self._batch_id = int(time.time() * 1000)

        # Read data from cache. Stop if there is no data found.
        self._data = files.read_json_files(
            directory, die=False, age=age, count=batch_size)

        # Save the number of files read
        self.files = len(self._data)

    def records(self):
        """Create PattooDBrecord objects from cache directory.

        Args:
            None

        Returns:
            result: List of lists of PattooDBrecord objects, one inner list
                per agent_id

        """
        # Initialize key variables
        _cache = {}
        result = []

        # Read data from files
        for filepath, json_data in sorted(self._data):
            # Get data from JSON file. Convert to rows of key-pairs
            if bool(json_data) is True and isinstance(json_data, dict) is True:
                pdbrs = converter.cache_to_keypairs(json_data)
                if bool(pdbrs) is False:
                    log_message = ('''\
File {} has invalid data. It will not be processed'''.format(filepath))
                    log.log2info(20026, log_message)
                    continue

                # Group data by agent_id
                pattoo_agent_id = pdbrs[0].pattoo_agent_id
                if pattoo_agent_id in _cache:
                    _cache[pattoo_agent_id].extend(pdbrs)
                else:
                    _cache[pattoo_agent_id] = pdbrs

        # Aggregate data, sorted by agent_id for deterministic ordering
        if bool(_cache) is True:
            for _, item in sorted(_cache.items()):
                result.append(item)

        # Return
        return result

    def purge(self):
        """Purge the cache files read by this batch.

        Args:
            None

        Returns:
            None

        """
        # Initialize key variables
        filepaths = [filepath for filepath, _ in self._data]

        # Delete cache files after processing
        for filepath in filepaths:
            if os.path.exists(filepath):
                try:
                    os.remove(filepath)
                except OSError:
                    # BUGFIX: catch only OS-level deletion errors rather than
                    # a bare "except", which would also swallow
                    # KeyboardInterrupt / SystemExit.
                    log_message = ('''\
Error deleting cache file {}.'''.format(filepath))
                    log.log2warning(20110, log_message)

    def ingest(self):
        """Ingest cache data into the database.

        Args:
            None

        Returns:
            records: Number of records processed

        """
        # Process
        _data = self.records()
        if bool(_data) is True:
            # Log
            log_message = ('''\
Processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
            log.log2debug(20004, log_message)

            # Add records to the database, then delete the source files
            _records = Records(_data)
            _records.ingest()
            self.purge()

            # Log
            log_message = ('''\
Finished processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
            log.log2debug(20117, log_message)

        # Determine the number of key pairs read
        records = 0
        for item in _data:
            records += len(item)
        return records
def process_cache(batch_size=500, max_duration=3600, fileage=10, script=False):
    """Ingest cache files into the database in batches.

    Args:
        batch_size: Number of files to process at a time
        max_duration: Maximum runtime in seconds before the loop stops
        fileage: Minimum age of files to be processed in seconds
        script: True when run as a standalone script; enables lockfile
            handling (the daemon has its own locking mechanism)

    Returns:
        success: True if successful

    Method:
        1) Read the files in the cache directory older than a threshold
        2) Process the data in the files
        3) Repeat, if new files are found that are older than the threshold,
           or stop if we have been running too long.

    Batches of files are read to reduce the risk of overloading available
    memory, and ensure we can exit if we are running too long.

    """
    # Initialize key variables
    records = 0
    start = time.time()
    looptime = 0
    files_read = 0
    success = True
    # Get cache directory
    config = Config()
    directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)
    # Log what we are doing
    log_message = 'Processing ingest cache.'
    log.log2info(20085, log_message)
    # Count the .json files present when we started; used as an upper bound
    # so files written after startup cannot keep the loop alive forever.
    files_found = len(
        [_ for _ in os.listdir(directory) if _.endswith('.json')])
    # Create lockfile only if running as a script.
    # The daemon has its own locking mechanism
    if bool(script) is True:
        success = _lock()
        if bool(success) is False:
            return bool(success)
    # Process the files in batches to reduce the database connection count
    while True:
        # Agents constantly update files. Raise the age threshold by the
        # duration of the previous pass so we do not loop forever on files
        # that are always newer than the original fileage.
        loopstart = time.time()
        fileage = fileage + looptime
        # Stop condition 1 of 3: running longer than max_duration.
        duration = loopstart - start
        if duration > max_duration:
            log_message = ('''\
Stopping ingester after exceeding the maximum runtime duration of {}s. \
This can be adjusted on the CLI.'''.format(max_duration))
            log.log2info(20022, log_message)
            break
        # Stop condition 2 of 3: everything found at startup has been read.
        if files_read >= files_found:
            # No need to log. This is an expected outcome.
            break
        # Read and ingest one batch of cache files.
        cache = Cache(batch_size=batch_size, age=fileage)
        count = cache.ingest()
        # Stop condition 3 of 3: the batch contained no files.
        if bool(cache.files) is False:
            # No need to log. This is an expected outcome.
            break
        # Track the records processed, looptime and files read
        records += count
        files_read += cache.files
        looptime = max(time.time() - loopstart, looptime)
    # Print result
    duration = time.time() - start
    if bool(records) is True and bool(duration) is True:
        log_message = ('''\
Agent cache ingest completed. {0} records processed in {1:.2f} seconds, \
{2:.2f} records / second. {3} files read. \
'''.format(records, duration, records / duration, files_read))
        log.log2info(20084, log_message)
    else:
        log_message = 'No files found to ingest'
        log.log2info(20021, log_message)
    # Delete lockfile only if running as a script.
    # The daemon has its own locking mechanism
    if bool(script) is True:
        success = _lock(delete=True)
    # Log what we are doing
    log_message = 'Finished processing ingest cache.'
    log.log2info(20020, log_message)
    return bool(success)
def _lock(delete=False):
    """Create or remove the ingester lock file.

    Args:
        delete: Remove the lock file if True, otherwise create it

    Returns:
        success: True if the requested lock operation succeeded
            (docstring fix: the original claimed "None" but a bool is
            returned and callers rely on it)

    """
    # Initialize key variables
    config = Config()
    lockfile = files.lock_file(PATTOO_INGESTER_NAME, config)
    success = False

    if bool(delete) is False:
        # Create the lock file, refusing to proceed when one already exists
        # (another ingester instance is presumably running).
        if os.path.exists(lockfile) is True:
            log_message = ('''\
Lockfile {} exists. Will not start ingester script. Is another Ingester \
instance running? If not, delete the lockfile and rerun this script.\
'''.format(lockfile))
            log.log2warning(20023, log_message)
        else:
            open(lockfile, 'a').close()
            success = True
    else:
        # Remove the lock file created by a previous successful _lock() call.
        if os.path.exists(lockfile) is True:
            try:
                os.remove(lockfile)
                success = True
            except OSError:
                # BUGFIX: catch only OS-level deletion errors rather than a
                # bare "except" that also swallows KeyboardInterrupt.
                log_message = ('Error deleting lockfile {}.'.format(lockfile))
                log.log2warning(20107, log_message)
        else:
            log_message = ('Lockfile {} not found.'.format(lockfile))
            log.log2warning(20108, log_message)
    return success
| 30.191638 | 79 | 0.599308 |
import os
import time
from pattoo_shared import log, files, converter
from pattoo.configuration import ConfigIngester as Config
from pattoo.constants import PATTOO_API_AGENT_NAME, PATTOO_INGESTER_NAME
from .records import Records
class Cache():
def __init__(self, batch_size=500, age=0):
config = Config()
directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)
self._batch_id = int(time.time() * 1000)
self._data = files.read_json_files(
directory, die=False, age=age, count=batch_size)
self.files = len(self._data)
def records(self):
_cache = {}
result = []
for filepath, json_data in sorted(self._data):
if bool(json_data) is True and isinstance(json_data, dict) is True:
pdbrs = converter.cache_to_keypairs(json_data)
if bool(pdbrs) is False:
log_message = ('''\
File {} has invalid data. It will not be processed'''.format(filepath))
log.log2info(20026, log_message)
continue
pattoo_agent_id = pdbrs[0].pattoo_agent_id
if pattoo_agent_id in _cache:
_cache[pattoo_agent_id].extend(pdbrs)
else:
_cache[pattoo_agent_id] = pdbrs
if bool(_cache) is True:
for _, item in sorted(_cache.items()):
result.append(item)
return result
def purge(self):
filepaths = [filepath for filepath, _ in self._data]
for filepath in filepaths:
if os.path.exists(filepath):
try:
os.remove(filepath)
except:
log_message = ('''\
Error deleting cache file {}.'''.format(filepath))
log.log2warning(20110, log_message)
def ingest(self):
_data = self.records()
if bool(_data) is True:
log_message = ('''\
Processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
log.log2debug(20004, log_message)
_records = Records(_data)
_records.ingest()
self.purge()
log_message = ('''\
Finished processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
log.log2debug(20117, log_message)
records = 0
for item in _data:
records += len(item)
return records
def process_cache(batch_size=500, max_duration=3600, fileage=10, script=False):
records = 0
start = time.time()
looptime = 0
files_read = 0
success = True
config = Config()
directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)
log_message = 'Processing ingest cache.'
log.log2info(20085, log_message)
files_found = len(
[_ for _ in os.listdir(directory) if _.endswith('.json')])
if bool(script) is True:
success = _lock()
if bool(success) is False:
return bool(success)
while True:
# situation where we always have files available that are newer than
# the desired fileage.
loopstart = time.time()
fileage = fileage + looptime
# Automatically stop if we are going on too long.(1 of 2)
duration = loopstart - start
if duration > max_duration:
log_message = ('''\
Stopping ingester after exceeding the maximum runtime duration of {}s. \
This can be adjusted on the CLI.'''.format(max_duration))
log.log2info(20022, log_message)
break
# Automatically stop if we are going on too long.(2 of 2)
if files_read >= files_found:
# No need to log. This is an expected outcome.
break
# Read data from cache. Stop if there is no data found.
cache = Cache(batch_size=batch_size, age=fileage)
count = cache.ingest()
# Automatically stop if we are going on too long.(2 of 2)
if bool(cache.files) is False:
# No need to log. This is an expected outcome.
break
# Get the records processed, looptime and files read
records += count
files_read += cache.files
looptime = max(time.time() - loopstart, looptime)
# Print result
duration = time.time() - start
if bool(records) is True and bool(duration) is True:
log_message = ('''\
Agent cache ingest completed. {0} records processed in {1:.2f} seconds, \
{2:.2f} records / second. {3} files read. \
'''.format(records, duration, records / duration, files_read))
log.log2info(20084, log_message)
else:
log_message = 'No files found to ingest'
log.log2info(20021, log_message)
# Delete lockfile only if running as a script.
# The daemon has its own locking mechanism
if bool(script) is True:
success = _lock(delete=True)
# Log what we are doing
log_message = 'Finished processing ingest cache.'
log.log2info(20020, log_message)
return bool(success)
def _lock(delete=False):
# Initialize key variables
config = Config()
lockfile = files.lock_file(PATTOO_INGESTER_NAME, config)
success = False
# Lock
if bool(delete) is False:
if os.path.exists(lockfile) is True:
log_message = ('''\
Lockfile {} exists. Will not start ingester script. Is another Ingester \
instance running? If not, delete the lockfile and rerun this script.\
'''.format(lockfile))
log.log2warning(20023, log_message)
else:
open(lockfile, 'a').close()
success = True
else:
if os.path.exists(lockfile) is True:
try:
os.remove(lockfile)
success = True
except:
log_message = ('Error deleting lockfile {}.'.format(lockfile))
log.log2warning(20107, log_message)
else:
log_message = ('Lockfile {} not found.'.format(lockfile))
log.log2warning(20108, log_message)
return success
| true | true |
f7218963b535569939ecb7f8ec24da1fd34de53b | 8,127 | py | Python | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 3 | 2019-08-28T17:10:29.000Z | 2020-11-22T14:06:45.000Z | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 1 | 2019-11-03T12:02:43.000Z | 2019-11-20T02:04:36.000Z | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 2 | 2019-08-29T02:32:56.000Z | 2019-12-22T17:44:26.000Z | """
The class wrapper for the networks
"""
# Built-in
import os
import time
# Torch
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
# Libs
import numpy as np
# Own module
class Network(object):
    """Wrapper that owns a model plus its loss, optimizer and training loop."""

    def __init__(self, model_fn, flags, train_loader, test_loader,
                 ckpt_dir=os.path.join(os.path.abspath(''), 'models'),
                 inference_mode=False, saved_model=None):
        """Initialize the wrapper.

        Args:
            model_fn: Callable that builds the network module from flags
            flags: Object holding the hyper-parameter specification
            train_loader: DataLoader for the training set
            test_loader: DataLoader for the evaluation set
            ckpt_dir: Root directory for model checkpoints
            inference_mode: If True, reuse the checkpoint folder of
                ``saved_model`` instead of creating a new timestamped one
            saved_model: Name of a previously trained model folder
        """
        self.model_fn = model_fn    # The model maker function
        self.flags = flags          # The Flags containing the specs
        if inference_mode:          # Inference mode: use the saved model folder
            self.ckpt_dir = os.path.join(ckpt_dir, saved_model)
            self.saved_model = saved_model
        else:                       # Training mode: create a new ckpt folder
            self.ckpt_dir = os.path.join(
                ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
        self.model = self.create_model()
        self.loss = self.make_loss()    # None until called with tensors
        self.optm = self.make_optimizer()
        self.train_loader = train_loader
        self.test_loader = test_loader
        # Summary writer keeping the training curves for tensorboard
        self.log = SummaryWriter(self.ckpt_dir)
        self.best_validation_loss = float('inf')    # Best-so-far val loss

    def create_model(self):
        """Build the network module from the provided model fn and flags.

        Returns:
            The created nn module
        """
        model = self.model_fn(self.flags)
        print(model)
        return model

    def make_loss(self, logit=None, labels=None):
        """Compute the training loss, consistent at train and inference time.

        Args:
            logit: The output of the network
            labels: The ground-truth targets

        Returns:
            The total loss tensor, or None when no logit is given
        """
        if logit is None:
            return None
        MSE_loss = nn.functional.mse_loss(logit, labels)    # Data-fit term
        BDY_loss = 0    # Boundary loss: placeholder for later implementation
        return MSE_loss + BDY_loss

    def make_optimizer(self):
        """Create the optimizer named in the flags.

        Returns:
            The torch optimizer bound to the model parameters

        Raises:
            Exception: If flags.optim is not Adam, RMSprop or SGD
        """
        if self.flags.optim == 'Adam':
            op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr,
                                  weight_decay=self.flags.reg_scale)
        elif self.flags.optim == 'RMSprop':
            op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr,
                                     weight_decay=self.flags.reg_scale)
        elif self.flags.optim == 'SGD':
            op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr,
                                 weight_decay=self.flags.reg_scale)
        else:
            raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben")
        return op

    def save(self):
        """Save the whole model to the checkpoint folder as best_model.pt."""
        torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt'))

    def load(self):
        """Load the model saved as best_model.pt in the checkpoint folder."""
        # BUGFIX: torch.load returns the deserialized module; nn.Module has
        # no .load() method, so the previous self.model.load(...) raised.
        self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model.pt'))

    def train(self):
        """Run the training loop using the information given in the flags."""
        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()
        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0.0
            self.model.train()
            for j, (geometry, spectra) in enumerate(self.train_loader):
                if cuda:
                    geometry = geometry.cuda()  # Put data onto GPU
                    spectra = spectra.cuda()    # Put data onto GPU
                self.optm.zero_grad()                   # Zero the gradient first
                logit = self.model(geometry)            # Forward pass
                loss = self.make_loss(logit, spectra)   # Loss tensor
                loss.backward()                         # Backward gradients
                self.optm.step()                        # Optimizer step
                # BUGFIX: accumulate a python float; keeping the tensor
                # alive retained the autograd graph of every batch and
                # .data.numpy() failed on CUDA tensors.
                train_loss += loss.item()
            # BUGFIX: evaluate on multiples of eval_step. The original
            # truthiness test (epoch % eval_step) selected every epoch that
            # was NOT a multiple of eval_step.
            if epoch % self.flags.eval_step == 0:
                # Record the training loss to the tensorboard
                train_avg_loss = train_loss / (j + 1)
                self.log.add_scalar('Loss/train', train_avg_loss, epoch)
                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")
                test_loss = 0.0
                with torch.no_grad():   # No gradients needed for validation
                    for j, (geometry, spectra) in enumerate(self.test_loader):
                        if cuda:
                            geometry = geometry.cuda()
                            spectra = spectra.cuda()
                        logit = self.model(geometry)
                        loss = self.make_loss(logit, spectra)
                        test_loss += loss.item()
                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss / (j + 1)
                self.log.add_scalar('Loss/test', test_avg_loss, epoch)
                print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                      % (epoch, train_avg_loss, test_avg_loss ))
                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")
                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                              (epoch, self.best_validation_loss))
                        return None

    def evaluate(self, save_dir='data/'):
        """Evaluate the saved best model on the test set, dumping CSV files.

        Args:
            save_dir: Directory in which the truth/prediction CSVs are written
        """
        self.load()
        self.model.eval()   # Evaluation mode
        # Get the file names
        Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(self.saved_model))
        Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(self.saved_model))
        Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(self.saved_model))
        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt, \
                open(Ypred_file, 'a') as fyp:
            # BUGFIX: run under no_grad so model outputs carry no autograd
            # graph; .numpy() on a grad-requiring tensor raises.
            with torch.no_grad():
                for ind, (geometry, spectra) in enumerate(self.test_loader):
                    logits = self.model(geometry)
                    np.savetxt(fxt, geometry.numpy(), fmt='%.3f')
                    np.savetxt(fyt, spectra.numpy(), fmt='%.3f')
                    np.savetxt(fyp, logits.numpy(), fmt='%.3f')
| 47.526316 | 135 | 0.556909 |
import os
import time
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
import numpy as np
class Network(object):
def __init__(self, model_fn, flags, train_loader, test_loader,
ckpt_dir=os.path.join(os.path.abspath(''), 'models'),
inference_mode=False, saved_model=None):
self.model_fn = model_fn
self.flags = flags
if inference_mode:
self.ckpt_dir = os.path.join(ckpt_dir, saved_model)
self.saved_model = saved_model
else:
self.ckpt_dir = os.path.join(ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
self.model = self.create_model()
self.loss = self.make_loss()
self.optm = self.make_optimizer()
self.train_loader = train_loader
self.test_loader = test_loader
self.log = SummaryWriter(self.ckpt_dir)
self.best_validation_loss = float('inf')
def create_model(self):
model = self.model_fn(self.flags)
print(model)
return model
def make_loss(self, logit=None, labels=None):
if logit is None:
return None
MSE_loss = nn.functional.mse_loss(logit, labels)
BDY_loss = 0
return MSE_loss + BDY_loss
def make_optimizer(self):
if self.flags.optim == 'Adam':
op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'RMSprop':
op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'SGD':
op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
else:
raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben")
return op
def save(self):
torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt'))
def load(self):
self.model.load(torch.load(os.path.join(self.ckpt_dir, 'best_model.pt')))
def train(self):
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
for epoch in range(self.flags.train_step):
train_loss = 0
self.model.train()
for j, (geometry, spectra) in enumerate(self.train_loader):
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
self.optm.zero_grad()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra)
loss.backward()
self.optm.step()
train_loss += loss
if epoch % self.flags.eval_step:
train_avg_loss = train_loss.data.numpy() / (j+1)
self.log.add_scalar('Loss/train', train_avg_loss, epoch)
self.model.eval()
print("Doing Evaluation on the model now")
test_loss = 0
for j, (geometry, spectra) in enumerate(self.test_loader):
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra)
test_loss += loss
test_avg_loss = test_loss.data.numpy() / (j+1)
self.log.add_scalar('Loss/test', test_avg_loss, epoch)
print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
% (epoch, train_avg_loss, test_avg_loss ))
if test_avg_loss < self.best_validation_loss:
self.best_validation_loss = test_avg_loss
self.save()
print("Saving the model down...")
if self.best_validation_loss < self.flags.stop_threshold:
print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
(epoch, self.best_validation_loss))
return None
def evaluate(self, save_dir='data/'):
self.load()
self.model.eval()
Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(self.saved_model))
Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(self.saved_model))
Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(self.saved_model))
') as fxt,open(Ytruth_file, 'a') as fyt, open(Ypred_file,'a') as fyp:
for ind, (geometry, spectra) in enumerate(self.test_loader):
logits = self.model(geometry)
np.savetxt(fxt, geometry.numpy(), fmt='%.3f')
np.savetxt(fyt, spectra.numpy(), fmt='%.3f')
np.savetxt(fyp, logits.numpy(), fmt='%.3f')
| true | true |
f72189c34849c418bee945e1e54df7340ce233c9 | 435 | py | Python | virtual/lib/python3.8/site-packages/wtforms/fields/__init__.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 3 | 2022-01-04T18:26:21.000Z | 2022-02-02T00:10:50.000Z | venv/lib/python3.10/site-packages/wtforms/fields/__init__.py | superiorkid/rbac | 40f45849687075bc46a52985af22eab6cf83cbda | [
"MIT"
] | 1 | 2021-12-30T10:36:57.000Z | 2021-12-30T10:36:57.000Z | venv/lib/python3.10/site-packages/wtforms/fields/__init__.py | superiorkid/rbac | 40f45849687075bc46a52985af22eab6cf83cbda | [
"MIT"
] | 2 | 2022-02-12T15:33:59.000Z | 2022-02-14T15:36:31.000Z | from wtforms.fields.choices import *
from wtforms.fields.choices import SelectFieldBase
from wtforms.fields.core import Field
from wtforms.fields.core import Flags
from wtforms.fields.core import Label
from wtforms.fields.datetime import *
from wtforms.fields.form import *
from wtforms.fields.list import *
from wtforms.fields.numeric import *
from wtforms.fields.simple import *
from wtforms.utils import unset_value as _unset_value
| 36.25 | 53 | 0.832184 | from wtforms.fields.choices import *
from wtforms.fields.choices import SelectFieldBase
from wtforms.fields.core import Field
from wtforms.fields.core import Flags
from wtforms.fields.core import Label
from wtforms.fields.datetime import *
from wtforms.fields.form import *
from wtforms.fields.list import *
from wtforms.fields.numeric import *
from wtforms.fields.simple import *
from wtforms.utils import unset_value as _unset_value
| true | true |
f7218c5841c78da8df7b09b9049a325f9cfeaba6 | 8,968 | py | Python | custom_admin/views.py | samuira/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 1 | 2019-11-09T17:18:10.000Z | 2019-11-09T17:18:10.000Z | custom_admin/views.py | abhisek11/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 19 | 2019-12-05T00:13:31.000Z | 2022-03-11T23:58:13.000Z | custom_admin/views.py | abhisek11/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 1 | 2020-02-29T07:35:25.000Z | 2020-02-29T07:35:25.000Z | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.utils.text import slugify
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView
from blog.models import BlogPost
from custom_admin.models import User
from custom_admin.utils import Util
from .forms import LoginForm, RegisterForm, BlogPostCreateForm, BlogPostEditForm, UserEditForm
from django.shortcuts import redirect
from datetime import datetime
class Dashboard(LoginRequiredMixin, UserPassesTestMixin, View):
    """Admin landing page; visible to logged-in superusers only."""

    template_name = 'custom_admin/dashboard.html'
    login_url = reverse_lazy('login')

    def get(self, request):
        # The dashboard template needs no extra context.
        return render(request, self.template_name)

    def test_func(self):
        # Access gate used by UserPassesTestMixin.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        # Flash an error and bounce the user back to the login page.
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class Login(View):
    """Email/password login view for the custom admin.

    Fix: the original kept a class-level ``context = dict()`` shared by every
    request (Django creates a new view instance per request, but class
    attributes are shared), which is a race condition under concurrency and
    was papered over with ``self.context.clear()`` calls.  The context is now
    built fresh per request.
    """

    template_name = 'custom_admin/account/login.html'
    form_class = LoginForm

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        context = {}
        form = self.form_class(request.POST)
        context['form'] = form
        if form.is_valid():
            user = authenticate(request=request, email=request.POST['email'], password=request.POST['password'])
            if user:
                login(request, user)
                return redirect('dashboard')
            # Valid form but bad credentials.
            messages.error(request, 'Incorrect Email or Password')
        else:
            context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class Register(View):
    """Self-service account registration for the custom admin.

    Fix: replaces the class-level ``context = dict()`` (shared between
    concurrent requests -- a race condition) with a per-request dict.
    """

    template_name = 'custom_admin/account/register.html'
    form_class = RegisterForm

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        context = {}
        form = self.form_class(request.POST, request=request)
        context['form'] = form
        if form.is_valid():
            try:
                User.objects.create_user(email=request.POST['email'], password=request.POST['password'])
            except ValidationError as e:
                # Surface each model-level validation message to the user.
                for error in e.message_dict.values():
                    messages.error(request, error[0])
            else:
                return redirect('login')
        else:
            context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class Logout(LoginRequiredMixin, UserPassesTestMixin, View):
    """Log the current superuser out and return to the login screen."""

    login_url = reverse_lazy('login')

    def get(self, request):
        logout(request)
        return HttpResponseRedirect(reverse('login'))

    def test_func(self):
        # Access gate used by UserPassesTestMixin.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class BlogList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated listing of every blog post for the admin area (superusers only)."""

    # ListView configuration.
    queryset = BlogPost.objects.all()
    context_object_name = 'blog_post'
    paginate_by = 10
    template_name = 'custom_admin/blog/list.html'
    login_url = reverse_lazy('login')

    def test_func(self):
        # Access gate used by UserPassesTestMixin.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class BlogCreate(LoginRequiredMixin, UserPassesTestMixin, View):
    """Create a new blog post (superusers only).

    Fixes: the class-level ``context = dict()`` was shared across concurrent
    requests (race condition); it is now per-request.  The ``ckeditor`` flag
    is also included when re-rendering the form with validation errors, so
    the rich-text editor assets still load on the error path.
    """

    template_name = 'custom_admin/blog/create.html'
    login_url = reverse_lazy('login')
    form_class = BlogPostCreateForm

    def test_func(self):
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request):
        # 'ckeditor' tells the template to load the rich-text editor assets.
        return render(request, self.template_name, {'ckeditor': True})

    def post(self, request, *args, **kwargs):
        context = {'ckeditor': True}
        form = self.form_class(request.POST, request.FILES)
        context['form'] = form
        if form.is_valid():
            BlogPost.objects.create(
                created_by=request.user,
                title_image=form.cleaned_data.get('title_image', ''),
                title=form.cleaned_data.get('title'),
                description=form.cleaned_data.get('bp_description'),
                slug=slugify(form.cleaned_data.get('title'))
            )
            messages.success(self.request, 'Blog has been created successfully.')
            return HttpResponseRedirect(reverse('blog-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class BlogEdit(LoginRequiredMixin, UserPassesTestMixin, View):
    """Edit an existing blog post (superusers only).

    Bug fix: ``post`` previously read the blog out of a class-level context
    dict populated by a *previous GET* -- state shared across all requests,
    so a concurrent GET for a different post could swap the record being
    edited.  The target row is now fetched from the URL kwarg in ``post``
    itself (mirroring ``UserEdit``), and the context is per-request.
    """

    template_name = 'custom_admin/blog/edit.html'
    login_url = reverse_lazy('login')
    form_class = BlogPostEditForm

    def test_func(self):
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        # 'ckeditor' tells the template to load the rich-text editor assets.
        context = {
            'ckeditor': True,
            'blog': BlogPost.objects.get(pk=kwargs['pk']),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        blog = BlogPost.objects.get(pk=kwargs['pk'])
        context = {'ckeditor': True, 'blog': blog}
        form = self.form_class(request.POST, request.FILES, pk=blog.id)
        context['form'] = form
        if form.is_valid():
            # Keep the old image when no replacement was uploaded.
            blog.title_image = form.cleaned_data.get('title_image', '') or blog.title_image
            blog.title = form.cleaned_data.get('title')
            blog.is_verified = form.cleaned_data.get('is_verified')
            # Stamp the publish time the first time the post is verified.
            blog.published_on = datetime.now() if form.cleaned_data.get('is_verified') and not blog.published_on else blog.published_on
            blog.description = form.cleaned_data.get('bp_description')
            blog.slug = slugify(form.cleaned_data.get('title'))
            blog.save()
            messages.success(self.request, 'Blog has been updated successfully.')
            return HttpResponseRedirect(reverse('blog-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class BlogDelete(LoginRequiredMixin, UserPassesTestMixin, View):
    """Delete one blog post by primary key, then return to the listing (superusers only)."""

    template_name = 'custom_admin/blog/list.html'
    login_url = reverse_lazy('login')

    def get(self, request, **kwargs):
        target = BlogPost.objects.get(pk=kwargs['pk'])
        target.delete()
        messages.success(self.request, 'Blog has been deleted successfully.')
        return HttpResponseRedirect(reverse('blog-list'))

    def test_func(self):
        # Access gate used by UserPassesTestMixin.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class UserList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated listing of all user accounts (superusers only)."""

    # ListView configuration.
    queryset = User.objects.all()
    context_object_name = 'user_list'
    paginate_by = 10
    template_name = 'custom_admin/user/list.html'
    login_url = reverse_lazy('login')

    def test_func(self):
        # Access gate used by UserPassesTestMixin.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class UserEdit(LoginRequiredMixin, UserPassesTestMixin, View):
    """Edit a user account (superusers only).

    Fix: replaces the class-level ``context = dict()`` shared across all
    concurrent requests (race condition) with a per-request dict; the target
    user is fetched from the URL kwarg in each handler, as before.
    """

    template_name = 'custom_admin/user/edit.html'
    login_url = reverse_lazy('login')
    form_class = UserEditForm

    def test_func(self):
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        user = User.objects.get(pk=kwargs['pk'])
        return render(request, self.template_name, {'user': user})

    def post(self, request, *args, **kwargs):
        user = User.objects.get(pk=kwargs['pk'])
        context = {'user': user}
        form = self.form_class(request.POST, request.FILES, pk=user.id)
        context['form'] = form
        if form.is_valid():
            # Keep the existing avatar when no new file was uploaded.
            user.avatar = form.cleaned_data.get('avatar') or user.avatar
            user.first_name = form.cleaned_data.get('first_name', '')
            user.last_name = form.cleaned_data.get('last_name', '')
            user.phone = form.cleaned_data.get('phone', '')
            user.is_superuser = form.cleaned_data.get('is_superuser', False)
            user.is_staff = form.cleaned_data.get('is_staff', False)
            user.is_active = form.cleaned_data.get('is_active', False)
            user.save()
            messages.success(self.request, 'User has been updated successfully.')
            return HttpResponseRedirect(reverse('user-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
| 33.092251 | 126 | 0.748104 | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.utils.text import slugify
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView
from blog.models import BlogPost
from custom_admin.models import User
from custom_admin.utils import Util
from .forms import LoginForm, RegisterForm, BlogPostCreateForm, BlogPostEditForm, UserEditForm
from django.shortcuts import redirect
from datetime import datetime
class Dashboard(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/dashboard.html'
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
return render(request, self.template_name)
class Login(View):
template_name = 'custom_admin/account/login.html'
form_class = LoginForm
context = dict()
def get(self, request, *args, **kwargs):
self.context.clear()
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST)
self.context['form'] = form
if form.is_valid():
user = authenticate(request=request, email=request.POST['email'], password=request.POST['password'])
if user:
login(request, user)
return redirect('dashboard')
else:
messages.error(request, 'Incorrect Email or Password')
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class Register(View):
template_name = 'custom_admin/account/register.html'
form_class = RegisterForm
context = dict()
def get(self, request, *args, **kwargs):
self.context.clear()
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST, request=request)
self.context['form'] = form
if form.is_valid():
try:
user = User.objects.create_user(email=request.POST['email'], password=request.POST['password'])
except ValidationError as e:
[messages.error(request, error[0]) for error in e.message_dict.values()]
else:
return redirect('login')
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class Logout(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('login'))
class BlogList(LoginRequiredMixin, UserPassesTestMixin, ListView):
template_name = 'custom_admin/blog/list.html'
login_url = reverse_lazy('login')
queryset = BlogPost.objects.all()
paginate_by = 10
context_object_name = 'blog_post'
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
class BlogCreate(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/create.html'
login_url = reverse_lazy('login')
form_class = BlogPostCreateForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
self.context.clear()
self.context['ckeditor'] = True
print(self.context)
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST, request.FILES)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
BlogPost.objects.create(
created_by=request.user,
title_image=form.cleaned_data.get('title_image', ''),
title=form.cleaned_data.get('title'),
description=form.cleaned_data.get('bp_description'),
slug=slugify(form.cleaned_data.get('title'))
)
messages.success(self.request, 'Blog has been created successfully.')
return HttpResponseRedirect(reverse('blog-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class BlogEdit(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/edit.html'
login_url = reverse_lazy('login')
form_class = BlogPostEditForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
self.context['ckeditor'] = True
self.context['blog'] = BlogPost.objects.get(pk=kwargs['pk'])
print(self.context, kwargs['pk'])
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST, request.FILES, pk=self.context['blog'].id)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
blog = self.context['blog']
blog.title_image = form.cleaned_data.get('title_image', '') or blog.title_image
blog.title = form.cleaned_data.get('title')
blog.is_verified = form.cleaned_data.get('is_verified')
blog.published_on = datetime.now() if form.cleaned_data.get('is_verified') and not blog.published_on else blog.published_on
blog.description = form.cleaned_data.get('bp_description')
blog.slug = slugify(form.cleaned_data.get('title'))
blog.save()
messages.success(self.request, 'Blog has been updated successfully.')
return HttpResponseRedirect(reverse('blog-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class BlogDelete(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/list.html'
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
BlogPost.objects.get(pk=kwargs['pk']).delete()
messages.success(self.request, 'Blog has been deleted successfully.')
return HttpResponseRedirect(reverse('blog-list'))
class UserList(LoginRequiredMixin, UserPassesTestMixin, ListView):
template_name = 'custom_admin/user/list.html'
login_url = reverse_lazy('login')
queryset = User.objects.all()
paginate_by = 10
context_object_name = 'user_list'
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
class UserEdit(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/user/edit.html'
login_url = reverse_lazy('login')
form_class = UserEditForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
self.context['user'] = User.objects.get(pk=kwargs['pk'])
print(self.context, kwargs['pk'])
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.context['user'] = User.objects.get(pk=kwargs['pk'])
form = self.form_class(request.POST, request.FILES, pk=self.context['user'].id)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
user = self.context['user']
user.avatar = form.cleaned_data.get('avatar') or user.avatar
user.first_name = form.cleaned_data.get('first_name', '')
user.last_name = form.cleaned_data.get('last_name', '')
user.phone = form.cleaned_data.get('phone', '')
user.is_superuser = form.cleaned_data.get('is_superuser', False)
user.is_staff = form.cleaned_data.get('is_staff', False)
user.is_active = form.cleaned_data.get('is_active', False)
user.save()
messages.success(self.request, 'User has been updated successfully.')
return HttpResponseRedirect(reverse('user-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
print('Error:', error)
return render(request, self.template_name, self.context)
| true | true |
f7218c9e437eabf2dfc69680b59fad493a030b44 | 1,925 | py | Python | src/braille/braille_translator.py | stuart-stanley/dotspicejar | bcf0c4656764011744581c5ea052b47ee70a34f1 | [
"MIT"
] | null | null | null | src/braille/braille_translator.py | stuart-stanley/dotspicejar | bcf0c4656764011744581c5ea052b47ee70a34f1 | [
"MIT"
] | null | null | null | src/braille/braille_translator.py | stuart-stanley/dotspicejar | bcf0c4656764011744581c5ea052b47ee70a34f1 | [
"MIT"
] | null | null | null | from .braille_cell import BrailleCell
from .braille_string import BrailleString
class BrailleTranslator(object):
    """Translate lowercase ASCII letters into Braille cells."""

    # Shared letter -> BrailleCell table, built lazily on first instantiation.
    _simple_cells = None

    def __init__(self, text):
        self.__raw_text = text
        if BrailleTranslator._simple_cells is None:
            self.__setup_class_simple_cells()

    @property
    def as_grade_1(self):
        """Return a BrailleString pairing the raw text with its grade-1 cells."""
        cells = [self._simple_cells[ch] for ch in self.__raw_text]
        return BrailleString(self.__raw_text, cells)

    def __setup_class_simple_cells(self):
        # Dot layout per letter: (left column = dots 1-3, right column =
        # dots 4-6), where '*' is a raised dot and '.' is flat.
        dot_patterns = {
            'a': ('*..', '...'), 'b': ('**.', '...'), 'c': ('*..', '*..'),
            'd': ('*..', '**.'), 'e': ('*..', '.*.'), 'f': ('**.', '*..'),
            'g': ('**.', '**.'), 'h': ('**.', '.*.'), 'i': ('.*.', '*..'),
            'j': ('.*.', '**.'), 'k': ('*.*', '...'), 'l': ('***', '...'),
            'm': ('*.*', '*..'), 'n': ('*.*', '**.'), 'o': ('*.*', '.*.'),
            'p': ('***', '*..'), 'q': ('***', '**.'), 'r': ('***', '.*.'),
            's': ('.**', '*..'), 't': ('.**', '**.'), 'u': ('*.*', '..*'),
            'v': ('***', '..*'), 'w': ('.*.', '***'), 'x': ('*.*', '*.*'),
            'y': ('*.*', '***'), 'z': ('*.*', '.**'),
        }
        BrailleTranslator._simple_cells = {
            letter: BrailleCell(letter, left, right)
            for letter, (left, right) in dot_patterns.items()
        }
| 37.745098 | 54 | 0.424935 | from .braille_cell import BrailleCell
from .braille_string import BrailleString
class BrailleTranslator(object):
_simple_cells = None
def __init__(self, text):
self.__raw_text = text
if BrailleTranslator._simple_cells is None:
self.__setup_class_simple_cells()
@property
def as_grade_1(self):
cell_list = []
for c in self.__raw_text:
cell = self._simple_cells[c]
cell_list.append(cell)
bs = BrailleString(self.__raw_text, cell_list)
return bs
def __setup_class_simple_cells(self):
cd = {}
cd['a'] = BrailleCell('a', '*..', '...')
cd['b'] = BrailleCell('b', '**.', '...')
cd['c'] = BrailleCell('c', '*..', '*..')
cd['d'] = BrailleCell('d', '*..', '**.')
cd['e'] = BrailleCell('e', '*..', '.*.')
cd['f'] = BrailleCell('f', '**.', '*..')
cd['g'] = BrailleCell('g', '**.', '**.')
cd['h'] = BrailleCell('h', '**.', '.*.')
cd['i'] = BrailleCell('i', '.*.', '*..')
cd['j'] = BrailleCell('j', '.*.', '**.')
cd['k'] = BrailleCell('k', '*.*', '...')
cd['l'] = BrailleCell('l', '***', '...')
cd['m'] = BrailleCell('m', '*.*', '*..')
cd['n'] = BrailleCell('n', '*.*', '**.')
cd['o'] = BrailleCell('o', '*.*', '.*.')
cd['p'] = BrailleCell('p', '***', '*..')
cd['q'] = BrailleCell('q', '***', '**.')
cd['r'] = BrailleCell('r', '***', '.*.')
cd['s'] = BrailleCell('s', '.**', '*..')
cd['t'] = BrailleCell('t', '.**', '**.')
cd['u'] = BrailleCell('u', '*.*', '..*')
cd['v'] = BrailleCell('v', '***', '..*')
cd['w'] = BrailleCell('w', '.*.', '***')
cd['x'] = BrailleCell('x', '*.*', '*.*')
cd['y'] = BrailleCell('y', '*.*', '***')
cd['z'] = BrailleCell('z', '*.*', '.**')
BrailleTranslator._simple_cells = cd
| true | true |
f7218cb7a745b7fd90503d36440f0281125e16d4 | 3,402 | py | Python | cogs/error.py | Py-Verse/PyBot | dfbb029925f4d207eaabbb4d02884c27fb3c4164 | [
"MIT"
] | 8 | 2021-03-07T08:52:31.000Z | 2021-04-24T21:44:36.000Z | cogs/error.py | Developing-Studio/ci-PyBot | 4eb5aa44c0e469e2ec4f4fb51094229c3bee9441 | [
"MIT"
] | 1 | 2021-03-07T10:21:08.000Z | 2021-03-07T10:32:08.000Z | cogs/error.py | Developing-Studio/ci-PyBot | 4eb5aa44c0e469e2ec4f4fb51094229c3bee9441 | [
"MIT"
] | 4 | 2021-03-07T10:30:51.000Z | 2021-03-11T14:30:14.000Z | import math
import os
import sys
import traceback
import discord
from discord.ext import commands
class Errors(commands.Cog):
    """
    Global command-error handler: maps well-known command errors to
    user-facing messages and dumps anything unrecognised to stderr.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        print("Error cog loaded successfully")

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Handle every error raised while invoking a command."""
        # Commands that define their own on_error handler manage failures
        # themselves.
        if hasattr(ctx.command, "on_error"):
            return

        # get the original exception
        error = getattr(error, "original", error)

        if isinstance(error, commands.BotMissingPermissions):
            missing = [
                perm.replace("_", " ").replace("guild", "server").title()
                for perm in error.missing_perms
            ]
            if len(missing) > 2:
                fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
            else:
                fmt = " and ".join(missing)
            embed = discord.Embed(
                title="Missing Permissions",
                description=f"I am missing **{fmt}** permissions to run this command :(",
                color=0xFF0000,
            )
            # Bug fix: this embed was constructed but never sent in the
            # original, so the user got no feedback at all.
            await ctx.send(embed=embed)
            return

        if isinstance(error, commands.DisabledCommand):
            await ctx.send("This command has been disabled.")
            return

        if isinstance(error, commands.CommandOnCooldown):
            embed = discord.Embed(
                title="Cooldown",
                description=f"This command is on cooldown, please retry in {math.ceil(error.retry_after)}s.",
                color=0xFF0000,
            )
            await ctx.send(embed=embed)
            return

        if isinstance(error, commands.MissingPermissions):
            missing = [
                perm.replace("_", " ").replace("guild", "server").title()
                for perm in error.missing_perms
            ]
            if len(missing) > 2:
                fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
            else:
                fmt = " and ".join(missing)
            embed = discord.Embed(
                title="Insufficient Permission(s)",
                description=f"You need the **{fmt}** permission(s) to use this command.",
                color=0xFF0000,
            )
            await ctx.send(embed=embed)
            return

        if isinstance(error, commands.UserInputError):
            embed = discord.Embed(
                title="Error",
                color=0xFF0000,
            )
            await ctx.send(embed=embed)
            return

        if isinstance(error, commands.NoPrivateMessage):
            try:
                await ctx.author.send("This command cannot be used in direct messages.")
            except discord.Forbidden:
                # The user blocks DMs; re-raise so the failure is still logged.
                raise error
            return

        if isinstance(error, commands.CheckFailure):
            embed = discord.Embed(
                title="Permissions Not Satisfied",
                color=0xFF0000,
            )
            await ctx.send(embed=embed)
            return

        if isinstance(error, commands.CommandNotFound):
            return

        # Unrecognised error: print a full traceback for the maintainers.
        print("Ignoring exception in command {}:".format(ctx.command), file=sys.stderr)
        traceback.print_exception(
            type(error), error, error.__traceback__, file=sys.stderr
        )
def setup(bot):
    """Extension entry point used by ``bot.load_extension``: register the cog."""
    cog = Errors(bot)
    bot.add_cog(cog)
| 30.648649 | 109 | 0.531452 | import math
import os
import sys
import traceback
import discord
from discord.ext import commands
class Errors(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print("Error cog loaded successfully")
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, "on_error"):
return
error = getattr(error, "original", error)
if isinstance(error, commands.BotMissingPermissions):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in error.missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
embed = discord.Embed(
title="Missing Permissions",
description=f"I am missing **{fmt}** permissions to run this command :(",
color=0xFF0000,
)
return
if isinstance(error, commands.DisabledCommand):
await ctx.send("This command has been disabled.")
return
if isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(
title="Cooldown",
description=f"This command is on cooldown, please retry in {math.ceil(error.retry_after)}s.",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.MissingPermissions):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in error.missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
embed = discord.Embed(
title="Insufficient Permission(s)",
description=f"You need the **{fmt}** permission(s) to use this command.",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.UserInputError):
embed = discord.Embed(
title="Error",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.NoPrivateMessage):
try:
await ctx.author.send("This command cannot be used in direct messages.")
except discord.Forbidden:
raise error
return
if isinstance(error, commands.CheckFailure):
embed = discord.Embed(
title="Permissions Not Satisfied",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.CommandNotFound):
return
print("Ignoring exception in command {}:".format(ctx.command), file=sys.stderr)
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
def setup(bot):
bot.add_cog(Errors(bot))
| true | true |
f7218d6bb7dd8dbb82f3a28fabfbe622d4a4680d | 220 | py | Python | userlogin_test.py | kilonzijnr/passstore | e1f73d2599bbbd209e0242416c706c4ce259d3a5 | [
"MIT"
] | null | null | null | userlogin_test.py | kilonzijnr/passstore | e1f73d2599bbbd209e0242416c706c4ce259d3a5 | [
"MIT"
] | null | null | null | userlogin_test.py | kilonzijnr/passstore | e1f73d2599bbbd209e0242416c706c4ce259d3a5 | [
"MIT"
] | null | null | null | import unittest
from userlogin import User
class TestUser(unittest.TestCase):
"""
Test class to define test cases for the User class
Args:
unittest.TestCase: TestCase class creates test cases
""" | 24.444444 | 60 | 0.709091 | import unittest
from userlogin import User
class TestUser(unittest.TestCase): | true | true |
f7218df71a44862b66afa5b5c925534e4b131f25 | 3,289 | py | Python | computer_vision/learning-opencv-practical/image-process-100ask/Question_31_40/answers/answer_40.py | magic428/subjects_notes | 6930adbb3f445c11ca9d024abb12a53d6aca19e7 | [
"MIT"
] | 2 | 2020-03-18T17:13:00.000Z | 2020-03-25T02:34:03.000Z | computer_vision/learning-opencv-practical/image-process-100ask/Question_31_40/answers/answer_40.py | magic428/subjects_notes | 6930adbb3f445c11ca9d024abb12a53d6aca19e7 | [
"MIT"
] | null | null | null | computer_vision/learning-opencv-practical/image-process-100ask/Question_31_40/answers/answer_40.py | magic428/subjects_notes | 6930adbb3f445c11ca9d024abb12a53d6aca19e7 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read image
# BGR uint8 -> float32 (OpenCV loads channel order as B, G, R).
img = cv2.imread("imori.jpg").astype(np.float32)
H, W, C = img.shape
# RGB > YCbCr
# BT.601 luma weights (0.299/0.587/0.114); chroma channels offset by +128.
Y = 0.2990 * img[..., 2] + 0.5870 * img[..., 1] + 0.1140 * img[..., 0]
Cb = -0.1687 * img[..., 2] - 0.3313 * img[..., 1] + 0.5 * img[..., 0] + 128.
Cr = 0.5 * img[..., 2] - 0.4187 * img[..., 1] - 0.0813 * img[..., 0] + 128.
YCC = np.zeros_like(img, dtype=np.float32)
YCC[..., 0] = Y
YCC[..., 1] = Cb
YCC[..., 2] = Cr
# DCT
# T: spatial block size; K: number of DCT coefficients kept per axis in the IDCT.
T = 8
K = 8
# X holds the (quantized) DCT coefficients for each T x T tile and channel.
X = np.zeros((H, W, C), dtype=np.float64)
# Q1: standard JPEG luminance quantization table (applied to the Y channel).
Q1 = np.array(((16, 11, 10, 16, 24, 40, 51, 61),
            (12, 12, 14, 19, 26, 58, 60, 55),
            (14, 13, 16, 24, 40, 57, 69, 56),
            (14, 17, 22, 29, 51, 87, 80, 62),
            (18, 22, 37, 56, 68, 109, 103, 77),
            (24, 35, 55, 64, 81, 104, 113, 92),
            (49, 64, 78, 87, 103, 121, 120, 101),
            (72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)
# Q2: standard JPEG chrominance quantization table (applied to Cb and Cr).
Q2 = np.array(((17, 18, 24, 47, 99, 99, 99, 99),
            (18, 21, 26, 66, 99, 99, 99, 99),
            (24, 26, 56, 99, 99, 99, 99, 99),
            (47, 66, 99, 99, 99, 99, 99, 99),
            (99, 99, 99, 99, 99, 99, 99, 99),
            (99, 99, 99, 99, 99, 99, 99, 99),
            (99, 99, 99, 99, 99, 99, 99, 99),
            (99, 99, 99, 99, 99, 99, 99, 99)), dtype=np.float32)
def w(x, y, u, v):
cu = 1.
cv = 1.
if u == 0:
cu /= np.sqrt(2)
if v == 0:
cv /= np.sqrt(2)
theta = np.pi / (2 * T)
return (( 2 * cu * cv / T) * np.cos((2*x+1)*u*theta) * np.cos((2*y+1)*v*theta))
for yi in range(0, H, T):
for xi in range(0, W, T):
for v in range(T):
for u in range(T):
for y in range(T):
for x in range(T):
for c in range(C):
X[v+yi, u+xi, c] += YCC[y+yi, x+xi, c] * w(x,y,u,v)
X[yi:yi+T, xi:xi+T, 0] = np.round(X[yi:yi+T, xi:xi+T, 0] / Q1) * Q1
X[yi:yi+T, xi:xi+T, 1] = np.round(X[yi:yi+T, xi:xi+T, 1] / Q2) * Q2
X[yi:yi+T, xi:xi+T, 2] = np.round(X[yi:yi+T, xi:xi+T, 2] / Q2) * Q2
# IDCT
IYCC = np.zeros((H, W, 3), dtype=np.float64)
for yi in range(0, H, T):
for xi in range(0, W, T):
for y in range(T):
for x in range(T):
for v in range(K):
for u in range(K):
IYCC[y+yi, x+xi] += X[v+yi, u+xi] * w(x,y,u,v)
# YCbCr > RGB
out = np.zeros_like(img, dtype=np.float32)
out[..., 2] = IYCC[..., 0] + (IYCC[..., 2] - 128.) * 1.4020
out[..., 1] = IYCC[..., 0] - (IYCC[..., 1] - 128.) * 0.3441 - (IYCC[..., 2] - 128.) * 0.7139
out[..., 0] = IYCC[..., 0] + (IYCC[..., 1] - 128.) * 1.7718
out[out>255] = 255
out = out.astype(np.uint8)
# MSE
v_max = 255.
mse = np.sum(np.power(np.abs(img.astype(np.float32) - out.astype(np.float32)), 2)) / (H * W * C)
psnr = 10 * np.log10(v_max ** 2 / mse)
print("PSNR >>", psnr)
bitrate = 1. * T * K ** 2 / (T ** 2)
print("bitrate >>", bitrate)
# Save result
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("out.jpg", out)
| 32.245098 | 97 | 0.419884 | import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("imori.jpg").astype(np.float32)
H, W, C = img.shape
Y = 0.2990 * img[..., 2] + 0.5870 * img[..., 1] + 0.1140 * img[..., 0]
Cb = -0.1687 * img[..., 2] - 0.3313 * img[..., 1] + 0.5 * img[..., 0] + 128.
Cr = 0.5 * img[..., 2] - 0.4187 * img[..., 1] - 0.0813 * img[..., 0] + 128.
YCC = np.zeros_like(img, dtype=np.float32)
YCC[..., 0] = Y
YCC[..., 1] = Cb
YCC[..., 2] = Cr
T = 8
K = 8
X = np.zeros((H, W, C), dtype=np.float64)
Q1 = np.array(((16, 11, 10, 16, 24, 40, 51, 61),
(12, 12, 14, 19, 26, 58, 60, 55),
(14, 13, 16, 24, 40, 57, 69, 56),
(14, 17, 22, 29, 51, 87, 80, 62),
(18, 22, 37, 56, 68, 109, 103, 77),
(24, 35, 55, 64, 81, 104, 113, 92),
(49, 64, 78, 87, 103, 121, 120, 101),
(72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)
Q2 = np.array(((17, 18, 24, 47, 99, 99, 99, 99),
(18, 21, 26, 66, 99, 99, 99, 99),
(24, 26, 56, 99, 99, 99, 99, 99),
(47, 66, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99)), dtype=np.float32)
def w(x, y, u, v):
cu = 1.
cv = 1.
if u == 0:
cu /= np.sqrt(2)
if v == 0:
cv /= np.sqrt(2)
theta = np.pi / (2 * T)
return (( 2 * cu * cv / T) * np.cos((2*x+1)*u*theta) * np.cos((2*y+1)*v*theta))
for yi in range(0, H, T):
for xi in range(0, W, T):
for v in range(T):
for u in range(T):
for y in range(T):
for x in range(T):
for c in range(C):
X[v+yi, u+xi, c] += YCC[y+yi, x+xi, c] * w(x,y,u,v)
X[yi:yi+T, xi:xi+T, 0] = np.round(X[yi:yi+T, xi:xi+T, 0] / Q1) * Q1
X[yi:yi+T, xi:xi+T, 1] = np.round(X[yi:yi+T, xi:xi+T, 1] / Q2) * Q2
X[yi:yi+T, xi:xi+T, 2] = np.round(X[yi:yi+T, xi:xi+T, 2] / Q2) * Q2
IYCC = np.zeros((H, W, 3), dtype=np.float64)
for yi in range(0, H, T):
for xi in range(0, W, T):
for y in range(T):
for x in range(T):
for v in range(K):
for u in range(K):
IYCC[y+yi, x+xi] += X[v+yi, u+xi] * w(x,y,u,v)
out = np.zeros_like(img, dtype=np.float32)
out[..., 2] = IYCC[..., 0] + (IYCC[..., 2] - 128.) * 1.4020
out[..., 1] = IYCC[..., 0] - (IYCC[..., 1] - 128.) * 0.3441 - (IYCC[..., 2] - 128.) * 0.7139
out[..., 0] = IYCC[..., 0] + (IYCC[..., 1] - 128.) * 1.7718
out[out>255] = 255
out = out.astype(np.uint8)
v_max = 255.
mse = np.sum(np.power(np.abs(img.astype(np.float32) - out.astype(np.float32)), 2)) / (H * W * C)
psnr = 10 * np.log10(v_max ** 2 / mse)
print("PSNR >>", psnr)
bitrate = 1. * T * K ** 2 / (T ** 2)
print("bitrate >>", bitrate)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("out.jpg", out)
| true | true |
f72190bc142f0445507b2063ace8933a5d98baaf | 2,238 | py | Python | examples/visexp.py | BatsiBoy/PyFrac | a898f6111295fa9196c382613639fc84e73d6035 | [
"MIT"
] | null | null | null | examples/visexp.py | BatsiBoy/PyFrac | a898f6111295fa9196c382613639fc84e73d6035 | [
"MIT"
] | null | null | null | examples/visexp.py | BatsiBoy/PyFrac | a898f6111295fa9196c382613639fc84e73d6035 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Name: Fractal Example - Exponential Curves
#Author: Sean Pope
#Example use of the fractal engine and coefficient block.
#Creates random coefficient blocks and draws frames to create a simple animation.
#This one is optimized for the exponential variation.
import matplotlib.pyplot as plt
import PyFrac as pf
plt.style.use('dark_background') #Mostly just used for the black background.
ax = plt.subplot(111,frameon=False) #Create a figure and axes for drawing.
ax.axes.get_xaxis().set_visible(False) #Hide axis
ax.axes.get_yaxis().set_visible(False)
plt.xlim(-1,1) #This function looks best in the biunit square.
plt.ylim(-1,1)
def quitloop(*args): #Closes the event loop when no longer needed.
global run
run = 0
return
fig = plt.gcf() #Get the figure that pyplot spawned.
fig.canvas.mpl_connect('close_event', quitloop) #If the window is closed, exit loop.
fig.canvas.mpl_connect('key_press_event', quitloop) #If a button is pressed, close.
mng = plt.get_current_fig_manager() #Grab the figure window
mng.full_screen_toggle() #Maximize the image to fill the screen.
""" Runtime variables """
run = 1 #Set to continue drawing frames, unset to terminate
framecount = 0 #Used to set frames drawn per coefficient block
frameclear = 0 #Starts deleting frames when set
coeffs = pf.coeffs.rand(0.9,0.2)
""" Main event loop. """
while(run):
framecount += 1
if framecount == 40: #Draws a new coefficient set if the current image is done.
frameclear = 1
coeffs = pf.coeffs.rand(0.9,0.2)
framecount -= 40 #Reset frame counter.
fractal = pf.engine.fractpoints(coeffs, 200, pf.variations.exponential) #Run the engine to get a figure.
plt.scatter(fractal['x'], fractal['y'], #Get the x,y coordinates for each point
marker='.', alpha=0.8, #Use small pixel markers with low opacity
c=fractal['color'], cmap='plasma', #Map the color row to this colormap.
s=25, edgecolor='none'
)
if frameclear:
del ax.collections[0] #Remove the oldest frame.
plt.pause(.01) #This pause draws the frame before looping.
plt.close(fig) | 34.430769 | 109 | 0.683199 |
import matplotlib.pyplot as plt
import PyFrac as pf
plt.style.use('dark_background')
ax = plt.subplot(111,frameon=False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.xlim(-1,1)
plt.ylim(-1,1)
def quitloop(*args):
global run
run = 0
return
fig = plt.gcf()
fig.canvas.mpl_connect('close_event', quitloop)
fig.canvas.mpl_connect('key_press_event', quitloop)
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
run = 1
framecount = 0
frameclear = 0
coeffs = pf.coeffs.rand(0.9,0.2)
while(run):
framecount += 1
if framecount == 40:
frameclear = 1
coeffs = pf.coeffs.rand(0.9,0.2)
framecount -= 40
fractal = pf.engine.fractpoints(coeffs, 200, pf.variations.exponential)
plt.scatter(fractal['x'], fractal['y'],
marker='.', alpha=0.8,
c=fractal['color'], cmap='plasma',
s=25, edgecolor='none'
)
if frameclear:
del ax.collections[0]
plt.pause(.01)
plt.close(fig) | true | true |
f721923f0db0c229c58f961a74feaeb820d768fc | 306 | py | Python | src/basic/011_thirds/use_requests.py | hbulpf/pydemo | 2989cc50781230718e46dcac5dc0ca70630ebffe | [
"Apache-2.0"
] | 6 | 2020-03-24T15:58:42.000Z | 2020-04-18T13:32:41.000Z | src/basic/011_thirds/use_requests.py | hbulpf/pydemo | 2989cc50781230718e46dcac5dc0ca70630ebffe | [
"Apache-2.0"
] | 1 | 2022-01-13T03:51:17.000Z | 2022-01-13T03:51:17.000Z | src/basic/011_thirds/use_requests.py | hbulpf/pydemo | 2989cc50781230718e46dcac5dc0ca70630ebffe | [
"Apache-2.0"
] | 1 | 2020-02-01T09:36:05.000Z | 2020-02-01T09:36:05.000Z | import requests
r = requests.get('https://www.baidu.com/')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
r = requests.get('https://www.baidu.com/', params={'wd': 'python'})
print(f'url:{r.url}')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
print(f'encoding:{r.encoding}') | 27.818182 | 67 | 0.679739 | import requests
r = requests.get('https://www.baidu.com/')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
r = requests.get('https://www.baidu.com/', params={'wd': 'python'})
print(f'url:{r.url}')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
print(f'encoding:{r.encoding}') | true | true |
f721925123231063587335f88669b985aa41c584 | 489 | py | Python | examples/loadsheet.py | Daviid1010/ethercalc-python | af79cb5c69e2caa0b7f1d88b14be5ca60e7d6a0b | [
"BSD-2-Clause"
] | 3 | 2017-01-26T11:29:18.000Z | 2018-02-02T14:54:03.000Z | examples/loadsheet.py | Daviid1010/ethercalc-python | af79cb5c69e2caa0b7f1d88b14be5ca60e7d6a0b | [
"BSD-2-Clause"
] | null | null | null | examples/loadsheet.py | Daviid1010/ethercalc-python | af79cb5c69e2caa0b7f1d88b14be5ca60e7d6a0b | [
"BSD-2-Clause"
] | 6 | 2016-05-11T15:42:59.000Z | 2022-02-25T19:50:34.000Z | #!/usr/bin/env python3
import ethercalc
import argparse
import pprint
import sys
parser = argparse.ArgumentParser(description="Dump ethercalc sheet")
parser.add_argument("sheet", metavar='sheet', help="sheet name")
parser.add_argument("-f", "--format", dest="format",
help="format", default="socialcalc")
args = parser.parse_args()
data = sys.stdin.buffer.read()
e = ethercalc.EtherCalc("http://localhost:8000")
a = e.update(data, format=args.format, id=args.sheet)
| 28.764706 | 68 | 0.715746 |
import ethercalc
import argparse
import pprint
import sys
parser = argparse.ArgumentParser(description="Dump ethercalc sheet")
parser.add_argument("sheet", metavar='sheet', help="sheet name")
parser.add_argument("-f", "--format", dest="format",
help="format", default="socialcalc")
args = parser.parse_args()
data = sys.stdin.buffer.read()
e = ethercalc.EtherCalc("http://localhost:8000")
a = e.update(data, format=args.format, id=args.sheet)
| true | true |
f72194e07175df8c6208e51d9aafe054145aca68 | 200 | py | Python | drone_squadron/api/thruster_api.py | OrderAndCh4oS/drone_squadron_api_prototype | 4d7c22cebb03576986d443634b17910cb460a60f | [
"MIT"
] | 1 | 2020-05-20T09:44:37.000Z | 2020-05-20T09:44:37.000Z | drone_squadron/api/thruster_api.py | sarcoma/drone_squadron_api_prototype | 4d7c22cebb03576986d443634b17910cb460a60f | [
"MIT"
] | 1 | 2021-06-01T22:30:10.000Z | 2021-06-01T22:30:10.000Z | drone_squadron/api/thruster_api.py | OrderAndCh4oS/drone_squadron_api_prototype | 4d7c22cebb03576986d443634b17910cb460a60f | [
"MIT"
] | null | null | null | from drone_squadron.api.base_api import BaseApi
from drone_squadron.crud.thruster_crud import ThrusterCrud
class ThrusterApi(BaseApi):
def __init__(self):
super().__init__(ThrusterCrud)
| 25 | 58 | 0.79 | from drone_squadron.api.base_api import BaseApi
from drone_squadron.crud.thruster_crud import ThrusterCrud
class ThrusterApi(BaseApi):
def __init__(self):
super().__init__(ThrusterCrud)
| true | true |
f7219557f313231bf047af09d8d81a13981c3f2b | 368 | py | Python | durgo_sdk/integrations/django/middleware.py | safwanrahman/durgo-python | 79b740e0500e1ba2bce7edcb47996587a9449964 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T21:56:45.000Z | 2020-08-12T21:56:45.000Z | durgo_sdk/integrations/django/middleware.py | Alig1493/durgo-python | 79b740e0500e1ba2bce7edcb47996587a9449964 | [
"BSD-3-Clause"
] | null | null | null | durgo_sdk/integrations/django/middleware.py | Alig1493/durgo-python | 79b740e0500e1ba2bce7edcb47996587a9449964 | [
"BSD-3-Clause"
] | 1 | 2020-03-21T18:30:28.000Z | 2020-03-21T18:30:28.000Z | from django.utils import timezone
class DurgoMiddleware:
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
start_time = timezone.now()
response = self.get_response(request)
end_time = timezone.now()
return response
| 21.647059 | 52 | 0.673913 | from django.utils import timezone
class DurgoMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
start_time = timezone.now()
response = self.get_response(request)
end_time = timezone.now()
return response
| true | true |
f72196382b201f0b3ce9c05e95a1507ab101ac39 | 367 | py | Python | test/testDepthfilling.py | zer01ike/HoleFilling | b1591485f37975c0793839880dbb6185a132d3f9 | [
"Apache-2.0"
] | 4 | 2019-02-18T08:58:19.000Z | 2021-11-05T01:20:32.000Z | test/testDepthfilling.py | zer01ike/HoleFilling | b1591485f37975c0793839880dbb6185a132d3f9 | [
"Apache-2.0"
] | null | null | null | test/testDepthfilling.py | zer01ike/HoleFilling | b1591485f37975c0793839880dbb6185a132d3f9 | [
"Apache-2.0"
] | 6 | 2018-05-21T10:08:20.000Z | 2021-11-05T01:20:35.000Z | from DepthFilling import DepthFilling
import cv2
DepthedImg = cv2.imread('../DataSet/Sequence/Warped/depth_0_w.bmp', 0)
DF = DepthFilling.DepthFilling(DepthedImg,63,63)
#depth_filled = DF.testKmeans(DepthedImg)
depth_filled = DF.depthfill()
cv2.imshow('depth', depth_filled)
cv2.imwrite('depthfill_book_0.bmp',depth_filled)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 28.230769 | 70 | 0.792916 | from DepthFilling import DepthFilling
import cv2
DepthedImg = cv2.imread('../DataSet/Sequence/Warped/depth_0_w.bmp', 0)
DF = DepthFilling.DepthFilling(DepthedImg,63,63)
depth_filled = DF.depthfill()
cv2.imshow('depth', depth_filled)
cv2.imwrite('depthfill_book_0.bmp',depth_filled)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true | true |
f7219684ce3f2077f43b7fa0f52973b32fe1628b | 1,607 | py | Python | tests/components/recorder/test_util.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/recorder/test_util.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/recorder/test_util.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | 1 | 2019-04-24T14:10:08.000Z | 2019-04-24T14:10:08.000Z | """Test util methods."""
from unittest.mock import MagicMock, patch
import pytest
from openpeerpower.components.recorder import util
from openpeerpower.components.recorder.const import DATA_INSTANCE
from tests.common import get_test_open_peer_power, init_recorder_component
@pytest.fixture
def opp_recorder():
"""Open Peer Power fixture with in-memory recorder."""
opp = get_test_open_peer_power()
def setup_recorder(config=None):
"""Set up with params."""
init_recorder_component(opp, config)
opp.start()
opp.block_till_done()
opp.data[DATA_INSTANCE].block_till_done()
return opp
yield setup_recorder
opp.stop()
def test_recorder_bad_commit(opp_recorder):
"""Bad _commit should retry 3 times."""
opp = opp_recorder()
def work(session):
"""Bad work."""
session.execute("select * from notthere")
with patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock, util.session_scope(opp=opp) as session:
res = util.commit(session, work)
assert res is False
assert e_mock.call_count == 3
def test_recorder_bad_execute(opp_recorder):
"""Bad execute, retry 3 times."""
from sqlalchemy.exc import SQLAlchemyError
opp_recorder()
def to_native():
"""Rasie exception."""
raise SQLAlchemyError()
mck1 = MagicMock()
mck1.to_native = to_native
with pytest.raises(SQLAlchemyError), patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock:
util.execute((mck1,))
assert e_mock.call_count == 2
| 25.109375 | 74 | 0.684505 | from unittest.mock import MagicMock, patch
import pytest
from openpeerpower.components.recorder import util
from openpeerpower.components.recorder.const import DATA_INSTANCE
from tests.common import get_test_open_peer_power, init_recorder_component
@pytest.fixture
def opp_recorder():
opp = get_test_open_peer_power()
def setup_recorder(config=None):
init_recorder_component(opp, config)
opp.start()
opp.block_till_done()
opp.data[DATA_INSTANCE].block_till_done()
return opp
yield setup_recorder
opp.stop()
def test_recorder_bad_commit(opp_recorder):
opp = opp_recorder()
def work(session):
session.execute("select * from notthere")
with patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock, util.session_scope(opp=opp) as session:
res = util.commit(session, work)
assert res is False
assert e_mock.call_count == 3
def test_recorder_bad_execute(opp_recorder):
from sqlalchemy.exc import SQLAlchemyError
opp_recorder()
def to_native():
raise SQLAlchemyError()
mck1 = MagicMock()
mck1.to_native = to_native
with pytest.raises(SQLAlchemyError), patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock:
util.execute((mck1,))
assert e_mock.call_count == 2
| true | true |
f72196e96928e506436940a1aaab2796da44a560 | 31,440 | py | Python | 02 Main/mainRUN.py | dengniewei/Face-Recognition-Class-Attendance-System | 58aa85ff3b378991da3ccebd69e6ace5ec2af93f | [
"MIT"
] | null | null | null | 02 Main/mainRUN.py | dengniewei/Face-Recognition-Class-Attendance-System | 58aa85ff3b378991da3ccebd69e6ace5ec2af93f | [
"MIT"
] | null | null | null | 02 Main/mainRUN.py | dengniewei/Face-Recognition-Class-Attendance-System | 58aa85ff3b378991da3ccebd69e6ace5ec2af93f | [
"MIT"
] | null | null | null | # 导入必要的模块
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox, QInputDialog
from PyQt5.QtGui import QImage, QIcon, QPixmap
from PyQt5.QtCore import QTimer, QDateTime, QCoreApplication, QThread
import sys, os
import cv2, imutils
# 导入UI主界面
import main
# 导入信息采集框界面
import infoUI
# 导入打印中文脚本
import ChinesePutText
# 导入人脸识别检测包
from imutils.video import VideoStream
import numpy as np
import pickle
# 导入眨眼检测必要的包
from scipy.spatial import distance as dist
from imutils import face_utils
from datetime import datetime
import dlib
# 导入数据库操作包
import pymysql
# 定义活体检测-眨眼检测类
class BlinksDetectThread(QThread):
trigger = QtCore.pyqtSignal()
def __init__(self):
super(BlinksDetectThread, self).__init__()
# 定义两个常数,一个用于眼睛纵横比以指示眨眼,第二个作为眨眼连续帧数的阈值
self.EYE_AR_THRESH = 0.25
self.EYE_AR_CONSEC_FRAMES = 3
# 初始化帧计数器和总闪烁次数
self.COUNTER = 0
self.TOTAL = 0
# 初始化变量
self.A = 0
self.B = 0
self.C = 0
self.leftEye = 0
self.rightEye = 0
self.leftEAR = 0
self.rightEAR = 0
self.ear = 0
# 线程启动停止标识符
self.BlinksFlag = 1
# 初始化摄像头
self.cap3 = cv2.VideoCapture()
# 定义眨眼检测距离函数
def eye_aspect_ratio(self, eye):
# 计算两组垂直方向上的眼睛标记(x,y)坐标之间的欧氏距离
self.A = dist.euclidean(eye[1], eye[5])
self.B = dist.euclidean(eye[2], eye[4])
# 计算水平方向上的眼睛标记(x,y)坐标之间的欧氏距离
self.C = dist.euclidean(eye[0], eye[3])
# 计算眼睛的纵横比
ear = (self.A + self.B) / (2.0 * self.C)
# 返回眼睛的纵横比
return ear
def run(self):
if self.BlinksFlag == 1:
# 初始化dlib的人脸检测器(基于HOG),然后创建面部标志预测器
print("[INFO] loading facial landmark predictor...")
shape_predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor_path)
# 分别提取左眼和右眼的面部标志的索引
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# 在视频流的帧中循环
self.cap3.open(cv2.CAP_DSHOW)
while self.BlinksFlag == 1:
# 从线程视频文件流中抓取帧,调整其大小,并将其转换为灰度通道
vs = VideoStream(src=cv2.CAP_DSHOW).start()
frame3 = vs.read()
# ret, frame3 = self.cap3.read()
QApplication.processEvents()
frame3 = imutils.resize(frame3, width=900)
gray = cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY)
# 检测灰度帧中的人脸
rects = detector(gray, 0)
# 循环检测人脸
for rect in rects:
# 确定面部区域的面部标记,然后将面部标记(x,y)坐标转换为NumPy阵列
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# 提取左眼和右眼坐标,然后使用坐标计算双眼的眼睛纵横比
self.leftEye = shape[lStart:lEnd]
self.rightEye = shape[rStart:rEnd]
self.leftEAR = self.eye_aspect_ratio(self.leftEye)
self.rightEAR = self.eye_aspect_ratio(self.rightEye)
# 两只眼睛的平均眼睛纵横比
self.ear = (self.leftEAR + self.rightEAR) / 2.0
# 检查眼睛纵横比是否低于闪烁阈值,如果是,则增加闪烁帧计数器;否则执行else
if self.ear < self.EYE_AR_THRESH:
self.COUNTER += 1
else:
# 如果眼睛闭合次数足够则增加眨眼总数
if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:
self.TOTAL += 1
# 重置眼框计数器
self.COUNTER = 0
self.trigger.emit()
if self.TOTAL == 1:
print("活体!眨眼次数为: {}".format(self.TOTAL))
# 定义停止线程操作
def terminate(self):
self.BlinksFlag = 0
if flag2 == 0:
VideoStream(src=cv2.CAP_DSHOW).stop()
#########################################################################################
class MainWindow(QWidget):
# 类构造函数
def __init__(self):
# super()构造器方法返回父级的对象。__init__()方法是构造器的一个方法。
super().__init__()
self.ui = main.Ui_Form()
self.ui.setupUi(self)
# 设置窗口名称和图标
self.setWindowTitle('人脸识别考勤系统')
self.setWindowIcon(QIcon('fcblogo.jpg'))
# label_time显示系统时间
timer = QTimer(self)
timer.timeout.connect(self.showTimeText)
timer.start()
# 初始化摄像头
# self.url = 0 # 这样调用摄像头会报错,并且会卡死。
self.url = cv2.CAP_DSHOW # 默认调用0,如果要调用摄像头1,可以这样写:cv2.CAP_DSHOW + 1
self.cap = cv2.VideoCapture()
# 设置单张图片背景
pixmap = QPixmap('background1.png')
self.ui.label_camera.setPixmap(pixmap)
# 设置摄像头按键连接函数
self.ui.bt_openCamera.clicked.connect(self.openCamera)
# 设置开始考勤按键的回调函数
self.ui.bt_startCheck.clicked.connect(self.autoControl)
# 设置活体检测按键的回调函数
self.ui.bt_blinks.clicked.connect(self.BlinksThread)
# 设置“退出系统”按键事件, 按下之后退出主界面
self.ui.bt_exit.clicked.connect(QCoreApplication.instance().quit)
# 设置信息采集按键连接
self.bt_gathering = self.ui.bt_gathering
# 设置区分打开摄像头还是人脸识别的标识符
self.switch_bt = 0
global flag2
flag2 = 0
# 初始化需要记录的人名
self.record_name1 = ([])
# 设置更新人脸数据库的按键连接函数
self.ui.bt_generator.clicked.connect(self.trainModel)
# 设置查询班级人数按键的连接函数
self.ui.bt_check.clicked.connect(self.checkNums)
# 设置请假按键的连接函数
self.ui.bt_leave.clicked.connect(self.leaveButton)
# 设置漏签补签按键的连接函数
self.ui.bt_Supplement.clicked.connect(self.supplymentButton)
# 设置对输入内容的删除提示
self.ui.lineEdit.setClearButtonEnabled(True)
self.ui.lineEdit_2.setClearButtonEnabled(True)
# 设置查看结果(显示未到和迟到)按键的连接函数
self.ui.bt_view.clicked.connect(self.showLateAbsentee)
self.checkTime, ok = QInputDialog.getText(self, '考勤时间设定', '请输入考勤时间(格式为00:00:00):')
# 显示系统时间以及相关文字提示函数
def showTimeText(self):
# 设置宽度
self.ui.label_time.setFixedWidth(200)
# 设置显示文本格式
self.ui.label_time.setStyleSheet(
# "QLabel{background:white;}" 此处设置背景色
# "QLabel{color:rgb(300,300,300,120); font-size:14px; font-weight:bold; font-family:宋体;}"
"QLabel{font-size:14px; font-weight:bold; font-family:宋体;}"
)
datetime = QDateTime.currentDateTime().toString()
self.ui.label_time.setText("" + datetime)
# 显示“人脸识别考勤系统”文字
self.ui.label_title.setFixedWidth(400)
self.ui.label_title.setStyleSheet(
"QLabel{font-size:30px; font-weight:bold; font-family:宋体;}")
self.ui.label_title.setText("人脸识别考勤系统")
def openCamera(self, url):
# 判断摄像头是否打开,如果打开则为true,反之为false
flag = self.cap.isOpened()
if flag == False:
self.ui.label_logo.clear()
self.cap.open(self.url)
self.showCamera()
elif flag == True:
self.cap.release()
self.ui.label_logo.clear()
self.ui.label_camera.clear()
self.ui.bt_openCamera.setText(u'打开相机')
# 进入考勤模式,通过switch_bt进行控制的函数
def autoControl(self):
if self.switch_bt == 0:
self.switch_bt = 1
flag2 = 1
self.ui.bt_startCheck.setText(u'退出考勤')
self.showCamera()
elif self.switch_bt == 1:
self.switch_bt = 0
flag2 = 0
self.ui.bt_startCheck.setText(u'开始考勤')
self.showCamera()
def BlinksThread(self):
bt_text = self.ui.bt_blinks.text()
if bt_text == '活体检测':
# 初始化眨眼检测线程
self.startThread = BlinksDetectThread()
self.startThread.start() # 启动线程
self.ui.bt_blinks.setText('停止检测')
else:
self.ui.bt_blinks.setText('活体检测')
# self.startThread.terminate() # 停止线程
def showCamera(self):
# 如果按键按下
if self.switch_bt == 0:
self.ui.label_logo.clear()
self.ui.bt_openCamera.setText(u'关闭相机')
while (self.cap.isOpened()):
# 以BGR格式读取图像
ret, self.image = self.cap.read(cv2.CAP_DSHOW)
QApplication.processEvents() # 这句代码告诉QT处理来处理任何没有被处理的事件,并且将控制权返回给调用者,让代码变的没有那么卡
# 将图像转换为RGB格式
show = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB) # 这里指的是显示原图
# opencv 读取图片的样式,不能通过Qlabel进行显示,需要转换为Qimage QImage(uchar * data, int width,
self.showImage = QImage(show.data, show.shape[1], show.shape[0], QImage.Format_RGB888)
self.ui.label_camera.setPixmap(QPixmap.fromImage(self.showImage))
# 因为最后会存留一张图像在lable上,需要对lable进行清理
self.ui.label_camera.clear()
self.ui.bt_openCamera.setText(u'打开相机')
elif self.switch_bt == 1:
self.ui.label_logo.clear()
self.ui.bt_startCheck.setText(u'退出考勤')
# OpenCV深度学习人脸检测器的路径
detector = "face_detection_model"
# OpenCV深度学习面部嵌入模型的路径
embedding_model = "face_detection_model/openface_nn4.small2.v1.t7"
# 训练模型以识别面部的路径
recognizer_path = "output/recognizer.pickle"
# 标签编码器的路径
le_path = "output/le.pickle"
# 置信度
confidence_default = 0.5
# 从磁盘加载序列化面部检测器
protoPath = os.path.sep.join([detector, "deploy.prototxt"])
modelPath = os.path.sep.join([detector, "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# 从磁盘加载我们的序列化面嵌入模型
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(embedding_model)
# 加载实际的人脸识别模型和标签
recognizer = pickle.loads(open(recognizer_path, "rb").read())
le = pickle.loads(open(le_path, "rb").read())
# 循环来自视频文件流的帧
while (self.cap.isOpened()):
# 从线程视频流中抓取帧
ret, frame = self.cap.read()
QApplication.processEvents()
# 调整框架的大小以使其宽度为900像素(同时保持纵横比),然后抓取图像尺寸
frame = imutils.resize(frame, width=900)
(h, w) = frame.shape[:2]
# 从图像构造一个blob
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(frame, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
# 应用OpenCV的基于深度学习的人脸检测器来定位输入图像中的人脸
detector.setInput(imageBlob)
detections = detector.forward()
# 保存识别到的人脸
face_names = []
# 循环检测
for i in np.arange(0, detections.shape[2]):
# 提取与预测相关的置信度(即概率)
confidence = detections[0, 0, i, 2]
# 用于更新相机开关按键信息
flag = self.cap.isOpened()
if flag == False:
self.ui.bt_openCamera.setText(u'打开相机')
elif flag == True:
self.ui.bt_openCamera.setText(u'关闭相机')
# 过滤弱检测
if confidence > confidence_default:
# 计算面部边界框的(x,y)坐标
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# 提取面部ROI
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
# 确保面部宽度和高度足够大
if fW < 20 or fH < 20:
continue
# 为面部ROI构造一个blob,然后通过我们的面部嵌入模型传递blob以获得面部的128-d量化
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# 执行分类识别面部
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
# 绘制面部的边界框以及相关的概率
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
frame = cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
face_names.append(name)
bt_liveness = self.ui.bt_blinks.text()
if bt_liveness == '停止检测':
ChineseText = ChinesePutText.put_chinese_text('microsoft.ttf')
frame = ChineseText.draw_text(frame, (330, 80), ' 请眨眨眼睛 ', 25, (55, 255, 55))
# 显示输出框架
show_video = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # 这里指的是显示原图
# opencv读取图片的样式,不能通过Qlabel进行显示,需要转换为Qimage。
# QImage(uchar * data, int width, int height, int bytesPerLine, Format format)
self.showImage = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
self.ui.label_camera.setPixmap(QPixmap.fromImage(self.showImage))
self.set_name = set(face_names)
self.set_names = tuple(self.set_name)
self.recordNames()
# 因为最后一张画面会显示在GUI中,此处实现清除。
self.ui.label_camera.clear()
def recordNames(self):
if self.set_name.issubset(self.record_name1): # 如果self.set_names是self.record_names 的子集返回ture
pass # record_name1是要写进数据库中的名字信息 set_name是从摄像头中读出人脸的tuple形式
else:
self.different_name1 = self.set_name.difference(self.record_name1) # 获取到self.set_name有而self.record_name无的名字
self.record_name1 = self.set_name.union(self.record_name1) # 把self.record_name变成两个集合的并集
# different_name是为了获取到之前没有捕捉到的人脸,并再次将record_name1进行更新
# 将集合变成tuple,并统计人数
self.write_data = tuple(self.different_name1)
names_num = len(self.write_data)
# 显示签到人数
self.ui.lcd_2.display(len(self.record_name1))
if names_num > 0:
# 将签到信息写入数据库
self.lineTextInfo2 = []
# 打开数据库连接
db2 = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor2 = db2.cursor()
# 获取系统时间,保存到秒
import datetime
currentTime2 = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
results2 = self.useIDGetInfo(self.write_data[0])
# 判断是否迟到
import datetime
self.ymd = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.ymd2 = datetime.datetime.now().strftime("%H:%M:%S")
compareResult2 = self.compare_time('{}'.format(self.ymd2), '{}'.format(self.checkTime))
# 82800表示23个小时,在compare_time()函数中,如果第一个时间小于第二个时间,则为第一个时间加24h后再减去第二时间;
# 而正常的结果应该为'正常'.
if compareResult2 <= 82800:
self.description2 = '迟到'
else:
self.description2 = '正常'
self.lineTextInfo2.append((results2[0], results2[1], results2[2], currentTime2, self.description2))
print(self.lineTextInfo2)
# 写入数据库
try:
# 如果存在数据,先删除再写入。前提是设置唯一索引字段或者主键。
insert_sql2 = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users2 = self.lineTextInfo2
cursor2.executemany(insert_sql2, users2)
except Exception as e:
print(e)
print("SQL execute failed!")
else:
print("SQL execute success!")
QMessageBox.information(self, "Tips", "签到成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
# 提交到数据库执行
db2.commit()
cursor2.close()
db2.close()
# 比较时间大小,判断是否迟到
def compare_time(self, time1, time2):
import datetime
s_time = datetime.datetime.strptime(time1, '%H:%M:%S')
e_time = datetime.datetime.strptime(time2, '%H:%M:%S')
delta = s_time - e_time
return delta.seconds
# 查询班级人数
def checkNums(self):
# 选择的班级
input_Class = self.ui.comboBox.currentText()
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 查询语句,实现通过ID关键字检索个人信息的功能
sql = "select * from studentnums where class = {}".format(input_Class)
# 执行查询
if input_Class != '':
try:
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
self.nums = []
for i in results:
self.nums.append(i[1])
except:
print("Error: unable to fetch data")
# 用于查询每班的实到人数
sql2 = "select * from checkin where class = {}".format(input_Class)
# 执行查询
if input_Class != '':
try:
cursor.execute(sql2)
# 获取所有记录列表
results2 = cursor.fetchall()
self.nums2 = []
for i in results2:
self.nums2.append(i[2])
except:
print("Error: unable to fetch data")
# lcd控件显示人数
self.ui.lcd_1.display(self.nums[0])
self.ui.lcd_2.display(len(self.nums2))
# 关闭数据库连接
db.close()
# 请假/补签登记
def leaveButton(self):
self.leaveStudents(1)
def supplymentButton(self):
self.leaveStudents(2)
def leaveStudents(self, button):
self.lineTextInfo = []
# 为防止输入为空卡死,先进行是否输入数据的判断
if self.ui.lineEdit.isModified() or self.ui.lineEdit_2.isModified():
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 获取系统时间,保存到秒
currentTime = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if button == 1:
self.description = '请假'
self.lineTextID = self.ui.lineEdit.text()
results = self.useIDGetInfo(self.lineTextID)
elif button == 2:
self.description = '漏签补签'
self.lineTextID = self.ui.lineEdit_2.text()
results = self.useIDGetInfo(self.lineTextID)
self.lineTextInfo.append((results[0], results[1], results[2], currentTime, self.description))
# 写入数据库
try:
# 如果存在数据,先删除再写入。前提是设置唯一索引字段或者主键。
insert_sql = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users = self.lineTextInfo
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "{} 登记成功,请勿重复操作!".format(self.description), QMessageBox.Yes | QMessageBox.No)
# 提交到数据库执行
db.commit()
cursor.close()
db.close()
else:
QMessageBox.warning(self, "warning", "学号不能为空,请输入后重试!", QMessageBox.Yes | QMessageBox.No)
# 输入框清零
self.ui.lineEdit.clear()
self.ui.lineEdit_2.clear()
# 使用ID当索引找到其它信息
def useIDGetInfo(self, ID):
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 查询语句,实现通过ID关键字检索个人信息的功能
sql = "select * from students where ID = {}".format(ID)
# 执行查询
if ID != '':
try:
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
self.checkInfo = []
for i in results:
self.checkInfo.append(i[1])
self.checkInfo.append(i[0])
self.checkInfo.append(i[2])
return self.checkInfo
except:
print("Error: unable to fetch data")
# 显示迟到和未到
def showLateAbsentee(self):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
# 一定要注意字符串在检索时要加''!
sql1 = "select name from checkin where Description = '{}'".format('迟到')
sql2 = "select name from students"
try:
cursor.execute(sql1)
results = cursor.fetchall()
self.lateNums = []
for x in results:
self.lateNums.append(x[0])
self.lateNums.sort()
# print(self.lateNums)
except:
print("Error: unable to fetch latedata")
try:
cursor.execute(sql2)
results2 = cursor.fetchall()
self.allNums = []
for i in results2:
self.allNums.append(i[0])
self.allNums.sort()
print(self.allNums)
except:
print("Error: unable to fetch absenteedata")
db.commit()
cursor.close()
db.close()
# 集合运算,算出未到的和迟到的
self.AbsenteeNums = set(set(self.allNums) - set(self.lateNums))
self.AbsenteeNums = list(self.AbsenteeNums)
self.AbsenteeNums.sort()
# 在控件中显示未到的同学
rowLate = len(self.lateNums)
rowAbsentee = len(self.AbsenteeNums)
model1 = QtGui.QStandardItemModel(rowLate, 0)
# 设置数据行、列标题
model1.setHorizontalHeaderLabels(['姓名'])
# 设置填入数据内容
for row in range(rowLate):
item = QtGui.QStandardItem(self.lateNums[row])
# 设置每个位置的文本值
model1.setItem(row, 0, item)
# 指定显示的tableView控件,实例化表格视图
View1 = self.ui.tableView_escape
View1.setModel(model1)
# 迟到显示
model2 = QtGui.QStandardItemModel(rowAbsentee, 0)
# 设置数据行、列标题
model2.setHorizontalHeaderLabels(['姓名'])
# 设置填入数据内容
for row in range(rowAbsentee):
item = QtGui.QStandardItem(self.AbsenteeNums[row])
# 设置每个位置的文本值
model2.setItem(row, 0, item)
# 指定显示的tableView控件,实例化表格视图
View2 = self.ui.tableView_late
View2.setModel(model2)
# 训练人脸识别模型
def trainModel(self):
import GeneratorModel
GeneratorModel.Generator()
GeneratorModel.TrainModel()
print('Model have been trained!')
##########################################################################################
class infoDialog(QWidget):
    """Dialog for collecting a student's face images and personal data.

    Streams webcam frames with face boxes, saves per-student snapshot
    images into a dataset folder, and reads/writes the ``students`` table.
    """
    def __init__(self):
        # super() returns the parent (QWidget); __init__ runs its constructor.
        super().__init__()
        self.Dialog = infoUI.Ui_Form()
        self.Dialog.setupUi(self)
        # Window title and icon.
        self.setWindowTitle('个人信息采集')
        self.setWindowIcon(QIcon('fcblogo.jpg'))
        # Static background image shown before the camera is opened.
        pixmap = QPixmap('background2.png')
        self.Dialog.label_capture.setPixmap(pixmap)
        # Button wiring.
        self.Dialog.bt_collectInfo.clicked.connect(self.openCam)    # start/stop capture
        self.Dialog.bt_takephoto.clicked.connect(self.takePhoto)    # save one snapshot
        self.Dialog.bt_checkInfo.clicked.connect(self.checkInfo)    # query by ID
        self.Dialog.bt_changeInfo.clicked.connect(self.changeInfo)  # write record
        # Rows queued for insertion into the students table.
        self.users = []
        # Webcam source (DirectShow backend) and capture handle.
        self.url2 = cv2.CAP_DSHOW
        self.cap2 = cv2.VideoCapture()
        # Number of face snapshots taken so far.
        self.photos = 0
    def handle_click(self):
        """Show the dialog if it is not already visible."""
        if not self.isVisible():
            self.show()
    def handle_close(self):
        """Close the dialog."""
        self.close()
    def openCam(self):
        """Toggle the capture camera.

        When opening, first asks for the student number that names the
        dataset folder; when closing, releases the camera and restores
        the button label.
        """
        if not self.cap2.isOpened():
            # Ask for the student number used to name the image folder.
            self.text, self.ok = QInputDialog.getText(self, '创建个人图像数据库', '请输入学号:')
            if self.ok and self.text != '':
                self.Dialog.label_capture.clear()
                self.cap2.open(self.url2)
                self.showCapture()
        else:
            self.cap2.release()
            self.Dialog.label_capture.clear()
            self.Dialog.bt_collectInfo.setText(u'采集人像')
    def showCapture(self):
        """Stream camera frames with face boxes until the camera closes."""
        self.Dialog.bt_collectInfo.setText(u'停止采集')
        self.Dialog.label_capture.clear()
        # Haar cascade face detector.
        cascade = 'haarcascades_cuda/haarcascade_frontalface_default.xml'
        detector = cv2.CascadeClassifier(cascade)
        print("[INFO] starting video stream...")
        while self.cap2.isOpened():
            ret, frame2 = self.cap2.read()
            QApplication.processEvents()
            if not ret:
                # Camera unplugged or read failure: stop instead of
                # crashing on None.copy().
                break
            self.orig = frame2.copy()
            frame2 = imutils.resize(frame2, width=500)
            rects = detector.detectMultiScale(
                cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY),
                scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
            for (x, y, w, h) in rects:
                cv2.rectangle(frame2, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # Progress text is only drawn while a face is detected
                # (matches the original placement inside this loop).
                frame2 = cv2.putText(frame2, "Have token {}/20 faces".format(self.photos), (50, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                     (200, 100, 50), 2)
            # OpenCV frames are BGR; QImage expects RGB.
            show_video2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
            # QImage(uchar *data, int width, int height, Format format)
            self.showImage2 = QImage(show_video2.data, show_video2.shape[1], show_video2.shape[0], QImage.Format_RGB888)
            self.Dialog.label_capture.setPixmap(QPixmap.fromImage(self.showImage2))
        # Remove the last rendered frame from the label.
        self.Dialog.label_capture.clear()
    def mkdir(self, path):
        """Create directory *path* (and parents) if it does not exist.

        :returns: True when the directory was created, None otherwise
                  (kept for backward compatibility; callers ignore it).
        """
        # Strip surrounding whitespace and a trailing backslash.
        path = path.strip().rstrip("\\")
        if not os.path.exists(path):
            os.makedirs(path)
            return True
    def takePhoto(self):
        """Save the currently displayed camera frame as one dataset snapshot."""
        self.photos += 1
        # TODO: the dataset root is hard-coded to one machine's layout;
        # make it configurable.
        self.filename = "D:\\Github\\class-attendance-system-based-on-face-recognition\\02 Main\\dataset\\{}\\".format(self.text)
        self.mkdir(self.filename)
        photo_save_path = os.path.join(os.path.dirname(os.path.abspath('__file__')), '{}'.format(self.filename))
        # Timestamped file name keeps snapshots unique within the folder.
        self.showImage2.save(photo_save_path + datetime.now().strftime("%Y%m%d%H%M%S") + ".png")
        if self.photos == 20:
            QMessageBox.information(self, "Information", self.tr("采集成功!"), QMessageBox.Yes | QMessageBox.No)
    def checkInfo(self):
        """Query the students table by the typed ID and show the record."""
        self.input_ID = self.Dialog.lineEdit_ID.text()
        db = pymysql.connect(host="localhost", user="root",
                             password="mysql105", database="facerecognition")
        cursor = db.cursor()
        # Parameterized query: the previous str.format() version was open
        # to SQL injection through the ID field.
        sql = "SELECT * FROM STUDENTS WHERE ID = %s"
        # Pre-initialize so an empty ID or failed query still leaves a
        # valid (empty) list for the length check below (the original
        # raised AttributeError on an empty ID field).
        self.lists = []
        if self.input_ID != '':
            try:
                cursor.execute(sql, (self.input_ID,))
                for row in cursor.fetchall():
                    # First five columns: ID, name, class, sex, birthday.
                    self.lists.extend(row[:5])
            except Exception:
                print("Error: unable to fetch data")
        # 5 rows x 1 column view (row headers carry the field names).
        self.model = QtGui.QStandardItemModel(5, 0)
        self.model.setHorizontalHeaderLabels(['值'])
        self.model.setVerticalHeaderLabels(['学号', '姓名', '班级', '性别', '生日'])
        nums = len(self.lists)
        if nums == 0:
            QMessageBox.warning(self, "warning", "人脸数据库中无此人信息,请马上录入!", QMessageBox.Yes | QMessageBox.No)
        for row in range(nums):
            item = QtGui.QStandardItem(self.lists[row])
            self.model.setItem(row, 0, item)
        self.View = self.Dialog.tableView
        self.View.setModel(self.model)
        # Release the cursor as well (the original closed only the connection).
        cursor.close()
        db.close()
    def userInfo(self):
        """Collect the five form fields into ``self.users``.

        :returns: the accumulated list of (ID, Name, Class, Sex, Birth)
                  tuples.
        """
        ID = self.Dialog.lineEdit_ID.text()
        Name = self.Dialog.lineEdit_Name.text()
        Class = self.Dialog.lineEdit_Class.text()
        Sex = self.Dialog.lineEdit_Sex.text()
        Birth = self.Dialog.lineEdit_Birth.text()
        self.users.append((ID, Name, Class, Sex, Birth))
        return self.users
    def changeInfo(self):
        """Write the collected personal info into the students table."""
        db = pymysql.connect(host="localhost", user="root",
                             password="mysql105", database="facerecognition")
        cursor = db.cursor()
        try:
            # REPLACE relies on a unique index / primary key: an existing
            # row with the same key is deleted and re-inserted.
            insert_sql = "replace into students(ID, Name, Class, Sex, Birthday) values(%s, %s, %s, %s, %s)"
            users = self.userInfo()
            cursor.executemany(insert_sql, users)
        except Exception as e:
            print(e)
            print("sql execute failed")
        else:
            print("sql execute success")
            QMessageBox.warning(self, "warning", "录入成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
            db.commit()
        # Always release the connection (the original leaked it when the
        # INSERT raised).
        cursor.close()
        db.close()
if __name__ == '__main__':
    # Launch the Qt application with the attendance main window and the
    # personal-info collection dialog.
    app = QApplication(sys.argv)
    main_win = MainWindow()
    info_win = infoDialog()
    # Clicking the "gathering" button on the main window opens the dialog.
    main_win.ui.bt_gathering.clicked.connect(info_win.handle_click)
    main_win.show()
    sys.exit(app.exec_())
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox, QInputDialog
from PyQt5.QtGui import QImage, QIcon, QPixmap
from PyQt5.QtCore import QTimer, QDateTime, QCoreApplication, QThread
import sys, os
import cv2, imutils
import main
import infoUI
import ChinesePutText
from imutils.video import VideoStream
import numpy as np
import pickle
from scipy.spatial import distance as dist
from imutils import face_utils
from datetime import datetime
import dlib
import pymysql
class BlinksDetectThread(QThread):
trigger = QtCore.pyqtSignal()
def __init__(self):
super(BlinksDetectThread, self).__init__()
self.EYE_AR_THRESH = 0.25
self.EYE_AR_CONSEC_FRAMES = 3
self.COUNTER = 0
self.TOTAL = 0
self.A = 0
self.B = 0
self.C = 0
self.leftEye = 0
self.rightEye = 0
self.leftEAR = 0
self.rightEAR = 0
self.ear = 0
self.BlinksFlag = 1
self.cap3 = cv2.VideoCapture()
def eye_aspect_ratio(self, eye):
self.A = dist.euclidean(eye[1], eye[5])
self.B = dist.euclidean(eye[2], eye[4])
self.C = dist.euclidean(eye[0], eye[3])
ear = (self.A + self.B) / (2.0 * self.C)
return ear
def run(self):
if self.BlinksFlag == 1:
print("[INFO] loading facial landmark predictor...")
shape_predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor_path)
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
self.cap3.open(cv2.CAP_DSHOW)
while self.BlinksFlag == 1:
vs = VideoStream(src=cv2.CAP_DSHOW).start()
frame3 = vs.read()
QApplication.processEvents()
frame3 = imutils.resize(frame3, width=900)
gray = cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
self.leftEye = shape[lStart:lEnd]
self.rightEye = shape[rStart:rEnd]
self.leftEAR = self.eye_aspect_ratio(self.leftEye)
self.rightEAR = self.eye_aspect_ratio(self.rightEye)
self.ear = (self.leftEAR + self.rightEAR) / 2.0
if self.ear < self.EYE_AR_THRESH:
self.COUNTER += 1
else:
if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:
self.TOTAL += 1
self.COUNTER = 0
self.trigger.emit()
if self.TOTAL == 1:
print("活体!眨眼次数为: {}".format(self.TOTAL))
def terminate(self):
self.BlinksFlag = 0
if flag2 == 0:
VideoStream(src=cv2.CAP_DSHOW).stop()
self.ui.bt_openCamera.setText(u'打开相机')
elif self.switch_bt == 1:
self.ui.label_logo.clear()
self.ui.bt_startCheck.setText(u'退出考勤')
detector = "face_detection_model"
embedding_model = "face_detection_model/openface_nn4.small2.v1.t7"
recognizer_path = "output/recognizer.pickle"
le_path = "output/le.pickle"
confidence_default = 0.5
protoPath = os.path.sep.join([detector, "deploy.prototxt"])
modelPath = os.path.sep.join([detector, "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(embedding_model)
recognizer = pickle.loads(open(recognizer_path, "rb").read())
le = pickle.loads(open(le_path, "rb").read())
while (self.cap.isOpened()):
ret, frame = self.cap.read()
QApplication.processEvents()
frame = imutils.resize(frame, width=900)
(h, w) = frame.shape[:2]
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(frame, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
face_names = []
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
flag = self.cap.isOpened()
if flag == False:
self.ui.bt_openCamera.setText(u'打开相机')
elif flag == True:
self.ui.bt_openCamera.setText(u'关闭相机')
if confidence > confidence_default:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
frame = cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
face_names.append(name)
bt_liveness = self.ui.bt_blinks.text()
if bt_liveness == '停止检测':
ChineseText = ChinesePutText.put_chinese_text('microsoft.ttf')
frame = ChineseText.draw_text(frame, (330, 80), ' 请眨眨眼睛 ', 25, (55, 255, 55))
show_video = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.showImage = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
self.ui.label_camera.setPixmap(QPixmap.fromImage(self.showImage))
self.set_name = set(face_names)
self.set_names = tuple(self.set_name)
self.recordNames()
self.ui.label_camera.clear()
def recordNames(self):
if self.set_name.issubset(self.record_name1):
pass
else:
self.different_name1 = self.set_name.difference(self.record_name1)
self.record_name1 = self.set_name.union(self.record_name1)
self.write_data = tuple(self.different_name1)
names_num = len(self.write_data)
self.ui.lcd_2.display(len(self.record_name1))
if names_num > 0:
self.lineTextInfo2 = []
db2 = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor2 = db2.cursor()
import datetime
currentTime2 = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
results2 = self.useIDGetInfo(self.write_data[0])
import datetime
self.ymd = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.ymd2 = datetime.datetime.now().strftime("%H:%M:%S")
compareResult2 = self.compare_time('{}'.format(self.ymd2), '{}'.format(self.checkTime))
if compareResult2 <= 82800:
self.description2 = '迟到'
else:
self.description2 = '正常'
self.lineTextInfo2.append((results2[0], results2[1], results2[2], currentTime2, self.description2))
print(self.lineTextInfo2)
try:
insert_sql2 = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users2 = self.lineTextInfo2
cursor2.executemany(insert_sql2, users2)
except Exception as e:
print(e)
print("SQL execute failed!")
else:
print("SQL execute success!")
QMessageBox.information(self, "Tips", "签到成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
db2.commit()
cursor2.close()
db2.close()
def compare_time(self, time1, time2):
import datetime
s_time = datetime.datetime.strptime(time1, '%H:%M:%S')
e_time = datetime.datetime.strptime(time2, '%H:%M:%S')
delta = s_time - e_time
return delta.seconds
def checkNums(self):
input_Class = self.ui.comboBox.currentText()
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
sql = "select * from studentnums where class = {}".format(input_Class)
if input_Class != '':
try:
cursor.execute(sql)
results = cursor.fetchall()
self.nums = []
for i in results:
self.nums.append(i[1])
except:
print("Error: unable to fetch data")
sql2 = "select * from checkin where class = {}".format(input_Class)
if input_Class != '':
try:
cursor.execute(sql2)
results2 = cursor.fetchall()
self.nums2 = []
for i in results2:
self.nums2.append(i[2])
except:
print("Error: unable to fetch data")
self.ui.lcd_1.display(self.nums[0])
self.ui.lcd_2.display(len(self.nums2))
db.close()
def leaveButton(self):
self.leaveStudents(1)
def supplymentButton(self):
self.leaveStudents(2)
def leaveStudents(self, button):
self.lineTextInfo = []
if self.ui.lineEdit.isModified() or self.ui.lineEdit_2.isModified():
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
currentTime = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if button == 1:
self.description = '请假'
self.lineTextID = self.ui.lineEdit.text()
results = self.useIDGetInfo(self.lineTextID)
elif button == 2:
self.description = '漏签补签'
self.lineTextID = self.ui.lineEdit_2.text()
results = self.useIDGetInfo(self.lineTextID)
self.lineTextInfo.append((results[0], results[1], results[2], currentTime, self.description))
try:
insert_sql = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users = self.lineTextInfo
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "{} 登记成功,请勿重复操作!".format(self.description), QMessageBox.Yes | QMessageBox.No)
db.commit()
cursor.close()
db.close()
else:
QMessageBox.warning(self, "warning", "学号不能为空,请输入后重试!", QMessageBox.Yes | QMessageBox.No)
self.ui.lineEdit.clear()
self.ui.lineEdit_2.clear()
def useIDGetInfo(self, ID):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
sql = "select * from students where ID = {}".format(ID)
if ID != '':
try:
cursor.execute(sql)
results = cursor.fetchall()
self.checkInfo = []
for i in results:
self.checkInfo.append(i[1])
self.checkInfo.append(i[0])
self.checkInfo.append(i[2])
return self.checkInfo
except:
print("Error: unable to fetch data")
def showLateAbsentee(self):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
sql1 = "select name from checkin where Description = '{}'".format('迟到')
sql2 = "select name from students"
try:
cursor.execute(sql1)
results = cursor.fetchall()
self.lateNums = []
for x in results:
self.lateNums.append(x[0])
self.lateNums.sort()
except:
print("Error: unable to fetch latedata")
try:
cursor.execute(sql2)
results2 = cursor.fetchall()
self.allNums = []
for i in results2:
self.allNums.append(i[0])
self.allNums.sort()
print(self.allNums)
except:
print("Error: unable to fetch absenteedata")
db.commit()
cursor.close()
db.close()
self.AbsenteeNums = set(set(self.allNums) - set(self.lateNums))
self.AbsenteeNums = list(self.AbsenteeNums)
self.AbsenteeNums.sort()
rowLate = len(self.lateNums)
rowAbsentee = len(self.AbsenteeNums)
model1 = QtGui.QStandardItemModel(rowLate, 0)
model1.setHorizontalHeaderLabels(['姓名'])
for row in range(rowLate):
item = QtGui.QStandardItem(self.lateNums[row])
model1.setItem(row, 0, item)
View1 = self.ui.tableView_escape
View1.setModel(model1)
model2 = QtGui.QStandardItemModel(rowAbsentee, 0)
model2.setHorizontalHeaderLabels(['姓名'])
for row in range(rowAbsentee):
item = QtGui.QStandardItem(self.AbsenteeNums[row])
model2.setItem(row, 0, item)
View2 = self.ui.tableView_late
View2.setModel(model2)
def trainModel(self):
import GeneratorModel
GeneratorModel.Generator()
GeneratorModel.TrainModel()
print('Model have been trained!')
)
if self.input_ID != '':
try:
cursor.execute(sql)
results = cursor.fetchall()
self.lists = []
for i in results:
self.lists.append(i[0])
self.lists.append(i[1])
self.lists.append(i[2])
self.lists.append(i[3])
self.lists.append(i[4])
except:
print("Error: unable to fetch data")
self.model = QtGui.QStandardItemModel(5, 0)
self.model.setHorizontalHeaderLabels(['值'])
self.model.setVerticalHeaderLabels(['学号', '姓名', '班级', '性别', '生日'])
nums = len(self.lists)
if nums == 0:
QMessageBox.warning(self, "warning", "人脸数据库中无此人信息,请马上录入!", QMessageBox.Yes | QMessageBox.No)
for row in range(nums):
item = QtGui.QStandardItem(self.lists[row])
self.model.setItem(row, 0, item)
self.View = self.Dialog.tableView
self.View.setModel(self.model)
db.close()
def userInfo(self):
ID = self.Dialog.lineEdit_ID.text()
Name = self.Dialog.lineEdit_Name.text()
Class = self.Dialog.lineEdit_Class.text()
Sex = self.Dialog.lineEdit_Sex.text()
Birth = self.Dialog.lineEdit_Birth.text()
self.users.append((ID, Name, Class, Sex, Birth))
return self.users
def changeInfo(self):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
try:
insert_sql = "replace into students(ID, Name, Class, Sex, Birthday) values(%s, %s, %s, %s, %s)"
users = self.userInfo()
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "录入成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
db.commit()
cursor.close()
db.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWindow = MainWindow()
infoWindow = infoDialog()
mainWindow.ui.bt_gathering.clicked.connect(infoWindow.handle_click)
mainWindow.show()
sys.exit(app.exec_()) | true | true |
f721978ae0032f3792c8f2bb1e955820288a7de7 | 33,135 | py | Python | src/urllib3/response.py | imkaka/urllib3 | c96cf403fb4f24d414f40faf4691174e4c54ea0b | [
"MIT"
] | null | null | null | src/urllib3/response.py | imkaka/urllib3 | c96cf403fb4f24d414f40faf4691174e4c54ea0b | [
"MIT"
] | 1 | 2022-01-04T12:19:09.000Z | 2022-01-04T12:19:09.000Z | src/urllib3/response.py | sethmlarson/urllib3 | d4c25791cd5002a5234d882a28040db94ca38595 | [
"MIT"
] | null | null | null | import io
import json as _json
import logging
import zlib
from contextlib import contextmanager
from http.client import HTTPMessage as _HttplibHTTPMessage
from http.client import HTTPResponse as _HttplibHTTPResponse
from socket import timeout as SocketTimeout
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
try:
try:
import brotlicffi as brotli # type: ignore[import]
except ImportError:
import brotli # type: ignore[import]
except ImportError:
brotli = None
from ._collections import HTTPHeaderDict
from .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException
from .exceptions import (
BodyNotHttplibCompatible,
DecodeError,
HTTPError,
IncompleteRead,
InvalidChunkLength,
InvalidHeader,
ProtocolError,
ReadTimeoutError,
ResponseNotChunked,
SSLError,
)
from .util.response import is_fp_closed, is_response_to_head
from .util.retry import Retry
if TYPE_CHECKING:
from typing_extensions import Literal
from .connectionpool import HTTPConnectionPool
log = logging.getLogger(__name__)
class ContentDecoder:
def decompress(self, data: bytes) -> bytes:
raise NotImplementedError()
def flush(self) -> bytes:
raise NotImplementedError()
class DeflateDecoder(ContentDecoder):
def __init__(self) -> None:
self._first_try = True
self._data = b""
self._obj = zlib.decompressobj()
def decompress(self, data: bytes) -> bytes:
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
decompressed = self._obj.decompress(data)
if decompressed:
self._first_try = False
self._data = None # type: ignore[assignment]
return decompressed
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None # type: ignore[assignment]
def flush(self) -> bytes:
return self._obj.flush()
class GzipDecoderState:
FIRST_MEMBER = 0
OTHER_MEMBERS = 1
SWALLOW_DATA = 2
class GzipDecoder(ContentDecoder):
def __init__(self) -> None:
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
self._state = GzipDecoderState.FIRST_MEMBER
def decompress(self, data: bytes) -> bytes:
ret = bytearray()
if self._state == GzipDecoderState.SWALLOW_DATA or not data:
return bytes(ret)
while True:
try:
ret += self._obj.decompress(data)
except zlib.error:
previous_state = self._state
# Ignore data after the first error
self._state = GzipDecoderState.SWALLOW_DATA
if previous_state == GzipDecoderState.OTHER_MEMBERS:
# Allow trailing garbage acceptable in other gzip clients
return bytes(ret)
raise
data = self._obj.unused_data
if not data:
return bytes(ret)
self._state = GzipDecoderState.OTHER_MEMBERS
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def flush(self) -> bytes:
return self._obj.flush()
if brotli is not None:
class BrotliDecoder(ContentDecoder):
# Supports both 'brotlipy' and 'Brotli' packages
# since they share an import name. The top branches
# are for 'brotlipy' and bottom branches for 'Brotli'
def __init__(self) -> None:
self._obj = brotli.Decompressor()
if hasattr(self._obj, "decompress"):
setattr(self, "decompress", self._obj.decompress)
else:
setattr(self, "decompress", self._obj.process)
def flush(self) -> bytes:
if hasattr(self._obj, "flush"):
return self._obj.flush() # type: ignore[no-any-return]
return b""
class MultiDecoder(ContentDecoder):
"""
From RFC7231:
If one or more encodings have been applied to a representation, the
sender that applied the encodings MUST generate a Content-Encoding
header field that lists the content codings in the order in which
they were applied.
"""
def __init__(self, modes: str) -> None:
self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
def flush(self) -> bytes:
return self._decoders[0].flush()
def decompress(self, data: bytes) -> bytes:
for d in reversed(self._decoders):
data = d.decompress(data)
return data
def _get_decoder(mode: str) -> ContentDecoder:
if "," in mode:
return MultiDecoder(mode)
if mode == "gzip":
return GzipDecoder()
if brotli is not None and mode == "br":
return BrotliDecoder()
return DeflateDecoder()
class BaseHTTPResponse(io.IOBase):
CONTENT_DECODERS = ["gzip", "deflate"]
if brotli is not None:
CONTENT_DECODERS += ["br"]
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] = (IOError, zlib.error)
if brotli is not None:
DECODER_ERROR_CLASSES += (brotli.error,)
def __init__(
self,
*,
headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
status: int,
version: int,
reason: Optional[str],
decode_content: bool,
request_url: Optional[str],
retries: Optional[Retry] = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
self.status = status
self.version = version
self.reason = reason
self.decode_content = decode_content
self.request_url: Optional[str]
self.retries = retries
self.chunked = False
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: Optional[ContentDecoder] = None
def get_redirect_location(self) -> Union[Optional[str], "Literal[False]"]:
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get("location")
return False
@property
def data(self) -> bytes:
raise NotImplementedError()
def json(self) -> Any:
"""
Parses the body of the HTTP response as JSON.
To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.
This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.
Read more :ref:`here <json>`.
"""
data = self.data.decode("utf-8")
return _json.loads(data)
@property
def url(self) -> Optional[str]:
raise NotImplementedError()
@property
def closed(self) -> bool:
raise NotImplementedError()
@property
def connection(self) -> Optional[HTTPConnection]:
raise NotImplementedError()
def stream(
self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
) -> Iterator[bytes]:
raise NotImplementedError()
def read(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
cache_content: bool = False,
) -> bytes:
raise NotImplementedError()
def read_chunked(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
) -> Iterator[bytes]:
raise NotImplementedError()
def release_conn(self) -> None:
raise NotImplementedError()
def drain_conn(self) -> None:
raise NotImplementedError()
def close(self) -> None:
raise NotImplementedError()
def _init_decoder(self) -> None:
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if encodings:
self._decoder = _get_decoder(content_encoding)
def _decode(
self, data: bytes, decode_content: Optional[bool], flush_decoder: bool
) -> bytes:
"""
Decode the data passed in and potentially flush the decoder.
"""
if not decode_content:
return data
try:
if self._decoder:
data = self._decoder.decompress(data)
except self.DECODER_ERROR_CLASSES as e:
content_encoding = self.headers.get("content-encoding", "").lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e,
) from e
if flush_decoder:
data += self._flush_decoder()
return data
def _flush_decoder(self) -> bytes:
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
return self._decoder.decompress(b"") + self._decoder.flush()
return b""
# Compatibility methods for `io` module
def readable(self) -> bool:
return True
def readinto(self, b: bytearray) -> int:
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
# Compatibility methods for http.client.HTTPResponse
def getheaders(self) -> List[Tuple[str, str]]:
return list(self.headers.items())
def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.headers.get(name, default)
# Compatibility method for http.cookiejar
def info(self) -> HTTPHeaderDict:
return self.headers
def geturl(self) -> Optional[Union[str, "Literal[False]"]]:
return self.url
class HTTPResponse(BaseHTTPResponse):
"""
HTTP Response container.
Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
def __init__(
self,
body: _TYPE_BODY = "",
headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
status: int = 0,
version: int = 0,
reason: Optional[str] = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: Optional[_HttplibHTTPResponse] = None,
pool: Optional["HTTPConnectionPool"] = None,
connection: Optional[HTTPConnection] = None,
msg: Optional[_HttplibHTTPMessage] = None,
retries: Optional[Retry] = None,
enforce_content_length: bool = False,
request_method: Optional[str] = None,
request_url: Optional[str] = None,
auto_close: bool = True,
) -> None:
super().__init__(
headers=headers,
status=status,
version=version,
reason=reason,
decode_content=decode_content,
request_url=request_url,
retries=retries,
)
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: Optional[_HttplibHTTPResponse] = None
self._original_response = original_response
self._fp_bytes_read = 0
self.msg = msg
if self.retries is not None and self.retries.history:
self._request_url = self.retries.history[-1].redirect_location
else:
self._request_url = request_url
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
# Are we using the chunked-style of transfer encoding?
self.chunk_left: Optional[int] = None
# Determine length of response
self.length_remaining = self._init_length(request_method)
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def release_conn(self) -> None:
if not self._pool or not self._connection:
return None
self._pool._put_conn(self._connection)
self._connection = None
    def drain_conn(self) -> None:
        """
        Read and discard any remaining HTTP response data in the response connection.
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
            self.read()
        except (HTTPError, OSError, BaseSSLError, HTTPException):
            # Best effort: a failed drain only prevents connection reuse.
            pass
@property
def data(self) -> bytes:
# For backwards-compat with earlier urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return self.read(cache_content=True)
return None # type: ignore[return-value]
    @property
    def connection(self) -> Optional[HTTPConnection]:
        # The connection kept open while the body is unread, if any.
        return self._connection
    def isclosed(self) -> bool:
        # http.client compatibility: True when the body file object is closed.
        return is_fp_closed(self._fp)
    def tell(self) -> int:
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``urllib3.response.HTTPResponse.read``
        if bytes are encoded on the wire (e.g, compressed).
        """
        # Counts raw, pre-decode bytes; updated inside read().
        return self._fp_bytes_read
    def _init_length(self, request_method: Optional[str]) -> Optional[int]:
        """
        Set initial length value for Response content if available.

        Returns the expected body length in bytes, or None when it cannot be
        determined (chunked transfer, missing/invalid Content-Length header).
        """
        length: Optional[int]
        content_length: Optional[str] = self.headers.get("content-length")
        if content_length is not None:
            if self.chunked:
                # This Response will fail with an IncompleteRead if it can't be
                # received as chunked. This method falls back to attempt reading
                # the response before raising an exception.
                log.warning(
                    "Received response with both Content-Length and "
                    "Transfer-Encoding set. This is expressly forbidden "
                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                    "attempting to process response as Transfer-Encoding: "
                    "chunked."
                )
                return None
            try:
                # RFC 7230 section 3.3.2 specifies multiple content lengths can
                # be sent in a single Content-Length header
                # (e.g. Content-Length: 42, 42). This line ensures the values
                # are all valid ints and that as long as the `set` length is 1,
                # all values are the same. Otherwise, the header is invalid.
                lengths = {int(val) for val in content_length.split(",")}
                if len(lengths) > 1:
                    raise InvalidHeader(
                        "Content-Length contained multiple "
                        "unmatching values (%s)" % content_length
                    )
                length = lengths.pop()
            except ValueError:
                length = None
            else:
                if length < 0:
                    # Negative lengths are nonsensical; treat as unknown.
                    length = None
        else:  # if content_length is None
            length = None
        # Convert status to int for comparison
        # In some cases, httplib returns a status of "_UNKNOWN"
        try:
            status = int(self.status)
        except ValueError:
            status = 0
        # Check for responses that shouldn't include a body
        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
            length = 0
        return length
    @contextmanager
    def _error_catcher(self) -> Generator[None, None, None]:
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.

        On exit, release the connection back to the pool.
        """
        # clean_exit stays False until the wrapped block completes without
        # raising; the finally-branch uses it to decide whether to discard
        # the connection.
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout as e:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if "read operation timed out" not in str(e):
                    # SSL errors related to framing/MAC get wrapped and reraised here
                    raise SSLError(e) from e
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]
            except (HTTPException, OSError) as e:
                # This includes IncompleteRead.
                raise ProtocolError(f"Connection broken: {e!r}", e) from e
            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()
                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()
            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def read(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
        cache_content: bool = False,
    ) -> bytes:
        """
        Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return None  # type: ignore[return-value]
        flush_decoder = False
        # If the underlying file object is already closed, we read as EOF.
        fp_closed = getattr(self._fp, "closed", False)
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read() if not fp_closed else b""
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt) if not fp_closed else b""
                if (
                    amt != 0 and not data
                ):  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if (
                        self.enforce_content_length
                        and self.length_remaining is not None
                        and self.length_remaining != 0
                    ):
                        # This is an edge case that httplib failed to cover due
                        # to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        return data
def stream(
self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
) -> Generator[bytes, None, None]:
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked and self.supports_chunked_reads():
yield from self.read_chunked(amt, decode_content=decode_content)
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(
ResponseCls: Type["HTTPResponse"], r: _HttplibHTTPResponse, **response_kw: Any
) -> "HTTPResponse":
"""
Given an :class:`http.client.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
headers = HTTPHeaderDict(headers.items()) # type: ignore[assignment]
resp = ResponseCls(
body=r,
headers=headers, # type: ignore[arg-type]
status=r.status,
version=r.version,
reason=r.reason,
original_response=r,
**response_kw,
)
return resp
    # Overrides from io.IOBase
    def close(self) -> None:
        """Close the wrapped file object and, if held, the connection."""
        if not self.closed and self._fp:
            self._fp.close()
        if self._connection:
            self._connection.close()
        if not self.auto_close:
            # auto_close disabled: an explicit close() must still mark the
            # io.IOBase machinery as closed.
            io.IOBase.close(self)
@property
def closed(self) -> bool:
if not self.auto_close:
return io.IOBase.closed.__get__(self) # type: ignore[no-any-return, attr-defined]
elif self._fp is None:
return True
elif hasattr(self._fp, "isclosed"):
return self._fp.isclosed()
elif hasattr(self._fp, "closed"):
return self._fp.closed
else:
return True
def fileno(self) -> int:
if self._fp is None:
raise OSError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise OSError(
"The file-like object this HTTPResponse is wrapped "
"around has no file descriptor"
)
def flush(self) -> None:
if (
self._fp is not None
and hasattr(self._fp, "flush")
and not getattr(self._fp, "closed", False)
):
return self._fp.flush()
    def supports_chunked_reads(self) -> bool:
        """
        Checks if the underlying file-like object looks like a
        :class:`http.client.HTTPResponse` object. We do this by testing for
        the fp attribute. If it is present we assume it returns raw chunks as
        processed by read_chunked().
        """
        # Only http.client-style bodies expose the raw socket file as `.fp`.
        return hasattr(self._fp, "fp")
def _update_chunk_length(self) -> None:
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return None
line = self._fp.fp.readline() # type: ignore[union-attr]
line = line.split(b";", 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise InvalidChunkLength(self, line) from None
    def _handle_chunk(self, amt: Optional[int]) -> bytes:
        """Read up to ``amt`` bytes (the whole chunk when ``amt`` is None)
        from the current chunk, consuming the trailing CRLF whenever the
        chunk is fully drained."""
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            returned_chunk = chunk
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif self.chunk_left is not None and amt < self.chunk_left:
            # Partial read: chunk stays open, no CRLF to consume yet.
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk  # type: ignore[no-any-return]
    def read_chunked(
        self, amt: Optional[int] = None, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing."
            )
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be http.client.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks."
            )
        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(self._original_response):
                self._original_response.close()
                return None
            # If a response is already read and closed
            # then return immediately.
            if self._fp.fp is None:  # type: ignore[union-attr]
                return None
            # Main loop: one iteration per chunk; a zero-length chunk
            # terminates the body.
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded
            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded
            # Chunk content ends with \r\n: discard it.
            while self._fp is not None:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b"\r\n":
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
    @property
    def url(self) -> Optional[str]:
        """
        Returns the URL that was the source of this response.

        If the request that generated this response redirected, this method
        will return the final redirect location.
        """
        return self._request_url
    @url.setter
    def url(self, url: str) -> None:
        # Allow callers to override the recorded source URL explicitly.
        self._request_url = url
    def __iter__(self) -> Iterator[bytes]:
        """Iterate over the decoded body, yielding one ``\\n``-terminated
        line at a time (the final line may lack the terminator)."""
        buffer: List[bytes] = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunks = chunk.split(b"\n")
                # First piece completes whatever partial line was buffered...
                yield b"".join(buffer) + chunks[0] + b"\n"
                # ...then every full line contained in this chunk...
                for x in chunks[1:-1]:
                    yield x + b"\n"
                # ...and any trailing partial line is kept for later.
                if chunks[-1]:
                    buffer = [chunks[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            yield b"".join(buffer)
| 34.952532 | 110 | 0.582979 | import io
import json as _json
import logging
import zlib
from contextlib import contextmanager
from http.client import HTTPMessage as _HttplibHTTPMessage
from http.client import HTTPResponse as _HttplibHTTPResponse
from socket import timeout as SocketTimeout
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
try:
try:
import brotlicffi as brotli
except ImportError:
import brotli
except ImportError:
brotli = None
from ._collections import HTTPHeaderDict
from .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException
from .exceptions import (
BodyNotHttplibCompatible,
DecodeError,
HTTPError,
IncompleteRead,
InvalidChunkLength,
InvalidHeader,
ProtocolError,
ReadTimeoutError,
ResponseNotChunked,
SSLError,
)
from .util.response import is_fp_closed, is_response_to_head
from .util.retry import Retry
if TYPE_CHECKING:
from typing_extensions import Literal
from .connectionpool import HTTPConnectionPool
log = logging.getLogger(__name__)
class ContentDecoder:
    """Abstract interface for streaming content-encoding decoders."""

    def decompress(self, data: bytes) -> bytes:
        """Feed raw bytes in; return whatever decoded bytes are available."""
        raise NotImplementedError()

    def flush(self) -> bytes:
        """Finalize the stream and return any buffered decoded bytes."""
        raise NotImplementedError()
class DeflateDecoder(ContentDecoder):
    """Decoder for 'deflate' content that tolerates both zlib-wrapped and
    raw deflate streams (some servers send the latter)."""

    def __init__(self) -> None:
        self._first_try = True
        # Input is buffered until we know which deflate flavour this is.
        self._data = b""
        self._obj = zlib.decompressobj()

    def decompress(self, data: bytes) -> bytes:
        if not data:
            return data
        if not self._first_try:
            return self._obj.decompress(data)
        # First attempt: assume a zlib-wrapped stream.
        self._data += data
        try:
            decompressed = self._obj.decompress(data)
            if decompressed:
                self._first_try = False
                self._data = None
            return decompressed
        except zlib.error:
            # Fall back to a raw deflate stream and replay buffered input.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None

    def flush(self) -> bytes:
        return self._obj.flush()
class GzipDecoderState:
    """State markers for GzipDecoder: a gzip stream may contain several
    concatenated members, and trailing garbage is tolerated after the
    first complete member."""

    FIRST_MEMBER = 0
    OTHER_MEMBERS = 1
    SWALLOW_DATA = 2
class GzipDecoder(ContentDecoder):
    """Decoder for 'gzip' content, including multi-member gzip streams."""

    def __init__(self) -> None:
        # wbits = 16 + MAX_WBITS selects zlib's gzip container format.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER

    def decompress(self, data: bytes) -> bytes:
        ret = bytearray()
        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
            return bytes(ret)
        while True:
            try:
                ret += self._obj.decompress(data)
            except zlib.error:
                previous_state = self._state
                # Ignore data after the first error: trailing garbage is
                # tolerated once at least one member decoded cleanly.
                self._state = GzipDecoderState.SWALLOW_DATA
                if previous_state == GzipDecoderState.OTHER_MEMBERS:
                    return bytes(ret)
                raise
            data = self._obj.unused_data
            if not data:
                return bytes(ret)
            # Leftover input means the stream has another gzip member.
            self._state = GzipDecoderState.OTHER_MEMBERS
            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def flush(self) -> bytes:
        return self._obj.flush()
if brotli is not None:

    class BrotliDecoder(ContentDecoder):
        # Supports both 'brotlicffi' and 'brotli' packages, which expose
        # different method names on the decompressor object: `decompress`
        # vs `process`.
        def __init__(self) -> None:
            self._obj = brotli.Decompressor()
            if hasattr(self._obj, "decompress"):
                setattr(self, "decompress", self._obj.decompress)
            else:
                setattr(self, "decompress", self._obj.process)

        def flush(self) -> bytes:
            # Not every brotli binding exposes flush(); there is nothing
            # buffered in that case.
            if hasattr(self._obj, "flush"):
                return self._obj.flush()
            return b""
class MultiDecoder(ContentDecoder):
    """
    From RFC7231:
        If one or more encodings have been applied to a representation, the
        sender that applied the encodings MUST generate a Content-Encoding
        header field that lists the content codings in the order in which
        they were applied.
    """

    def __init__(self, modes: str) -> None:
        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]

    def flush(self) -> bytes:
        # Only the first decoder (last one applied by the sender) can still
        # hold buffered data.
        return self._decoders[0].flush()

    def decompress(self, data: bytes) -> bytes:
        # Undo the encodings in reverse order of application.
        for d in reversed(self._decoders):
            data = d.decompress(data)
        return data
def _get_decoder(mode: str) -> ContentDecoder:
    """Return the ContentDecoder for a content-encoding token, or for a
    comma-separated list of tokens (handled by MultiDecoder)."""
    if "," in mode:
        return MultiDecoder(mode)
    if mode == "gzip":
        return GzipDecoder()
    if brotli is not None and mode == "br":
        return BrotliDecoder()
    # Anything else is treated as deflate (the historical default).
    return DeflateDecoder()
class BaseHTTPResponse(io.IOBase):
    """
    Abstract base for urllib3 responses.

    Holds header/status bookkeeping, content decoding, and io /
    http.client / http.cookiejar compatibility shims. Concrete subclasses
    supply the actual body access (``read``/``stream``/``read_chunked``)
    and connection management.
    """

    CONTENT_DECODERS = ["gzip", "deflate"]
    if brotli is not None:
        CONTENT_DECODERS += ["br"]
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] = (IOError, zlib.error)
    if brotli is not None:
        DECODER_ERROR_CLASSES += (brotli.error,)

    def __init__(
        self,
        *,
        headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
        status: int,
        version: int,
        reason: Optional[str],
        decode_content: bool,
        request_url: Optional[str],
        retries: Optional[Retry] = None,
    ) -> None:
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.decode_content = decode_content
        self.request_url: Optional[str]
        self.retries = retries
        self.chunked = False
        # The response is chunked if any transfer-encoding token says so.
        tr_enc = self.headers.get("transfer-encoding", "").lower()
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True
        # The content decoder is created lazily by _init_decoder().
        self._decoder: Optional[ContentDecoder] = None

    def get_redirect_location(self) -> Union[Optional[str], "Literal[False]"]:
        """Location header value when this is a redirect status, else False."""
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get("location")
        return False

    @property
    def data(self) -> bytes:
        raise NotImplementedError()

    def json(self) -> Any:
        """Decode the body as UTF-8 and parse it as JSON."""
        data = self.data.decode("utf-8")
        return _json.loads(data)

    @property
    def url(self) -> Optional[str]:
        raise NotImplementedError()

    @property
    def closed(self) -> bool:
        raise NotImplementedError()

    @property
    def connection(self) -> Optional[HTTPConnection]:
        raise NotImplementedError()

    def stream(
        self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
    ) -> Iterator[bytes]:
        raise NotImplementedError()

    def read(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
        cache_content: bool = False,
    ) -> bytes:
        raise NotImplementedError()

    def read_chunked(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
    ) -> Iterator[bytes]:
        raise NotImplementedError()

    def release_conn(self) -> None:
        raise NotImplementedError()

    def drain_conn(self) -> None:
        raise NotImplementedError()

    def close(self) -> None:
        raise NotImplementedError()

    def _init_decoder(self) -> None:
        """Create the content decoder matching the content-encoding header."""
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get("content-encoding", "").lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
            elif "," in content_encoding:
                # Multiple encodings: only build a decoder if every token we
                # recognize is supported.
                encodings = [
                    e.strip()
                    for e in content_encoding.split(",")
                    if e.strip() in self.CONTENT_DECODERS
                ]
                if encodings:
                    self._decoder = _get_decoder(content_encoding)

    def _decode(
        self, data: bytes, decode_content: Optional[bool], flush_decoder: bool
    ) -> bytes:
        """Decode *data* with the active decoder, optionally flushing it.

        :raises DecodeError: when the decoder rejects the payload.
        """
        if not decode_content:
            return data
        try:
            if self._decoder:
                data = self._decoder.decompress(data)
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding,
                e,
            ) from e
        if flush_decoder:
            data += self._flush_decoder()
        return data

    def _flush_decoder(self) -> bytes:
        """Flush the decoder and return any remaining buffered bytes."""
        if self._decoder:
            return self._decoder.decompress(b"") + self._decoder.flush()
        return b""

    # Compatibility methods for `io` module
    def readable(self) -> bool:
        return True

    def readinto(self, b: bytearray) -> int:
        # Fill b with up to len(b) bytes; return the count written (0 = EOF).
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[: len(temp)] = temp
            return len(temp)

    # Compatibility methods for http.client.HTTPResponse
    def getheaders(self) -> List[Tuple[str, str]]:
        return list(self.headers.items())

    def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:
        return self.headers.get(name, default)

    # Compatibility method for http.cookiejar
    def info(self) -> HTTPHeaderDict:
        return self.headers

    def geturl(self) -> Optional[Union[str, "Literal[False]"]]:
        return self.url
class HTTPResponse(BaseHTTPResponse):
    """
    HTTP Response container.

    Similar to :class:`http.client.HTTPResponse` but the body is loaded and
    decoded on demand (or preloaded with ``preload_content=True``), and the
    underlying connection is tracked so it can be released back to its pool.
    """

    def __init__(
        self,
        body: _TYPE_BODY = "",
        headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
        status: int = 0,
        version: int = 0,
        reason: Optional[str] = None,
        preload_content: bool = True,
        decode_content: bool = True,
        original_response: Optional[_HttplibHTTPResponse] = None,
        pool: Optional["HTTPConnectionPool"] = None,
        connection: Optional[HTTPConnection] = None,
        msg: Optional[_HttplibHTTPMessage] = None,
        retries: Optional[Retry] = None,
        enforce_content_length: bool = False,
        request_method: Optional[str] = None,
        request_url: Optional[str] = None,
        auto_close: bool = True,
    ) -> None:
        super().__init__(
            headers=headers,
            status=status,
            version=version,
            reason=reason,
            decode_content=decode_content,
            request_url=request_url,
            retries=retries,
        )
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close
        self._body = None
        self._fp: Optional[_HttplibHTTPResponse] = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        self.msg = msg
        # Prefer the final redirect location (if any) as the request URL.
        if self.retries is not None and self.retries.history:
            self._request_url = self.retries.history[-1].redirect_location
        else:
            self._request_url = request_url
        # A str/bytes body is stored directly; file-like bodies become _fp.
        if body and isinstance(body, (str, bytes)):
            self._body = body
        self._pool = pool
        self._connection = connection
        if hasattr(body, "read"):
            self._fp = body  # type: ignore[assignment]
        # Are we using the chunked-style of transfer encoding?
        self.chunk_left: Optional[int] = None
        # Determine length of response
        self.length_remaining = self._init_length(request_method)
        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def release_conn(self) -> None:
        """Release the held connection back to its pool, if any."""
        if not self._pool or not self._connection:
            return None
        self._pool._put_conn(self._connection)
        self._connection = None

    def drain_conn(self) -> None:
        """Read and discard any remaining body so the connection can be
        released back to the pool."""
        try:
            self.read()
        except (HTTPError, OSError, BaseSSLError, HTTPException):
            # Best effort: a failed drain only prevents connection reuse.
            pass

    @property
    def data(self) -> bytes:
        """The full response body, read from the wire and cached on first use."""
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body  # type: ignore[return-value]
        if self._fp:
            return self.read(cache_content=True)
        return None  # type: ignore[return-value]

    @property
    def connection(self) -> Optional[HTTPConnection]:
        # The connection kept open while the body is unread, if any.
        return self._connection

    def isclosed(self) -> bool:
        """True when the wrapped file-like body is closed."""
        return is_fp_closed(self._fp)

    def tell(self) -> int:
        """Number of raw bytes pulled over the wire so far (pre-decoding)."""
        return self._fp_bytes_read

    def _init_length(self, request_method: Optional[str]) -> Optional[int]:
        """Compute the expected body length from the headers, or None when
        it cannot be determined (chunked, missing/invalid Content-Length)."""
        length: Optional[int]
        content_length: Optional[str] = self.headers.get("content-length")
        if content_length is not None:
            if self.chunked:
                # Transfer-Encoding wins over Content-Length per RFC 7230
                # sec 3.3.2; process as chunked and ignore the length.
                log.warning(
                    "Received response with both Content-Length and "
                    "Transfer-Encoding set. This is expressly forbidden "
                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                    "attempting to process response as Transfer-Encoding: "
                    "chunked."
                )
                return None
            try:
                # A single Content-Length header may carry several
                # comma-separated values; they are only valid if identical.
                lengths = {int(val) for val in content_length.split(",")}
                if len(lengths) > 1:
                    raise InvalidHeader(
                        "Content-Length contained multiple "
                        "unmatching values (%s)" % content_length
                    )
                length = lengths.pop()
            except ValueError:
                length = None
            else:
                if length < 0:
                    length = None
        else:
            length = None
        # httplib can report a non-numeric status such as "_UNKNOWN".
        try:
            status = int(self.status)
        except ValueError:
            status = 0
        # 1xx/204/304 responses and HEAD requests never carry a body.
        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
            length = 0
        return length

    @contextmanager
    def _error_catcher(self) -> Generator[None, None, None]:
        """Re-raise low-level read errors as urllib3 exceptions; on a dirty
        exit close the connection, on full consumption release it."""
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout as e:
                # FIXME: ideally the url would be included in this error, but
                # there is no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]
            except BaseSSLError as e:
                # SSL framing/MAC errors get wrapped; read timeouts mapped below.
                if "read operation timed out" not in str(e):
                    raise SSLError(e) from e
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]
            except (HTTPException, OSError) as e:
                # This includes IncompleteRead.
                raise ProtocolError(f"Connection broken: {e!r}", e) from e
            clean_exit = True
        finally:
            # A dirty exit means the response/connection state is unknown:
            # throw the connection away rather than reuse it.
            if not clean_exit:
                # The response may not be closed, but we're not going to use
                # it anymore, so close it now.
                if self._original_response:
                    self._original_response.close()
                if self._connection:
                    self._connection.close()
            # Fully consumed response: the connection can be returned
            # back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    def read(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
        cache_content: bool = False,
    ) -> bytes:
        """Read (part of) the body, decoding per ``decode_content`` and
        optionally caching the result for the ``.data`` property."""
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return None  # type: ignore[return-value]
        flush_decoder = False
        fp_closed = getattr(self._fp, "closed", False)
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read() if not fp_closed else b""
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt) if not fp_closed else b""
                if (
                    amt != 0 and not data
                ):  # Platform-specific: buggy versions of Python.
                    # EOF: redundantly close the connection
                    # (see http://bugs.python.org/issue16298).
                    self._fp.close()
                    flush_decoder = True
                    if (
                        self.enforce_content_length
                        and self.length_remaining is not None
                        and self.length_remaining != 0
                    ):
                        # Raise IncompleteRead during streaming so short
                        # bodies with a wrong Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        return data

    def stream(
        self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """Generator wrapper around :meth:`read`; yields non-empty chunks of
        up to ``amt`` bytes until the connection is exhausted."""
        if self.chunked and self.supports_chunked_reads():
            yield from self.read_chunked(amt, decode_content=decode_content)
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data

    @classmethod
    def from_httplib(
        ResponseCls: Type["HTTPResponse"], r: _HttplibHTTPResponse, **response_kw: Any
    ) -> "HTTPResponse":
        """Build an :class:`HTTPResponse` wrapping the
        :class:`http.client.HTTPResponse` instance ``r``; remaining keyword
        arguments are forwarded to the constructor."""
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            headers = HTTPHeaderDict(headers.items())  # type: ignore[assignment]
        resp = ResponseCls(
            body=r,
            headers=headers,  # type: ignore[arg-type]
            status=r.status,
            version=r.version,
            reason=r.reason,
            original_response=r,
            **response_kw,
        )
        return resp

    # Overrides from io.IOBase
    def close(self) -> None:
        """Close the wrapped file object and, if held, the connection."""
        if not self.closed and self._fp:
            self._fp.close()
        if self._connection:
            self._connection.close()
        if not self.auto_close:
            # auto_close disabled: still mark the io.IOBase side as closed.
            io.IOBase.close(self)

    @property
    def closed(self) -> bool:
        """Whether this response (or its underlying file object) is closed."""
        if not self.auto_close:
            return io.IOBase.closed.__get__(self)  # type: ignore[no-any-return, attr-defined]
        elif self._fp is None:
            return True
        elif hasattr(self._fp, "isclosed"):
            return self._fp.isclosed()
        elif hasattr(self._fp, "closed"):
            return self._fp.closed
        else:
            return True

    def fileno(self) -> int:
        """File descriptor of the wrapped object; raises OSError otherwise."""
        if self._fp is None:
            raise OSError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise OSError(
                "The file-like object this HTTPResponse is wrapped "
                "around has no file descriptor"
            )

    def flush(self) -> None:
        """Flush the wrapped file object when it is open and flushable."""
        if (
            self._fp is not None
            and hasattr(self._fp, "flush")
            and not getattr(self._fp, "closed", False)
        ):
            return self._fp.flush()

    def supports_chunked_reads(self) -> bool:
        """True when the wrapped object exposes ``fp`` (http.client style),
        meaning raw chunks can be read via read_chunked()."""
        return hasattr(self._fp, "fp")

    def _update_chunk_length(self) -> None:
        """Parse the next chunk-size line into ``chunk_left``; no-op while a
        chunk is in progress, aborts the response on a malformed line."""
        if self.chunk_left is not None:
            return None
        line = self._fp.fp.readline()  # type: ignore[union-attr]
        # Drop any chunk extensions after ';' before parsing the hex size.
        line = line.split(b";", 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise InvalidChunkLength(self, line) from None

    def _handle_chunk(self, amt: Optional[int]) -> bytes:
        """Read up to ``amt`` bytes (whole chunk when None) from the current
        chunk, consuming the trailing CRLF whenever the chunk is drained."""
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            returned_chunk = chunk
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif self.chunk_left is not None and amt < self.chunk_left:
            # Partial read: chunk stays open, no CRLF to consume yet.
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk  # type: ignore[no-any-return]

    def read_chunked(
        self, amt: Optional[int] = None, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """Like :meth:`read` but for Transfer-Encoding: chunked bodies,
        yielding decoded chunks of up to ``amt`` bytes."""
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing."
            )
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be http.client.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks."
            )
        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(self._original_response):
                self._original_response.close()
                return None
            # Already read and closed: nothing left to yield.
            if self._fp.fp is None:
                return None
            # One iteration per chunk; a zero-length chunk ends the body.
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded
            if decode_content:
                # Defensive flush (only ever needed on some platforms).
                decoded = self._flush_decoder()
                if decoded:
                    yield decoded
            # Discard the trailer lines up to the terminating CRLF.
            while self._fp is not None:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b"\r\n":
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()

    @property
    def url(self) -> Optional[str]:
        """The URL that produced this response (final redirect location if
        the request was redirected)."""
        return self._request_url

    @url.setter
    def url(self, url: str) -> None:
        # Allow callers to override the recorded source URL explicitly.
        self._request_url = url

    def __iter__(self) -> Iterator[bytes]:
        """Iterate over the decoded body one ``\\n``-terminated line at a
        time (the final line may lack the terminator)."""
        buffer: List[bytes] = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunks = chunk.split(b"\n")
                # First piece completes the buffered partial line...
                yield b"".join(buffer) + chunks[0] + b"\n"
                # ...then every full line inside this chunk...
                for x in chunks[1:-1]:
                    yield x + b"\n"
                # ...and any trailing partial line is buffered for later.
                if chunks[-1]:
                    buffer = [chunks[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            yield b"".join(buffer)
| true | true |
f7219c3cecb551332ea0053120d9d5497f55a298 | 4,400 | py | Python | pongcontroller.py | afghanimah/Pong | ad799bae29ed5f5cff2f2f70a7e42a5f02df7336 | [
"MIT"
] | null | null | null | pongcontroller.py | afghanimah/Pong | ad799bae29ed5f5cff2f2f70a7e42a5f02df7336 | [
"MIT"
] | 5 | 2020-02-29T01:15:24.000Z | 2020-02-29T21:55:03.000Z | pongcontroller.py | afghanimah/Pong | ad799bae29ed5f5cff2f2f70a7e42a5f02df7336 | [
"MIT"
] | null | null | null | from pyglet.window import key
import random
from pygletplus.controller import Controller
class PongController(Controller):
    """Input and physics controller for the Pong scene.

    Moves the player paddle from keyboard input, steers the CPU paddle
    toward the ball, advances the ball each frame, and resolves wall and
    paddle collisions plus scoring.
    """

    def __init__(self, scene):
        super().__init__(scene)
        # Cache frequently-used scene members for brevity.
        self.keys = scene.keys
        self.player = scene.player
        self.cpu = scene.cpu
        self.ball = scene.ball
        self.close = scene.close

    def update(self, dt):
        """Advance the simulation by ``dt`` seconds unless the scene is paused."""
        if self.scene.paused:
            return
        self.player.update(dt)
        self.cpu.follow(self.ball.sprite.x, self.ball.sprite.y)
        self.cpu.update(dt)
        self.ball.update(dt)
        self.window_bound()
        self.bounce_ball()

    def on_key_press(self, symbol, _):
        """Handle quit/pause keys and start player paddle movement."""
        if symbol == key.ESCAPE:
            self.close()
        if symbol == key.SPACE:
            self.scene.paused = not self.scene.paused
        # Velocity is added on press and subtracted on release, so holding
        # both arrows cancels out cleanly.
        if symbol == key.UP:
            self.player.vy += self.player.speed
        elif symbol == key.DOWN:
            self.player.vy -= self.player.speed

    def on_key_release(self, symbol, _):
        """Undo the velocity contribution of the matching key press."""
        if symbol == key.UP:
            self.player.vy -= self.player.speed
        elif symbol == key.DOWN:
            self.player.vy += self.player.speed

    @staticmethod
    def bound_x(e, mini, maxi):
        """Clamp entity ``e`` so its sprite stays horizontally in [mini, maxi]."""
        mini += e.sprite.width / 2
        maxi -= e.sprite.width / 2
        if e.sprite.x < mini:
            e.sprite.x = mini
        elif e.sprite.x > maxi:
            e.sprite.x = maxi

    @staticmethod
    def bound_y(e, mini, maxi):
        """Clamp entity ``e`` so its sprite stays vertically in [mini, maxi]."""
        mini += e.sprite.height / 2
        maxi -= e.sprite.height / 2
        if e.sprite.y < mini:
            e.sprite.y = mini
        elif e.sprite.y > maxi:
            e.sprite.y = maxi

    def window_bound(self):
        """Keep both paddles fully inside the window."""
        self.bound_x(self.player, 0, self.scene.width)
        self.bound_y(self.player, 0, self.scene.height)
        self.bound_x(self.cpu, 0, self.scene.width)
        self.bound_y(self.cpu, 0, self.scene.height)

    def bounce_ball(self):
        """Resolve ball/wall and ball/paddle collisions and score points."""
        x_min = self.scene.ball_img.anchor_x
        x_max = self.scene.width - self.scene.ball_img.anchor_x
        y_min = self.scene.ball_img.anchor_y
        y_max = self.scene.height - self.scene.ball_img.anchor_y
        # Bounce off the top and bottom walls of the window.
        if self.ball.sprite.y < y_min:
            self.ball.sprite.y = y_min
            self.ball.vy *= -1
            self.scene.bounce_sound.play()
        elif self.ball.sprite.y > y_max:
            self.ball.sprite.y = y_max
            self.ball.vy *= -1
            self.scene.bounce_sound.play()
        # Score a point when the ball reaches the left or right wall, then
        # re-serve the ball toward the side that conceded.
        if self.ball.sprite.x < x_min:
            self.ball.sprite.x = self.scene.width / 2 - 200
            self.ball.sprite.y = self.scene.height / 2
            self.ball.vx = random.randint(300, 350)
            self.ball.vy = random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
            self.scene.cpu_score += 1
            self.scene.cpu_label.text = str(self.scene.cpu_score)
            self.scene.point_sound.play()
        elif self.ball.sprite.x > x_max:
            self.ball.sprite.x = self.scene.width / 2 + 200
            self.ball.sprite.y = self.scene.height / 2
            self.ball.vx = -random.randint(300, 350)
            self.ball.vy = -random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
            self.scene.player_score += 1
            self.scene.player_label.text = str(self.scene.player_score)
            self.scene.point_sound.play()
        # Paddle collisions: push the ball flush with the paddle face and
        # reverse its horizontal velocity.
        if (self.player.sprite.x < self.ball.sprite.x < self.player.sprite.x + self.scene.paddle_img.anchor_x and
                self.player.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
                self.player.sprite.y + self.scene.paddle_img.anchor_y):
            self.ball.sprite.x = self.player.sprite.x + self.scene.paddle_img.anchor_x
            self.ball.vx *= -1
            self.scene.bounce_sound.play()
        elif (self.cpu.sprite.x > self.ball.sprite.x > self.cpu.sprite.x - self.scene.paddle_img.anchor_x and
                self.cpu.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
                self.cpu.sprite.y + self.scene.paddle_img.anchor_y):
            # BUGFIX: mirror the player branch by using the paddle anchor,
            # not the ball anchor.  Resetting to ``cpu.x - ball_img.anchor_x``
            # left the ball inside the CPU collision zone, so vx flipped on
            # consecutive frames and the ball could jitter against the paddle.
            self.ball.sprite.x = self.cpu.sprite.x - self.scene.paddle_img.anchor_x
            self.ball.vx *= -1
            self.scene.bounce_sound.play()
| 39.285714 | 113 | 0.588636 | from pyglet.window import key
import random
from pygletplus.controller import Controller
class PongController(Controller):
    """Controller wiring keyboard input, CPU AI and ball physics for Pong."""

    def __init__(self, scene):
        super().__init__(scene)
        # Shortcuts to the scene objects this controller drives.
        self.keys = scene.keys
        self.player = scene.player
        self.cpu = scene.cpu
        self.ball = scene.ball
        self.close = scene.close

    def update(self, dt):
        """Step paddles and ball by ``dt`` seconds; no-op while paused."""
        if self.scene.paused:
            return
        self.player.update(dt)
        self.cpu.follow(self.ball.sprite.x, self.ball.sprite.y)
        self.cpu.update(dt)
        self.ball.update(dt)
        self.window_bound()
        self.bounce_ball()

    def on_key_press(self, symbol, _):
        """React to quit/pause keys and begin moving the player paddle."""
        if symbol == key.ESCAPE:
            self.close()
        if symbol == key.SPACE:
            self.scene.paused = not self.scene.paused
        # Press adds velocity, release (below) removes it again.
        if symbol == key.UP:
            self.player.vy += self.player.speed
        elif symbol == key.DOWN:
            self.player.vy -= self.player.speed

    def on_key_release(self, symbol, _):
        """Remove the velocity added by the corresponding key press."""
        if symbol == key.UP:
            self.player.vy -= self.player.speed
        elif symbol == key.DOWN:
            self.player.vy += self.player.speed

    @staticmethod
    def bound_x(e, mini, maxi):
        """Clamp ``e``'s sprite horizontally into [mini, maxi]."""
        mini += e.sprite.width / 2
        maxi -= e.sprite.width / 2
        if e.sprite.x < mini:
            e.sprite.x = mini
        elif e.sprite.x > maxi:
            e.sprite.x = maxi

    @staticmethod
    def bound_y(e, mini, maxi):
        """Clamp ``e``'s sprite vertically into [mini, maxi]."""
        mini += e.sprite.height / 2
        maxi -= e.sprite.height / 2
        if e.sprite.y < mini:
            e.sprite.y = mini
        elif e.sprite.y > maxi:
            e.sprite.y = maxi

    def window_bound(self):
        """Keep both paddles inside the window bounds."""
        self.bound_x(self.player, 0, self.scene.width)
        self.bound_y(self.player, 0, self.scene.height)
        self.bound_x(self.cpu, 0, self.scene.width)
        self.bound_y(self.cpu, 0, self.scene.height)

    def bounce_ball(self):
        """Handle wall bounces, scoring/re-serving, and paddle rebounds."""
        x_min = self.scene.ball_img.anchor_x
        x_max = self.scene.width - self.scene.ball_img.anchor_x
        y_min = self.scene.ball_img.anchor_y
        y_max = self.scene.height - self.scene.ball_img.anchor_y
        # Top/bottom wall bounce.
        if self.ball.sprite.y < y_min:
            self.ball.sprite.y = y_min
            self.ball.vy *= -1
            self.scene.bounce_sound.play()
        elif self.ball.sprite.y > y_max:
            self.ball.sprite.y = y_max
            self.ball.vy *= -1
            self.scene.bounce_sound.play()
        # Left/right wall: award a point and re-serve.
        if self.ball.sprite.x < x_min:
            self.ball.sprite.x = self.scene.width / 2 - 200
            self.ball.sprite.y = self.scene.height / 2
            self.ball.vx = random.randint(300, 350)
            self.ball.vy = random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
            self.scene.cpu_score += 1
            self.scene.cpu_label.text = str(self.scene.cpu_score)
            self.scene.point_sound.play()
        elif self.ball.sprite.x > x_max:
            self.ball.sprite.x = self.scene.width / 2 + 200
            self.ball.sprite.y = self.scene.height / 2
            self.ball.vx = -random.randint(300, 350)
            self.ball.vy = -random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
            self.scene.player_score += 1
            self.scene.player_label.text = str(self.scene.player_score)
            self.scene.point_sound.play()
        # Paddle rebounds: snap the ball to the paddle face, flip vx.
        if (self.player.sprite.x < self.ball.sprite.x < self.player.sprite.x + self.scene.paddle_img.anchor_x and
                self.player.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
                self.player.sprite.y + self.scene.paddle_img.anchor_y):
            self.ball.sprite.x = self.player.sprite.x + self.scene.paddle_img.anchor_x
            self.ball.vx *= -1
            self.scene.bounce_sound.play()
        elif (self.cpu.sprite.x > self.ball.sprite.x > self.cpu.sprite.x - self.scene.paddle_img.anchor_x and
                self.cpu.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
                self.cpu.sprite.y + self.scene.paddle_img.anchor_y):
            # BUGFIX: use paddle_img.anchor_x (as the player branch does) so
            # the ball is placed exactly on the collision-zone boundary;
            # ball_img.anchor_x left it inside the zone, re-triggering the
            # collision every frame and trapping the ball.
            self.ball.sprite.x = self.cpu.sprite.x - self.scene.paddle_img.anchor_x
            self.ball.vx *= -1
            self.scene.bounce_sound.play()
| true | true |
f7219cec0e09ba36054e4f7cf2c47cdd0bc5592a | 397 | py | Python | greaterwms/wsgi.py | chinxianjun2016/GreaterWMS | aacd0e15e0114f103eb57002e93670c008cce63b | [
"Apache-2.0"
] | 1 | 2021-02-17T14:04:29.000Z | 2021-02-17T14:04:29.000Z | greaterwms/wsgi.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | null | null | null | greaterwms/wsgi.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | null | null | null | """
WSGI config for greaterwms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application;
# setdefault keeps any value already set in the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) import.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (setdefault keeps an existing value),
# then expose the module-level WSGI callable that servers import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
application = get_wsgi_application()
| true | true |
f7219e0b94e3c48818431af7be65a1ddd8fdbbac | 2,454 | py | Python | tests/settings.py | hugorodgerbrown/django-onfido | 9e534f4725b61d982ffb2cd6a018ed1fffc353b6 | [
"MIT"
] | 6 | 2016-11-14T13:31:46.000Z | 2022-02-17T20:39:42.000Z | tests/settings.py | hugorodgerbrown/django-onfido | 9e534f4725b61d982ffb2cd6a018ed1fffc353b6 | [
"MIT"
] | 23 | 2016-10-21T11:18:34.000Z | 2021-12-08T17:33:01.000Z | tests/settings.py | hugorodgerbrown/django-onfido | 9e534f4725b61d982ffb2cd6a018ed1fffc353b6 | [
"MIT"
] | 7 | 2016-11-14T18:19:09.000Z | 2021-10-01T11:34:48.000Z | from os import getenv, path
from django.core.exceptions import ImproperlyConfigured
DEBUG = True
TEMPLATE_DEBUG = True
USE_TZ = True
USE_L10N = True
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "onfido.db"}}
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"onfido",
"tests.test_app",
)
MIDDLEWARE = [
# default django middleware
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
PROJECT_DIR = path.abspath(path.join(path.dirname(__file__)))
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [path.join(PROJECT_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.messages.context_processors.messages",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.request",
]
},
}
]
AUTH_USER_MODEL = "test_app.User"
STATIC_URL = "/static/"
SECRET_KEY = "onfido" # noqa: S105
ALLOWED_HOSTS = [
"127.0.0.1",
".ngrok.io",
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"simple": {"format": "%(levelname)s %(message)s"}},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
}
},
"loggers": {
"": {"handlers": ["console"], "propagate": True, "level": "DEBUG"},
# 'django': {
# 'handlers': ['console'],
# 'propagate': True,
# 'level': 'WARNING',
# },
"onfido": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
},
},
}
ROOT_URLCONF = "tests.urls"
if not DEBUG:
raise ImproperlyConfigured("This settings file can only be used with DEBUG=True")
# False by default, but if True this will run the integration tests in test_integration
TEST_INTEGRATION = bool(getenv("ONFIDO_TEST_INTEGRATION", False))
| 26.106383 | 87 | 0.609617 | from os import getenv, path
from django.core.exceptions import ImproperlyConfigured
DEBUG = True
TEMPLATE_DEBUG = True
USE_TZ = True
USE_L10N = True
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "onfido.db"}}
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"onfido",
"tests.test_app",
)
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
PROJECT_DIR = path.abspath(path.join(path.dirname(__file__)))
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [path.join(PROJECT_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.messages.context_processors.messages",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.request",
]
},
}
]
AUTH_USER_MODEL = "test_app.User"
STATIC_URL = "/static/"
SECRET_KEY = "onfido"
ALLOWED_HOSTS = [
"127.0.0.1",
".ngrok.io",
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"simple": {"format": "%(levelname)s %(message)s"}},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
}
},
"loggers": {
"": {"handlers": ["console"], "propagate": True, "level": "DEBUG"},
"onfido": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
},
},
}
ROOT_URLCONF = "tests.urls"
if not DEBUG:
raise ImproperlyConfigured("This settings file can only be used with DEBUG=True")
TEST_INTEGRATION = bool(getenv("ONFIDO_TEST_INTEGRATION", False))
| true | true |
f7219f3f9ed21cb04bfe7f510681ceaf677c32c5 | 4,351 | py | Python | src/dataload/contrib/docm/__init__.py | IsmailM/myvariant.info | 5af6ad68fc2c1eb539ab9e683a34bafd51ed5cb1 | [
"Apache-2.0"
] | null | null | null | src/dataload/contrib/docm/__init__.py | IsmailM/myvariant.info | 5af6ad68fc2c1eb539ab9e683a34bafd51ed5cb1 | [
"Apache-2.0"
] | null | null | null | src/dataload/contrib/docm/__init__.py | IsmailM/myvariant.info | 5af6ad68fc2c1eb539ab9e683a34bafd51ed5cb1 | [
"Apache-2.0"
] | 1 | 2018-11-17T09:16:59.000Z | 2018-11-17T09:16:59.000Z | __METADATA__ = {
"src_name": 'DOCM',
"src_url": 'http://docm.genome.wustl.edu/',
"version": None,
"field": "docm"
}
def load_data():
    '''DOCM data are pre-loaded in our db, so this source defines no loader.'''
    raise NotImplementedError
def get_mapping():
    """Return the Elasticsearch field mapping for the ``docm`` object.

    String fields come in three recurring shapes: plain analyzed strings,
    lowercase-analyzed strings (case-insensitive matching), and unindexed
    strings kept only for display; shared fragments below avoid repetition.
    """
    lowercase = {"type": "string", "analyzer": "string_lowercase"}
    unindexed = {"type": "string", "index": "no"}
    properties = {
        "domain": {"type": "string"},
        "all_domains": {"type": "string"},
        "ref": dict(lowercase),
        "alt": dict(lowercase),
        # primary holds just 0 or 1
        "primary": {"type": "byte"},
        "transcript_species": dict(unindexed),
        "ensembl_gene_id": dict(lowercase),
        "transcript_version": dict(unindexed),
        "transcript_source": dict(unindexed),
        "source": dict(lowercase),
        "pubmed_id": {"type": "string", "index": "not_analyzed"},
        "type": dict(lowercase),
        "doid": dict(lowercase),
        "c_position": dict(lowercase),
        "hg19": {
            "properties": {
                "start": {"type": "long"},
                "end": {"type": "long"},
            }
        },
        "strand": {"type": "byte", "index": "no"},
        "deletion_substructures": dict(unindexed),
        "genename_source": dict(unindexed),
        "default_gene_name": dict(lowercase),
        "aa_change": dict(lowercase),
        "url": dict(unindexed),
        "transcript_status": dict(lowercase),
        "trv_type": dict(lowercase),
        "disease": dict(lowercase),
        "transcript_name": dict(lowercase),
        # chrom values are numeric but stored as strings
        "chrom": dict(lowercase),
        "transcript_error": dict(unindexed),
        "genename": {
            "type": "string",
            "analyzer": "string_lowercase",
            "include_in_all": True,
        },
        "ucsc_cons": {"type": "double"},
    }
    return {"docm": {"properties": properties}}
| 30.858156 | 79 | 0.290738 | __METADATA__ = {
"src_name": 'DOCM',
"src_url": 'http://docm.genome.wustl.edu/',
"version": None,
"field": "docm"
}
def load_data():
    """DOCM data are pre-loaded in the database; no loader is implemented."""
    raise NotImplementedError
def get_mapping():
    """Build the Elasticsearch mapping for DOCM variant documents."""
    def _str(**extra):
        # Shorthand for a string field with optional extra settings.
        return {"type": "string", **extra}
    lower = _str(analyzer="string_lowercase")
    hidden = _str(index="no")
    props = {
        "domain": _str(),
        "all_domains": _str(),
        "ref": {**lower},
        "alt": {**lower},
        "primary": {"type": "byte"},  # 0 or 1
        "transcript_species": {**hidden},
        "ensembl_gene_id": {**lower},
        "transcript_version": {**hidden},
        "transcript_source": {**hidden},
        "source": {**lower},
        "pubmed_id": _str(index="not_analyzed"),
        "type": {**lower},
        "doid": {**lower},
        "c_position": {**lower},
        "hg19": {"properties": {"start": {"type": "long"},
                                "end": {"type": "long"}}},
        "strand": {"type": "byte", "index": "no"},
        "deletion_substructures": {**hidden},
        "genename_source": {**hidden},
        "default_gene_name": {**lower},
        "aa_change": {**lower},
        "url": {**hidden},
        "transcript_status": {**lower},
        "trv_type": {**lower},
        "disease": {**lower},
        "transcript_name": {**lower},
        "chrom": {**lower},  # numeric values stored as strings
        "transcript_error": {**hidden},
        "genename": _str(analyzer="string_lowercase", include_in_all=True),
        "ucsc_cons": {"type": "double"},
    }
    return {"docm": {"properties": props}}
| true | true |
f721a0175b21509fd3c11cdf9bddad74e4242372 | 12,176 | py | Python | yolov3_tf2/models.py | AVsolutionsai/YOLOv3_custom | d974e8305310cef31621b20128ba29c3b09ce2af | [
"MIT",
"OLDAP-2.2.1",
"Unlicense"
] | null | null | null | yolov3_tf2/models.py | AVsolutionsai/YOLOv3_custom | d974e8305310cef31621b20128ba29c3b09ce2af | [
"MIT",
"OLDAP-2.2.1",
"Unlicense"
] | null | null | null | yolov3_tf2/models.py | AVsolutionsai/YOLOv3_custom | d974e8305310cef31621b20128ba29c3b09ce2af | [
"MIT",
"OLDAP-2.2.1",
"Unlicense"
] | null | null | null | from absl import flags
from absl.flags import FLAGS
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
MaxPool2D,
UpSampling2D,
ZeroPadding2D,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
binary_crossentropy,
sparse_categorical_crossentropy
)
from .batch_norm import BatchNormalization
from .utils import broadcast_iou
# Module-level NMS settings; these are what yolo_nms() below actually reads.
yolo_max_boxes = 100
yolo_iou_threshold = 0.1
yolo_score_threshold = 0.1
# customize your model through the following parameters
# NOTE(review): these absl flags default to 0.5 while the module variables
# above are 0.1, and nothing in this file reads FLAGS.* -- confirm which of
# the two is meant to be the source of truth.
flags.DEFINE_integer('yolo_max_boxes', 100, 'maximum number of detections at one time')
flags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')
flags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')
# Anchor box sizes (w, h) divided by the 416-px input resolution.
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                         (59, 119), (116, 90), (156, 198), (373, 326)],
                        np.float32) / 416
# Anchor indices used by each of the three output scales.
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
yolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),
                              (81, 82), (135, 169), (344, 319)],
                             np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    """Conv2D + optional BatchNorm/LeakyReLU block used throughout the net.

    For strided convs Darknet pads only top/left so the grid halves exactly;
    the conv bias is omitted whenever batch norm follows.
    """
    if strides == 1:
        padding = 'same'
    else:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding
        padding = 'valid'
    x = Conv2D(filters=filters, kernel_size=size,
               strides=strides, padding=padding,
               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
def DarknetResidual(x, filters):
    """Residual unit: 1x1 bottleneck then 3x3 conv, added back to the input."""
    shortcut = x
    out = DarknetConv(x, filters // 2, 1)
    out = DarknetConv(out, filters, 3)
    return Add()([shortcut, out])
def DarknetBlock(x, filters, blocks):
    """Downsample once (stride-2 3x3 conv), then stack ``blocks`` residuals."""
    out = DarknetConv(x, filters, 3, strides=2)
    for _repeat in range(blocks):
        out = DarknetResidual(out, filters)
    return out
def Darknet(name=None):
    """Darknet-53 backbone returning two skip features plus the final map."""
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 32, 3)
    x = DarknetBlock(x, 64, 1)
    x = DarknetBlock(x, 128, 2)  # skip connection
    x = x_36 = DarknetBlock(x, 256, 8)  # skip connection
    x = x_61 = DarknetBlock(x, 512, 8)
    x = DarknetBlock(x, 1024, 4)
    return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def DarknetTiny(name=None):
    """Tiny-YOLO backbone: alternating conv and max-pool, one skip output."""
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 16, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 32, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 64, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 128, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = x_8 = DarknetConv(x, 256, 3)  # skip connection
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 512, 3)
    x = MaxPool2D(2, 1, 'same')(x)  # stride 1: keeps spatial size
    x = DarknetConv(x, 1024, 3)
    return tf.keras.Model(inputs, (x_8, x), name=name)
def YoloConv(filters, name=None):
    """Detection-head conv stage; input is a tensor or a (tensor, skip) pair."""
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # concat with skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
        # Alternating 1x1 / 3x3 bottleneck stack.
        x = DarknetConv(x, filters, 1)
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, filters, 1)
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloConvTiny(filters, name=None):
    """Tiny variant of YoloConv: single 1x1 conv, optional skip concat."""
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # concat with skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
            x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
    """Per-scale output head; reshapes to (..., anchors, classes + 5)."""
    def yolo_output(x_in):
        x = inputs = Input(x_in.shape[1:])
        x = DarknetConv(x, filters * 2, 3)
        # Raw predictions: no batch norm on the final projection.
        x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
        x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
                                            anchors, classes + 5)))(x)
        return tf.keras.Model(inputs, x, name=name)(x_in)
    return yolo_output
def yolo_boxes(pred, anchors, classes):
    """Decode raw head output into absolute (x1,y1,x2,y2) boxes in [0, 1].

    Returns (bbox, objectness, class_probs, pred_box); pred_box keeps the
    pre-decoding xywh values that the loss needs.
    """
    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
    grid_size = tf.shape(pred)[1]
    box_xy, box_wh, objectness, class_probs = tf.split(
        pred, (2, 2, 1, classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)
    objectness = tf.sigmoid(objectness)
    class_probs = tf.sigmoid(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss
    # !!! grid[x][y] == (y, x)
    grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
    # Cell offset + sigmoid output -> center in grid units, then normalise.
    box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
        tf.cast(grid_size, tf.float32)
    box_wh = tf.exp(box_wh) * anchors
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
    return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
    """Flatten per-scale detections and run combined non-max suppression.

    Uses the module-level yolo_* constants (not the absl FLAGS) for the
    box-count and threshold settings.
    """
    # boxes, conf, type
    b, c, t = [], [], []
    for o in outputs:
        # Flatten each scale's grid into one detection axis.
        b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
        t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
    bbox = tf.concat(b, axis=1)
    confidence = tf.concat(c, axis=1)
    class_probs = tf.concat(t, axis=1)
    # Per-class score = objectness * class probability.
    scores = confidence * class_probs
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
        scores=tf.reshape(
            scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
        max_output_size_per_class=yolo_max_boxes,
        max_total_size=yolo_max_boxes,
        iou_threshold=yolo_iou_threshold,
        score_threshold=yolo_score_threshold
    )
    return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
           masks=yolo_anchor_masks, classes=80, training=False):
    """Build the full YOLOv3 model; raw heads if training, NMS boxes if not.

    NOTE(review): enabling GPU memory growth inside a model constructor is a
    global side effect -- consider moving it to application startup.
    """
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    x = inputs = Input([size, size, channels], name='input')
    x_36, x_61, x = Darknet(name='yolo_darknet')(x)
    # Three detection scales, each fusing an upsampled skip feature.
    x = YoloConv(512, name='yolo_conv_0')(x)
    output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConv(256, name='yolo_conv_1')((x, x_61))
    output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
    x = YoloConv(128, name='yolo_conv_2')((x, x_36))
    output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
    if training:
        return Model(inputs, (output_0, output_1, output_2), name='yolov3')
    # Inference graph: decode boxes per scale, then apply NMS.
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
                     name='yolo_boxes_2')(output_2)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
    return Model(inputs, outputs, name='yolov3')
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
               masks=yolo_tiny_anchor_masks, classes=80, training=False):
    """Build Tiny-YOLOv3 (two scales); raw heads if training, NMS boxes if not."""
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    x = inputs = Input([size, size, channels], name='input')
    x_8, x = DarknetTiny(name='yolo_darknet')(x)
    x = YoloConvTiny(256, name='yolo_conv_0')(x)
    output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
    output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
    if training:
        return Model(inputs, (output_0, output_1), name='yolov3')
    # Inference graph: decode per-scale boxes, then NMS.
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
    return Model(inputs, outputs, name='yolov3_tiny')
def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
    """Return a Keras-compatible loss for one YOLO output scale."""
    def yolo_loss(y_true, y_pred):
        """Sum of xy, wh, objectness and class losses per batch element."""
        # 1. transform all pred outputs
        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
        pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
            y_pred, anchors, classes)
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]
        # 2. transform all true outputs
        # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
        true_box, true_obj, true_class_idx = tf.split(
            y_true, (4, 1, 1), axis=-1)
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]
        # give higher weights to small boxes
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        # 3. inverting the pred box equations
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
            tf.cast(grid, tf.float32)
        true_wh = tf.math.log(true_wh / anchors)
        # Zero-size boxes produce -inf from the log; neutralise them.
        true_wh = tf.where(tf.math.is_inf(true_wh),
                           tf.zeros_like(true_wh), true_wh)
        # 4. calculate all masks
        obj_mask = tf.squeeze(true_obj, -1)
        # ignore false positive when iou is over threshold
        best_iou = tf.map_fn(
            lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(
                x[1], tf.cast(x[2], tf.bool))), axis=-1),
            (pred_box, true_box, obj_mask),
            tf.float32)
        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
        # 5. calculate all losses
        xy_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        wh_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = obj_mask * obj_loss + \
            (1 - obj_mask) * ignore_mask * obj_loss
        # TODO: use binary_crossentropy instead
        class_loss = obj_mask * sparse_categorical_crossentropy(
            true_class_idx, pred_class)
        # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return xy_loss + wh_loss + obj_loss + class_loss
    return yolo_loss
| 37.235474 | 87 | 0.604221 | from absl import flags
from absl.flags import FLAGS
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
MaxPool2D,
UpSampling2D,
ZeroPadding2D,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
binary_crossentropy,
sparse_categorical_crossentropy
)
from .batch_norm import BatchNormalization
from .utils import broadcast_iou
# Module-level NMS settings read by yolo_nms() below.
yolo_max_boxes = 100
yolo_iou_threshold = 0.1
yolo_score_threshold = 0.1
# NOTE(review): these absl flags default to 0.5 while the module variables
# above are 0.1, and FLAGS.* is never read in this file -- confirm which is
# the intended source of truth.
flags.DEFINE_integer('yolo_max_boxes', 100, 'maximum number of detections at one time')
flags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')
flags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')
# Anchor box sizes (w, h) divided by the 416-px input resolution.
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                         (59, 119), (116, 90), (156, 198), (373, 326)],
                        np.float32) / 416
# Anchor indices used by each output scale.
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
yolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),
                              (81, 82), (135, 169), (344, 319)],
                             np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    """Conv2D + optional BatchNorm/LeakyReLU block used throughout the net."""
    if strides == 1:
        padding = 'same'
    else:
        # Darknet pads only top/left before a strided conv so the output
        # grid halves exactly.
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        padding = 'valid'
    x = Conv2D(filters=filters, kernel_size=size,
               strides=strides, padding=padding,
               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
def DarknetResidual(x, filters):
    """Residual unit: 1x1 bottleneck then 3x3 conv, added back to the input."""
    prev = x
    x = DarknetConv(x, filters // 2, 1)
    x = DarknetConv(x, filters, 3)
    x = Add()([prev, x])
    return x
def DarknetBlock(x, filters, blocks):
    """Downsample once (stride-2 3x3 conv), then stack ``blocks`` residuals."""
    x = DarknetConv(x, filters, 3, strides=2)
    for _ in range(blocks):
        x = DarknetResidual(x, filters)
    return x
def Darknet(name=None):
    """Darknet-53 backbone returning two skip features plus the final map."""
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 32, 3)
    x = DarknetBlock(x, 64, 1)
    x = DarknetBlock(x, 128, 2)
    x = x_36 = DarknetBlock(x, 256, 8)  # skip feature for the third scale
    x = x_61 = DarknetBlock(x, 512, 8)  # skip feature for the second scale
    x = DarknetBlock(x, 1024, 4)
    return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def DarknetTiny(name=None):
    """Tiny-YOLO backbone: alternating conv and max-pool, one skip output."""
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 16, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 32, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 64, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 128, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = x_8 = DarknetConv(x, 256, 3)  # skip feature for the second scale
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 512, 3)
    x = MaxPool2D(2, 1, 'same')(x)  # stride 1: keeps spatial size
    x = DarknetConv(x, 1024, 3)
    return tf.keras.Model(inputs, (x_8, x), name=name)
def YoloConv(filters, name=None):
    """Detection-head conv stage; input is a tensor or a (tensor, skip) pair."""
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # Upsample and concatenate with the backbone skip feature.
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
        # Alternating 1x1 / 3x3 bottleneck stack.
        x = DarknetConv(x, filters, 1)
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, filters, 1)
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloConvTiny(filters, name=None):
    """Tiny variant of YoloConv: single 1x1 conv, optional skip concat."""
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # Upsample and concatenate with the backbone skip feature.
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
            x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
    """Per-scale output head; reshapes to (..., anchors, classes + 5)."""
    def yolo_output(x_in):
        x = inputs = Input(x_in.shape[1:])
        x = DarknetConv(x, filters * 2, 3)
        # Raw predictions: no batch norm on the final projection.
        x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
        x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
                                            anchors, classes + 5)))(x)
        return tf.keras.Model(inputs, x, name=name)(x_in)
    return yolo_output
def yolo_boxes(pred, anchors, classes):
    """Decode one scale of raw head output into absolute boxes.

    `pred` is the YoloOutput tensor of shape
    (batch, grid, grid, anchors, (x, y, w, h, obj, ...class logits)).
    Returns (bbox, objectness, class_probs, pred_box) where `bbox` holds
    (x1, y1, x2, y2) in 0-1 image coordinates and `pred_box` keeps the
    pre-offset (sigmoid xy, raw wh) values for use by the loss.
    """
    grid_size = tf.shape(pred)[1]
    box_xy, box_wh, objectness, class_probs = tf.split(
        pred, (2, 2, 1, classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)
    objectness = tf.sigmoid(objectness)
    class_probs = tf.sigmoid(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # kept for loss calculation
    # Per-cell offsets added to the sigmoid xy, then normalized by grid size.
    grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
    box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
        tf.cast(grid_size, tf.float32)
    box_wh = tf.exp(box_wh) * anchors
    # Center/size -> corner coordinates.
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
    return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
    """Flatten per-scale decoded outputs and run combined NMS.

    `outputs` is a sequence of (bbox, objectness, class_probs) tuples, one
    per detection scale. NOTE(review): relies on module-level values
    yolo_max_boxes / yolo_iou_threshold / yolo_score_threshold that are
    not defined in this excerpt (presumably config flags) — verify they
    are in scope at import time.
    """
    # b, c, t accumulate boxes, confidences, class probs flattened per scale.
    b, c, t = [], [], []
    for o in outputs:
        b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
        t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
    bbox = tf.concat(b, axis=1)
    confidence = tf.concat(c, axis=1)
    class_probs = tf.concat(t, axis=1)
    scores = confidence * class_probs
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
        scores=tf.reshape(
            scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
        max_output_size_per_class=yolo_max_boxes,
        max_total_size=yolo_max_boxes,
        iou_threshold=yolo_iou_threshold,
        score_threshold=yolo_score_threshold
    )
    return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
           masks=yolo_anchor_masks, classes=80, training=False):
    """Build the full three-scale YOLOv3 model.

    With `training=True` the model outputs the raw per-scale head tensors
    (for the loss); otherwise outputs are decoded via yolo_boxes and
    NMS-filtered by yolo_nms.
    """
    # Side effect: enables memory growth on the first visible GPU.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    x = inputs = Input([size, size, channels], name='input')
    x_36, x_61, x = Darknet(name='yolo_darknet')(x)
    x = YoloConv(512, name='yolo_conv_0')(x)
    output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConv(256, name='yolo_conv_1')((x, x_61))
    output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
    x = YoloConv(128, name='yolo_conv_2')((x, x_36))
    output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
    if training:
        return Model(inputs, (output_0, output_1, output_2), name='yolov3')
    # Inference path: decode each scale, then combine via NMS.
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
                     name='yolo_boxes_2')(output_2)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
    return Model(inputs, outputs, name='yolov3')
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
               masks=yolo_tiny_anchor_masks, classes=80, training=False):
    """Build the two-scale tiny-YOLOv3 model.

    Same contract as YoloV3: raw head tensors when `training=True`,
    otherwise NMS-filtered detections.
    """
    # Side effect: enables memory growth on the first visible GPU.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    x = inputs = Input([size, size, channels], name='input')
    x_8, x = DarknetTiny(name='yolo_darknet')(x)
    x = YoloConvTiny(256, name='yolo_conv_0')(x)
    output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
    output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
    if training:
        return Model(inputs, (output_0, output_1), name='yolov3')
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
    return Model(inputs, outputs, name='yolov3_tiny')
def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
    """Return a Keras-compatible loss function for one YOLO output scale.

    The inner function expects `y_pred` to be the raw head tensor
    (decoded here via yolo_boxes) and `y_true` to carry
    (x1, y1, x2, y2, obj, class_idx) per anchor cell — assumed from the
    yolo_boxes/tf.split usage below; confirm against the target encoder.
    """
    def yolo_loss(y_true, y_pred):
        # 1. Decode predictions; pred_xywh keeps pre-offset values.
        pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
            y_pred, anchors, classes)
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]
        # 2. Transform targets into the same (cell-offset, log-wh) space.
        true_box, true_obj, true_class_idx = tf.split(
            y_true, (4, 1, 1), axis=-1)
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]
        # Smaller boxes get a larger weight on the coordinate losses.
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
            tf.cast(grid, tf.float32)
        true_wh = tf.math.log(true_wh / anchors)
        # log(0) -> -inf for empty cells; zero those out.
        true_wh = tf.where(tf.math.is_inf(true_wh),
                           tf.zeros_like(true_wh), true_wh)
        # 3. Ignore predictions that overlap some true box above the threshold.
        obj_mask = tf.squeeze(true_obj, -1)
        best_iou = tf.map_fn(
            lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(
                x[1], tf.cast(x[2], tf.bool))), axis=-1),
            (pred_box, true_box, obj_mask),
            tf.float32)
        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
        # 4. Per-cell losses, masked so only responsible anchors contribute.
        xy_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        wh_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = obj_mask * obj_loss + \
            (1 - obj_mask) * ignore_mask * obj_loss
        class_loss = obj_mask * sparse_categorical_crossentropy(
            true_class_idx, pred_class)
        # 5. Sum over grid cells and anchors -> one scalar per image.
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return xy_loss + wh_loss + obj_loss + class_loss
    return yolo_loss
| true | true |
f721a01f25bf915b93bced32999e9d5635c07fda | 5,196 | py | Python | data_steward/cdr_cleaner/cleaning_rules/null_person_birthdate.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 16 | 2017-06-30T20:05:05.000Z | 2022-03-08T21:03:19.000Z | data_steward/cdr_cleaner/cleaning_rules/null_person_birthdate.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 342 | 2017-06-23T21:37:40.000Z | 2022-03-30T16:44:16.000Z | data_steward/cdr_cleaner/cleaning_rules/null_person_birthdate.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 33 | 2017-07-01T00:12:20.000Z | 2022-01-26T18:06:53.000Z | """
Null Person Table Birth Date Fields
In the person table, the fields month_of_birth, day_of_birth, and birth_datetime should be nulled.
The year_of_birth field should remain unchanged.
Original Issue: DC-1356
"""
# Python imports
import logging
# Project imports
import constants.bq_utils as bq_consts
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, PERSON
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
NULL_DATE_QUERY = JINJA_ENV.from_string("""
UPDATE `{{project_id}}.{{dataset_id}}.{{person_table}}`
SET
birth_datetime = NULL,
month_of_birth = NULL,
day_of_birth = NULL
WHERE TRUE
""")
class NullPersonBirthdate(BaseCleaningRule):
    """Null person-table birth-date fields (DC-1356).

    month_of_birth, day_of_birth, and birth_datetime are set to NULL in
    the person table; year_of_birth is intentionally left unchanged.
    """

    def __init__(self, project_id, dataset_id, sandbox_dataset_id):
        """
        Initialize the class with proper information.

        Set the issue numbers, description and affected datasets. As other
        tickets may affect this SQL, append them to the list of Jira Issues.
        DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
        """
        desc = 'Set Patient Birthdate Fields to NULL'
        super().__init__(issue_numbers=['DC1356'],
                         description=desc,
                         affected_datasets=[cdr_consts.CONTROLLED_TIER_DEID],
                         # Pass a list: affected_tables names the tables this
                         # rule touches (was a bare string 'person').
                         affected_tables=[PERSON],
                         project_id=project_id,
                         dataset_id=dataset_id,
                         sandbox_dataset_id=sandbox_dataset_id)

    def setup_rule(self, client, *args, **keyword_args):
        """Load required resources prior to executing cleaning rule queries.

        No static tables or other resources are needed for this rule.
        """
        pass

    def get_query_specs(self, *args, **keyword_args):
        """Return the list of query dictionaries to run, in execution order.

        :returns: a single-element list holding the rendered UPDATE query.
        """
        update_query = {
            cdr_consts.QUERY:
                NULL_DATE_QUERY.render(project_id=self.project_id,
                                       dataset_id=self.dataset_id,
                                       person_table=PERSON)
        }
        return [update_query]

    def setup_validation(self, client, *args, **keyword_args):
        """Run required steps for validation setup (not implemented yet)."""
        raise NotImplementedError("Please fix me.")

    def validate_rule(self, client, *args, **keyword_args):
        """Validate the applied update (not implemented yet).

        Raises RuntimeError if the validation fails once implemented.
        """
        raise NotImplementedError("Please fix me.")

    def get_sandbox_tablenames(self):
        # Interface completeness only: this rule updates in place and does
        # not actually write sandboxed rows.
        return [self.sandbox_table_for(PERSON)]
if __name__ == '__main__':
    import cdr_cleaner.args_parser as parser
    import cdr_cleaner.clean_cdr_engine as clean_engine
    ARGS = parser.parse_args()
    pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
    if ARGS.list_queries:
        # Dry run: log the rendered queries without executing anything.
        clean_engine.add_console_logging()
        query_list = clean_engine.get_query_list(ARGS.project_id,
                                                 ARGS.dataset_id,
                                                 ARGS.sandbox_dataset_id,
                                                 [(NullPersonBirthdate,)])
        for query in query_list:
            LOGGER.info(query)
    else:
        # Execute the cleaning rule against the target dataset.
        clean_engine.add_console_logging(ARGS.console_log)
        clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
                                   ARGS.sandbox_dataset_id,
                                   [(NullPersonBirthdate,)])
| 37.381295 | 105 | 0.659738 |
import logging
import constants.bq_utils as bq_consts
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, PERSON
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
NULL_DATE_QUERY = JINJA_ENV.from_string("""
UPDATE `{{project_id}}.{{dataset_id}}.{{person_table}}`
SET
birth_datetime = NULL,
month_of_birth = NULL,
day_of_birth = NULL
WHERE TRUE
""")
class NullPersonBirthdate(BaseCleaningRule):
    """Null month_of_birth, day_of_birth, birth_datetime in person (DC-1356).

    year_of_birth is intentionally left unchanged.
    """
    def __init__(self, project_id, dataset_id, sandbox_dataset_id):
        desc = 'Set Patient Birthdate Fields to NULL'
        # NOTE(review): `affected_tables` receives a bare string here while
        # the sibling arguments are lists — confirm the base class accepts
        # a string, or whether [PERSON] was intended.
        super().__init__(issue_numbers=['DC1356'],
                         description=desc,
                         affected_datasets=[cdr_consts.CONTROLLED_TIER_DEID],
                         affected_tables=PERSON,
                         project_id=project_id,
                         dataset_id=dataset_id,
                         sandbox_dataset_id=sandbox_dataset_id)
    def setup_rule(self, client, *args, **keyword_args):
        """No setup resources are required for this rule."""
        pass
    def get_query_specs(self, *args, **keyword_args):
        """Return the single rendered UPDATE query as a query-spec list."""
        update_query = dict()
        update_query[cdr_consts.QUERY] = NULL_DATE_QUERY.render(
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            person_table=PERSON)
        return [update_query]
    def setup_validation(self, client, *args, **keyword_args):
        """Validation setup is not implemented yet."""
        raise NotImplementedError("Please fix me.")
    def validate_rule(self, client, *args, **keyword_args):
        """Post-run validation is not implemented yet."""
        raise NotImplementedError("Please fix me.")
    def get_sandbox_tablenames(self):
        # Interface completeness only: this rule updates in place and
        # writes nothing to the sandbox dataset.
        return [self.sandbox_table_for(PERSON)]
if __name__ == '__main__':
    import cdr_cleaner.args_parser as parser
    import cdr_cleaner.clean_cdr_engine as clean_engine
    ARGS = parser.parse_args()
    pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
    if ARGS.list_queries:
        # Dry run: only log the rendered queries.
        clean_engine.add_console_logging()
        query_list = clean_engine.get_query_list(ARGS.project_id,
                                                 ARGS.dataset_id,
                                                 ARGS.sandbox_dataset_id,
                                                 [(NullPersonBirthdate,)])
        for query in query_list:
            LOGGER.info(query)
    else:
        # Execute the cleaning rule.
        clean_engine.add_console_logging(ARGS.console_log)
        clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
                                   ARGS.sandbox_dataset_id,
                                   [(NullPersonBirthdate,)])
| true | true |
f721a117918ff0bd279746d0e2b01e1cd2ecaeab | 183 | py | Python | benchmark/pysam_fasta_random_access.py | DishSri1/pyfastx | 4bfa6662fb50b7244565ad00ef6e99962b4f3169 | [
"MIT"
] | 122 | 2019-10-21T16:22:27.000Z | 2022-03-31T06:07:45.000Z | benchmark/pysam_fasta_random_access.py | DishSri1/pyfastx | 4bfa6662fb50b7244565ad00ef6e99962b4f3169 | [
"MIT"
] | 40 | 2019-11-08T14:38:51.000Z | 2022-03-15T13:07:38.000Z | benchmark/pysam_fasta_random_access.py | DishSri1/pyfastx | 4bfa6662fb50b7244565ad00ef6e99962b4f3169 | [
"MIT"
] | 8 | 2020-01-20T01:31:51.000Z | 2021-07-30T10:28:35.000Z | import sys
import pysam

# Benchmark random access of FASTA sequences by name with pysam.
# argv[1] is a file of sequence ids (one per line), argv[2] the FASTA file.
# (`sys` is imported on the preceding line of this dump.)
idfile, fafile = sys.argv[1:]
reader = pysam.FastaFile(fafile)
with open(idfile) as id_handle:
    for raw_line in id_handle:
        print(str(reader[raw_line.strip()]))
| 14.076923 | 29 | 0.666667 | import sys
import pysam
# Usage: script.py <id_file> <fasta_file> — prints each named sequence.
# (`sys` is imported on the preceding line of this dump.)
idfile, fafile = sys.argv[1:]
fa = pysam.FastaFile(fafile)  # indexed FASTA reader
with open(idfile) as fh:
    for line in fh:
        seqid = line.strip()
        s = str(fa[seqid])  # fetch the full sequence by name
        print(s)
| true | true |
f721a16e8f02f666fcdc92caae18ad6f00ef9e1f | 12,817 | py | Python | tests/utils/log/elasticmock/fake_elasticsearch.py | wileeam/airflow | f46be8152a4d89c57db4ca46f5b3339e4876b723 | [
"Apache-2.0"
] | 1 | 2020-02-17T17:40:14.000Z | 2020-02-17T17:40:14.000Z | tests/utils/log/elasticmock/fake_elasticsearch.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | 2 | 2021-05-12T12:41:51.000Z | 2021-09-29T17:47:43.000Z | tests/utils/log/elasticmock/fake_elasticsearch.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Marcos Cardoso
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from elasticsearch import Elasticsearch
from elasticsearch.client.utils import query_params
from elasticsearch.exceptions import NotFoundError
from .utilities import get_random_id
# pylint: disable=redefined-builtin
# noinspection PyShadowingBuiltins
class FakeElasticsearch(Elasticsearch):
    """In-memory stand-in for the Elasticsearch client, for tests.

    Documents live in a per-instance dict keyed by index name; each value
    is a list of document dicts carrying `_type`, `_id`, `_source`,
    `_index`, `_version`. Only the subset of the client API exercised by
    the tests is implemented.
    """
    # Per-instance document store: {index_name: [document_dict, ...]}
    __documents_dict = None
    def __init__(self):
        self.__documents_dict = {}
    @query_params()
    def ping(self, params=None):
        """Always report the fake cluster as reachable."""
        return True
    @query_params()
    def info(self, params=None):
        """Return static cluster metadata mimicking an ES 1.7.5 node."""
        return {
            'status': 200,
            'cluster_name': 'elasticmock',
            'version':
                {
                    'lucene_version': '4.10.4',
                    'build_hash': '00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4',
                    'number': '1.7.5',
                    'build_timestamp': '2016-02-02T09:55:30Z',
                    'build_snapshot': False
                },
            'name': 'Nightwatch',
            'tagline': 'You Know, for Search'
        }
    @query_params('consistency', 'op_type', 'parent', 'refresh', 'replication',
                  'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type')
    def index(self, index, doc_type, body, id=None, params=None):
        """Store `body` under (index, doc_type, id).

        A random id is generated when none is given. Existing ids are not
        replaced: a new document is always appended and `_version` is
        always reported as 1.
        """
        if index not in self.__documents_dict:
            self.__documents_dict[index] = list()
        if id is None:
            id = get_random_id()
        version = 1
        self.__documents_dict[index].append({
            '_type': doc_type,
            '_id': id,
            '_source': body,
            '_index': index,
            '_version': version
        })
        return {
            '_type': doc_type,
            '_id': id,
            'created': True,
            '_version': version,
            '_index': index
        }
    @query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
    def exists(self, index, doc_type, id, params=None):
        """Return True iff a document with `id` and `doc_type` is in `index`."""
        result = False
        if index in self.__documents_dict:
            for document in self.__documents_dict[index]:
                if document.get('_id') == id and document.get('_type') == doc_type:
                    result = True
                    break
        return result
    @query_params('_source', '_source_exclude', '_source_include', 'fields',
                  'parent', 'preference', 'realtime', 'refresh', 'routing', 'version',
                  'version_type')
    def get(self, index, id, doc_type='_all', params=None):
        """Fetch a document by id; `doc_type='_all'` matches any type.

        Raises NotFoundError (404) when the document is absent.
        """
        result = None
        if index in self.__documents_dict:
            result = self.find_document(doc_type, id, index, result)
        if result:
            result['found'] = True
        else:
            error_data = {
                '_index': index,
                '_type': doc_type,
                '_id': id,
                'found': False
            }
            raise NotFoundError(404, json.dumps(error_data))
        return result
    def find_document(self, doc_type, id, index, result):
        """Return the first document in `index` matching id/type, else `result`."""
        for document in self.__documents_dict[index]:
            if document.get('_id') == id:
                if doc_type == '_all' or document.get('_type') == doc_type:
                    result = document
                    break
        return result
    @query_params('_source', '_source_exclude', '_source_include', 'parent',
                  'preference', 'realtime', 'refresh', 'routing', 'version',
                  'version_type')
    def get_source(self, index, doc_type, id, params=None):
        """Return only the `_source` of the document (raises when missing)."""
        document = self.get(index=index, doc_type=doc_type, id=id, params=params)
        return document.get('_source')
    @query_params('_source', '_source_exclude', '_source_include',
                  'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
                  'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
                  'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
                  'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
                  'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
                  'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
                  'track_scores', 'version')
    def count(self, index=None, doc_type=None, body=None, params=None):
        """Count documents in the given index(es), optionally filtered by type.

        The query `body` is ignored by this fake.
        """
        searchable_indexes = self._normalize_index_to_list(index)
        searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
        i = 0
        for searchable_index in searchable_indexes:
            for document in self.__documents_dict[searchable_index]:
                if searchable_doc_types\
                        and document.get('_type') not in searchable_doc_types:
                    continue
                i += 1
        result = {
            'count': i,
            '_shards': {
                'successful': 1,
                'failed': 0,
                'total': 1
            }
        }
        return result
    @query_params('_source', '_source_exclude', '_source_include',
                  'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
                  'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
                  'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
                  'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
                  'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
                  'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
                  'track_scores', 'version')
    def search(self, index=None, doc_type=None, body=None, params=None):
        """Run the (very limited) fake search, shaping an ES-style response.

        Only bool->must->match_phrase queries are honoured (see
        `_find_match`); every hit is given `_score` 1.0.
        """
        searchable_indexes = self._normalize_index_to_list(index)
        matches = self._find_match(index, doc_type, body)
        result = {
            'hits': {
                'total': len(matches),
                'max_score': 1.0
            },
            '_shards': {
                # Simulate indexes with 1 shard each
                'successful': len(searchable_indexes),
                'failed': 0,
                'total': len(searchable_indexes)
            },
            'took': 1,
            'timed_out': False
        }
        hits = []
        for match in matches:
            match['_score'] = 1.0
            hits.append(match)
        result['hits']['hits'] = hits
        return result
    @query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
                  'timeout', 'version', 'version_type')
    def delete(self, index, doc_type, id, params=None):
        """Remove the first matching document; raise NotFoundError when absent."""
        found = False
        if index in self.__documents_dict:
            for document in self.__documents_dict[index]:
                if document.get('_type') == doc_type and document.get('_id') == id:
                    found = True
                    self.__documents_dict[index].remove(document)
                    break
        result_dict = {
            'found': found,
            '_index': index,
            '_type': doc_type,
            '_id': id,
            '_version': 1,
        }
        if found:
            return result_dict
        else:
            raise NotFoundError(404, json.dumps(result_dict))
    @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
                  'preference', 'routing')
    def suggest(self, body, index=None, params=None):
        """Echo canned suggestions: ints get +1, strings a '_suggestion' suffix."""
        if index is not None and index not in self.__documents_dict:
            raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))
        result_dict = {}
        for key, value in body.items():
            text = value.get('text')
            suggestion = int(text) + 1 if isinstance(text, int) \
                else '{0}_suggestion'.format(text)
            result_dict[key] = [
                {
                    'text': text,
                    'length': 1,
                    'options': [
                        {
                            'text': suggestion,
                            'freq': 1,
                            'score': 1.0
                        }
                    ],
                    'offset': 0
                }
            ]
        return result_dict
    def _find_match(self, index, doc_type, body):  # pylint: disable=unused-argument
        """Collect documents satisfying the single `must` clause of `body`.

        Only body['query']['bool']['must'][0] is considered; other query
        shapes raise KeyError.
        """
        searchable_indexes = self._normalize_index_to_list(index)
        searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
        must = body['query']['bool']['must'][0]  # only support one must
        matches = []
        for searchable_index in searchable_indexes:
            self.find_document_in_searchable_index(matches, must, searchable_doc_types, searchable_index)
        return matches
    def find_document_in_searchable_index(self, matches, must, searchable_doc_types, searchable_index):
        """Append documents from one index that satisfy `must` to `matches`."""
        for document in self.__documents_dict[searchable_index]:
            if searchable_doc_types and document.get('_type') not in searchable_doc_types:
                continue
            if 'match_phrase' in must:
                self.match_must_phrase(document, matches, must)
            else:
                matches.append(document)
    @staticmethod
    def match_must_phrase(document, matches, must):
        """Append `document` when every match_phrase value occurs in its field."""
        for query_id in must['match_phrase']:
            query_val = must['match_phrase'][query_id]
            if query_id in document['_source']:
                if query_val in document['_source'][query_id]:
                    # use in as a proxy for match_phrase
                    matches.append(document)
    def _normalize_index_to_list(self, index):
        """Coerce `index` (None/str/list) to a list of existing index names.

        Raises NotFoundError for an unknown index and ValueError for any
        other input type.
        """
        # Ensure to have a list of index
        if index is None:
            searchable_indexes = self.__documents_dict.keys()
        elif isinstance(index, str):
            searchable_indexes = [index]
        elif isinstance(index, list):
            searchable_indexes = index
        else:
            # Is it the correct exception to use ?
            raise ValueError("Invalid param 'index'")
        # Check index(es) exists
        for searchable_index in searchable_indexes:
            if searchable_index not in self.__documents_dict:
                raise NotFoundError(404,
                                    'IndexMissingException[[{0}] missing]'
                                    .format(searchable_index))
        return searchable_indexes
    @staticmethod
    def _normalize_doc_type_to_list(doc_type):
        """Coerce `doc_type` (None/str/list) to a list; ValueError otherwise."""
        # Ensure to have a list of index
        if doc_type is None:
            searchable_doc_types = []
        elif isinstance(doc_type, str):
            searchable_doc_types = [doc_type]
        elif isinstance(doc_type, list):
            searchable_doc_types = doc_type
        else:
            # Is it the correct exception to use ?
            raise ValueError("Invalid param 'index'")
        return searchable_doc_types
# pylint: enable=redefined-builtin
| 37.920118 | 105 | 0.581727 |
import json
from elasticsearch import Elasticsearch
from elasticsearch.client.utils import query_params
from elasticsearch.exceptions import NotFoundError
from .utilities import get_random_id
class FakeElasticsearch(Elasticsearch):
    """In-memory fake of the Elasticsearch client, for tests.

    Documents live in a per-instance dict keyed by index name; each entry
    is a list of document dicts with `_type`, `_id`, `_source`, `_index`,
    `_version`. Only a small subset of the client API is implemented.
    """
    # Per-instance document store: {index_name: [document_dict, ...]}
    __documents_dict = None
    def __init__(self):
        self.__documents_dict = {}
    @query_params()
    def ping(self, params=None):
        """Always report the fake cluster as reachable."""
        return True
    @query_params()
    def info(self, params=None):
        """Return static cluster metadata mimicking an ES 1.7.5 node."""
        return {
            'status': 200,
            'cluster_name': 'elasticmock',
            'version':
                {
                    'lucene_version': '4.10.4',
                    'build_hash': '00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4',
                    'number': '1.7.5',
                    'build_timestamp': '2016-02-02T09:55:30Z',
                    'build_snapshot': False
                },
            'name': 'Nightwatch',
            'tagline': 'You Know, for Search'
        }
    @query_params('consistency', 'op_type', 'parent', 'refresh', 'replication',
                  'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type')
    def index(self, index, doc_type, body, id=None, params=None):
        """Append `body` under (index, doc_type, id); random id when omitted."""
        if index not in self.__documents_dict:
            self.__documents_dict[index] = list()
        if id is None:
            id = get_random_id()
        version = 1
        self.__documents_dict[index].append({
            '_type': doc_type,
            '_id': id,
            '_source': body,
            '_index': index,
            '_version': version
        })
        return {
            '_type': doc_type,
            '_id': id,
            'created': True,
            '_version': version,
            '_index': index
        }
    @query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
    def exists(self, index, doc_type, id, params=None):
        """Return True iff a document with `id` and `doc_type` is in `index`."""
        result = False
        if index in self.__documents_dict:
            for document in self.__documents_dict[index]:
                if document.get('_id') == id and document.get('_type') == doc_type:
                    result = True
                    break
        return result
    @query_params('_source', '_source_exclude', '_source_include', 'fields',
                  'parent', 'preference', 'realtime', 'refresh', 'routing', 'version',
                  'version_type')
    def get(self, index, id, doc_type='_all', params=None):
        """Fetch a document by id ('_all' matches any type); 404 when absent."""
        result = None
        if index in self.__documents_dict:
            result = self.find_document(doc_type, id, index, result)
        if result:
            result['found'] = True
        else:
            error_data = {
                '_index': index,
                '_type': doc_type,
                '_id': id,
                'found': False
            }
            raise NotFoundError(404, json.dumps(error_data))
        return result
    def find_document(self, doc_type, id, index, result):
        """Return the first document in `index` matching id/type, else `result`."""
        for document in self.__documents_dict[index]:
            if document.get('_id') == id:
                if doc_type == '_all' or document.get('_type') == doc_type:
                    result = document
                    break
        return result
    @query_params('_source', '_source_exclude', '_source_include', 'parent',
                  'preference', 'realtime', 'refresh', 'routing', 'version',
                  'version_type')
    def get_source(self, index, doc_type, id, params=None):
        """Return only the `_source` of the document (raises when missing)."""
        document = self.get(index=index, doc_type=doc_type, id=id, params=params)
        return document.get('_source')
    @query_params('_source', '_source_exclude', '_source_include',
                  'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
                  'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
                  'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
                  'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
                  'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
                  'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
                  'track_scores', 'version')
    def count(self, index=None, doc_type=None, body=None, params=None):
        """Count documents per index/type; the query `body` is ignored."""
        searchable_indexes = self._normalize_index_to_list(index)
        searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
        i = 0
        for searchable_index in searchable_indexes:
            for document in self.__documents_dict[searchable_index]:
                if searchable_doc_types\
                        and document.get('_type') not in searchable_doc_types:
                    continue
                i += 1
        result = {
            'count': i,
            '_shards': {
                'successful': 1,
                'failed': 0,
                'total': 1
            }
        }
        return result
    @query_params('_source', '_source_exclude', '_source_include',
                  'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
                  'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
                  'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
                  'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
                  'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
                  'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
                  'track_scores', 'version')
    def search(self, index=None, doc_type=None, body=None, params=None):
        """Fake search: only bool->must->match_phrase queries are honoured."""
        searchable_indexes = self._normalize_index_to_list(index)
        matches = self._find_match(index, doc_type, body)
        result = {
            'hits': {
                'total': len(matches),
                'max_score': 1.0
            },
            '_shards': {
                # Simulate indexes with one shard each.
                'successful': len(searchable_indexes),
                'failed': 0,
                'total': len(searchable_indexes)
            },
            'took': 1,
            'timed_out': False
        }
        hits = []
        for match in matches:
            match['_score'] = 1.0
            hits.append(match)
        result['hits']['hits'] = hits
        return result
    @query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
                  'timeout', 'version', 'version_type')
    def delete(self, index, doc_type, id, params=None):
        """Remove the first matching document; raise NotFoundError when absent."""
        found = False
        if index in self.__documents_dict:
            for document in self.__documents_dict[index]:
                if document.get('_type') == doc_type and document.get('_id') == id:
                    found = True
                    self.__documents_dict[index].remove(document)
                    break
        result_dict = {
            'found': found,
            '_index': index,
            '_type': doc_type,
            '_id': id,
            '_version': 1,
        }
        if found:
            return result_dict
        else:
            raise NotFoundError(404, json.dumps(result_dict))
    @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
                  'preference', 'routing')
    def suggest(self, body, index=None, params=None):
        """Echo canned suggestions: ints get +1, strings a '_suggestion' suffix."""
        if index is not None and index not in self.__documents_dict:
            raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))
        result_dict = {}
        for key, value in body.items():
            text = value.get('text')
            suggestion = int(text) + 1 if isinstance(text, int) \
                else '{0}_suggestion'.format(text)
            result_dict[key] = [
                {
                    'text': text,
                    'length': 1,
                    'options': [
                        {
                            'text': suggestion,
                            'freq': 1,
                            'score': 1.0
                        }
                    ],
                    'offset': 0
                }
            ]
        return result_dict
    def _find_match(self, index, doc_type, body):
        """Collect documents satisfying body['query']['bool']['must'][0]."""
        searchable_indexes = self._normalize_index_to_list(index)
        searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
        # Only a single `must` clause is supported.
        must = body['query']['bool']['must'][0]
        matches = []
        for searchable_index in searchable_indexes:
            self.find_document_in_searchable_index(matches, must, searchable_doc_types, searchable_index)
        return matches
    def find_document_in_searchable_index(self, matches, must, searchable_doc_types, searchable_index):
        """Append documents from one index that satisfy `must` to `matches`."""
        for document in self.__documents_dict[searchable_index]:
            if searchable_doc_types and document.get('_type') not in searchable_doc_types:
                continue
            if 'match_phrase' in must:
                self.match_must_phrase(document, matches, must)
            else:
                matches.append(document)
    @staticmethod
    def match_must_phrase(document, matches, must):
        """Append `document` when every match_phrase value occurs in its field."""
        for query_id in must['match_phrase']:
            query_val = must['match_phrase'][query_id]
            if query_id in document['_source']:
                if query_val in document['_source'][query_id]:
                    # Substring containment is used as a proxy for match_phrase.
                    matches.append(document)
    def _normalize_index_to_list(self, index):
        """Coerce `index` (None/str/list) to a list of existing index names.

        Raises NotFoundError for an unknown index, ValueError otherwise.
        """
        if index is None:
            searchable_indexes = self.__documents_dict.keys()
        elif isinstance(index, str):
            searchable_indexes = [index]
        elif isinstance(index, list):
            searchable_indexes = index
        else:
            raise ValueError("Invalid param 'index'")
        # Every requested index must already exist in the store.
        for searchable_index in searchable_indexes:
            if searchable_index not in self.__documents_dict:
                raise NotFoundError(404,
                                    'IndexMissingException[[{0}] missing]'
                                    .format(searchable_index))
        return searchable_indexes
    @staticmethod
    def _normalize_doc_type_to_list(doc_type):
        """Coerce `doc_type` (None/str/list) to a list; ValueError otherwise."""
        if doc_type is None:
            searchable_doc_types = []
        elif isinstance(doc_type, str):
            searchable_doc_types = [doc_type]
        elif isinstance(doc_type, list):
            searchable_doc_types = doc_type
        else:
            raise ValueError("Invalid param 'index'")
        return searchable_doc_types
| true | true |
f721a1a1b37e686e4f48a58bde1c7698c1b3c997 | 6,863 | py | Python | secret/gama/genetic_programming/compilers/scikitlearn.py | israel-cj/GAMA-GEISHA | 210101df0e280d5c2eb5d325fc26d551bba74ed6 | [
"Apache-2.0"
] | null | null | null | secret/gama/genetic_programming/compilers/scikitlearn.py | israel-cj/GAMA-GEISHA | 210101df0e280d5c2eb5d325fc26d551bba74ed6 | [
"Apache-2.0"
] | null | null | null | secret/gama/genetic_programming/compilers/scikitlearn.py | israel-cj/GAMA-GEISHA | 210101df0e280d5c2eb5d325fc26d551bba74ed6 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import logging
import os
import time
from typing import Callable, Tuple, Optional, Sequence
import stopit
from sklearn.base import TransformerMixin, is_classifier
from sklearn.model_selection import ShuffleSplit, cross_validate, check_cv
from sklearn.pipeline import Pipeline
from gama.utilities.evaluation_library import Evaluation
from gama.utilities.generic.stopwatch import Stopwatch
import numpy as np
from gama.utilities.metrics import Metric
from gama.genetic_programming.components import Individual, PrimitiveNode, Fitness
log = logging.getLogger(__name__)
def primitive_node_to_sklearn(primitive_node: PrimitiveNode) -> object:
    """Instantiate the scikit-learn object a PrimitiveNode describes.

    Each terminal contributes one keyword argument (``output`` -> ``value``)
    to the primitive's ``identifier`` callable (an estimator/transformer
    class or factory).
    """
    kwargs = {}
    for terminal in primitive_node._terminals:
        kwargs[terminal.output] = terminal.value
    return primitive_node._primitive.identifier(**kwargs)
def compile_individual(
    individual: Individual,
    parameter_checks=None,
    preprocessing_steps: Sequence[Tuple[str, TransformerMixin]] = None,
) -> Pipeline:
    """Build a scikit-learn Pipeline from an Individual's primitive chain.

    Primitives are converted in order and named after their position; any
    *preprocessing_steps* are placed first in the resulting pipeline.
    ``parameter_checks`` is accepted for interface compatibility but unused
    in this compiler.
    """
    converted = []
    for position, node in enumerate(individual.primitives):
        converted.append((str(position), primitive_node_to_sklearn(node)))
    if preprocessing_steps:
        converted.extend(reversed(preprocessing_steps))
    # Individuals list primitives output-first, so reverse for execution order.
    return Pipeline(list(reversed(converted)))
def object_is_valid_pipeline(o):
    """Return True when *o* quacks like a scikit-learn pipeline.

    A valid pipeline is non-None and exposes ``fit``, ``predict`` and
    ``steps`` attributes.
    """
    if o is None:
        return False
    return all(hasattr(o, name) for name in ("fit", "predict", "steps"))
def evaluate_pipeline(
    pipeline, x, y_train, timeout: float, metrics: Tuple[Metric], cv=5, subsample=None,
) -> Tuple:
    """ Score `pipeline` with k-fold CV according to `metrics` on (a subsample of) X, y
    Returns
    -------
    Tuple:
        prediction: np.ndarray if successful, None if not
        scores: tuple with one float per metric, each value is -inf on fail.
        estimators: list of fitted pipelines if successful, None if not
        error: None if successful, otherwise an Exception
    """
    if not object_is_valid_pipeline(pipeline):
        raise TypeError(f"Pipeline must not be None and requires fit, predict, steps.")
    if not timeout > 0:
        raise ValueError(f"`timeout` must be greater than 0, is {timeout}.")
    prediction, estimators = None, None
    # default score for e.g. timeout or failure
    scores = tuple([float("-inf")] * len(metrics))
    # ThreadingTimeout raises TimeoutException in this thread after `timeout`s.
    with stopit.ThreadingTimeout(timeout) as c_mgr:
        try:
            # Optionally evaluate on a fixed-seed subsample of the data
            # (random_state=0 keeps subsamples comparable across pipelines).
            if isinstance(subsample, int) and subsample < len(y_train):
                sampler = ShuffleSplit(n_splits=1, train_size=subsample, random_state=0)
                idx, _ = next(sampler.split(x))
                x, y_train = x.iloc[idx, :], y_train[idx]
            splitter = check_cv(cv, y_train, is_classifier(pipeline))
            result = cross_validate(
                pipeline,
                x,
                y_train,
                cv=splitter,
                return_estimator=True,
                scoring=[m.name for m in metrics],
                error_score="raise",
            )
            # One mean score per metric, in the order of `metrics`.
            scores = tuple([np.mean(result[f"test_{m.name}"]) for m in metrics])
            estimators = result["estimator"]
            # Reassemble out-of-fold predictions: each fitted estimator predicts
            # its own test fold, filling the full-length `prediction` array.
            for (estimator, (_, test)) in zip(estimators, splitter.split(x, y_train)):
                if any([m.requires_probabilities for m in metrics]):
                    fold_pred = estimator.predict_proba(x.iloc[test, :])
                else:
                    fold_pred = estimator.predict(x.iloc[test, :])
                if prediction is None:
                    # Allocate once; 2-D for probabilities, 1-D for labels.
                    if fold_pred.ndim == 2:
                        prediction = np.empty(shape=(len(y_train), fold_pred.shape[1]))
                    else:
                        prediction = np.empty(shape=(len(y_train),))
                prediction[test] = fold_pred
            # prediction, scores, estimators = cross_val_predict_score(
            #     pipeline, x, y_train, cv=cv, metrics=metrics
            # )
        except stopit.TimeoutException:
            # This exception is handled by the ThreadingTimeout context manager.
            raise
        except KeyboardInterrupt:
            raise
        except Exception as e:
            # Any other failure yields the default -inf scores plus the error.
            return prediction, scores, estimators, e
    if c_mgr.state == c_mgr.INTERRUPTED:
        # A TimeoutException was raised, but not by the context manager.
        # This indicates that the outer context manager (the ea) timed out.
        raise stopit.utils.TimeoutException()
    if not c_mgr:
        # For now we treat an eval timeout the same way as
        # e.g. NaN exceptions and use the default score.
        return prediction, scores, estimators, stopit.TimeoutException()
    return prediction, tuple(scores), estimators, None
def evaluate_individual(
    individual: Individual,
    evaluate_pipeline: Callable,
    timeout: float = 1e6,
    deadline: Optional[float] = None,
    add_length_to_score: bool = True,
    **kwargs,
) -> Evaluation:
    """Evaluate the pipeline encoded by *individual* and record the outcome.

    Parameters
    ----------
    individual: Individual
        Blueprint of the pipeline to evaluate.
    evaluate_pipeline: Callable
        Produces (predictions, scores, estimators, error) for a pipeline.
    timeout: float (default=1e6)
        Soft cap, in seconds, on evaluation time.
    deadline: float, optional
        Wall-clock time (seconds since epoch) after which evaluation must
        stop; shortens *timeout* when it is nearer.
    add_length_to_score: bool (default=True)
        Append ``-len(primitives)`` to the score tuple (parsimony pressure).
    **kwargs: Dict, optional
        Forwarded to *evaluate_pipeline*.

    Returns
    -------
    Evaluation
    """
    record = Evaluation(individual, pid=os.getpid())
    record.start_time = datetime.now()

    if deadline is not None:
        # Never run past the global deadline, even if `timeout` allows it.
        timeout = min(timeout, deadline - time.time())

    with Stopwatch() as wall_time, Stopwatch(time.process_time) as process_time:
        outcome = evaluate_pipeline(individual.pipeline, timeout=timeout, **kwargs)
        record._predictions, record.score, record._estimators, error = outcome
        if error is not None:
            record.error = f"{type(error)} {str(error)}"
    record.duration = wall_time.elapsed_time

    if add_length_to_score:
        record.score = record.score + (-len(individual.primitives),)
    individual.fitness = Fitness(
        record.score,
        record.start_time,
        wall_time.elapsed_time,
        process_time.elapsed_time,
    )
    return record
import logging
import os
import time
from typing import Callable, Tuple, Optional, Sequence
import stopit
from sklearn.base import TransformerMixin, is_classifier
from sklearn.model_selection import ShuffleSplit, cross_validate, check_cv
from sklearn.pipeline import Pipeline
from gama.utilities.evaluation_library import Evaluation
from gama.utilities.generic.stopwatch import Stopwatch
import numpy as np
from gama.utilities.metrics import Metric
from gama.genetic_programming.components import Individual, PrimitiveNode, Fitness
log = logging.getLogger(__name__)
def primitive_node_to_sklearn(primitive_node: PrimitiveNode) -> object:
    # NOTE(review): this definition duplicates an identical one earlier in the
    # file with comments stripped - it looks like dataset-extraction residue.
    """Instantiate the scikit-learn object described by *primitive_node*,
    passing each terminal's output/value pair as a keyword argument."""
    hyperparameters = {
        terminal.output: terminal.value for terminal in primitive_node._terminals
    }
    return primitive_node._primitive.identifier(**hyperparameters)
def compile_individual(
    individual: Individual,
    parameter_checks=None,
    preprocessing_steps: Sequence[Tuple[str, TransformerMixin]] = None,
) -> Pipeline:
    # NOTE(review): duplicate of the earlier compile_individual definition
    # (comments stripped) - appears to be dataset-extraction residue.
    """Build a scikit-learn Pipeline from the individual's primitive chain,
    optionally prefixed by *preprocessing_steps*."""
    steps = [
        (str(i), primitive_node_to_sklearn(primitive))
        for i, primitive in enumerate(individual.primitives)
    ]
    if preprocessing_steps:
        steps = steps + list(reversed(preprocessing_steps))
    # Primitives are stored output-first; reverse into execution order.
    return Pipeline(list(reversed(steps)))
def object_is_valid_pipeline(o):
    # NOTE(review): duplicate of the earlier definition (comments stripped).
    """Return True when *o* is non-None and has fit, predict and steps."""
    return (
        o is not None
        and hasattr(o, "fit")
        and hasattr(o, "predict")
        and hasattr(o, "steps")
    )
def evaluate_pipeline(
    pipeline, x, y_train, timeout: float, metrics: Tuple[Metric], cv=5, subsample=None,
) -> Tuple:
    # NOTE(review): duplicate of the earlier evaluate_pipeline definition
    # (comments stripped) - appears to be dataset-extraction residue.
    """Score `pipeline` with k-fold CV on (a subsample of) x, y_train.

    Returns a tuple (prediction, scores, estimators, error); on failure the
    scores are -inf per metric and `error` carries the exception.
    """
    if not object_is_valid_pipeline(pipeline):
        raise TypeError(f"Pipeline must not be None and requires fit, predict, steps.")
    if not timeout > 0:
        raise ValueError(f"`timeout` must be greater than 0, is {timeout}.")
    prediction, estimators = None, None
    # Default scores used on timeout or failure.
    scores = tuple([float("-inf")] * len(metrics))
    with stopit.ThreadingTimeout(timeout) as c_mgr:
        try:
            # Optional fixed-seed subsampling keeps evaluations comparable.
            if isinstance(subsample, int) and subsample < len(y_train):
                sampler = ShuffleSplit(n_splits=1, train_size=subsample, random_state=0)
                idx, _ = next(sampler.split(x))
                x, y_train = x.iloc[idx, :], y_train[idx]
            splitter = check_cv(cv, y_train, is_classifier(pipeline))
            result = cross_validate(
                pipeline,
                x,
                y_train,
                cv=splitter,
                return_estimator=True,
                scoring=[m.name for m in metrics],
                error_score="raise",
            )
            scores = tuple([np.mean(result[f"test_{m.name}"]) for m in metrics])
            estimators = result["estimator"]
            # Stitch out-of-fold predictions back into one full-length array.
            for (estimator, (_, test)) in zip(estimators, splitter.split(x, y_train)):
                if any([m.requires_probabilities for m in metrics]):
                    fold_pred = estimator.predict_proba(x.iloc[test, :])
                else:
                    fold_pred = estimator.predict(x.iloc[test, :])
                if prediction is None:
                    if fold_pred.ndim == 2:
                        prediction = np.empty(shape=(len(y_train), fold_pred.shape[1]))
                    else:
                        prediction = np.empty(shape=(len(y_train),))
                prediction[test] = fold_pred
        except stopit.TimeoutException:
            # Handled by the ThreadingTimeout context manager.
            raise
        except KeyboardInterrupt:
            raise
        except Exception as e:
            return prediction, scores, estimators, e
    if c_mgr.state == c_mgr.INTERRUPTED:
        # Timeout raised outside this manager: the outer (EA) deadline fired.
        raise stopit.utils.TimeoutException()
    if not c_mgr:
        # Evaluation timed out; report default scores with a timeout error.
        return prediction, scores, estimators, stopit.TimeoutException()
    return prediction, tuple(scores), estimators, None
def evaluate_individual(
    individual: Individual,
    evaluate_pipeline: Callable,
    timeout: float = 1e6,
    deadline: Optional[float] = None,
    add_length_to_score: bool = True,
    **kwargs,
) -> Evaluation:
    # NOTE(review): duplicate of the earlier evaluate_individual definition
    # (comments stripped) - appears to be dataset-extraction residue.
    """Evaluate the pipeline encoded by *individual* and record the outcome.

    *timeout* caps evaluation time and is further shortened when *deadline*
    (seconds since epoch) is nearer; when *add_length_to_score* is True the
    negated pipeline length is appended to the score tuple.  Extra keyword
    arguments are forwarded to *evaluate_pipeline*.  Returns an Evaluation.
    """
    result = Evaluation(individual, pid=os.getpid())
    result.start_time = datetime.now()
    if deadline is not None:
        time_to_deadline = deadline - time.time()
        timeout = min(timeout, time_to_deadline)
    with Stopwatch() as wall_time, Stopwatch(time.process_time) as process_time:
        evaluation = evaluate_pipeline(individual.pipeline, timeout=timeout, **kwargs)
        result._predictions, result.score, result._estimators, error = evaluation
        if error is not None:
            result.error = f"{type(error)} {str(error)}"
    result.duration = wall_time.elapsed_time
    if add_length_to_score:
        result.score = result.score + (-len(individual.primitives),)
    individual.fitness = Fitness(
        result.score,
        result.start_time,
        wall_time.elapsed_time,
        process_time.elapsed_time,
    )
    # Bug fix: the final line was corrupted by fused dataset-metadata columns
    # ("return result | true | true |"), which is a SyntaxError.
    return result
f721a1be56454def41dd34025c62ee217a56159a | 70,696 | py | Python | venv/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 5 | 2022-01-05T00:41:46.000Z | 2022-03-21T07:22:58.000Z | venv/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 25 | 2021-04-17T09:26:47.000Z | 2022-01-02T20:06:55.000Z | venv/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 20 | 2021-11-07T13:55:56.000Z | 2021-12-02T10:54:01.000Z | """
Shortest path algorithms for weighted graphs.
"""
from collections import deque
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors
# Public API of this module: Dijkstra and Bellman-Ford shortest-path variants
# (single-/multi-source, all-pairs, bidirectional) plus related utilities.
__all__ = [
    "dijkstra_path",
    "dijkstra_path_length",
    "bidirectional_dijkstra",
    "single_source_dijkstra",
    "single_source_dijkstra_path",
    "single_source_dijkstra_path_length",
    "multi_source_dijkstra",
    "multi_source_dijkstra_path",
    "multi_source_dijkstra_path_length",
    "all_pairs_dijkstra",
    "all_pairs_dijkstra_path",
    "all_pairs_dijkstra_path_length",
    "dijkstra_predecessor_and_distance",
    "bellman_ford_path",
    "bellman_ford_path_length",
    "single_source_bellman_ford",
    "single_source_bellman_ford_path",
    "single_source_bellman_ford_path_length",
    "all_pairs_bellman_ford_path",
    "all_pairs_bellman_ford_path_length",
    "bellman_ford_predecessor_and_distance",
    "negative_edge_cycle",
    "goldberg_radzik",
    "johnson",
]
def _weight_function(G, weight):
"""Returns a function that returns the weight of an edge.
The returned function is specifically suitable for input to
functions :func:`_dijkstra` and :func:`_bellman_ford_relaxation`.
Parameters
----------
G : NetworkX graph.
weight : string or function
If it is callable, `weight` itself is returned. If it is a string,
it is assumed to be the name of the edge attribute that represents
the weight of an edge. In that case, a function is returned that
gets the edge weight according to the specified edge attribute.
Returns
-------
function
This function returns a callable that accepts exactly three inputs:
a node, an node adjacent to the first one, and the edge attribute
dictionary for the eedge joining those nodes. That function returns
a number representing the weight of an edge.
If `G` is a multigraph, and `weight` is not callable, the
minimum edge weight over all parallel edges is returned. If any edge
does not have an attribute with key `weight`, it is assumed to
have weight one.
"""
if callable(weight):
return weight
# If the weight keyword argument is not callable, we assume it is a
# string representing the edge attribute containing the weight of
# the edge.
if G.is_multigraph():
return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values())
return lambda u, v, data: data.get(weight, 1)
def dijkstra_path(G, source, target, weight="weight"):
    """Return the shortest weighted path from *source* to *target* in G.

    Uses Dijkstra's method; edge weights must be numerical and distances are
    sums of edge weights along the path.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node.
    target : node
        Ending node.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``.
        Returning ``None`` from a callable hides that edge, and the callable
        may also incorporate node weights.

    Returns
    -------
    list
        Nodes along a shortest path.

    Raises
    ------
    NodeNotFound
        If *source* is not in G.
    NetworkXNoPath
        If no path exists between *source* and *target*.

    Notes
    -----
    If you need the path length as well, call
    :func:`single_source_dijkstra` directly - it computes both.

    See Also
    --------
    bidirectional_dijkstra, bellman_ford_path, single_source_dijkstra
    """
    _, best_path = single_source_dijkstra(G, source, target=target, weight=weight)
    return best_path
def dijkstra_path_length(G, source, target, weight="weight"):
    """Return the length of the shortest weighted path from *source* to *target*.

    Uses Dijkstra's method; the length is the sum of edge weights along the
    shortest path.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node.
    target : node label
        Ending node.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    number
        Shortest path length.

    Raises
    ------
    NodeNotFound
        If *source* is not in G.
    NetworkXNoPath
        If no path exists between *source* and *target*.

    See Also
    --------
    bidirectional_dijkstra, bellman_ford_path_length, single_source_dijkstra
    """
    # A node is trivially at distance 0 from itself.
    if source == target:
        return 0
    get_weight = _weight_function(G, weight)
    distances = _dijkstra(G, source, get_weight, target=target)
    try:
        return distances[target]
    except KeyError as e:
        raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from e
def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"):
    """Compute shortest weighted paths from *source* to all reachable nodes.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node for all paths.
    cutoff : integer or float, optional
        Stop the search at this summed weight; only paths with total weight
        <= cutoff are returned.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    dict
        Maps each reachable target node to its shortest path (a node list).

    Raises
    ------
    NodeNotFound
        If *source* is not in G.

    See Also
    --------
    single_source_dijkstra, single_source_bellman_ford
    """
    # Delegate to the multi-source variant with a single-element source set.
    sources = {source}
    return multi_source_dijkstra_path(G, sources, cutoff=cutoff, weight=weight)
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
    """Compute shortest weighted path lengths from *source* to all reachable nodes.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node.
    cutoff : integer or float, optional
        Stop the search at this summed weight; only lengths <= cutoff are
        returned.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    dict
        Maps each reachable node to its shortest path length from *source*.

    Raises
    ------
    NodeNotFound
        If *source* is not in G.

    See Also
    --------
    single_source_dijkstra, single_source_bellman_ford_path_length
    """
    # Delegate to the multi-source variant with a single-element source set.
    sources = {source}
    return multi_source_dijkstra_path_length(G, sources, cutoff=cutoff, weight=weight)
def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"):
    """Compute shortest weighted paths and lengths from *source*.

    Uses Dijkstra's algorithm.  Not guaranteed to work with negative edge
    weights; floating-point weights may suffer round-off problems.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node.
    target : node label, optional
        Ending node; when given, only this target's result is returned.
    cutoff : integer or float, optional
        Stop the search at this summed weight; only results with total
        weight <= cutoff are returned.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    distance, path :
        With ``target=None``: a pair of dicts keyed by node - distances
        from *source*, and paths from *source*.  Otherwise: the numeric
        distance to *target* and the path to it as a node list.

    Raises
    ------
    NodeNotFound
        If *source* is not in G.

    See Also
    --------
    single_source_dijkstra_path
    single_source_dijkstra_path_length
    single_source_bellman_ford
    """
    # Delegate to the multi-source variant with a single-element source set.
    return multi_source_dijkstra(G, {source}, cutoff=cutoff, target=target, weight=weight)
def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"):
    """Compute shortest weighted paths from any of *sources* to all reachable nodes.

    Each returned path begins at whichever source is nearest to its target.

    Parameters
    ----------
    G : NetworkX graph
    sources : non-empty set of nodes
        Candidate starting nodes.
    cutoff : integer or float, optional
        Stop the search at this summed weight; only paths with total weight
        <= cutoff are returned.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    dict
        Maps each reachable target node to a shortest path (a node list).

    Raises
    ------
    ValueError
        If *sources* is empty.
    NodeNotFound
        If any source is not in G.

    See Also
    --------
    multi_source_dijkstra, multi_source_bellman_ford
    """
    # Only the path dictionary is needed; drop the distances.
    return multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight)[1]
def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"):
    """Compute shortest weighted path lengths from any of *sources*.

    Each node's length is its distance to the nearest source.

    Parameters
    ----------
    G : NetworkX graph
    sources : non-empty set of nodes
        Candidate starting nodes.
    cutoff : integer or float, optional
        Stop the search at this summed weight; only lengths <= cutoff are
        returned.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    dict
        Maps each reachable node to its shortest distance from the nearest
        source.

    Raises
    ------
    ValueError
        If *sources* is empty.
    NodeNotFound
        If any source is not in G.

    See Also
    --------
    multi_source_dijkstra
    """
    if not sources:
        raise ValueError("sources must not be empty")
    weight_fn = _weight_function(G, weight)
    return _dijkstra_multisource(G, sources, weight_fn, cutoff=cutoff)
def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"):
    """Compute shortest weighted paths and lengths from any of *sources*.

    Uses Dijkstra's algorithm; not guaranteed to work with negative edge
    weights, and floating-point weights may suffer round-off problems.

    Parameters
    ----------
    G : NetworkX graph
    sources : non-empty set of nodes
        Candidate starting nodes; each computed path begins at whichever
        source is nearest.
    target : node label, optional
        Ending node; when given, only this target's result is returned.
    cutoff : integer or float, optional
        Stop the search at this summed weight; only results with total
        weight <= cutoff are returned.
    weight : string or function
        Name of the edge attribute holding the weight (missing attribute
        counts as 1), or a callable ``f(u, v, edge_data) -> number``;
        returning ``None`` hides an edge.

    Returns
    -------
    distance, path :
        With ``target=None``: a pair of dicts keyed by node - distances from
        the nearest source, and paths from that source.  Otherwise: the
        numeric distance to *target* and the path to it as a node list.

    Raises
    ------
    ValueError
        If *sources* is empty.
    NodeNotFound
        If any source is not in G.
    NetworkXNoPath
        If *target* is given but unreachable.

    See Also
    --------
    multi_source_dijkstra_path
    multi_source_dijkstra_path_length
    """
    if not sources:
        raise ValueError("sources must not be empty")
    # A target that is itself a source is at distance zero.
    if target in sources:
        return (0, [target])

    weight = _weight_function(G, weight)
    # Seed each source with the trivial path to itself; _dijkstra_multisource
    # extends these in place.
    paths = {s: [s] for s in sources}
    dist = _dijkstra_multisource(
        G, sources, weight, paths=paths, cutoff=cutoff, target=target
    )
    if target is None:
        return (dist, paths)
    try:
        return (dist[target], paths[target])
    except KeyError as e:
        raise nx.NetworkXNoPath(f"No path to {target}.") from e
def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None):
    """Run Dijkstra's algorithm from a single *source*.

    Thin convenience wrapper: identical to :func:`_dijkstra_multisource`
    with ``sources=[source]``.
    """
    sources = [source]
    return _dijkstra_multisource(
        G, sources, weight, pred=pred, paths=paths, cutoff=cutoff, target=target
    )
def _dijkstra_multisource(
G, sources, weight, pred=None, paths=None, cutoff=None, target=None
):
"""Uses Dijkstra's algorithm to find shortest weighted paths
Parameters
----------
G : NetworkX graph
sources : non-empty iterable of nodes
Starting nodes for paths. If this is just an iterable containing
a single node, then all paths computed by this function will
start from that node. If there are two or more nodes in this
iterable, the computed paths may begin from any one of the start
nodes.
weight: function
Function with (u, v, data) input that returns that edges weight
pred: dict of lists, optional(default=None)
dict to store a list of predecessors keyed by that node
If None, predecessors are not stored.
paths: dict, optional (default=None)
dict to store the path list from source to each node, keyed by node.
If None, paths are not stored.
target : node label, optional
Ending node for path. Search is halted when target is found.
cutoff : integer or float, optional
Length (sum of edge weights) at which the search is stopped.
If cutoff is provided, only return paths with summed weight <= cutoff.
Returns
-------
distance : dictionary
A mapping from node to shortest distance to that node from one
of the source nodes.
Raises
------
NodeNotFound
If any of `sources` is not in `G`.
Notes
-----
The optional predecessor and path dictionaries can be accessed by
the caller through the original pred and paths objects passed
as arguments. No need to explicitly return pred or paths.
"""
G_succ = G._succ if G.is_directed() else G._adj
push = heappush
pop = heappop
dist = {} # dictionary of final distances
seen = {}
# fringe is heapq with 3-tuples (distance,c,node)
# use the count c to avoid comparing nodes (may not be able to)
c = count()
fringe = []
for source in sources:
if source not in G:
raise nx.NodeNotFound(f"Source {source} not in G")
seen[source] = 0
push(fringe, (0, next(c), source))
while fringe:
(d, _, v) = pop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d
if v == target:
break
for u, e in G_succ[v].items():
cost = weight(v, u, e)
if cost is None:
continue
vu_dist = dist[v] + cost
if cutoff is not None:
if vu_dist > cutoff:
continue
if u in dist:
u_dist = dist[u]
if vu_dist < u_dist:
raise ValueError("Contradictory paths found:", "negative weights?")
elif pred is not None and vu_dist == u_dist:
pred[u].append(v)
elif u not in seen or vu_dist < seen[u]:
seen[u] = vu_dist
push(fringe, (vu_dist, next(c), u))
if paths is not None:
paths[u] = paths[v] + [u]
if pred is not None:
pred[u] = [v]
elif vu_dist == seen[u]:
if pred is not None:
pred[u].append(v)
# The optional predecessor and path dictionaries can be accessed
# by the caller via the pred and paths objects passed as arguments.
return dist
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
    """Return shortest-path predecessors and distances from `source`.

    Runs Dijkstra's algorithm from `source` and produces, for every node
    reached, the list of its predecessors on shortest weighted paths
    together with its distance from `source`.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Starting node for path

    cutoff : integer or float, optional
        Length (sum of edge weights) at which the search is stopped.
        If cutoff is provided, only return paths with summed weight <= cutoff.

    weight : string or function
        If this is a string, edge weights are read via the edge attribute
        with this key (the weight of the edge joining `u` to `v` is
        ``G.edges[u, v][weight]``); a missing attribute means weight one.
        If this is a function, it must accept exactly three positional
        arguments — the two endpoints of an edge and the dictionary of
        edge attributes — and return a number.

    Returns
    -------
    pred, distance : dictionaries
        Returns two dictionaries representing a list of predecessors
        of a node and the distance to each node.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    A node's predecessor list holds more than one entry only when several
    distinct shortest paths reach that node.

    Examples
    --------
    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
    >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0)
    >>> sorted(pred.items())
    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1)
    >>> sorted(pred.items())
    [(0, []), (1, [0])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1)]
    """
    weight_fn = _weight_function(G, weight)
    pred = {source: []}  # the source itself has no predecessors
    dist = _dijkstra(G, source, weight_fn, pred=pred, cutoff=cutoff)
    return (pred, dist)
def all_pairs_dijkstra(G, cutoff=None, weight="weight"):
    """Yield shortest weighted path lengths and paths from every node.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer or float, optional
        Length (sum of edge weights) at which the search is stopped.
        If cutoff is provided, only return paths with summed weight <= cutoff.

    weight : string or function
        If this is a string, edge weights are read via the edge attribute
        with this key (the weight of the edge joining `u` to `v` is
        ``G.edge[u][v][weight]``); a missing attribute means weight one.
        If this is a function, it must accept exactly three positional
        arguments — the two endpoints of an edge and the dictionary of
        edge attributes — and return a number.

    Yields
    ------
    (node, (distance, path)) : (node obj, (dict, dict))
        Each source node has two associated dicts. The first holds distance
        keyed by target and the second holds paths keyed by target.
        (See single_source_dijkstra for the source/target node terminology.)
        If desired you can apply `dict()` to this function to create a dict
        keyed by source node to the two dicts.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> len_path = dict(nx.all_pairs_dijkstra(G))
    >>> print(len_path[3][0][1])
    2
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"3 - {node}: {len_path[3][0][node]}")
    3 - 0: 3
    3 - 1: 2
    3 - 2: 1
    3 - 3: 0
    3 - 4: 1
    >>> len_path[3][1][1]
    [3, 2, 1]
    >>> for n, (dist, path) in nx.all_pairs_dijkstra(G):
    ...     print(path[1])
    [0, 1]
    [1]
    [2, 1]
    [3, 2, 1]
    [4, 3, 2, 1]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The yielded dicts only have keys for reachable nodes.
    """
    # One full single-source Dijkstra per node; each call returns the
    # (distance dict, path dict) pair that is yielded unchanged.
    for source in G:
        yield (source, single_source_dijkstra(G, source, cutoff=cutoff, weight=weight))
def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"):
    """Compute shortest weighted path lengths between all node pairs.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer or float, optional
        Length (sum of edge weights) at which the search is stopped.
        If cutoff is provided, only return paths with summed weight <= cutoff.

    weight : string or function
        If this is a string, edge weights are read via the edge attribute
        with this key (the weight of the edge joining `u` to `v` is
        ``G.edges[u, v][weight]``); a missing attribute means weight one.
        If this is a function, it must accept exactly three positional
        arguments — the two endpoints of an edge and the dictionary of
        edge attributes — and return a number.

    Returns
    -------
    distance : iterator
        (source, dictionary) iterator with dictionary keyed by target and
        shortest path length as the key value.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = dict(nx.all_pairs_dijkstra_path_length(G))
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"1 - {node}: {length[1][node]}")
    1 - 0: 1
    1 - 1: 0
    1 - 2: 1
    1 - 3: 2
    1 - 4: 3
    >>> length[3][2]
    1
    >>> length[2][2]
    0

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionary returned only has keys for reachable node pairs.
    """
    # Delegate each source to the single-source routine.
    for source in G:
        yield (
            source,
            single_source_dijkstra_path_length(G, source, cutoff=cutoff, weight=weight),
        )
def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"):
    """Compute shortest weighted paths between all node pairs.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer or float, optional
        Length (sum of edge weights) at which the search is stopped.
        If cutoff is provided, only return paths with summed weight <= cutoff.

    weight : string or function
        If this is a string, edge weights are read via the edge attribute
        with this key (the weight of the edge joining `u` to `v` is
        ``G.edges[u, v][weight]``); a missing attribute means weight one.
        If this is a function, it must accept exactly three positional
        arguments — the two endpoints of an edge and the dictionary of
        edge attributes — and return a number.

    Returns
    -------
    distance : dictionary
        Dictionary, keyed by source and target, of shortest paths.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = dict(nx.all_pairs_dijkstra_path(G))
    >>> print(path[0][4])
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    floyd_warshall, all_pairs_bellman_ford_path
    """
    # TODO This can be trivially parallelized.
    for source in G:
        yield (source, single_source_dijkstra_path(G, source, cutoff=cutoff, weight=weight))
def bellman_ford_predecessor_and_distance(
    G, source, target=None, weight="weight", heuristic=False
):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    The algorithm has a running time of $O(mn)$ where $n$ is the number of
    nodes and $m$ is the number of edges. It is slower than Dijkstra but
    can handle negative edge weights.

    Parameters
    ----------
    G : NetworkX graph
        The algorithm works for all types of graphs, including directed
        graphs and multigraphs.

    source: node label
        Starting node for path

    target : node label, optional
        Ending node for path

    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

    heuristic : bool
        Determines whether to use a heuristic to early detect negative
        cycles at a hopefully negligible cost.

    Returns
    -------
    pred, dist : dictionaries
        Returns two dictionaries keyed by node to predecessor in the
        path and to the distance from the source respectively.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle. Note: any negative weight edge in an
        undirected graph is a negative cost cycle.

    Examples
    --------
    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
    >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
    >>> sorted(pred.items())
    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1)
    >>> sorted(pred.items())
    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
    >>> G[1][2]["weight"] = -7
    >>> nx.bellman_ford_predecessor_and_distance(G, 0)
    Traceback (most recent call last):
        ...
    networkx.exception.NetworkXUnbounded: Negative cost cycle detected.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionaries returned only have keys for nodes reachable from
    the source.
    In the case where the (di)graph is not connected, if a component
    not containing the source contains a negative cost (di)cycle, it
    will not be detected.
    In NetworkX v2.1 and prior, the source node had predecessor `[None]`.
    In NetworkX v2.2 this changed to the source node having predecessor `[]`
    """
    if source not in G:
        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
    weight = _weight_function(G, weight)
    # A negative self-loop is the smallest possible negative cycle; reject
    # it up front since the relaxation loop would run forever on it.
    if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    dist = {source: 0}
    pred = {source: []}
    # Single-node graph: nothing to relax.
    if len(G) == 1:
        return pred, dist
    # NOTE: the original computed _weight_function(G, weight) a second time
    # here; `weight` is already a callable, so the duplicate call is removed.
    dist = _bellman_ford(
        G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
    )
    return (pred, dist)
def _bellman_ford(
    G, source, weight, pred=None, paths=None, dist=None, target=None, heuristic=True
):
    """Relaxation loop for Bellman–Ford algorithm.

    This is an implementation of the SPFA variant.
    See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm

    Parameters
    ----------
    G : NetworkX graph

    source: list
        List of source nodes. The shortest path from any of the source
        nodes will be found if multiple sources are provided.

    weight : function
        The weight of an edge is the value returned by the function. The
        function must accept exactly three positional arguments: the two
        endpoints of an edge and the dictionary of edge attributes for
        that edge. The function must return a number.

    pred: dict of lists, optional (default=None)
        dict to store a list of predecessors keyed by that node
        If None, predecessors are not stored

    paths: dict, optional (default=None)
        dict to store the path list from source to each node, keyed by node
        If None, paths are not stored

    dist: dict, optional (default=None)
        dict to store distance from source to the keyed node
        If None, returned dist dict contents default to 0 for every node in the
        source list

    target: node label, optional
        Ending node for path. Path lengths to other destinations may (and
        probably will) be incorrect.

    heuristic : bool
        Determines whether to use a heuristic to early detect negative
        cycles at a hopefully negligible cost.

    Returns
    -------
    Returns a dict keyed by node to the distance from the source.
    Dicts for paths and pred are in the mutated input dicts by those names.

    Raises
    ------
    NodeNotFound
        If any of `source` is not in `G`.

    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle. Note: any negative weight edge in an
        undirected graph is a negative cost cycle
    """
    # Validate sources before touching any state.
    for s in source:
        if s not in G:
            raise nx.NodeNotFound(f"Source {s} not in G")
    if pred is None:
        pred = {v: [] for v in source}
    if dist is None:
        dist = {v: 0 for v in source}
    # Heuristic Storage setup. Note: use None because nodes cannot be None
    nonexistent_edge = (None, None)
    # pred_edge[v] is the single predecessor used by the most recent cost
    # update of v; recent_update[v] is the last edge on v's update path.
    pred_edge = {v: None for v in source}
    recent_update = {v: nonexistent_edge for v in source}
    G_succ = G.succ if G.is_directed() else G.adj
    inf = float("inf")
    n = len(G)
    # count[v]: number of times v has entered the queue.  A node can be
    # re-queued at most n - 1 times on a graph without negative cycles.
    count = {}
    q = deque(source)
    in_q = set(source)
    while q:
        u = q.popleft()
        in_q.remove(u)
        # Skip relaxations if any of the predecessors of u is in the queue.
        if all(pred_u not in in_q for pred_u in pred[u]):
            dist_u = dist[u]
            for v, e in G_succ[u].items():
                dist_v = dist_u + weight(u, v, e)
                if dist_v < dist.get(v, inf):
                    # In this conditional branch we are updating the path with v.
                    # If it happens that some earlier update also added node v
                    # that implies the existence of a negative cycle since
                    # after the update node v would lie on the update path twice.
                    # The update path is stored up to one of the source nodes,
                    # therefore u is always in the dict recent_update
                    if heuristic:
                        if v in recent_update[u]:
                            raise nx.NetworkXUnbounded("Negative cost cycle detected.")
                        # Transfer the recent update info from u to v if the
                        # same source node is the head of the update path.
                        # If the source node is responsible for the cost update,
                        # then clear the history and use it instead.
                        if v in pred_edge and pred_edge[v] == u:
                            recent_update[v] = recent_update[u]
                        else:
                            recent_update[v] = (u, v)
                    if v not in in_q:
                        q.append(v)
                        in_q.add(v)
                        count_v = count.get(v, 0) + 1
                        # n re-queues of the same node prove a negative cycle.
                        if count_v == n:
                            raise nx.NetworkXUnbounded("Negative cost cycle detected.")
                        count[v] = count_v
                    dist[v] = dist_v
                    pred[v] = [u]
                    pred_edge[v] = u
                elif dist.get(v) is not None and dist_v == dist.get(v):
                    # Equal-cost alternative: record the extra predecessor.
                    pred[v].append(u)
    if paths is not None:
        # Materialize one concrete path per requested destination from the
        # predecessor structure built above.
        sources = set(source)
        dsts = [target] if target is not None else pred
        for dst in dsts:
            gen = _build_paths_from_predecessors(sources, dst, pred)
            paths[dst] = next(gen)
    # The optional predecessor and path dictionaries were mutated in place.
    return dist
def bellman_ford_path(G, source, target, weight="weight"):
    """Return a shortest path from `source` to `target` via Bellman-Ford.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node

    target : node
        Ending node

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    path : list
        List of nodes in a shortest path.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXNoPath
        If no path exists between source and target.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> print(nx.bellman_ford_path(G, 0, 4))
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    dijkstra_path, bellman_ford_path_length
    """
    # single_source_bellman_ford returns (length, path); only the path
    # is wanted here.
    return single_source_bellman_ford(G, source, target=target, weight=weight)[1]
def bellman_ford_path_length(G, source, target, weight="weight"):
    """Returns the shortest path length from source to target
    in a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        starting node for path

    target : node label
        ending node for path

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    length : number
        Shortest path length.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXNoPath
        If no path exists between source and target.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> print(nx.bellman_ford_path_length(G, 0, 4))
    4

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    dijkstra_path_length, bellman_ford_path
    """
    if source == target:
        # Fix: previously this early return skipped the documented
        # NodeNotFound check when source == target but is absent from G.
        if source not in G:
            raise nx.NodeNotFound(f"Node {source} not found in graph")
        return 0
    weight = _weight_function(G, weight)
    length = _bellman_ford(G, [source], weight, target=target)
    try:
        return length[target]
    except KeyError as e:
        raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from e
def single_source_bellman_ford_path(G, source, weight="weight"):
    """Compute shortest path between source and all other reachable
    nodes for a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node for path.

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    paths : dictionary
        Dictionary of shortest paths keyed by target.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = nx.single_source_bellman_ford_path(G, 0)
    >>> path[4]
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra, single_source_bellman_ford
    """
    # Docstring fix: the Returns section previously claimed "path lengths";
    # this function discards the lengths and returns the path dictionary.
    (length, path) = single_source_bellman_ford(G, source, weight=weight)
    return path
def single_source_bellman_ford_path_length(G, source, weight="weight"):
    """Compute the shortest path length between source and all other
    reachable nodes for a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Starting node for path

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    length : dictionary
        Dictionary of shortest path lengths keyed by target.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = dict(nx.single_source_bellman_ford_path_length(G, 0))
    >>> length[4]
    4
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"{node}: {length[node]}")
    0: 0
    1: 1
    2: 2
    3: 3
    4: 4

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra, single_source_bellman_ford
    """
    # Docstring fix: the Returns section previously claimed an iterator of
    # (target, length) pairs, but _bellman_ford returns a dict keyed by node.
    weight = _weight_function(G, weight)
    return _bellman_ford(G, [source], weight)
def single_source_bellman_ford(G, source, target=None, weight="weight"):
    """Compute shortest paths and lengths in a weighted graph G.

    Uses Bellman-Ford algorithm for shortest paths.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Starting node for path

    target : node label, optional
        Ending node for path

    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

    Returns
    -------
    distance, path : pair of dictionaries, or numeric and list
        If target is None, returns a tuple of two dictionaries keyed by node.
        The first dictionary stores distance from one of the source nodes.
        The second stores the path from one of the sources to that node.
        If target is not None, returns a tuple of (distance, path) where
        distance is the distance from source to target and path is a list
        representing the path from source to target.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length, path = nx.single_source_bellman_ford(G, 0)
    >>> print(length[4])
    4
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"{node}: {length[node]}")
    0: 0
    1: 1
    2: 2
    3: 3
    4: 4
    >>> path[4]
    [0, 1, 2, 3, 4]
    >>> length, path = nx.single_source_bellman_ford(G, 0, 1)
    >>> length
    1
    >>> path
    [0, 1]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra
    single_source_bellman_ford_path
    single_source_bellman_ford_path_length
    """
    if source == target:
        # Fix: previously this early return skipped the documented
        # NodeNotFound check when source == target but is absent from G.
        if source not in G:
            raise nx.NodeNotFound(f"Node {source} is not found in the graph")
        return (0, [source])
    weight = _weight_function(G, weight)
    paths = {source: [source]}  # dictionary of paths
    dist = _bellman_ford(G, [source], weight, paths=paths, target=target)
    if target is None:
        return (dist, paths)
    try:
        return (dist[target], paths[target])
    except KeyError as e:
        msg = f"Node {target} not reachable from {source}"
        raise nx.NetworkXNoPath(msg) from e
def all_pairs_bellman_ford_path_length(G, weight="weight"):
    """Compute Bellman-Ford shortest path lengths between all node pairs.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    distance : iterator
        (source, dictionary) iterator with dictionary keyed by target and
        shortest path length as the key value.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = dict(nx.all_pairs_bellman_ford_path_length(G))
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"1 - {node}: {length[1][node]}")
    1 - 0: 1
    1 - 1: 0
    1 - 2: 1
    1 - 3: 2
    1 - 4: 3
    >>> length[3][2]
    1
    >>> length[2][2]
    0

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionary returned only has keys for reachable node pairs.
    """
    # One single-source computation per node; a fresh dict copy is yielded
    # so callers may mutate results freely.
    for source in G:
        yield (
            source,
            dict(single_source_bellman_ford_path_length(G, source, weight=weight)),
        )
def all_pairs_bellman_ford_path(G, weight="weight"):
    """Compute Bellman-Ford shortest paths between all node pairs.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    distance : dictionary
        Dictionary, keyed by source and target, of shortest paths.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = dict(nx.all_pairs_bellman_ford_path(G))
    >>> print(path[0][4])
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    floyd_warshall, all_pairs_dijkstra_path
    """
    # TODO This can be trivially parallelized.
    for source in G:
        yield (source, single_source_bellman_ford_path(G, source, weight=weight))
def goldberg_radzik(G, source, weight="weight"):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    The algorithm has a running time of $O(mn)$ where $n$ is the number of
    nodes and $m$ is the number of edges. It is slower than Dijkstra but
    can handle negative edge weights.

    Parameters
    ----------
    G : NetworkX graph
        The algorithm works for all types of graphs, including directed
        graphs and multigraphs.

    source: node label
        Starting node for path

    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

    Returns
    -------
    pred, dist : dictionaries
        Returns two dictionaries keyed by node to predecessor in the
        path and to the distance from the source respectively.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle. Note: any negative weight edge in an
        undirected graph is a negative cost cycle.

    Examples
    --------
    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
    >>> pred, dist = nx.goldberg_radzik(G, 0)
    >>> sorted(pred.items())
    [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
    >>> G[1][2]["weight"] = -7
    >>> nx.goldberg_radzik(G, 0)
    Traceback (most recent call last):
        ...
    networkx.exception.NetworkXUnbounded: Negative cost cycle detected.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionaries returned only have keys for nodes reachable from
    the source.
    In the case where the (di)graph is not connected, if a component
    not containing the source contains a negative cost (di)cycle, it
    will not be detected.
    """
    if source not in G:
        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
    weight = _weight_function(G, weight)
    # A negative self-loop is itself a negative cycle; reject it up front.
    if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    # Trivial single-node graph.
    if len(G) == 1:
        return {source: None}, {source: 0}
    if G.is_directed():
        G_succ = G.succ
    else:
        G_succ = G.adj
    inf = float("inf")
    d = {u: inf for u in G}
    d[source] = 0
    pred = {source: None}

    def topo_sort(relabeled):
        """Topologically sort nodes relabeled in the previous round and detect
        negative cycles.
        """
        # List of nodes to scan in this round. Denoted by A in Goldberg and
        # Radzik's paper.
        to_scan = []
        # In the DFS in the loop below, neg_count records for each node the
        # number of edges of negative reduced costs on the path from a DFS root
        # to the node in the DFS forest. The reduced cost of an edge (u, v) is
        # defined as d[u] + weight[u][v] - d[v].
        #
        # neg_count also doubles as the DFS visit marker array.
        neg_count = {}
        for u in relabeled:
            # Skip visited nodes.
            if u in neg_count:
                continue
            d_u = d[u]
            # Skip nodes without out-edges of negative reduced costs.
            if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()):
                continue
            # Nonrecursive DFS that inserts nodes reachable from u via edges of
            # nonpositive reduced costs into to_scan in (reverse) topological
            # order.
            stack = [(u, iter(G_succ[u].items()))]
            in_stack = {u}
            neg_count[u] = 0
            while stack:
                u, it = stack[-1]
                try:
                    v, e = next(it)
                except StopIteration:
                    # All out-edges of u explored: emit u post-order.
                    to_scan.append(u)
                    stack.pop()
                    in_stack.remove(u)
                    continue
                t = d[u] + weight(u, v, e)
                d_v = d[v]
                if t <= d_v:
                    is_neg = t < d_v
                    d[v] = t
                    pred[v] = u
                    if v not in neg_count:
                        neg_count[v] = neg_count[u] + int(is_neg)
                        stack.append((v, iter(G_succ[v].items())))
                        in_stack.add(v)
                    elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]:
                        # (u, v) is a back edge, and the cycle formed by the
                        # path v to u and (u, v) contains at least one edge of
                        # negative reduced cost. The cycle must be of negative
                        # cost.
                        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
        # Post-order reversed gives a topological order of the DFS forest.
        to_scan.reverse()
        return to_scan

    def relax(to_scan):
        """Relax out-edges of relabeled nodes."""
        relabeled = set()
        # Scan nodes in to_scan in topological order and relax incident
        # out-edges. Add the relabled nodes to labeled.
        for u in to_scan:
            d_u = d[u]
            for v, e in G_succ[u].items():
                w_e = weight(u, v, e)
                if d_u + w_e < d[v]:
                    d[v] = d_u + w_e
                    pred[v] = u
                    relabeled.add(v)
        return relabeled

    # Set of nodes relabled in the last round of scan operations. Denoted by B
    # in Goldberg and Radzik's paper.
    relabeled = {source}
    while relabeled:
        to_scan = topo_sort(relabeled)
        relabeled = relax(to_scan)
    # Restrict distances to nodes actually reached (those with a predecessor).
    d = {u: d[u] for u in pred}
    return pred, d
def negative_edge_cycle(G, weight="weight", heuristic=True):
    """Returns True if there exists a negative edge cycle anywhere in G.

    Parameters
    ----------
    G : NetworkX graph

    weight : string or function
        If this is a string, edge weights are read via the edge attribute
        with this key (the weight of the edge joining `u` to `v` is
        ``G.edges[u, v][weight]``); a missing attribute means weight one.
        If this is a function, it must accept exactly three positional
        arguments — the two endpoints of an edge and the dictionary of
        edge attributes — and return a number.

    heuristic : bool
        Determines whether to use a heuristic to early detect negative
        cycles at a negligible cost. In case of graphs with a negative cycle,
        the performance of detection increases by at least an order of magnitude.

    Returns
    -------
    negative_cycle : bool
        True if a negative edge cycle exists, otherwise False.

    Examples
    --------
    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
    >>> print(nx.negative_edge_cycle(G))
    False
    >>> G[1][2]["weight"] = -7
    >>> print(nx.negative_edge_cycle(G))
    True

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    This algorithm uses bellman_ford_predecessor_and_distance() but finds
    negative cycles on any component by first adding a new node connected to
    every node, and starting bellman_ford_predecessor_and_distance on that
    node. It then removes that extra node.
    """
    # Find a temporary node label that cannot collide with existing nodes.
    probe = -1
    while probe in G:
        probe -= 1
    # Attach the probe to every existing node so every component is reachable.
    # NOTE: the list must be materialized before add_edges_from runs, because
    # adding the probe node mutates G while it is being iterated.
    G.add_edges_from([(probe, n) for n in G])
    try:
        bellman_ford_predecessor_and_distance(
            G, probe, weight=weight, heuristic=heuristic
        )
    except nx.NetworkXUnbounded:
        return True
    finally:
        # Always restore G, whether or not a negative cycle was found.
        G.remove_node(probe)
    return False
def bidirectional_dijkstra(G, source, target, weight="weight"):
r"""Dijkstra's algorithm for shortest paths using bidirectional search.
Parameters
----------
G : NetworkX graph
source : node
Starting node.
target : node
Ending node.
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
length, path : number and list
length is the distance from source to target.
path is a list of nodes on a path from source to target.
Raises
------
NodeNotFound
If either `source` or `target` is not in `G`.
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G = nx.path_graph(5)
>>> length, path = nx.bidirectional_dijkstra(G, 0, 4)
>>> print(length)
4
>>> print(path)
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
In practice bidirectional Dijkstra is much more than twice as fast as
ordinary Dijkstra.
Ordinary Dijkstra expands nodes in a sphere-like manner from the
source. The radius of this sphere will eventually be the length
of the shortest path. Bidirectional Dijkstra will expand nodes
from both the source and the target, making two spheres of half
this radius. Volume of the first sphere is `\pi*r*r` while the
others are `2*\pi*r/2*r/2`, making up half the volume.
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
See Also
--------
shortest_path
shortest_path_length
"""
if source not in G or target not in G:
msg = f"Either source {source} or target {target} is not in G"
raise nx.NodeNotFound(msg)
if source == target:
return (0, [source])
weight = _weight_function(G, weight)
push = heappush
pop = heappop
# Init: [Forward, Backward]
dists = [{}, {}] # dictionary of final distances
paths = [{source: [source]}, {target: [target]}] # dictionary of paths
fringe = [[], []] # heap of (distance, node) for choosing node to expand
seen = [{source: 0}, {target: 0}] # dict of distances to seen nodes
c = count()
# initialize fringe heap
push(fringe[0], (0, next(c), source))
push(fringe[1], (0, next(c), target))
# neighs for extracting correct neighbor information
if G.is_directed():
neighs = [G._succ, G._pred]
else:
neighs = [G._adj, G._adj]
# variables to hold shortest discovered path
# finaldist = 1e30000
finalpath = []
dir = 1
while fringe[0] and fringe[1]:
# choose direction
# dir == 0 is forward direction and dir == 1 is back
dir = 1 - dir
# extract closest to expand
(dist, _, v) = pop(fringe[dir])
if v in dists[dir]:
# Shortest path to v has already been found
continue
# update distance
dists[dir][v] = dist # equal to seen[dir][v]
if v in dists[1 - dir]:
# if we have scanned v in both directions we are done
# we have now discovered the shortest path
return (finaldist, finalpath)
for w, d in neighs[dir][v].items():
if dir == 0: # forward
vwLength = dists[dir][v] + weight(v, w, d)
else: # back, must remember to change v,w->w,v
vwLength = dists[dir][v] + weight(w, v, d)
if w in dists[dir]:
if vwLength < dists[dir][w]:
raise ValueError("Contradictory paths found: negative weights?")
elif w not in seen[dir] or vwLength < seen[dir][w]:
# relaxing
seen[dir][w] = vwLength
push(fringe[dir], (vwLength, next(c), w))
paths[dir][w] = paths[dir][v] + [w]
if w in seen[0] and w in seen[1]:
# see if this path is better than the already
# discovered shortest path
totaldist = seen[0][w] + seen[1][w]
if finalpath == [] or finaldist > totaldist:
finaldist = totaldist
revpath = paths[1][w][:]
revpath.reverse()
finalpath = paths[0][w] + revpath[1:]
raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
def johnson(G, weight="weight"):
r"""Uses Johnson's Algorithm to compute shortest paths.
Johnson's Algorithm finds a shortest path between each pair of
nodes in a weighted graph even if negative weights are present.
Parameters
----------
G : NetworkX graph
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest paths.
Raises
------
NetworkXError
If given graph is not weighted.
Examples
--------
>>> graph = nx.DiGraph()
>>> graph.add_weighted_edges_from(
... [("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)]
... )
>>> paths = nx.johnson(graph, weight="weight")
>>> paths["0"]["2"]
['0', '1', '2']
Notes
-----
Johnson's algorithm is suitable even for graphs with negative weights. It
works by using the Bellman–Ford algorithm to compute a transformation of
the input graph that removes all negative weights, allowing Dijkstra's
algorithm to be used on the transformed graph.
The time complexity of this algorithm is $O(n^2 \log n + n m)$,
where $n$ is the number of nodes and $m$ the number of edges in the
graph. For dense graphs, this may be faster than the Floyd–Warshall
algorithm.
See Also
--------
floyd_warshall_predecessor_and_distance
floyd_warshall_numpy
all_pairs_shortest_path
all_pairs_shortest_path_length
all_pairs_dijkstra_path
bellman_ford_predecessor_and_distance
all_pairs_bellman_ford_path
all_pairs_bellman_ford_path_length
"""
if not nx.is_weighted(G, weight=weight):
raise nx.NetworkXError("Graph is not weighted.")
dist = {v: 0 for v in G}
pred = {v: [] for v in G}
weight = _weight_function(G, weight)
# Calculate distance of shortest paths
dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist)
# Update the weight function to take into account the Bellman--Ford
# relaxation distances.
def new_weight(u, v, d):
return weight(u, v, d) + dist_bellman[u] - dist_bellman[v]
def dist_path(v):
paths = {v: [v]}
_dijkstra(G, v, new_weight, paths=paths)
return paths
return {v: dist_path(v) for v in G}
| 32.018116 | 88 | 0.620445 |
from collections import deque
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors
__all__ = [
"dijkstra_path",
"dijkstra_path_length",
"bidirectional_dijkstra",
"single_source_dijkstra",
"single_source_dijkstra_path",
"single_source_dijkstra_path_length",
"multi_source_dijkstra",
"multi_source_dijkstra_path",
"multi_source_dijkstra_path_length",
"all_pairs_dijkstra",
"all_pairs_dijkstra_path",
"all_pairs_dijkstra_path_length",
"dijkstra_predecessor_and_distance",
"bellman_ford_path",
"bellman_ford_path_length",
"single_source_bellman_ford",
"single_source_bellman_ford_path",
"single_source_bellman_ford_path_length",
"all_pairs_bellman_ford_path",
"all_pairs_bellman_ford_path_length",
"bellman_ford_predecessor_and_distance",
"negative_edge_cycle",
"goldberg_radzik",
"johnson",
]
def _weight_function(G, weight):
if callable(weight):
return weight
if G.is_multigraph():
return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values())
return lambda u, v, data: data.get(weight, 1)
def dijkstra_path(G, source, target, weight="weight"):
(length, path) = single_source_dijkstra(G, source, target=target, weight=weight)
return path
def dijkstra_path_length(G, source, target, weight="weight"):
if source == target:
return 0
weight = _weight_function(G, weight)
length = _dijkstra(G, source, weight, target=target)
try:
return length[target]
except KeyError as e:
raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from e
def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"):
return multi_source_dijkstra_path(G, {source}, cutoff=cutoff, weight=weight)
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"):
return multi_source_dijkstra(
G, {source}, cutoff=cutoff, target=target, weight=weight
)
def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"):
length, path = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight)
return path
def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"):
if not sources:
raise ValueError("sources must not be empty")
weight = _weight_function(G, weight)
return _dijkstra_multisource(G, sources, weight, cutoff=cutoff)
def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"):
if not sources:
raise ValueError("sources must not be empty")
if target in sources:
return (0, [target])
weight = _weight_function(G, weight)
paths = {source: [source] for source in sources}
dist = _dijkstra_multisource(
G, sources, weight, paths=paths, cutoff=cutoff, target=target
)
if target is None:
return (dist, paths)
try:
return (dist[target], paths[target])
except KeyError as e:
raise nx.NetworkXNoPath(f"No path to {target}.") from e
def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None):
return _dijkstra_multisource(
G, [source], weight, pred=pred, paths=paths, cutoff=cutoff, target=target
)
def _dijkstra_multisource(
G, sources, weight, pred=None, paths=None, cutoff=None, target=None
):
G_succ = G._succ if G.is_directed() else G._adj
push = heappush
pop = heappop
dist = {}
seen = {}
c = count()
fringe = []
for source in sources:
if source not in G:
raise nx.NodeNotFound(f"Source {source} not in G")
seen[source] = 0
push(fringe, (0, next(c), source))
while fringe:
(d, _, v) = pop(fringe)
if v in dist:
continue
dist[v] = d
if v == target:
break
for u, e in G_succ[v].items():
cost = weight(v, u, e)
if cost is None:
continue
vu_dist = dist[v] + cost
if cutoff is not None:
if vu_dist > cutoff:
continue
if u in dist:
u_dist = dist[u]
if vu_dist < u_dist:
raise ValueError("Contradictory paths found:", "negative weights?")
elif pred is not None and vu_dist == u_dist:
pred[u].append(v)
elif u not in seen or vu_dist < seen[u]:
seen[u] = vu_dist
push(fringe, (vu_dist, next(c), u))
if paths is not None:
paths[u] = paths[v] + [u]
if pred is not None:
pred[u] = [v]
elif vu_dist == seen[u]:
if pred is not None:
pred[u].append(v)
return dist
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
weight = _weight_function(G, weight)
pred = {source: []}
return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff))
def all_pairs_dijkstra(G, cutoff=None, weight="weight"):
for n in G:
dist, path = single_source_dijkstra(G, n, cutoff=cutoff, weight=weight)
yield (n, (dist, path))
def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"):
length = single_source_dijkstra_path_length
for n in G:
yield (n, length(G, n, cutoff=cutoff, weight=weight))
def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"):
path = single_source_dijkstra_path
for n in G:
yield (n, path(G, n, cutoff=cutoff, weight=weight))
def bellman_ford_predecessor_and_distance(
G, source, target=None, weight="weight", heuristic=False
):
if source not in G:
raise nx.NodeNotFound(f"Node {source} is not found in the graph")
weight = _weight_function(G, weight)
if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
dist = {source: 0}
pred = {source: []}
if len(G) == 1:
return pred, dist
weight = _weight_function(G, weight)
dist = _bellman_ford(
G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
)
return (pred, dist)
def _bellman_ford(
G, source, weight, pred=None, paths=None, dist=None, target=None, heuristic=True
):
for s in source:
if s not in G:
raise nx.NodeNotFound(f"Source {s} not in G")
if pred is None:
pred = {v: [] for v in source}
if dist is None:
dist = {v: 0 for v in source}
nonexistent_edge = (None, None)
pred_edge = {v: None for v in source}
recent_update = {v: nonexistent_edge for v in source}
G_succ = G.succ if G.is_directed() else G.adj
inf = float("inf")
n = len(G)
count = {}
q = deque(source)
in_q = set(source)
while q:
u = q.popleft()
in_q.remove(u)
if all(pred_u not in in_q for pred_u in pred[u]):
dist_u = dist[u]
for v, e in G_succ[u].items():
dist_v = dist_u + weight(u, v, e)
if dist_v < dist.get(v, inf):
if heuristic:
if v in recent_update[u]:
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
if v in pred_edge and pred_edge[v] == u:
recent_update[v] = recent_update[u]
else:
recent_update[v] = (u, v)
if v not in in_q:
q.append(v)
in_q.add(v)
count_v = count.get(v, 0) + 1
if count_v == n:
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
count[v] = count_v
dist[v] = dist_v
pred[v] = [u]
pred_edge[v] = u
elif dist.get(v) is not None and dist_v == dist.get(v):
pred[v].append(u)
if paths is not None:
sources = set(source)
dsts = [target] if target is not None else pred
for dst in dsts:
gen = _build_paths_from_predecessors(sources, dst, pred)
paths[dst] = next(gen)
return dist
def bellman_ford_path(G, source, target, weight="weight"):
length, path = single_source_bellman_ford(G, source, target=target, weight=weight)
return path
def bellman_ford_path_length(G, source, target, weight="weight"):
if source == target:
return 0
weight = _weight_function(G, weight)
length = _bellman_ford(G, [source], weight, target=target)
try:
return length[target]
except KeyError as e:
raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from e
def single_source_bellman_ford_path(G, source, weight="weight"):
(length, path) = single_source_bellman_ford(G, source, weight=weight)
return path
def single_source_bellman_ford_path_length(G, source, weight="weight"):
weight = _weight_function(G, weight)
return _bellman_ford(G, [source], weight)
def single_source_bellman_ford(G, source, target=None, weight="weight"):
if source == target:
return (0, [source])
weight = _weight_function(G, weight)
paths = {source: [source]}
dist = _bellman_ford(G, [source], weight, paths=paths, target=target)
if target is None:
return (dist, paths)
try:
return (dist[target], paths[target])
except KeyError as e:
msg = f"Node {target} not reachable from {source}"
raise nx.NetworkXNoPath(msg) from e
def all_pairs_bellman_ford_path_length(G, weight="weight"):
length = single_source_bellman_ford_path_length
for n in G:
yield (n, dict(length(G, n, weight=weight)))
def all_pairs_bellman_ford_path(G, weight="weight"):
path = single_source_bellman_ford_path
for n in G:
yield (n, path(G, n, weight=weight))
def goldberg_radzik(G, source, weight="weight"):
if source not in G:
raise nx.NodeNotFound(f"Node {source} is not found in the graph")
weight = _weight_function(G, weight)
if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
if len(G) == 1:
return {source: None}, {source: 0}
if G.is_directed():
G_succ = G.succ
else:
G_succ = G.adj
inf = float("inf")
d = {u: inf for u in G}
d[source] = 0
pred = {source: None}
def topo_sort(relabeled):
to_scan = []
# In the DFS in the loop below, neg_count records for each node the
# number of edges of negative reduced costs on the path from a DFS root
# to the node in the DFS forest. The reduced cost of an edge (u, v) is
# defined as d[u] + weight[u][v] - d[v].
#
# neg_count also doubles as the DFS visit marker array.
neg_count = {}
for u in relabeled:
# Skip visited nodes.
if u in neg_count:
continue
d_u = d[u]
# Skip nodes without out-edges of negative reduced costs.
if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()):
continue
# Nonrecursive DFS that inserts nodes reachable from u via edges of
# nonpositive reduced costs into to_scan in (reverse) topological
# order.
stack = [(u, iter(G_succ[u].items()))]
in_stack = {u}
neg_count[u] = 0
while stack:
u, it = stack[-1]
try:
v, e = next(it)
except StopIteration:
to_scan.append(u)
stack.pop()
in_stack.remove(u)
continue
t = d[u] + weight(u, v, e)
d_v = d[v]
if t <= d_v:
is_neg = t < d_v
d[v] = t
pred[v] = u
if v not in neg_count:
neg_count[v] = neg_count[u] + int(is_neg)
stack.append((v, iter(G_succ[v].items())))
in_stack.add(v)
elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]:
# (u, v) is a back edge, and the cycle formed by the
# path v to u and (u, v) contains at least one edge of
# negative reduced cost. The cycle must be of negative
# cost.
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
to_scan.reverse()
return to_scan
def relax(to_scan):
relabeled = set()
# Scan nodes in to_scan in topological order and relax incident
# out-edges. Add the relabled nodes to labeled.
for u in to_scan:
d_u = d[u]
for v, e in G_succ[u].items():
w_e = weight(u, v, e)
if d_u + w_e < d[v]:
d[v] = d_u + w_e
pred[v] = u
relabeled.add(v)
return relabeled
# Set of nodes relabled in the last round of scan operations. Denoted by B
# in Goldberg and Radzik's paper.
relabeled = {source}
while relabeled:
to_scan = topo_sort(relabeled)
relabeled = relax(to_scan)
d = {u: d[u] for u in pred}
return pred, d
def negative_edge_cycle(G, weight="weight", heuristic=True):
newnode = -1
while newnode in G:
newnode -= 1
G.add_edges_from([(newnode, n) for n in G])
try:
bellman_ford_predecessor_and_distance(
G, newnode, weight=weight, heuristic=heuristic
)
except nx.NetworkXUnbounded:
return True
finally:
G.remove_node(newnode)
return False
def bidirectional_dijkstra(G, source, target, weight="weight"):
if source not in G or target not in G:
msg = f"Either source {source} or target {target} is not in G"
raise nx.NodeNotFound(msg)
if source == target:
return (0, [source])
weight = _weight_function(G, weight)
push = heappush
pop = heappop
dists = [{}, {}]
paths = [{source: [source]}, {target: [target]}]
fringe = [[], []]
seen = [{source: 0}, {target: 0}]
c = count()
push(fringe[0], (0, next(c), source))
push(fringe[1], (0, next(c), target))
if G.is_directed():
neighs = [G._succ, G._pred]
else:
neighs = [G._adj, G._adj]
finalpath = []
dir = 1
while fringe[0] and fringe[1]:
dir = 1 - dir
(dist, _, v) = pop(fringe[dir])
if v in dists[dir]:
continue
dists[dir][v] = dist
if v in dists[1 - dir]:
return (finaldist, finalpath)
for w, d in neighs[dir][v].items():
if dir == 0:
vwLength = dists[dir][v] + weight(v, w, d)
else:
vwLength = dists[dir][v] + weight(w, v, d)
if w in dists[dir]:
if vwLength < dists[dir][w]:
raise ValueError("Contradictory paths found: negative weights?")
elif w not in seen[dir] or vwLength < seen[dir][w]:
seen[dir][w] = vwLength
push(fringe[dir], (vwLength, next(c), w))
paths[dir][w] = paths[dir][v] + [w]
if w in seen[0] and w in seen[1]:
totaldist = seen[0][w] + seen[1][w]
if finalpath == [] or finaldist > totaldist:
finaldist = totaldist
revpath = paths[1][w][:]
revpath.reverse()
finalpath = paths[0][w] + revpath[1:]
raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
def johnson(G, weight="weight"):
if not nx.is_weighted(G, weight=weight):
raise nx.NetworkXError("Graph is not weighted.")
dist = {v: 0 for v in G}
pred = {v: [] for v in G}
weight = _weight_function(G, weight)
dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist)
def new_weight(u, v, d):
return weight(u, v, d) + dist_bellman[u] - dist_bellman[v]
def dist_path(v):
paths = {v: [v]}
_dijkstra(G, v, new_weight, paths=paths)
return paths
return {v: dist_path(v) for v in G}
| true | true |
f721a2657cff9163e52336d5c42f2f8b73f6cf7e | 383 | py | Python | configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | [
"Apache-2.0"
] | null | null | null | configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | [
"Apache-2.0"
] | null | null | null | configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | [
"Apache-2.0"
] | null | null | null | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'))
| 27.357143 | 54 | 0.563969 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'))
| true | true |
f721a43045ff008abbe0323da19119831a8f5c4e | 4,415 | py | Python | howtolens/simulators/chapter_4/mass_sie__source_sersic__2.py | rakaar/PyAutoLens | bc140c5d196c426092c1178b8abfa492c6fab859 | [
"MIT"
] | null | null | null | howtolens/simulators/chapter_4/mass_sie__source_sersic__2.py | rakaar/PyAutoLens | bc140c5d196c426092c1178b8abfa492c6fab859 | [
"MIT"
] | null | null | null | howtolens/simulators/chapter_4/mass_sie__source_sersic__2.py | rakaar/PyAutoLens | bc140c5d196c426092c1178b8abfa492c6fab859 | [
"MIT"
] | null | null | null | from os import path
import autolens as al
"""
This script simulates `Imaging` of a strong lens where:
- The lens `Galaxy`'s total mass distribution is a *SphericalIsothermal*.
- The source `Galaxy`'s `LightProfile` is a *SphericalExponential*.
This dataset is used in chapter 2, tutorials 1-3.
"""
"""
The `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_name`
gives it a descriptive name. They define the folder the dataset is output to on your hard-disk:
- The image will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/image.fits`.
- The noise-map will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/lens_name/noise_map.fits`.
- The psf will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/psf.fits`.
"""
dataset_type = "chapter_4"
dataset_name = "mass_sie__source_sersic__2"
"""
The path where the dataset will be output, which in this case is:
`/autolens_workspace/howtolens/dataset/chapter_2/mass_sis__source_exp/`
"""
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
"""
For simulating an image of a strong lens, we recommend using a GridIterate object. This represents a grid of $(y,x)$
coordinates like an ordinary Grid, but when the light-profile`s image is evaluated below (using the Tracer) the
sub-size of the grid is iteratively increased (in steps of 2, 4, 8, 16, 24) until the input fractional accuracy of
99.99% is met.
This ensures that the divergent and bright central regions of the source galaxy are fully resolved when determining the
total flux emitted within a pixel.
"""
grid = al.GridIterate.uniform(
shape_2d=(150, 150),
pixel_scales=0.05,
fractional_accuracy=0.9999,
sub_steps=[2, 4, 8, 16, 24],
)
"""Simulate a simple Gaussian PSF for the image."""
psf = al.Kernel.from_gaussian(
shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
"""
To simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,
noise levels and psf of the dataset that is simulated.
"""
simulator = al.SimulatorImaging(
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
"""
Setup the lens `Galaxy`'s mass (SIE+Shear) and source galaxy light (elliptical Sersic) for this simulated lens.
For lens modeling, defining ellipticity in terms of the `elliptical_comps` improves the model-fitting procedure.
However, for simulating a strong lens you may find it more intuitive to define the elliptical geometry using the
axis-ratio of the profile (axis_ratio = semi-major axis / semi-minor axis = b/a) and position angle phi, where phi is
in degrees and defined counter clockwise from the positive x-axis.
We can use the **PyAutoLens** `convert` module to determine the elliptical components from the axis-ratio and phi.
"""
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllipticalSersic(
centre=(0.1, 0.1),
elliptical_comps=(0.1, 0.0),
intensity=0.2,
effective_radius=0.3,
sersic_index=1.0,
),
)
"""Use these galaxies to setup a tracer, which will generate the image for the simulated `Imaging` dataset."""
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
"""
We can now pass this simulator a tracer, which creates the ray-traced image plotted above and simulates it as an
imaging dataset.
"""
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
"""Output our simulated dataset to the dataset path as .fits files"""
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
"""
Pickle the `Tracer` in the dataset folder, ensuring the true `Tracer` is safely stored and available if we need to
check how the dataset was simulated in the future.
This will also be accessible via the `Aggregator` if a model-fit is performed using the dataset.
"""
tracer.save(file_path=dataset_path, filename="true_tracer")
| 39.070796 | 120 | 0.727973 | from os import path
import autolens as al
dataset_type = "chapter_4"
dataset_name = "mass_sie__source_sersic__2"
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
grid = al.GridIterate.uniform(
shape_2d=(150, 150),
pixel_scales=0.05,
fractional_accuracy=0.9999,
sub_steps=[2, 4, 8, 16, 24],
)
psf = al.Kernel.from_gaussian(
shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
simulator = al.SimulatorImaging(
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllipticalSersic(
centre=(0.1, 0.1),
elliptical_comps=(0.1, 0.0),
intensity=0.2,
effective_radius=0.3,
sersic_index=1.0,
),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
tracer.save(file_path=dataset_path, filename="true_tracer")
| true | true |
f721a452377d10ba2fe32cd315a6bdce392c234d | 594 | py | Python | hc/accounts/tests/test_team_access_middleware.py | andela/-healthchecks_spartans | 4dd6480fc178996c0e386548816ca8c74e4af50d | [
"BSD-3-Clause"
] | null | null | null | hc/accounts/tests/test_team_access_middleware.py | andela/-healthchecks_spartans | 4dd6480fc178996c0e386548816ca8c74e4af50d | [
"BSD-3-Clause"
] | null | null | null | hc/accounts/tests/test_team_access_middleware.py | andela/-healthchecks_spartans | 4dd6480fc178996c0e386548816ca8c74e4af50d | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import User
from django.test import TestCase
from hc.accounts.models import Profile
class TeamAccessMiddlewareTestCase(TestCase):
def test_it_handles_missing_profile(self):
user = User(username="ned", email="ned@example.org")
user.set_password("password")
user.save()
self.client.login(username="ned@example.org", password="password")
r = self.client.get("/about/")
self.assertEqual(r.status_code, 200)
### Assert the new Profile objects count
self.assertEqual(Profile.objects.count(), 1)
| 31.263158 | 74 | 0.695286 | from django.contrib.auth.models import User
from django.test import TestCase
from hc.accounts.models import Profile
class TeamAccessMiddlewareTestCase(TestCase):
def test_it_handles_missing_profile(self):
user = User(username="ned", email="ned@example.org")
user.set_password("password")
user.save()
self.client.login(username="ned@example.org", password="password")
r = self.client.get("/about/")
self.assertEqual(r.status_code, 200)
| true | true |
f721a4751de1cbbd852750a103606b9e45275fbe | 2,081 | py | Python | pysptools/skl/docstring.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 35 | 2016-03-20T15:25:07.000Z | 2022-03-29T04:05:56.000Z | pysptools/skl/docstring.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 12 | 2016-03-24T13:38:52.000Z | 2021-04-06T07:11:19.000Z | pysptools/skl/docstring.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 14 | 2016-03-21T17:26:46.000Z | 2022-01-18T08:39:27.000Z | #
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# docstring.py - This file is part of the PySptools package.
#
plot_fi_docstring = """
Plot the feature importances.
The output can be split in n graphs.
Parameters:
path: `string`
The path where to save the plot.
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
display_fi_docstring = """
Display the feature importances.
The output can be split in n graphs.
Parameters:
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
| 32.515625 | 79 | 0.566555 |
plot_fi_docstring = """
Plot the feature importances.
The output can be split in n graphs.
Parameters:
path: `string`
The path where to save the plot.
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
display_fi_docstring = """
Display the feature importances.
The output can be split in n graphs.
Parameters:
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
| true | true |
f721a4c6a5e4336c0f4cb7515b1636b493ef02d6 | 6,182 | py | Python | tools/pysa_integration_tests/utils.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | 1 | 2022-02-10T10:51:32.000Z | 2022-02-10T10:51:32.000Z | tools/pysa_integration_tests/utils.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | tools/pysa_integration_tests/utils.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import final, Sequence, Optional
LOG: logging.Logger = logging.getLogger(__name__)
@final
class PyreErrorException(Exception):
    """Raised when Pyre reports errors instead of analysis results."""
def normalized_json_dump(
    results: str, salient_keys_only: bool, filter_issues: bool
) -> str:
    """Produce a canonical, sorted JSON string from Pysa *results*.

    When *filter_issues* is set, only issues raised inside a matching
    ``test_<code>_`` define are kept, so unrelated rules cannot break a
    test. When *salient_keys_only* is set, position-specific keys are
    dropped so minor code moves do not change the output.

    Raises PyreErrorException when the payload reports Pyre errors.
    """
    issues = json.loads(results)

    if "errors" in issues:
        raise PyreErrorException(
            "Errors were found when processing results:\n"
            + json.dumps(issues, sort_keys=True, indent=2)
        )

    if filter_issues:
        # Keep only issues intentionally produced by the test definitions.
        issues = [
            issue for issue in issues if f"test_{issue['code']}_" in issue["define"]
        ]

    issues = sorted(
        issues, key=lambda i: (i["code"], i["path"], i["line"], i["column"])
    )

    if salient_keys_only:
        wanted = {"code", "define", "description", "path"}
        slimmed = []
        for issue in issues:
            subset = {k: v for k, v in issue.items() if k in wanted}
            if set(subset) != wanted:
                raise KeyError(
                    f"Expected issue to contain {wanted} keys, "
                    + f"but instead found: {issue}"
                )
            slimmed.append(subset)
        issues = slimmed

    return json.dumps(issues, sort_keys=True, indent=2) + "\n"
def compare_results(
    actual_results: str,
    expected_results: str,
    current_directory: Path,
    filter_issues: bool,
) -> None:
    """Compare normalized Pysa output against the expected results.

    On mismatch, writes ``*.actual`` artifacts into *current_directory*,
    prints a unified diff of the position-invariant results, and exits the
    process via ``friendly_exit`` (status 1). On match, only logs success.
    """
    normalized_pysa_results = normalized_json_dump(
        actual_results, salient_keys_only=True, filter_issues=filter_issues
    )
    normalized_expected_results = normalized_json_dump(
        expected_results, salient_keys_only=True, filter_issues=filter_issues
    )
    if normalized_pysa_results != normalized_expected_results:
        # Keep a position-annotated copy of the actual output for debugging.
        actual_full_results_path = current_directory / "full_result.actual"
        actual_full_results_path.write_text(
            normalized_json_dump(
                actual_results, salient_keys_only=False, filter_issues=filter_issues
            )
        )
        actual_invariant_results_path = (
            current_directory / "position_invariant_result.actual"
        )
        actual_invariant_results_path.write_text(normalized_pysa_results)

        # Write the normalized expected results so diff compares like with like.
        expected_invariant_results_path = (
            current_directory / "position_invariant_result.json"
        )
        expected_invariant_results_path.write_text(normalized_expected_results)

        result = subprocess.run(
            [
                "diff",
                "-u",
                expected_invariant_results_path,
                actual_invariant_results_path,
            ],
            text=True,
            stdout=subprocess.PIPE,
        )
        # friendly_exit terminates the process; nothing below it runs.
        friendly_exit(
            "Output differs from expected:",
            result.stdout,
            "output-differs-from-expected",
        )
    else:
        LOG.info("Run produced expected results")
def friendly_exit(error_message: str, logs: str, suggested_hash: str) -> None:
    """Log a framed Pysa integration-test error block and exit with status 1."""
    for line in (
        "----BEGIN PYSA INTEGRATION TEST ERROR----",
        error_message,
        logs,
        "----END PYSA INTEGRATION TEST ERROR----",
    ):
        LOG.error(line)
    sys.exit(1)
def run_pysa_integration_test(
    current_directory: Path,
    passthrough_args: Sequence[str],
    skip_model_verification: bool,
    filter_issues: bool,
    save_results_to: Optional[Path],
    run_from_source: bool = False,
) -> None:
    """
    Runs pysa and compares the output to that in full_results.json. Creates
    raw_results.json file that contains the output. Creates
    position_invariant_result.json that contains position information to
    compare using diff with position_invariant_result.actual before exiting if
    there is a mismatch between the specified and detected issues.
    """
    LOG.info("Running `pyre analyze`")
    if run_from_source:
        # Run the in-repo client instead of an installed `pyre` binary.
        command = [
            "python",
            # Implicit string concatenation: this is one "-mpyre-check.client.pyre" arg.
            "-m" "pyre-check.client.pyre",
        ]
    else:
        command = ["pyre"]

    command.extend(["--noninteractive", "analyze"])

    if save_results_to is not None:
        command.extend(["--save-results-to", str(save_results_to)])

    if skip_model_verification:
        command.append("--no-verify")

    command.extend(passthrough_args)
    LOG.debug(f"Using command: {command}")

    pysa_results: str

    try:
        pysa_results = subprocess.check_output(
            command, text=True, cwd=current_directory
        )
        if save_results_to is not None:
            # With --save-results-to, the authoritative issue list is the
            # errors.json Pysa writes to disk, not stdout.
            pysa_results = (save_results_to / "errors.json").read_text()
    except subprocess.CalledProcessError as exception:
        # friendly_exit terminates the process with status 1.
        friendly_exit(
            "Command failed with output:",
            exception.stdout,
            "found-x-model-verification-error",
        )

    (current_directory / "raw_results.json").write_text(pysa_results)

    expected_results = (current_directory / "full_result.json").read_text()
    compare_results(pysa_results, expected_results, current_directory, filter_issues)
| 32.197917 | 88 | 0.651084 |
from __future__ import annotations
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import final, Sequence, Optional
LOG: logging.Logger = logging.getLogger(__name__)
@final
class PyreErrorException(Exception):
    """Custom exception raised when Pyre errors out instead of producing results."""

    pass
def normalized_json_dump(
    results: str, salient_keys_only: bool, filter_issues: bool
) -> str:
    """Return a normalized, sorted JSON string built from *results*.

    Drops non-salient keys when *salient_keys_only* is true; keeps only
    issues raised inside matching ``test_<code>_`` defines when
    *filter_issues* is true. Raises PyreErrorException on Pyre errors.
    """
    normalized = json.loads(results)
    if "errors" in normalized:
        pretty_error = json.dumps(normalized, sort_keys=True, indent=2)
        raise PyreErrorException(
            f"Errors were found when processing results:\n{pretty_error}"
        )

    if filter_issues:
        # Only keep issues deliberately produced by the test definitions;
        # prevents new rules / false positives from breaking existing tests.
        normalized = [
            issue for issue in normalized if f"test_{issue['code']}_" in issue["define"]
        ]

    normalized = sorted(
        normalized,
        key=lambda issue: (
            issue["code"],
            issue["path"],
            issue["line"],
            issue["column"],
        ),
    )

    if salient_keys_only:
        # Strip position-dependent keys so small code moves don't churn output.
        salient_keys = {"code", "define", "description", "path"}
        stripped_issues = []
        for issue in normalized:
            stripped_issue = {
                key: value for key, value in issue.items() if key in salient_keys
            }
            if set(stripped_issue.keys()) != salient_keys:
                raise KeyError(
                    f"Expected issue to contain {salient_keys} keys, "
                    + f"but instead found: {issue}"
                )
            stripped_issues.append(stripped_issue)
        normalized = stripped_issues

    return json.dumps(normalized, sort_keys=True, indent=2) + "\n"
def compare_results(
    actual_results: str,
    expected_results: str,
    current_directory: Path,
    filter_issues: bool,
) -> None:
    """Compare actual vs expected Pysa results; on mismatch, dump artifacts,
    diff them, and terminate the process via ``friendly_exit``."""
    normalized_pysa_results = normalized_json_dump(
        actual_results, salient_keys_only=True, filter_issues=filter_issues
    )
    normalized_expected_results = normalized_json_dump(
        expected_results, salient_keys_only=True, filter_issues=filter_issues
    )
    if normalized_pysa_results != normalized_expected_results:
        # Full (position-annotated) actual output, for debugging.
        actual_full_results_path = current_directory / "full_result.actual"
        actual_full_results_path.write_text(
            normalized_json_dump(
                actual_results, salient_keys_only=False, filter_issues=filter_issues
            )
        )
        actual_invariant_results_path = (
            current_directory / "position_invariant_result.actual"
        )
        actual_invariant_results_path.write_text(normalized_pysa_results)

        expected_invariant_results_path = (
            current_directory / "position_invariant_result.json"
        )
        expected_invariant_results_path.write_text(normalized_expected_results)

        # Produce a human-readable unified diff of the two normalized files.
        result = subprocess.run(
            [
                "diff",
                "-u",
                expected_invariant_results_path,
                actual_invariant_results_path,
            ],
            text=True,
            stdout=subprocess.PIPE,
        )
        # friendly_exit calls sys.exit(1); control does not return.
        friendly_exit(
            "Output differs from expected:",
            result.stdout,
            "output-differs-from-expected",
        )
    else:
        LOG.info("Run produced expected results")
def friendly_exit(error_message: str, logs: str, suggested_hash: str) -> None:
    """Log a framed integration-test error and exit the process with status 1."""
    LOG.error("----BEGIN PYSA INTEGRATION TEST ERROR----")
    LOG.error(error_message)
    LOG.error(logs)
    LOG.error("----END PYSA INTEGRATION TEST ERROR----")
    sys.exit(1)
def run_pysa_integration_test(
    current_directory: Path,
    passthrough_args: Sequence[str],
    skip_model_verification: bool,
    filter_issues: bool,
    save_results_to: Optional[Path],
    run_from_source: bool = False,
) -> None:
    """Run `pyre analyze`, record raw_results.json, and compare against
    full_result.json, exiting non-zero on mismatch or analysis failure."""
    LOG.info("Running `pyre analyze`")
    if run_from_source:
        # Run the in-repo client; note the implicit string concatenation
        # below produces a single "-mpyre-check.client.pyre" argument.
        command = [
            "python",
            "-m" "pyre-check.client.pyre",
        ]
    else:
        command = ["pyre"]

    command.extend(["--noninteractive", "analyze"])

    if save_results_to is not None:
        command.extend(["--save-results-to", str(save_results_to)])

    if skip_model_verification:
        command.append("--no-verify")

    command.extend(passthrough_args)
    LOG.debug(f"Using command: {command}")

    pysa_results: str

    try:
        pysa_results = subprocess.check_output(
            command, text=True, cwd=current_directory
        )
        if save_results_to is not None:
            # Prefer the on-disk errors.json over stdout when saving results.
            pysa_results = (save_results_to / "errors.json").read_text()
    except subprocess.CalledProcessError as exception:
        # friendly_exit terminates the process with status 1.
        friendly_exit(
            "Command failed with output:",
            exception.stdout,
            "found-x-model-verification-error",
        )

    (current_directory / "raw_results.json").write_text(pysa_results)

    expected_results = (current_directory / "full_result.json").read_text()
    compare_results(pysa_results, expected_results, current_directory, filter_issues)
| true | true |
f721a628ef8e42f4b26f07888d6e70148b933809 | 4,668 | py | Python | homeassistant/components/velux/cover.py | orcema/core | ce144bf63145813c76fbbe4f9423341764695057 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/velux/cover.py | orcema/core | ce144bf63145813c76fbbe4f9423341764695057 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/velux/cover.py | orcema/core | ce144bf63145813c76fbbe4f9423341764695057 | [
"Apache-2.0"
] | null | null | null | """Support for Velux covers."""
from __future__ import annotations
from typing import Any
from pyvlx import OpeningDevice, Position
from pyvlx.opening_device import Awning, Blind, GarageDoor, Gate, RollerShutter, Window
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DATA_VELUX, VeluxEntity
PARALLEL_UPDATES = 1
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up cover(s) for Velux platform."""
    covers = [
        VeluxCover(node)
        for node in hass.data[DATA_VELUX].pyvlx.nodes
        if isinstance(node, OpeningDevice)
    ]
    async_add_entities(covers)
class VeluxCover(VeluxEntity, CoverEntity):
    """Representation of a Velux cover."""

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        features = (
            CoverEntityFeature.OPEN
            | CoverEntityFeature.CLOSE
            | CoverEntityFeature.SET_POSITION
            | CoverEntityFeature.STOP
        )
        if self.current_cover_tilt_position is None:
            return features
        # Blinds additionally support orientation (tilt) control.
        return features | (
            CoverEntityFeature.OPEN_TILT
            | CoverEntityFeature.CLOSE_TILT
            | CoverEntityFeature.SET_TILT_POSITION
            | CoverEntityFeature.STOP_TILT
        )

    @property
    def current_cover_position(self) -> int:
        """Return the current position of the cover."""
        # pyvlx reports 0 as fully open; Home Assistant expects the inverse.
        return 100 - self.node.position.position_percent

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return the current tilt position, or None for non-blind covers."""
        if not isinstance(self.node, Blind):
            return None
        return 100 - self.node.orientation.position_percent

    @property
    def device_class(self) -> CoverDeviceClass:
        """Define this cover as either awning, blind, garage, gate, shutter or window."""
        # Checked in order; falls back to WINDOW for unknown node types.
        for node_type, device_class in (
            (Awning, CoverDeviceClass.AWNING),
            (Blind, CoverDeviceClass.BLIND),
            (GarageDoor, CoverDeviceClass.GARAGE),
            (Gate, CoverDeviceClass.GATE),
            (RollerShutter, CoverDeviceClass.SHUTTER),
            (Window, CoverDeviceClass.WINDOW),
        ):
            if isinstance(self.node, node_type):
                return device_class
        return CoverDeviceClass.WINDOW

    @property
    def is_closed(self) -> bool:
        """Return if the cover is closed."""
        return self.node.position.closed

    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        await self.node.close(wait_for_completion=False)

    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        await self.node.open(wait_for_completion=False)

    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover to a specific position."""
        target = Position(position_percent=100 - kwargs[ATTR_POSITION])
        await self.node.set_position(target, wait_for_completion=False)

    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        await self.node.stop(wait_for_completion=False)

    async def async_close_cover_tilt(self, **kwargs: Any) -> None:
        """Close cover tilt."""
        await self.node.close_orientation(wait_for_completion=False)

    async def async_open_cover_tilt(self, **kwargs: Any) -> None:
        """Open cover tilt."""
        await self.node.open_orientation(wait_for_completion=False)

    async def async_stop_cover_tilt(self, **kwargs: Any) -> None:
        """Stop cover tilt."""
        await self.node.stop_orientation(wait_for_completion=False)

    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:
        """Move cover tilt to a specific position."""
        await self.node.set_orientation(
            orientation=Position(position_percent=100 - kwargs[ATTR_TILT_POSITION]),
            wait_for_completion=False,
        )
| 35.097744 | 89 | 0.673522 | from __future__ import annotations
from typing import Any
from pyvlx import OpeningDevice, Position
from pyvlx.opening_device import Awning, Blind, GarageDoor, Gate, RollerShutter, Window
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DATA_VELUX, VeluxEntity
PARALLEL_UPDATES = 1
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up cover entities for every Velux opening device."""
    entities = []
    for node in hass.data[DATA_VELUX].pyvlx.nodes:
        if isinstance(node, OpeningDevice):
            entities.append(VeluxCover(node))
    async_add_entities(entities)
class VeluxCover(VeluxEntity, CoverEntity):
    """Representation of a Velux cover."""

    @property
    def supported_features(self) -> int:
        """Flag supported features; tilt features only for blinds."""
        supported_features = (
            CoverEntityFeature.OPEN
            | CoverEntityFeature.CLOSE
            | CoverEntityFeature.SET_POSITION
            | CoverEntityFeature.STOP
        )
        if self.current_cover_tilt_position is not None:
            supported_features |= (
                CoverEntityFeature.OPEN_TILT
                | CoverEntityFeature.CLOSE_TILT
                | CoverEntityFeature.SET_TILT_POSITION
                | CoverEntityFeature.STOP_TILT
            )
        return supported_features

    @property
    def current_cover_position(self) -> int:
        """Return the current position of the cover (HA scale, inverted)."""
        return 100 - self.node.position.position_percent

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return the current tilt position; None unless the node is a Blind."""
        if isinstance(self.node, Blind):
            return 100 - self.node.orientation.position_percent
        return None

    @property
    def device_class(self) -> CoverDeviceClass:
        """Map the pyvlx node type to a Home Assistant cover device class."""
        if isinstance(self.node, Awning):
            return CoverDeviceClass.AWNING
        if isinstance(self.node, Blind):
            return CoverDeviceClass.BLIND
        if isinstance(self.node, GarageDoor):
            return CoverDeviceClass.GARAGE
        if isinstance(self.node, Gate):
            return CoverDeviceClass.GATE
        if isinstance(self.node, RollerShutter):
            return CoverDeviceClass.SHUTTER
        if isinstance(self.node, Window):
            return CoverDeviceClass.WINDOW
        # Fallback for unknown node types.
        return CoverDeviceClass.WINDOW

    @property
    def is_closed(self) -> bool:
        """Return if the cover is closed."""
        return self.node.position.closed

    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        await self.node.close(wait_for_completion=False)

    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        await self.node.open(wait_for_completion=False)

    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover to a specific position."""
        position_percent = 100 - kwargs[ATTR_POSITION]

        await self.node.set_position(
            Position(position_percent=position_percent), wait_for_completion=False
        )

    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        await self.node.stop(wait_for_completion=False)

    async def async_close_cover_tilt(self, **kwargs: Any) -> None:
        """Close the cover tilt."""
        await self.node.close_orientation(wait_for_completion=False)

    async def async_open_cover_tilt(self, **kwargs: Any) -> None:
        """Open the cover tilt."""
        await self.node.open_orientation(wait_for_completion=False)

    async def async_stop_cover_tilt(self, **kwargs: Any) -> None:
        """Stop the cover tilt."""
        await self.node.stop_orientation(wait_for_completion=False)

    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:
        """Move the cover tilt to a specific position."""
        position_percent = 100 - kwargs[ATTR_TILT_POSITION]
        orientation = Position(position_percent=position_percent)
        await self.node.set_orientation(
            orientation=orientation, wait_for_completion=False
        )
| true | true |
f721a6360f0511e109d096adce51015e18e66e23 | 5,135 | py | Python | Scripts/Slicer.py | rhong3/GBM | 088b1e99f4fe02395b62d324ec4f9e8402417651 | [
"MIT"
] | null | null | null | Scripts/Slicer.py | rhong3/GBM | 088b1e99f4fe02395b62d324ec4f9e8402417651 | [
"MIT"
] | null | null | null | Scripts/Slicer.py | rhong3/GBM | 088b1e99f4fe02395b62d324ec4f9e8402417651 | [
"MIT"
] | null | null | null | """
Tile real scn/svs files; used by Cutter.py
Created on 11/19/2018
*** Removed imlist storage to minimize memory usage 01/24/2019 ***
@author: RH
"""
from openslide import OpenSlide
import numpy as np
import pandas as pd
import multiprocessing as mp
import staintools
from PIL import Image
# check if a tile is background or not; return a blank pixel percentage score
def bgcheck(img, ts):
    """Score how much of a tile is background.

    The score is the fraction of pixels that are near-white (>200 in all
    channels) or near-black (<50 in all channels), plus 1.0 when the tile
    is low-contrast "grey" in every color channel.

    :param img: tile image (PIL image or RGB(A) array-like)
    :param ts: tile edge length in pixels
    :return: background score; higher means more blank/background
    """
    pixels = np.nan_to_num(np.array(img)[:, :, :3])
    bright = (pixels > 200).astype(np.uint8)
    dark = (pixels < 50).astype(np.uint8)
    # Fix: the grey test must use per-channel dynamic range. The original
    # `the_imagea[0]` indexed image *rows* 0-2, not the three color channels.
    grey = int(
        np.ptp(pixels[:, :, 0]) < 100
        and np.ptp(pixels[:, :, 1]) < 100
        and np.ptp(pixels[:, :, 2]) < 100
    )
    bright_all = bright[:, :, 0] * bright[:, :, 1] * bright[:, :, 2]
    dark_all = dark[:, :, 0] * dark[:, :, 1] * dark[:, :, 2]
    white = (np.sum(bright_all) + np.sum(dark_all)) / (ts * ts) + grey
    return white
# Tile color normalization
def normalization(img, sttd):
    """Stain-normalize a tile to the reference image *sttd* (vahadane method)."""
    tile_arr = staintools.LuminosityStandardizer.standardize(
        np.array(img)[:, :, :3]
    )
    normalizer = staintools.StainNormalizer(method='vahadane')
    normalizer.fit(sttd)
    normalized = normalizer.transform(tile_arr)
    return Image.fromarray(normalized.astype('uint8'), 'RGB')
# tile method; slp is the scn/svs image; n_y is the number of tiles can be cut on y column to be cut;
# x and y are the upper left position of each tile; tile_size is tile size; stepsize of each step; x0 is the row to cut.
# outdir is the output directory for images;
# imloc record each tile's relative and absolute coordinates; imlist is a list of cut tiles (Removed 01/24/2019).
def v_slide(slp, n_y, x, y, tile_size, stepsize, x0, outdir, level, dp, std):
    """Cut one column (column index x0) of tiles from the slide.

    Re-opens the slide from its path so the function is safe to run in a
    worker process. Returns [col, row, abs_x, abs_y, file_path] per kept tile.
    """
    # pid = os.getpid()
    # print('{}: start working'.format(pid))
    slide = OpenSlide(slp)
    imloc = []
    y0 = 0
    target_x = x0 * stepsize
    # Absolute coordinates for read_region; assumes a 4x downsample per
    # pyramid level — TODO confirm this holds for all scn/svs inputs.
    image_x = (target_x + x)*(4**level)
    while y0 < n_y:
        target_y = y0 * stepsize
        image_y = (target_y + y)*(4**level)
        img = slide.read_region((image_x, image_y), level, (tile_size, tile_size))
        wscore = bgcheck(img, tile_size)
        # Keep tiles that are mostly tissue: not blank, not mostly background.
        if 0.01 < wscore < 0.4:
            img = img.resize((299, 299))
            img = normalization(img, std)
            if dp:
                img.save(outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp)))
                strr = outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp))
            else:
                img.save(outdir + "/region_x-{}-y-{}.png".format(image_x, image_y))
                strr = outdir + "/region_x-{}-y-{}.png".format(image_x, image_y)
            imloc.append([x0, y0, image_x, image_y, strr])
        y0 += 1
    slide.close()
    return imloc
# image_file is the scn/svs name; outdir is the output directory; path_to_slide is where the scn/svs stored.
# First open the slide, determine how many tiles can be cut, record the residue edges width,
# and calculate the final output prediction heat map size should be. Then, using multithread to cut tiles, and stack up
# tiles and their position dictionaries.
def tile(image_file, outdir, level, std_img, path_to_slide="../images/", dp=None, ft=1):
    """Tile a scn/svs slide into normalized 299x299 PNGs plus a coordinate CSV.

    :param image_file: slide file name under path_to_slide
    :param outdir: output directory for tiles and the dict CSV
    :param level: pyramid level to read tiles from
    :param std_img: stain-normalization reference image (array)
    :param path_to_slide: directory containing the slide
    :param dp: optional suffix tag for output file names
    :param ft: tile-size multiplier (1 -> 299px regions)
    :return: (n_x, n_y, low-res preview, residue_x, residue_y, tile count)
    """
    slide = OpenSlide(path_to_slide+image_file)
    slp = str(path_to_slide+image_file)
    print(slp)
    print(slide.level_dimensions)

    bounds_width = slide.level_dimensions[level][0]
    bounds_height = slide.level_dimensions[level][1]
    x = 0
    y = 0
    half_width_region = 49*ft
    full_width_region = 299*ft
    # Tiles overlap by half_width_region pixels.
    stepsize = (full_width_region - half_width_region)

    n_x = int((bounds_width - 1) / stepsize)
    n_y = int((bounds_height - 1) / stepsize)
    # Leftover edge widths (in 50px units) that don't fit a full step;
    # used later to size the prediction heat map.
    residue_x = int((bounds_width - n_x * stepsize)/50)
    residue_y = int((bounds_height - n_y * stepsize)/50)
    lowres = slide.read_region((x, y), 2, (int(n_x*stepsize/16), int(n_y*stepsize/16)))
    lowres = np.array(lowres)[:,:,:3]

    x0 = 0
    # create multiprocessing pool: one v_slide task per tile column
    print(mp.cpu_count())
    pool = mp.Pool(processes=mp.cpu_count())
    tasks = []
    while x0 < n_x:
        task = tuple((slp, n_y, x, y, full_width_region, stepsize, x0, outdir, level, dp, std_img))
        tasks.append(task)
        x0 += 1
    # slice images with multiprocessing
    temp = pool.starmap(v_slide, tasks)
    tempdict = list(temp)
    temp = None
    pool.close()
    pool.join()

    # Flatten the per-column tile lists into one coordinate table.
    tempdict = list(filter(None, tempdict))
    imloc = []
    list(map(imloc.extend, tempdict))
    imlocpd = pd.DataFrame(imloc, columns = ["X_pos", "Y_pos", "X", "Y", "Loc"])
    imlocpd = imlocpd.sort_values(["X_pos", "Y_pos"], ascending=[True, True])
    imlocpd = imlocpd.reset_index(drop=True)
    imlocpd = imlocpd.reset_index(drop=False)
    imlocpd.columns = ["Num", "X_pos", "Y_pos", "X", "Y", "Loc"]
    if dp:
        imlocpd.to_csv(outdir + "/{}_dict.csv".format(dp), index=False)
    else:
        imlocpd.to_csv(outdir + "/dict.csv", index=False)
    tempdict = None
    ct = len(imloc)
    print(ct)

    return n_x, n_y, lowres, residue_x, residue_y, ct
| 37.210145 | 120 | 0.63408 | from openslide import OpenSlide
import numpy as np
import pandas as pd
import multiprocessing as mp
import staintools
from PIL import Image
def bgcheck(img, ts):
    """Return a background score for a tile: fraction of near-white plus
    near-black pixels, plus 1 when the tile is low-contrast "grey"."""
    the_imagea = np.array(img)[:, :, :3]
    the_imagea = np.nan_to_num(the_imagea)
    mask = (the_imagea[:, :, :3] > 200).astype(np.uint8)
    maskb = (the_imagea[:, :, :3] < 50).astype(np.uint8)
    # NOTE(review): `the_imagea[0]` indexes image *rows* 0-2, not the three
    # color channels; a per-channel check would be `the_imagea[:, :, 0]` etc.
    # Confirm intent before changing.
    greya = ((np.ptp(the_imagea[0])) < 100).astype(np.uint8)
    greyb = ((np.ptp(the_imagea[1])) < 100).astype(np.uint8)
    greyc = ((np.ptp(the_imagea[2])) < 100).astype(np.uint8)
    grey = greya * greyb * greyc
    mask = mask[:, :, 0] * mask[:, :, 1] * mask[:, :, 2]
    maskb = maskb[:, :, 0] * maskb[:, :, 1] * maskb[:, :, 2]
    white = (np.sum(mask) + np.sum(maskb)) / (ts * ts) + grey
    return white
def normalization(img, sttd):
    """Stain-normalize a tile against reference image *sttd* using the
    vahadane method; returns a PIL RGB image."""
    img = np.array(img)[:, :, :3]
    img = staintools.LuminosityStandardizer.standardize(img)
    normalizer = staintools.StainNormalizer(method='vahadane')
    normalizer.fit(sttd)
    img = normalizer.transform(img)
    img = Image.fromarray(img.astype('uint8'), 'RGB')
    return img
def v_slide(slp, n_y, x, y, tile_size, stepsize, x0, outdir, level, dp, std):
    """Cut one column (index x0) of tiles from the slide at *slp*; saves
    normalized 299x299 PNGs and returns their coordinate records."""
    # pid = os.getpid()
    # print('{}: start working'.format(pid))
    slide = OpenSlide(slp)
    imloc = []
    y0 = 0
    target_x = x0 * stepsize
    image_x = (target_x + x)*(4**level)
    while y0 < n_y:
        target_y = y0 * stepsize
        image_y = (target_y + y)*(4**level)
        img = slide.read_region((image_x, image_y), level, (tile_size, tile_size))
        # Keep tiles whose background score marks them as mostly tissue.
        wscore = bgcheck(img, tile_size)
        if 0.01 < wscore < 0.4:
            img = img.resize((299, 299))
            img = normalization(img, std)
            if dp:
                img.save(outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp)))
                strr = outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp))
            else:
                img.save(outdir + "/region_x-{}-y-{}.png".format(image_x, image_y))
                strr = outdir + "/region_x-{}-y-{}.png".format(image_x, image_y)
            imloc.append([x0, y0, image_x, image_y, strr])
        y0 += 1
    slide.close()
    return imloc
# image_file is the scn/svs name; outdir is the output directory; path_to_slide is where the scn/svs stored.
# First open the slide, determine how many tiles can be cut, record the residue edges width,
# and calculate the final output prediction heat map size should be. Then, using multithread to cut tiles, and stack up
# tiles and their position dictionaries.
def tile(image_file, outdir, level, std_img, path_to_slide="../images/", dp=None, ft=1):
    """Tile a whole slide into normalized PNGs; writes a coordinate CSV and
    returns (n_x, n_y, low-res preview, residue_x, residue_y, tile count)."""
    slide = OpenSlide(path_to_slide+image_file)
    slp = str(path_to_slide+image_file)
    print(slp)
    print(slide.level_dimensions)

    bounds_width = slide.level_dimensions[level][0]
    bounds_height = slide.level_dimensions[level][1]
    x = 0
    y = 0
    half_width_region = 49*ft
    full_width_region = 299*ft
    stepsize = (full_width_region - half_width_region)

    n_x = int((bounds_width - 1) / stepsize)
    n_y = int((bounds_height - 1) / stepsize)
    # Edge remainders (in 50px units) used to size the prediction heat map.
    residue_x = int((bounds_width - n_x * stepsize)/50)
    residue_y = int((bounds_height - n_y * stepsize)/50)
    lowres = slide.read_region((x, y), 2, (int(n_x*stepsize/16), int(n_y*stepsize/16)))
    lowres = np.array(lowres)[:,:,:3]

    x0 = 0
    # create multiprocessing pool; one v_slide task per tile column
    print(mp.cpu_count())
    pool = mp.Pool(processes=mp.cpu_count())
    tasks = []
    while x0 < n_x:
        task = tuple((slp, n_y, x, y, full_width_region, stepsize, x0, outdir, level, dp, std_img))
        tasks.append(task)
        x0 += 1
    # slice images with multiprocessing
    temp = pool.starmap(v_slide, tasks)
    tempdict = list(temp)
    temp = None
    pool.close()
    pool.join()

    # Flatten per-column tile lists into one coordinate table.
    tempdict = list(filter(None, tempdict))
    imloc = []
    list(map(imloc.extend, tempdict))
    imlocpd = pd.DataFrame(imloc, columns = ["X_pos", "Y_pos", "X", "Y", "Loc"])
    imlocpd = imlocpd.sort_values(["X_pos", "Y_pos"], ascending=[True, True])
    imlocpd = imlocpd.reset_index(drop=True)
    imlocpd = imlocpd.reset_index(drop=False)
    imlocpd.columns = ["Num", "X_pos", "Y_pos", "X", "Y", "Loc"]
    if dp:
        imlocpd.to_csv(outdir + "/{}_dict.csv".format(dp), index=False)
    else:
        imlocpd.to_csv(outdir + "/dict.csv", index=False)
    tempdict = None
    ct = len(imloc)
    print(ct)

    return n_x, n_y, lowres, residue_x, residue_y, ct
| true | true |
f721a64b1ed80dcb38fc20d3f17da57445b5b1a0 | 9,626 | py | Python | python/ht/ui/menus/parmmenu.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 136 | 2015-01-03T04:03:23.000Z | 2022-02-07T11:08:57.000Z | python/ht/ui/menus/parmmenu.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 11 | 2017-02-09T20:05:04.000Z | 2021-01-24T22:25:59.000Z | python/ht/ui/menus/parmmenu.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 26 | 2015-08-18T12:11:02.000Z | 2020-12-19T01:53:31.000Z | """This module contains functions supporting custom PARMmenu.xml entries."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library
from typing import Dict, List
# Houdini
import hou
# =============================================================================
# NON-PUBLIC FUNCTIONS
# =============================================================================
def _valid_to_convert_to_absolute_reference(parm: hou.Parm) -> bool:
    """Check if a parameter can be converted to an absolute reference.

    The parameter must be a node reference string parameter whose raw value
    is a relative path ("..") that resolves to an existing node, with no
    keyframes or expressions.

    :param parm: There parameter to check.
    :return: Whether or not the parm can be converted.
    """
    template = parm.parmTemplate()

    # Only node-reference string parameters are eligible.
    if not isinstance(template, hou.StringParmTemplate):
        return False

    if template.stringType() != hou.stringParmType.NodeReference:
        return False

    path = parm.eval()

    # Empty values and paths that are not relative are not convertible.
    if not path or not path.startswith(".."):
        return False

    # Keyframes/expressions must be left alone.
    if parm.keyframes():
        return False

    # If the evaluated value differs from the raw value, the parameter holds
    # some expression we should not touch.
    if path != parm.unexpandedString():
        return False

    return parm.evalAsNode() is not None
def _valid_to_convert_to_relative_reference(parm: hou.Parm) -> bool:
    """Check if a parameter can be converted to a relative reference.

    The parameter must be a node reference string parameter whose raw value
    is an absolute path ("/") that resolves to an existing node, with no
    keyframes or expressions.

    :param parm: There parameter to check.
    :return: Whether or not the parm can be converted.
    """
    template = parm.parmTemplate()

    # Only node-reference string parameters are eligible.
    if not isinstance(template, hou.StringParmTemplate):
        return False

    if template.stringType() != hou.stringParmType.NodeReference:
        return False

    path = parm.eval()

    # Empty values and paths that are not absolute are not convertible.
    if not path or not path.startswith("/"):
        return False

    # Keyframes/expressions must be left alone.
    if parm.keyframes():
        return False

    # If the evaluated value differs from the raw value, the parameter holds
    # some expression we should not touch.
    if path != parm.unexpandedString():
        return False

    return parm.evalAsNode() is not None
# =============================================================================
# FUNCTIONS
# =============================================================================
def convert_absolute_to_relative_path_context(scriptargs: dict) -> bool:
    """Context script for converting any absolute node paths to relative paths.

    The menu entry will be shown if there are node reference string parameters
    whose values are absolute paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return: Whether or not to show the menu entry.
    """
    parms = scriptargs["parms"]

    # Generator keeps any() short-circuiting instead of materializing a list.
    return any(_valid_to_convert_to_relative_reference(parm) for parm in parms)
def convert_absolute_to_relative_path(scriptargs: dict):
    """Convert eligible absolute node path parameters to relative paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    for parm in scriptargs["parms"]:
        # Skip anything that is not a convertible absolute node reference.
        if not _valid_to_convert_to_relative_reference(parm):
            continue

        parm.set(parm.node().relativePathTo(parm.evalAsNode()))
def convert_relative_to_absolute_path_context(scriptargs: dict) -> bool:
    """Context script for converting any relative node paths to absolute paths.

    The menu entry will be shown if there are node reference string parameters
    whose values are relative paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return: Whether or not to show the menu entry.
    """
    parms = scriptargs["parms"]

    # Generator keeps any() short-circuiting instead of materializing a list.
    return any(_valid_to_convert_to_absolute_reference(parm) for parm in parms)
def convert_relative_to_absolute_path(scriptargs: dict):
    """Convert eligible relative node path parameters to absolute paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    for parm in scriptargs["parms"]:
        # Skip anything that is not a convertible relative node reference.
        if not _valid_to_convert_to_absolute_reference(parm):
            continue

        parm.set(parm.evalAsNode().path())
def promote_parameter_to_node(scriptargs: dict):  # pylint: disable=too-many-locals
    """Promote a parameter to a target node.

    Prompts for a target node, creates (or reuses) a matching parameter on
    it, then channel-references each selected parm to the target parameter.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    # Get the parms to act on.
    parms = scriptargs["parms"]
    # The start node for the node chooser prompt
    start_node = None
    parm_tuple: hou.ParmTuple = None
    parm_tuple_map: Dict[hou.ParmTuple, List[hou.Parm]] = {}
    parm_tuple_nodes = []
    # Process all the selected parms, partitioning by parm tuple.
    for parm in parms:
        parm_tuple = parm.tuple()
        # Get or create a list of parms for this tuple.
        parms_for_tuple = parm_tuple_map.setdefault(parm_tuple, [])
        parms_for_tuple.append(parm)
        node = parm_tuple.node()
        parm_tuple_nodes.append(node)
        # Update the start node to be the parent of this tuple's node.
        start_node = node.parent()
    # The number of parms in the tuple.  NOTE(review): this uses the tuple
    # from the *last* loop iteration -- assumes all selected parms share the
    # same tuple size; confirm for multi-parm selections.
    num_components = len(parm_tuple)
    # Determine how many components of the tuple we will set.
    num_components_to_set = max(len(value) for value in parm_tuple_map.values())
    # Prompt for a target node. Start at the parent (the most logical choice?)
    result = hou.ui.selectNode(initial_node=start_node)
    # BUGFIX: hou.ui.selectNode() returns None when the chooser is dismissed
    # and hou.node(None) raises TypeError, so bail out on cancellation.
    if result is None:
        return
    # Try to find the selected node.
    target_node = hou.node(result)
    if target_node is not None:
        # Can't promote to a selected node.
        if target_node in parm_tuple_nodes:
            raise hou.OperationFailed("Cannot promote to a source node.")
        # Should the target parm will be set to the source value?
        set_value = True
        # The target node already has a parm tuple with the desired name so we
        # should prompt to use it.
        if target_node.parmTuple(parm_tuple.name()) is not None:
            choice = hou.ui.displayMessage(
                "Parameter already exists on {}. Link to existing parameter?".format(
                    target_node.path()
                ),
                buttons=(
                    "Yes and keep current value",
                    "Yes and update value",
                    "Cancel",
                ),
                severity=hou.severityType.ImportantMessage,
            )
            # Use parm but keep value, so don't set.
            if choice == 0:
                set_value = False
            # Use parm and update value.
            elif choice == 1:
                set_value = True
            # Bail out since we're cancelling.
            else:
                return
        # No existing parameter so we'll have to create one.
        else:
            # Get the target node's parm interface.
            target_ptg = target_node.parmTemplateGroup()
            # The parameter definition for the parm we are trying to link.
            parm_template = parm_tuple.parmTemplate()
            # If we are trying to link a single parm inside a tuple then modify
            # the parm definition to represent that single parm.
            if num_components_to_set != num_components:
                parm_template.setNumComponents(1)
                # Since we're just setting a single component the parms should all
                # have the same name so just grab the first.
                parm_template.setName(parms[0].name())
            # Add the parameter definition to the parm list.
            target_ptg.addParmTemplate(parm_template)
            # Update the interface with the new definition.
            target_node.setParmTemplateGroup(target_ptg)
        # Process each parm to set.
        for parm in parms:
            # Get the target parm.
            target_parm = target_node.parm(parm.name())
            # Set the target parm to the current value if required.
            if set_value:
                target_parm.set(parm.eval())
            # Create the channel reference.
            parm.set(target_parm)
| 34.134752 | 88 | 0.601911 |
from typing import Dict, List
import hou
def _valid_to_convert_to_absolute_reference(parm: hou.Parm) -> bool:
parm_template = parm.parmTemplate()
if isinstance(parm_template, hou.StringParmTemplate):
if parm_template.stringType() == hou.stringParmType.NodeReference:
path = parm.eval()
if not path:
return False
if not path.startswith(".."):
return False
if parm.keyframes():
return False
# If the path is the same as the raw path then we can say that we
# can show the menu item. If the path is not the same as the
# unexpanded we won't say yes because it would be some sort of an
if path == parm.unexpandedString():
if parm.evalAsNode() is not None:
return True
return False
def _valid_to_convert_to_relative_reference(parm: hou.Parm) -> bool:
parm_template = parm.parmTemplate()
# Check if the parameter is a string parameter.
if isinstance(parm_template, hou.StringParmTemplate):
# Check if the string parameter is a node reference.
if parm_template.stringType() == hou.stringParmType.NodeReference:
# Need to test values to decide whether to show up or not.
path = parm.eval()
# Ignore empty strings.
if not path:
return False
# Ignore paths which already seem to be relative.
if not path.startswith("/"):
return False
# Can't convert parameters with keyframes/expressions.
if parm.keyframes():
return False
# expression which we don't want to mess with.
if path == parm.unexpandedString():
if parm.evalAsNode() is not None:
return True
return False
def convert_absolute_to_relative_path_context(scriptargs: dict) -> bool:
parms = scriptargs["parms"]
return any([_valid_to_convert_to_relative_reference(parm) for parm in parms])
def convert_absolute_to_relative_path(scriptargs: dict):
parms = scriptargs["parms"]
for parm in parms:
if _valid_to_convert_to_relative_reference(parm):
target_node = parm.evalAsNode()
parm.set(parm.node().relativePathTo(target_node))
def convert_relative_to_absolute_path_context(scriptargs: dict) -> bool:
parms = scriptargs["parms"]
return any([_valid_to_convert_to_absolute_reference(parm) for parm in parms])
def convert_relative_to_absolute_path(scriptargs: dict):
parms = scriptargs["parms"]
for parm in parms:
if _valid_to_convert_to_absolute_reference(parm):
target_node = parm.evalAsNode()
parm.set(target_node.path())
def promote_parameter_to_node(scriptargs: dict):
parms = scriptargs["parms"]
start_node = None
parm_tuple: hou.ParmTuple = None
parm_tuple_map: Dict[hou.ParmTuple, List[hou.Parm]] = {}
parm_tuple_nodes = []
for parm in parms:
parm_tuple = parm.tuple()
parms_for_tuple = parm_tuple_map.setdefault(parm_tuple, [])
parms_for_tuple.append(parm)
node = parm_tuple.node()
parm_tuple_nodes.append(node)
start_node = node.parent()
# The number of parms in the tuple.
num_components = len(parm_tuple)
# Determine how many components of the tuple we will set.
num_components_to_set = max([len(value) for value in list(parm_tuple_map.values())])
# Prompt for a target node. Start at the parent (the most logical choice?)
result = hou.ui.selectNode(initial_node=start_node)
# Try to find ths selected node.
target_node = hou.node(result)
if target_node is not None:
# Can't promote to a selected node.
if target_node in parm_tuple_nodes:
raise hou.OperationFailed("Cannot promote to a source node.")
set_value = True
if target_node.parmTuple(parm_tuple.name()) is not None:
choice = hou.ui.displayMessage(
"Parameter already exists on {}. Link to existing parameter?".format(
target_node.path()
),
buttons=(
"Yes and keep current value",
"Yes and update value",
"Cancel",
),
severity=hou.severityType.ImportantMessage,
)
if choice == 0:
set_value = False
# Use parm and update value.
elif choice == 1:
set_value = True
# Bail out since we're cancelling.
else:
return
else:
# Get the target node's parm interface.
target_ptg = target_node.parmTemplateGroup()
parm_template = parm_tuple.parmTemplate()
if num_components_to_set != num_components:
parm_template.setNumComponents(1)
# have the same name so just grab the first.
parm_template.setName(parms[0].name())
# Add the parameter definition to the parm list.
target_ptg.addParmTemplate(parm_template)
# Update the interface with the new definition.
target_node.setParmTemplateGroup(target_ptg)
# Process each parm to set.
for parm in parms:
# Get the target parm.
target_parm = target_node.parm(parm.name())
# Set the target parm to the current value if required.
if set_value:
target_parm.set(parm.eval())
# Create the channel reference.
parm.set(target_parm)
| true | true |
f721a752d81135177ab54ecb6768ca98ba8ac9c6 | 6,793 | py | Python | controller/modules/Logger.py | avinashnatesan/Controllers | 85a005a87e61d50a3ada660e8d90739745e211af | [
"MIT"
] | null | null | null | controller/modules/Logger.py | avinashnatesan/Controllers | 85a005a87e61d50a3ada660e8d90739745e211af | [
"MIT"
] | null | null | null | controller/modules/Logger.py | avinashnatesan/Controllers | 85a005a87e61d50a3ada660e8d90739745e211af | [
"MIT"
] | null | null | null | # ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import logging.handlers as lh
import os
from controller.framework.ControllerModule import ControllerModule
class Logger(ControllerModule):
    """Controller module that routes framework log CBTs to Python logging.

    Depending on the "Device" config entry, records go to the console, to a
    rotating log file, or to both.
    """

    def __init__(self, cfx_handle, module_config, module_name):
        super(Logger, self).__init__(cfx_handle, module_config, module_name)

    def initialize(self):
        """Configure the Python logging backend from the ipop-config entries."""
        # Extracts the controller Log Level from the ipop-config file.
        if "LogLevel" in self._cm_config:
            level = getattr(logging, self._cm_config["LogLevel"])
        else:
            # BUGFIX: the previous fallback was getattr(logging, "info"),
            # which resolves to the logging.info() *function* rather than a
            # level constant, and is rejected by basicConfig()/setLevel().
            level = logging.INFO
        # If the Logging is set to Console by the User
        if self._cm_config["Device"] == "Console":
            # Console logging
            logging.basicConfig(format="[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
                                datefmt="%H:%M:%S",
                                level=level)
            self.logger = logging.getLogger("IPOP console logger")
        # If the Logging is set to File by the User
        elif self._cm_config["Device"] == "File":
            # Extracts the filepath else sets logs to current working directory
            filepath = self._cm_config.get("Directory", "./")
            fqname = filepath + \
                self._cm_config.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            self.logger = logging.getLogger("IPOP Rotating Log")
            self.logger.setLevel(level)
            # Creates rotating filehandler
            handler = lh.RotatingFileHandler(filename=fqname,
                                             maxBytes=self._cm_config["MaxFileSize"],
                                             backupCount=self._cm_config["MaxArchives"])
            formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
            handler.setFormatter(formatter)
            # Adds the filehandler to the Python logger module
            self.logger.addHandler(handler)
        # If the Logging is set to All by the User
        else:
            self.logger = logging.getLogger("IPOP Console & File Logger")
            self.logger.setLevel(level)
            # Console Logger
            console_handler = logging.StreamHandler()
            console_log_formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
                datefmt="%H:%M:%S")
            console_handler.setFormatter(console_log_formatter)
            self.logger.addHandler(console_handler)
            # Extracts the filepath else sets logs to current working directory
            filepath = self._cm_config.get("Directory", "./")
            fqname = filepath + \
                self._cm_config.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            # File Logger: creates rotating filehandler
            file_handler = lh.RotatingFileHandler(filename=fqname)
            file_log_formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
            file_handler.setFormatter(file_log_formatter)
            self.logger.addHandler(file_handler)
        self.logger.info("Logger: Module loaded")
        # PKTDUMP mode dumps packet information
        logging.addLevelName(5, "PKTDUMP")
        logging.PKTDUMP = 5

    def process_cbt(self, cbt):
        """Dispatch a log CBT to the appropriate logging level handler."""
        if cbt.op_type == "Request":
            log_entry = "{0}: {1}".format(cbt.request.initiator, cbt.request.params)
            # Extracting the logging level information from the CBT action tag
            if cbt.request.action == "LOG_DEBUG" or cbt.request.action == "debug":
                self.logger.debug(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_INFO" or cbt.request.action == "info":
                self.logger.info(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_WARNING" or cbt.request.action == "warning":
                self.logger.warning(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_ERROR" or cbt.request.action == "error":
                self.logger.error(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "pktdump":
                self.pktdump(message=cbt.request.params.get("message"),
                             dump=cbt.request.params.get("dump"))
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_QUERY_CONFIG":
                cbt.set_response(self._cm_config, True)
            else:
                log = "Unsupported CBT action {0}".format(cbt)
                self.logger.warning("{0}: {1}".format(self._module_name, log))
                cbt.set_response(log, False)
            self.complete_cbt(cbt)
        elif cbt.op_type == "Response":
            self.free_cbt(cbt)

    def timer_method(self):
        pass

    def pktdump(self, message, dump=None, *args, **argv):
        """Packet information dumping method."""
        hext = ""
        if dump:
            for i in range(0, len(dump), 2):
                # BUGFIX: str.encode("hex") is Python 2 only and raises
                # LookupError on Python 3; use bytes.hex() instead.
                # Assumes ``dump`` is a str of raw octets -- TODO confirm.
                hext += dump[i:i + 2].encode("utf-8").hex()
                hext += " "
                if i % 16 == 14:
                    hext += "\n"
            logging.log(5, message + "\n" + hext)
        else:
            logging.log(5, message, *args, **argv)

    def terminate(self):
        logging.shutdown()
| 45.590604 | 97 | 0.602532 |
import logging
import logging.handlers as lh
import os
from controller.framework.ControllerModule import ControllerModule
class Logger(ControllerModule):
def __init__(self, cfx_handle, module_config, module_name):
super(Logger, self).__init__(cfx_handle, module_config, module_name)
def initialize(self):
if "LogLevel" in self._cm_config:
level = getattr(logging, self._cm_config["LogLevel"])
else:
level = getattr(logging, "info")
if self._cm_config["Device"] == "Console":
logging.basicConfig(format="[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
datefmt="%H:%M:%S",
level=level)
self.logger = logging.getLogger("IPOP console logger")
elif self._cm_config["Device"] == "File":
filepath = self._cm_config.get("Directory", "./")
fqname = filepath + \
self._cm_config.get("CtrlLogFileName", "ctrl.log")
if not os.path.isdir(filepath):
os.mkdir(filepath)
self.logger = logging.getLogger("IPOP Rotating Log")
self.logger.setLevel(level)
handler = lh.RotatingFileHandler(filename=fqname,
maxBytes=self._cm_config["MaxFileSize"],
backupCount=self._cm_config["MaxArchives"])
formatter = logging.Formatter(
"[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
else:
self.logger = logging.getLogger("IPOP Console & File Logger")
self.logger.setLevel(level)
console_handler = logging.StreamHandler()
console_log_formatter = logging.Formatter(
"[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
datefmt="%H:%M:%S")
console_handler.setFormatter(console_log_formatter)
self.logger.addHandler(console_handler)
filepath = self._cm_config.get("Directory", "./")
fqname = filepath + \
self._cm_config.get("CtrlLogFileName", "ctrl.log")
if not os.path.isdir(filepath):
os.mkdir(filepath)
file_handler = lh.RotatingFileHandler(filename=fqname)
file_log_formatter = logging.Formatter(
"[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
file_handler.setFormatter(file_log_formatter)
self.logger.addHandler(file_handler)
self.logger.info("Logger: Module loaded")
logging.addLevelName(5, "PKTDUMP")
logging.PKTDUMP = 5
def process_cbt(self, cbt):
if cbt.op_type == "Request":
log_entry = "{0}: {1}".format(cbt.request.initiator, cbt.request.params)
if cbt.request.action == "LOG_DEBUG" or cbt.request.action == "debug":
self.logger.debug(log_entry)
cbt.set_response(None, True)
elif cbt.request.action == "LOG_INFO" or cbt.request.action == "info":
self.logger.info(log_entry)
cbt.set_response(None, True)
elif cbt.request.action == "LOG_WARNING" or cbt.request.action == "warning":
self.logger.warning(log_entry)
cbt.set_response(None, True)
elif cbt.request.action == "LOG_ERROR" or cbt.request.action == "error":
self.logger.error(log_entry)
cbt.set_response(None, True)
elif cbt.request.action == "pktdump":
self.pktdump(message=cbt.request.params.get("message"),
dump=cbt.request.params.get("dump"))
cbt.set_response(None, True)
elif cbt.request.action == "LOG_QUERY_CONFIG":
cbt.set_response(self._cm_config, True)
else:
log = "Unsupported CBT action {0}".format(cbt)
self.logger.warning("{0}: {1}".format(self._module_name, log))
cbt.set_response(log, False)
self.complete_cbt(cbt)
elif cbt.op_type == "Response":
self.free_cbt(cbt)
def timer_method(self):
pass
def pktdump(self, message, dump=None, *args, **argv):
hext = ""
if dump:
for i in range(0, len(dump), 2):
hext += dump[i:i + 2].encode("hex")
hext += " "
if i % 16 == 14:
hext += "\n"
logging.log(5, message + "\n" + hext)
else:
logging.log(5, message, *args, **argv)
def terminate(self):
logging.shutdown()
| true | true |
f721aa11249df76d852759230ba85c6a027c2c3e | 3,271 | py | Python | libs/parse_ansible.py | realglobe-Inc/atom-autocomplete-ansible | 3752b7d893be35ca93a8e424c960e328c0d75bb9 | [
"MIT"
] | 32 | 2016-07-22T06:17:00.000Z | 2021-09-24T16:19:11.000Z | libs/parse_ansible.py | realglobe-Inc/atom-autocomplete-ansible | 3752b7d893be35ca93a8e424c960e328c0d75bb9 | [
"MIT"
] | 50 | 2016-06-28T09:36:00.000Z | 2022-03-18T13:03:18.000Z | libs/parse_ansible.py | realglobe-Inc/atom-autocomplete-ansible | 3752b7d893be35ca93a8e424c960e328c0d75bb9 | [
"MIT"
] | 22 | 2016-09-20T16:56:04.000Z | 2022-03-25T23:24:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import __main__
import json
import os
from ansible.cli.doc import DocCLI
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.utils.display import Display
try:
from ansible.plugins.loader import lookup_loader, module_loader
from ansible.utils import plugin_docs
use_old_loader = False
BLACKLIST_MODULES = plugin_docs.BLACKLIST['MODULE']
except ImportError:
from ansible.plugins import lookup_loader, module_loader
from ansible.utils import module_docs as plugin_docs
use_old_loader = True
BLACKLIST_MODULES = plugin_docs.BLACKLIST_MODULES
try:
from ansible.plugins.loader import fragment_loader
USE_FRAGMENT_LOADER = True
except ImportError:
fragment_loader = None
USE_FRAGMENT_LOADER = False
__main__.display = Display()
doc_cli = DocCLI(['ansible atom'])
def get_module_list():
    """Collect the names of every Ansible module visible on the loader paths."""
    for search_path in module_loader._get_paths():
        if use_old_loader:
            # Old API accumulates results directly on doc_cli.module_list.
            doc_cli.find_modules(search_path)
            continue
        try:
            found = doc_cli.find_plugins(search_path, 'module')
        except TypeError:
            # Newer signature takes an extra plugin-directory argument.
            found = doc_cli.find_plugins(search_path, 'plugins', 'module')
        if found:
            doc_cli.plugin_list.update(found)
    names = doc_cli.module_list if use_old_loader else doc_cli.plugin_list
    return sorted(set(names))
def main():
    """Build a JSON catalogue of Ansible modules, playbook directives and
    lookup plugins for the autocomplete provider.

    :return: A JSON string with keys ``modules``, ``directives`` and
        ``lookup_plugins``.
    """
    # Only these documentation fields are exposed to the completer.
    module_keys = ('module', 'short_description', 'options', 'deprecated')
    result = {'modules': [], 'directives': {}, 'lookup_plugins': []}
    for module in get_module_list():
        # Skip modules Ansible itself blacklists from documentation.
        if module in BLACKLIST_MODULES:
            continue
        filename = module_loader.find_plugin(module, mod_type='.py')
        # Skip modules without a Python source, Windows-only PowerShell
        # implementations, and package directories.
        if filename is None:
            continue
        if filename.endswith(".ps1"):
            continue
        if os.path.isdir(filename):
            continue
        # Newer Ansible requires a fragment loader argument to get_docstring().
        get_docstring_args = ((filename, fragment_loader)
                              if USE_FRAGMENT_LOADER else (filename,))
        try:
            doc = plugin_docs.get_docstring(*get_docstring_args)[0]
            filtered_doc = {key: doc.get(key, None) for key in module_keys}
            result['modules'].append(filtered_doc)
        except Exception as e:
            # Best effort: a module whose docs fail to parse is simply omitted.
            pass
    for aclass in (Play, Role, Block, Task):
        aobj = aclass()
        name = type(aobj).__name__
        # _attributes lists the playbook keywords valid on this object type.
        for attr in aobj.__dict__['_attributes']:
            # NOTE(review): this appears intended to skip private attributes;
            # if _attributes yields plain strings, attr.private would raise --
            # confirm against the targeted Ansible version.
            if 'private' in attr and attr.private:
                continue
            direct_target = result['directives'].setdefault(attr, [])
            direct_target.append(name)
            if attr == 'action':
                # 'local_action' is valid wherever 'action' is.
                local_action = result['directives'].setdefault(
                    'local_action', [])
                local_action.append(name)
    # Loop keywords (with_items, with_dict, ...) are expanded by the consumer.
    result['directives']['with_'] = ['Task']
    for lookup in lookup_loader.all(path_only=True):
        name = os.path.splitext(os.path.basename(lookup))[0]
        result['lookup_plugins'].append(name)
    return json.dumps(result)
if __name__ == '__main__':
print(main())
| 32.068627 | 75 | 0.64812 |
from __future__ import print_function, unicode_literals
import __main__
import json
import os
from ansible.cli.doc import DocCLI
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.utils.display import Display
try:
from ansible.plugins.loader import lookup_loader, module_loader
from ansible.utils import plugin_docs
use_old_loader = False
BLACKLIST_MODULES = plugin_docs.BLACKLIST['MODULE']
except ImportError:
from ansible.plugins import lookup_loader, module_loader
from ansible.utils import module_docs as plugin_docs
use_old_loader = True
BLACKLIST_MODULES = plugin_docs.BLACKLIST_MODULES
try:
from ansible.plugins.loader import fragment_loader
USE_FRAGMENT_LOADER = True
except ImportError:
fragment_loader = None
USE_FRAGMENT_LOADER = False
__main__.display = Display()
doc_cli = DocCLI(['ansible atom'])
def get_module_list():
module_paths = module_loader._get_paths()
for path in module_paths:
if use_old_loader:
doc_cli.find_modules(path)
else:
try:
founds = doc_cli.find_plugins(path, 'module')
except TypeError:
founds = doc_cli.find_plugins(path, 'plugins', 'module')
if founds:
doc_cli.plugin_list.update(founds)
module_list = (
doc_cli.module_list if use_old_loader else doc_cli.plugin_list)
return sorted(set(module_list))
def main():
module_keys = ('module', 'short_description', 'options', 'deprecated')
result = {'modules': [], 'directives': {}, 'lookup_plugins': []}
for module in get_module_list():
if module in BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
get_docstring_args = ((filename, fragment_loader)
if USE_FRAGMENT_LOADER else (filename,))
try:
doc = plugin_docs.get_docstring(*get_docstring_args)[0]
filtered_doc = {key: doc.get(key, None) for key in module_keys}
result['modules'].append(filtered_doc)
except Exception as e:
pass
for aclass in (Play, Role, Block, Task):
aobj = aclass()
name = type(aobj).__name__
for attr in aobj.__dict__['_attributes']:
if 'private' in attr and attr.private:
continue
direct_target = result['directives'].setdefault(attr, [])
direct_target.append(name)
if attr == 'action':
local_action = result['directives'].setdefault(
'local_action', [])
local_action.append(name)
result['directives']['with_'] = ['Task']
for lookup in lookup_loader.all(path_only=True):
name = os.path.splitext(os.path.basename(lookup))[0]
result['lookup_plugins'].append(name)
return json.dumps(result)
if __name__ == '__main__':
print(main())
| true | true |
f721aa8af2cd7cf530a4b76cbb10ce9276f81044 | 5,616 | py | Python | espnet/asr/pytorch_backend/asr_recog.py | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 | [
"Apache-2.0"
] | null | null | null | espnet/asr/pytorch_backend/asr_recog.py | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 | [
"Apache-2.0"
] | null | null | null | espnet/asr/pytorch_backend/asr_recog.py | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import json
import logging
import os
import numpy as np
import torch
from espnet.asr.asr_utils import add_results_to_json, add_single_results
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
def recog(args):
    """Decode with the given args.

    Loads the trained end-to-end model (plus an optional RNNLM), decodes
    every utterance listed in ``args.recog_json`` and writes the n-best
    hypotheses as JSON to ``args.result_label``.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    model.recog_args = args

    # read rnnlm
    if args.rnnlm:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        # Only the default RNNLM implementation is supported by this API.
        if getattr(rnnlm_args, "model_module", "default") != "default":
            raise ValueError(
                "use '--api v2' option to decode with non-default language model"
            )
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(train_args.char_list),
                rnnlm_args.layer,
                rnnlm_args.unit,
                getattr(rnnlm_args, "embed_unit", None),  # for backward compatibility
            )
        )
        torch_load(args.rnnlm, rnnlm)
        rnnlm.eval()
    else:
        rnnlm = None

    # gpu
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info("gpu id: " + str(gpu_id))
        model.cuda()
        if rnnlm:
            rnnlm.cuda()

    # read json data (utterance manifest produced by data preparation)
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}

    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )

    # Decode one utterance at a time (batch of size 1).
    with torch.no_grad():
        for idx, name in enumerate(js.keys(), 1):
            logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
            batch = [(name, js[name])]
            feat = load_inputs_and_targets(batch)
            feat = feat[0][0]
            if args.prefix_decode:
                # Streaming/prefix decoding yields a single best hypothesis.
                best, ids, score = model.prefix_recognize(feat, args, train_args, train_args.char_list, rnnlm)
                new_js[name] = add_single_results(js[name], best, ids, score)
            else:
                nbest_hyps = model.recognize(
                    feat, args, train_args.char_list, rnnlm
                )
                new_js[name] = add_results_to_json(
                    js[name], nbest_hyps, train_args.char_list
                )

    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
def viterbi_decode(args):
    """Compute forced alignments between features and reference tokens.

    For every utterance in ``args.recog_json`` the Viterbi alignment path is
    computed against the reference token ids and written into the output
    JSON under ``output[0]['align']``.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    # Rebuild the model from the configuration saved next to the snapshot.
    idim, odim, train_args = get_model_conf(
        args.model, os.path.join(os.path.dirname(args.model), 'model.json'))
    model_class = dynamic_import(train_args.model_module)
    model = model_class(idim, odim, train_args)
    if args.model is not None:
        load_params = dict(torch.load(args.model))
        # Unwrap the possible container formats of the checkpoint.
        if 'model' in load_params:
            load_params = dict(load_params['model'])
        if 'state_dict' in load_params:
            load_params = dict(load_params['state_dict'])
        model_params = dict(model.named_parameters())
        for k, v in load_params.items():
            # Strip DataParallel's 'module.' prefix before matching names;
            # copy only parameters whose shapes agree.
            k = k.replace('module.', '')
            if k in model_params and v.size() == model_params[k].size():
                model_params[k].data = v.data
                logging.warning('load parameters {}'.format(k))
    model.recog_args = args
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info('gpu id: ' + str(gpu_id))
        model.cuda()
    # Read the utterance manifest.
    with open(args.recog_json, 'rb') as f:
        js = json.load(f)['utts']
    new_js = {}
    load_inputs_and_targets = LoadInputsAndTargets(
        mode='asr', load_output=False, sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None else args.preprocess_conf,
        preprocess_args={'train': False})
    with torch.no_grad():
        for idx, name in enumerate(js.keys(), 1):
            logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
            batch = [(name, js[name])]
            feat = load_inputs_and_targets(batch)
            # Reference token ids from the manifest, as int64 for the model.
            y = np.fromiter(map(int, batch[0][1]['output'][0]['tokenid'].split()), dtype=np.int64)
            align = model.viterbi_decode(feat[0][0], y)
            # One alignment entry is expected per reference token.
            assert len(align) == len(y)
            new_js[name] = js[name]
            new_js[name]['output'][0]['align'] = ' '.join([str(i) for i in list(align)])
    with open(args.result_label, 'wb') as f:
        f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
| 34.036364 | 110 | 0.615028 |
import json
import logging
import os
import numpy as np
import torch
from espnet.asr.asr_utils import add_results_to_json, add_single_results
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
def recog(args):
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
model.recog_args = args
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError(
"use '--api v2' option to decode with non-default language model"
)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None),
)
)
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = feat[0][0]
if args.prefix_decode:
best, ids, score = model.prefix_recognize(feat, args, train_args, train_args.char_list, rnnlm)
new_js[name] = add_single_results(js[name], best, ids, score)
else:
nbest_hyps = model.recognize(
feat, args, train_args.char_list, rnnlm
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
def viterbi_decode(args):
set_deterministic_pytorch(args)
idim, odim, train_args = get_model_conf(
args.model, os.path.join(os.path.dirname(args.model), 'model.json'))
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
if args.model is not None:
load_params = dict(torch.load(args.model))
if 'model' in load_params:
load_params = dict(load_params['model'])
if 'state_dict' in load_params:
load_params = dict(load_params['state_dict'])
model_params = dict(model.named_parameters())
for k, v in load_params.items():
k = k.replace('module.', '')
if k in model_params and v.size() == model_params[k].size():
model_params[k].data = v.data
logging.warning('load parameters {}'.format(k))
model.recog_args = args
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
with open(args.recog_json, 'rb') as f:
js = json.load(f)['utts']
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode='asr', load_output=False, sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False})
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
y = np.fromiter(map(int, batch[0][1]['output'][0]['tokenid'].split()), dtype=np.int64)
align = model.viterbi_decode(feat[0][0], y)
assert len(align) == len(y)
new_js[name] = js[name]
new_js[name]['output'][0]['align'] = ' '.join([str(i) for i in list(align)])
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
| true | true |
f721ab5c5621aefa332a1c1c49b2c98c1ff4fa57 | 2,408 | py | Python | pythonup/operations/common.py | uranusjr/pythonup-windows | af25844af1c5fdc8a90ae95435c8ce322e5e41e5 | [
"0BSD"
] | 22 | 2018-01-18T21:03:26.000Z | 2021-06-29T00:19:44.000Z | pythonup/operations/common.py | uranusjr/pythonup-windows | af25844af1c5fdc8a90ae95435c8ce322e5e41e5 | [
"0BSD"
] | 22 | 2018-02-22T17:08:50.000Z | 2021-11-07T09:20:18.000Z | pythonup/operations/common.py | uranusjr/pythonup-windows | af25844af1c5fdc8a90ae95435c8ce322e5e41e5 | [
"0BSD"
] | 2 | 2018-01-18T21:03:30.000Z | 2021-01-18T05:14:18.000Z | import functools
import click
from .. import configs, metadata, versions
def check_installation(version, *, installed=True, on_exit=None):
    """Assert that *version* matches the expected installation state.

    When the actual state matches ``installed``, return the installation
    object (or ``None`` when absence was the expectation).  On a mismatch,
    print an explanatory message to stderr, run ``on_exit`` if given, and
    abort the current click context with exit code 1.
    """
    installation = None
    found = True
    try:
        installation = version.get_installation()
    except FileNotFoundError:
        found = False

    if found and installed:
        return installation
    if not found and not installed:
        return None

    # Expectation not met: report and bail out of the CLI.
    template = '{} is not installed.' if installed else '{} is already installed.'
    click.echo(template.format(version), err=True)
    if on_exit:
        on_exit()
    click.get_current_context().exit(1)
def get_active_names():
    """Return the version names currently marked active in the config."""
    return configs.get_active_names()


def set_active_versions(versions):
    """Persist *versions* (version objects) as the active set, by name."""
    configs.set_active_names([v.name for v in versions])
def get_versions(*, installed_only):
    """List known versions, optionally restricted to installed ones.

    On a host that cannot install 64-bit builds, a 64-bit entry is hidden
    whenever its 32-bit counterpart (``<name>-32``) is also known.
    """
    available = versions.get_versions()
    known_names = {v.name for v in available}

    def _visible(ver):
        if installed_only and not ver.is_installed():
            return False
        # Hide the 64-bit build when a 32-bit twin exists and 64-bit
        # installation is not possible on this machine.
        has_32_twin = (not ver.name.endswith('-32') and
                       '{}-32'.format(ver.name) in known_names)
        if has_32_twin and not metadata.can_install_64bit():
            return False
        return True

    return list(filter(_visible, available))
def get_version(name):
    """Resolve *name* to a version object, exiting the CLI when unknown.

    On hosts without 64-bit support, resolution is forced to the 32-bit
    build; a note is printed when the resolved name differs from the one
    asked for.
    """
    only_32bit = not metadata.can_install_64bit()
    try:
        resolved = versions.get_version(name, force_32=only_32bit)
    except versions.VersionNotFoundError:
        click.echo('No such version: {}'.format(name), err=True)
        click.get_current_context().exit(1)
    if resolved.name != name:
        click.echo('Note: Selecting {} instead of {}'.format(
            resolved.name, name,
        ))
    return resolved
def version_command(*, plural=False, wild_versions=()):
    """Decorator factory that resolves a command's ``version`` argument.

    The wrapped command receives ``version`` (or ``versions`` when
    *plural* is true) already resolved through :func:`get_version`.
    Names listed in *wild_versions* are passed through untouched.
    """
    if wild_versions:
        def _resolve(name):
            return name if name in wild_versions else get_version(name)
    else:
        _resolve = get_version

    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, version, **kwargs):
            if plural:
                kwargs['versions'] = [_resolve(n) for n in version]
            else:
                kwargs['version'] = _resolve(version)
            return func(*args, **kwargs)
        return wrapped
    return decorator
| 28 | 79 | 0.618355 | import functools
import click
from .. import configs, metadata, versions
def check_installation(version, *, installed=True, on_exit=None):
try:
installation = version.get_installation()
except FileNotFoundError:
if not installed:
return None
message = '{} is not installed.'
else:
if installed:
return installation
message = '{} is already installed.'
click.echo(message.format(version), err=True)
if on_exit:
on_exit()
click.get_current_context().exit(1)
def get_active_names():
return configs.get_active_names()
def set_active_versions(versions):
configs.set_active_names([v.name for v in versions])
def get_versions(*, installed_only):
vers = versions.get_versions()
names = set(v.name for v in vers)
def should_include(version):
if installed_only and not version.is_installed():
return False
if (not metadata.can_install_64bit() and
not version.name.endswith('-32') and
'{}-32'.format(version.name) in names):
return False
return True
return [v for v in vers if should_include(v)]
def get_version(name):
force_32 = not metadata.can_install_64bit()
try:
version = versions.get_version(name, force_32=force_32)
except versions.VersionNotFoundError:
click.echo('No such version: {}'.format(name), err=True)
click.get_current_context().exit(1)
if version.name != name:
click.echo('Note: Selecting {} instead of {}'.format(
version.name, name,
))
return version
def version_command(*, plural=False, wild_versions=()):
if wild_versions:
def _get_version(n):
if n in wild_versions:
return n
return get_version(n)
else:
_get_version = get_version
def decorator(f):
@functools.wraps(f)
def wrapped(*args, version, **kw):
if plural:
kw['versions'] = [_get_version(n) for n in version]
else:
kw['version'] = _get_version(version)
return f(*args, **kw)
return wrapped
return decorator
| true | true |
f721ab81cbd00ed051aca6942799ab865c6412c5 | 238 | py | Python | frappe/website/doctype/website_route_redirect/website_route_redirect.py | ssuda777/frappe | d3f3df2ce15154aecc1d9d6d07d947e72c2e8c6e | [
"MIT"
] | 1 | 2021-06-03T07:04:48.000Z | 2021-06-03T07:04:48.000Z | frappe/website/doctype/website_route_redirect/website_route_redirect.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 3 | 2021-02-27T11:50:14.000Z | 2021-05-03T06:48:49.000Z | frappe/website/doctype/website_route_redirect/website_route_redirect.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 2 | 2021-09-02T09:51:55.000Z | 2021-09-07T04:55:42.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WebsiteRouteRedirect(Document):
    """Controller for the Website Route Redirect DocType.

    No custom behaviour; everything is inherited from ``Document``.
    """
    pass
| 23.8 | 58 | 0.768908 |
from frappe.model.document import Document
class WebsiteRouteRedirect(Document):
pass
| true | true |
f721abc28aeee16569cf14634251ef073a83b8f1 | 2,289 | py | Python | core/models.py | mackay/ble_detector | 4d7c3e9edd7bbeeea0bd0bebce43c1bb9d02ee41 | [
"MIT"
] | null | null | null | core/models.py | mackay/ble_detector | 4d7c3e9edd7bbeeea0bd0bebce43c1bb9d02ee41 | [
"MIT"
] | null | null | null | core/models.py | mackay/ble_detector | 4d7c3e9edd7bbeeea0bd0bebce43c1bb9d02ee41 | [
"MIT"
] | null | null | null | from peewee import *
import json
from datetime import datetime
#set sane default log levels
import logging
logging.getLogger('peewee').setLevel(logging.INFO)
logging.getLogger("peewee.pool").setLevel(logging.DEBUG)
database = SqliteDatabase('detector.db')
class JSONField(TextField):
    """Text column that transparently (de)serialises JSON values."""

    def db_value(self, value):
        # Serialise to a JSON string for storage; NULL stays NULL.
        return None if value is None else json.dumps(value)

    def python_value(self, value):
        # Decode the stored JSON back to Python objects; NULL stays None.
        return None if value is None else json.loads(value)
class BaseModel(Model):
    """Common base for all tables: binds the shared SQLite database."""

    def __init__(self, *args, **kwargs):
        super(BaseModel, self).__init__( *args, **kwargs )
        # Expose the table name as this model's base URI.
        self._meta.base_uri = self._meta.db_table

    class Meta:
        database = database
        base_uri = "unknown"  # replaced per-instance in __init__
class SystemOption(BaseModel):
    """Simple key/value store for application-wide settings."""
    key = CharField(max_length=64, unique=True, index=True)
    value = CharField(max_length=255)


class ActiveEntity(BaseModel):
    """Base row for anything identified by a UUID that emits packets."""
    uuid = CharField(max_length=64, unique=True, index=True)
    last_active = DateTimeField(null=True)
    total_packets = IntegerField(default=0)
    metadata = JSONField(null=True)

    class Meta:
        order_by = ('uuid', )


class Detector(ActiveEntity):
    pass


class Beacon(ActiveEntity):
    # 0/1 flag; presumably marks beacons accepted for tracking — TODO confirm
    is_accepted = IntegerField(default=0)


class Agent(ActiveEntity):
    pass


class Signal(BaseModel):
    """A single RSSI reading of a beacon as seen by a detector."""
    date = DateTimeField(default=datetime.utcnow)
    detector = ForeignKeyField(rel_model=Detector)
    beacon = ForeignKeyField(rel_model=Beacon)
    rssi = FloatField()
    source_data = CharField(max_length=255, null=True)


class Training(BaseModel):
    """A labelled training sample: the expected outcome for a beacon."""
    date = DateTimeField(default=datetime.utcnow)
    beacon = ForeignKeyField(rel_model=Beacon)
    expectation = JSONField()
    is_used = IntegerField(default=1)

    class Meta:
        order_by = ('date', 'expectation', 'beacon')


class TrainingSignal(BaseModel):
    """Join table linking a Training sample to its underlying signals."""
    training = ForeignKeyField(rel_model=Training, related_name='signals')
    signal = ForeignKeyField(rel_model=Signal)
def initialize():
    """Create the SQLite schema (idempotent) and release the connection.

    ``safe=True`` turns every CREATE TABLE into a no-op when the table
    already exists, so this can run on every start-up.
    """
    database.connect()
    try:
        # One call covers all models; peewee resolves the creation order.
        database.create_tables([
            SystemOption,
            Detector, Beacon, Agent,
            Signal,
            Training, TrainingSignal,
        ], safe=True)
    finally:
        # Bug fix: the connection used to leak when table creation raised.
        database.close()
| 24.094737 | 74 | 0.70118 | from peewee import *
import json
from datetime import datetime
import logging
logging.getLogger('peewee').setLevel(logging.INFO)
logging.getLogger("peewee.pool").setLevel(logging.DEBUG)
database = SqliteDatabase('detector.db')
class JSONField(TextField):
def db_value(self, value):
if value is not None:
return json.dumps(value)
return None
def python_value(self, value):
if value is not None:
return json.loads(value)
class BaseModel(Model):
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__( *args, **kwargs )
self._meta.base_uri = self._meta.db_table
class Meta:
database = database
base_uri = "unknown"
class SystemOption(BaseModel):
key = CharField(max_length=64, unique=True, index=True)
value = CharField(max_length=255)
class ActiveEntity(BaseModel):
uuid = CharField(max_length=64, unique=True, index=True)
last_active = DateTimeField(null=True)
total_packets = IntegerField(default=0)
metadata = JSONField(null=True)
class Meta:
order_by = ('uuid', )
class Detector(ActiveEntity):
pass
class Beacon(ActiveEntity):
is_accepted = IntegerField(default=0)
class Agent(ActiveEntity):
pass
class Signal(BaseModel):
date = DateTimeField(default=datetime.utcnow)
detector = ForeignKeyField(rel_model=Detector)
beacon = ForeignKeyField(rel_model=Beacon)
rssi = FloatField()
source_data = CharField(max_length=255, null=True)
class Training(BaseModel):
date = DateTimeField(default=datetime.utcnow)
beacon = ForeignKeyField(rel_model=Beacon)
expectation = JSONField()
is_used = IntegerField(default=1)
class Meta:
order_by = ('date', 'expectation', 'beacon')
class TrainingSignal(BaseModel):
training = ForeignKeyField(rel_model=Training, related_name='signals')
signal = ForeignKeyField(rel_model=Signal)
def initialize():
database.connect()
database.create_tables([ SystemOption ], safe=True)
database.create_tables([ Detector, Beacon, Agent ], safe=True)
database.create_tables([ Signal ], safe=True)
database.create_tables([ Training, TrainingSignal ], safe=True)
database.close()
| true | true |
f721ae2772712944094b9c2e009ee6bae9dce86c | 827 | py | Python | app/main/models/EMI.py | pOrgz-dev/financial-api | edf849cfbcedf74a8b81f70683a1edfbea172fb7 | [
"MIT"
] | null | null | null | app/main/models/EMI.py | pOrgz-dev/financial-api | edf849cfbcedf74a8b81f70683a1edfbea172fb7 | [
"MIT"
] | null | null | null | app/main/models/EMI.py | pOrgz-dev/financial-api | edf849cfbcedf74a8b81f70683a1edfbea172fb7 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from .. import db
class EMI_Information(db.Model):
    """EMI (equated monthly instalment) details for a purchased item."""

    __tablename__ = "EMI_Information"

    EMI_Identifier = db.Column(db.String(45), primary_key=True, nullable=False)
    ItemName = db.Column(db.String(45), nullable=False)
    ProductPrice = db.Column(db.Float, nullable=False)
    InterestRate = db.Column(db.Float, nullable=False)  # presumably annual %, TODO confirm
    Tenure = db.Column(db.Integer, nullable=False)      # presumably months, TODO confirm
    MonthlyEMI = db.Column(db.Float, nullable=False)

    def __repr__(self):
        # Bug fix: the original opened "(" after the identifier but closed
        # with ">" only, producing an unbalanced debug string.
        return (f"<{self.EMI_Identifier}(ItemName = {self.ItemName}, "
                f"ProductPrice = {self.ProductPrice}, Tenure = {self.Tenure})>")

    def toDict(self):
        """Return the row as a plain ``{column_name: value}`` dictionary."""
        return {c.key: getattr(self, c.key) for c in self.__table__.columns}
from .. import db
class EMI_Information(db.Model):
__tablename__ = "EMI_Information"
EMI_Identifier = db.Column(db.String(45),primary_key = True, nullable = False)
ItemName = db.Column(db.String(45), nullable = False)
ProductPrice = db.Column(db.Float, nullable = False)
InterestRate = db.Column(db.Float, nullable = False)
Tenure = db.Column(db.Integer, nullable = False)
MonthlyEMI = db.Column(db.Float, nullable = False)
def __repr__(self):
return f"<{self.EMI_Identifier}(ItemName = {self.ItemName}, ProductPrice = {self.ProductPrice}, Tenure = {self.Tenure}>"
def toDict(self):
return { c.key : getattr(self, c.key) for c in self.__table__.columns } | true | true |
f721aeecd78fde51b1f23b627ac73ea974b16e4f | 5,118 | py | Python | draw_tracking_line.py | jiyauppal/face-mask-detector.github.io | 210ce81fa37c441a076fbb8db28376268e634412 | [
"Apache-2.0"
] | 1 | 2021-05-13T07:54:08.000Z | 2021-05-13T07:54:08.000Z | draw_tracking_line.py | jiyauppal/face-mask-detector.github.io | 210ce81fa37c441a076fbb8db28376268e634412 | [
"Apache-2.0"
] | null | null | null | draw_tracking_line.py | jiyauppal/face-mask-detector.github.io | 210ce81fa37c441a076fbb8db28376268e634412 | [
"Apache-2.0"
] | null | null | null | import cv2
import datetime
import imutils
import numpy as np
from centroidtracker import CentroidTracker
from collections import defaultdict
# Caffe definition + weights for the pretrained MobileNet-SSD detector.
protopath = "MobileNetSSD_deploy.prototxt"
modelpath = "MobileNetSSD_deploy.caffemodel"
detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
# Only enable it if you are using OpenVino environment
# detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

# The 21 VOC classes the SSD model emits; list index matches the class id
# returned by the network (only "person" is kept downstream).
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

# Tracker tolerates 80 frames of absence and 90 px of centroid movement
# before declaring a new object.
tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
def non_max_suppression_fast(boxes, overlapThresh):
    """Greedy non-maximum suppression over axis-aligned bounding boxes.

    Parameters
    ----------
    boxes : np.ndarray of shape (N, 4)
        Boxes as ``[x1, y1, x2, y2]`` rows.
    overlapThresh : float
        Candidates whose intersection with a kept box exceeds this
        fraction of their own area are suppressed.

    Returns
    -------
    np.ndarray | list
        Integer array of retained boxes, or ``[]`` when *boxes* is empty
        or suppression fails.
    """
    try:
        if len(boxes) == 0:
            return []
        # Work in float so the area/overlap arithmetic is not truncated.
        if boxes.dtype.kind == "i":
            boxes = boxes.astype("float")

        pick = []  # indices of the boxes we keep
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]

        area = (x2 - x1 + 1) * (y2 - y1 + 1)
        idxs = np.argsort(y2)  # process by bottom edge, lowest last

        while len(idxs) > 0:
            # Keep the box with the largest y2 still in play.
            last = len(idxs) - 1
            i = idxs[last]
            pick.append(i)

            # Intersection of box i with every other remaining box.
            xx1 = np.maximum(x1[i], x1[idxs[:last]])
            yy1 = np.maximum(y1[i], y1[idxs[:last]])
            xx2 = np.minimum(x2[i], x2[idxs[:last]])
            yy2 = np.minimum(y2[i], y2[idxs[:last]])

            w = np.maximum(0, xx2 - xx1 + 1)
            h = np.maximum(0, yy2 - yy1 + 1)

            # Overlap measured against each candidate's own area.
            overlap = (w * h) / area[idxs[:last]]

            # Drop the picked index plus everything overlapping too much.
            idxs = np.delete(idxs, np.concatenate(
                ([last], np.where(overlap > overlapThresh)[0])))

        return boxes[pick].astype("int")
    except Exception as e:
        # Bug fix: the original fell through and returned None here, which
        # crashed the caller (tracker.update expects an iterable of boxes).
        print("Exception occurred in non_max_suppression : {}".format(e))
        return []
def main():
    """Run person detection + centroid tracking on ``test_video.mp4``.

    Each frame is pushed through the MobileNet-SSD detector, overlapping
    detections are merged, and the centroid tracker assigns stable ids.
    The trajectory, bounding box, id and running FPS are drawn on screen.
    Press ``q`` to quit early.
    """
    cap = cv2.VideoCapture('test_video.mp4')

    fps_start_time = datetime.datetime.now()
    fps = 0
    total_frames = 0

    centroid_dict = defaultdict(list)  # objectId -> list of past centroids
    object_id_list = []                # ids we have already seen

    while True:
        ret, frame = cap.read()
        if not ret:
            # Bug fix: the original passed a None frame to imutils.resize
            # once the video ended (or failed to open), crashing the loop.
            break
        frame = imutils.resize(frame, width=600)
        total_frames = total_frames + 1

        (H, W) = frame.shape[:2]

        # Scale factor / mean follow the MobileNet-SSD Caffe deploy config.
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)

        detector.setInput(blob)
        person_detections = detector.forward()
        rects = []
        for i in np.arange(0, person_detections.shape[2]):
            confidence = person_detections[0, 0, i, 2]
            if confidence > 0.5:
                idx = int(person_detections[0, 0, i, 1])

                # Keep only "person" detections.
                if CLASSES[idx] != "person":
                    continue

                person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                (startX, startY, endX, endY) = person_box.astype("int")
                rects.append(person_box)

        # Merge overlapping detections before feeding the tracker.
        boundingboxes = np.array(rects)
        boundingboxes = boundingboxes.astype(int)
        rects = non_max_suppression_fast(boundingboxes, 0.3)

        objects = tracker.update(rects)
        for (objectId, bbox) in objects.items():
            x1, y1, x2, y2 = bbox
            x1 = int(x1)
            y1 = int(y1)
            x2 = int(x2)
            y2 = int(y2)

            # Centroid of the current bounding box.
            cX = int((x1 + x2) / 2.0)
            cY = int((y1 + y2) / 2.0)
            cv2.circle(frame, (cX, cY), 4, (0, 255, 0), -1)

            centroid_dict[objectId].append((cX, cY))
            if objectId not in object_id_list:
                # First sighting: register the id and draw a zero-length
                # segment at the current centroid.
                object_id_list.append(objectId)
                start_pt = (cX, cY)
                end_pt = (cX, cY)
                cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
            else:
                # Draw the full trajectory as segments between consecutive
                # stored centroids.
                l = len(centroid_dict[objectId])
                for pt in range(len(centroid_dict[objectId])):
                    if not pt + 1 == l:
                        start_pt = (centroid_dict[objectId][pt][0], centroid_dict[objectId][pt][1])
                        end_pt = (centroid_dict[objectId][pt + 1][0], centroid_dict[objectId][pt + 1][1])
                        cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)

            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
            text = "ID: {}".format(objectId)
            cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)

        # Running average FPS since start-up (0.0 during the first second).
        fps_end_time = datetime.datetime.now()
        time_diff = fps_end_time - fps_start_time
        if time_diff.seconds == 0:
            fps = 0.0
        else:
            fps = (total_frames / time_diff.seconds)

        fps_text = "FPS: {:.2f}".format(fps)
        cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)

        cv2.imshow("Application", frame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break

    cv2.destroyAllWindows()
# Idiom fix: guard the entry point so importing this module (e.g. to reuse
# non_max_suppression_fast) does not immediately open the video window.
if __name__ == "__main__":
    main()
| 33.45098 | 106 | 0.524424 | import cv2
import datetime
import imutils
import numpy as np
from centroidtracker import CentroidTracker
from collections import defaultdict
protopath = "MobileNetSSD_deploy.prototxt"
modelpath = "MobileNetSSD_deploy.caffemodel"
detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
def non_max_suppression_fast(boxes, overlapThresh):
try:
if len(boxes) == 0:
return []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
return boxes[pick].astype("int")
except Exception as e:
print("Exception occurred in non_max_suppression : {}".format(e))
def main():
cap = cv2.VideoCapture('test_video.mp4')
fps_start_time = datetime.datetime.now()
fps = 0
total_frames = 0
centroid_dict = defaultdict(list)
object_id_list = []
while True:
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
total_frames = total_frames + 1
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
detector.setInput(blob)
person_detections = detector.forward()
rects = []
for i in np.arange(0, person_detections.shape[2]):
confidence = person_detections[0, 0, i, 2]
if confidence > 0.5:
idx = int(person_detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = person_box.astype("int")
rects.append(person_box)
boundingboxes = np.array(rects)
boundingboxes = boundingboxes.astype(int)
rects = non_max_suppression_fast(boundingboxes, 0.3)
objects = tracker.update(rects)
for (objectId, bbox) in objects.items():
x1, y1, x2, y2 = bbox
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
cX = int((x1 + x2) / 2.0)
cY = int((y1 + y2) / 2.0)
cv2.circle(frame, (cX, cY), 4, (0, 255, 0), -1)
centroid_dict[objectId].append((cX, cY))
if objectId not in object_id_list:
object_id_list.append(objectId)
start_pt = (cX, cY)
end_pt = (cX, cY)
cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
else:
l = len(centroid_dict[objectId])
for pt in range(len(centroid_dict[objectId])):
if not pt + 1 == l:
start_pt = (centroid_dict[objectId][pt][0], centroid_dict[objectId][pt][1])
end_pt = (centroid_dict[objectId][pt + 1][0], centroid_dict[objectId][pt + 1][1])
cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
text = "ID: {}".format(objectId)
cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
fps_end_time = datetime.datetime.now()
time_diff = fps_end_time - fps_start_time
if time_diff.seconds == 0:
fps = 0.0
else:
fps = (total_frames / time_diff.seconds)
fps_text = "FPS: {:.2f}".format(fps)
cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
cv2.imshow("Application", frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
cv2.destroyAllWindows()
main()
| true | true |
f721aef7525b920408840cd454d2a33a4df2714c | 1,953 | py | Python | setup.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 27 | 2018-06-15T15:28:18.000Z | 2022-03-10T12:23:50.000Z | setup.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 22 | 2018-06-14T08:29:16.000Z | 2021-07-05T13:33:44.000Z | setup.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 8 | 2019-04-13T13:03:51.000Z | 2021-06-19T09:29:11.000Z | #!/usr/bin/env python3
import os
from setuptools import setup, find_packages
def get_version():
    """Return the package version string, without a leading "v" tag prefix."""
    from pyxrd.__version import __version__
    if __version__.startswith("v"):
        # Bug fix: .replace("v", "") removed *every* "v" in the string
        # (e.g. "v1.0-rev2" -> "1.0-re2"); drop only the leading prefix.
        __version__ = __version__[1:]
    return __version__
def get_install_requires():
    """Runtime dependencies, as pip requirement specifiers."""
    requirements = (
        'setuptools',
        'numpy>=1.11',
        'scipy>=1.1.0',
        'matplotlib>=2.2.2',
        'Pyro4>=4.41',
        'deap>=1.0.1',
        'cairocffi',
        'pygobject>=3.20',
    )
    return list(requirements)
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    The encoding is pinned to UTF-8 so the long_description is read the
    same way regardless of the platform's locale default encoding.
    """
    with open(os.path.join(os.path.dirname(__file__), fname),
              encoding="utf-8") as f:
        return f.read()
# Package metadata and build configuration for PyXRD.
setup(
    name="PyXRD",
    version=get_version(),
    description="PyXRD is a python implementation of the matrix algorithm developed for the X-ray diffraction analysis of disordered lamellar structures",
    long_description=read('README.md'),
    keywords="XRD disorder mixed-layers",
    author="Mathijs Dumon",
    author_email="mathijs.dumon@gmail.com",
    url="http://github.org/mathijs-dumon/PyXRD",
    license="BSD",
    # setuptools_git includes every git-tracked file in the sdist.
    setup_requires=[ "setuptools_git >= 1.2", ],
    packages=find_packages(exclude=["test.*", "test", "tests_mvc", "tests_mvc.*"]),
    include_package_data=True,
    install_requires=get_install_requires(),
    zip_safe=False,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.4",
        "Environment :: Win32 (MS Windows)",
        "Environment :: X11 Applications :: Gnome",
        "Environment :: X11 Applications :: GTK",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Science/Research",
        "Topic :: Utilities",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Visualization",
        "Natural Language :: English",
        "License :: OSI Approved :: BSD License",
    ],
)
| 31.5 | 154 | 0.622632 |
import os
from setuptools import setup, find_packages
def get_version():
from pyxrd.__version import __version__
if __version__.startswith("v"):
__version__ = __version__.replace("v", "")
return "%s" % __version__
def get_install_requires():
return [
'setuptools',
'numpy>=1.11',
'scipy>=1.1.0',
'matplotlib>=2.2.2',
'Pyro4>=4.41',
'deap>=1.0.1',
'cairocffi',
'pygobject>=3.20'
]
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name="PyXRD",
version=get_version(),
description="PyXRD is a python implementation of the matrix algorithm developed for the X-ray diffraction analysis of disordered lamellar structures",
long_description=read('README.md'),
keywords="XRD disorder mixed-layers",
author="Mathijs Dumon",
author_email="mathijs.dumon@gmail.com",
url="http://github.org/mathijs-dumon/PyXRD",
license="BSD",
setup_requires=[ "setuptools_git >= 1.2", ],
packages=find_packages(exclude=["test.*", "test", "tests_mvc", "tests_mvc.*"]),
include_package_data=True,
install_requires=get_install_requires(),
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.4",
"Environment :: Win32 (MS Windows)",
"Environment :: X11 Applications :: Gnome",
"Environment :: X11 Applications :: GTK",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Topic :: Utilities",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
],
)
| true | true |
f721afa5606a9e63a7128757986d8b2a4eb9a224 | 2,755 | py | Python | scripts/py_scripts/calculate_cluster_average.py | Elenadisa/PhenCo | f320fc286b90ec566afb5edfe3d6d1e3dcc28497 | [
"MIT"
] | 3 | 2020-12-12T03:17:13.000Z | 2021-02-21T01:43:29.000Z | scripts/py_scripts/calculate_cluster_average.py | Elenadisa/PhenCo | f320fc286b90ec566afb5edfe3d6d1e3dcc28497 | [
"MIT"
] | 5 | 2021-02-03T04:15:03.000Z | 2021-03-17T07:29:14.000Z | scripts/py_scripts/calculate_cluster_average.py | Elenadisa/PhenCo | f320fc286b90ec566afb5edfe3d6d1e3dcc28497 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
##############################################################################################################################################
# METHODS
##############################################################################################################################################
import functions as fn
##############################################################################################################################################
# OPTPARSE
##############################################################################################################################################
import optparse

# Command-line interface: cluster file, the two column indices to read
# from it, and the labels used to tag the single output line.
parser = optparse.OptionParser()
parser.add_option("-c", "--cluster file", dest="dictionary",
                  help="Input file with the clusters of a network", metavar="FILE")
parser.add_option("-A", "--cluster_id", dest="cluster_id",
                  help="column which have clusters identificators", type='int')
parser.add_option("-B", "--item_id", dest="item_id",
                  help="column which have HPO o disease identificators", type='int')
parser.add_option("-m", "--model", dest="model_type",
                  help="network_type", metavar="str")
parser.add_option("-n", "--model_name", dest="model_name",
                  help="network_name", metavar="str")
parser.add_option("-e", "--enrichment_type", dest="enrichment",
                  help="type of enrichment", metavar="str")
parser.add_option("-p", "--p_value", dest="pvalue",
                  help="pvalue", metavar="float")
(options, args) = parser.parse_args()
###############################################################################################################################################
# MAIN
###############################################################################################################################################
import numpy as np
import os.path as path

# Average cluster size = mean number of items per cluster; 0 when the
# cluster file is missing, and the mean over [0] when it is empty.
if path.exists(options.dictionary):
    dictionary = fn.build_dictionary(options.dictionary, options.cluster_id, options.item_id)
    sizes = [len(dictionary[cluster_id]) for cluster_id in dictionary] or [0]
    mean = np.mean(sizes)
else:
    mean = 0

# One tab-separated record: model name, model type, metric label, value.
print("\t".join([
    options.model_name,
    options.model_type,
    "Average_Cluster_size_" + options.enrichment + "_" + options.pvalue,
    str(mean),
]))
| 50.090909 | 154 | 0.450091 | true | true | |
f721b00012139ce758efe463a3d3ca112283819e | 1,375 | py | Python | docs/development/custom-vectors/secp256k1/verify_secp256k1.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 8 | 2015-01-29T19:16:40.000Z | 2021-01-08T05:55:03.000Z | docs/development/custom-vectors/secp256k1/verify_secp256k1.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2021-01-05T06:46:37.000Z | 2022-03-30T19:06:26.000Z | docs/development/custom-vectors/secp256k1/verify_secp256k1.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2015-11-06T01:47:01.000Z | 2021-12-01T00:22:52.000Z | from __future__ import absolute_import, print_function
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature,
)
from tests.utils import load_fips_ecdsa_signing_vectors, load_vectors_from_file
# Maps the hash names used in the FIPS vector files to the corresponding
# cryptography hash classes.
CRYPTOGRAPHY_HASH_TYPES = {
    "SHA-1": hashes.SHA1,
    "SHA-224": hashes.SHA224,
    "SHA-256": hashes.SHA256,
    "SHA-384": hashes.SHA384,
    "SHA-512": hashes.SHA512,
}
def verify_one_vector(vector):
    """Verify a single ECDSA/SECP256K1 signing vector.

    Rebuilds the public key from the vector's (x, y) point, DER-encodes
    the (r, s) signature and checks it against the message.  Raises
    ``InvalidSignature`` when the vector does not verify.
    """
    digest_algorithm = vector["digest_algorithm"]
    message = vector["message"]
    x = vector["x"]
    y = vector["y"]
    signature = encode_dss_signature(vector["r"], vector["s"])

    numbers = ec.EllipticCurvePublicNumbers(x, y, ec.SECP256K1())
    key = numbers.public_key(default_backend())

    # Modernisation: the streaming `key.verifier(...)` API was deprecated
    # in cryptography 2.0 and removed in 37.0; `verify` is the one-shot
    # replacement with identical semantics for in-memory messages.
    key.verify(
        signature, message,
        ec.ECDSA(CRYPTOGRAPHY_HASH_TYPES[digest_algorithm]()),
    )
def verify_vectors(vectors):
    """Verify every signing vector, stopping at the first failure."""
    for entry in vectors:
        verify_one_vector(entry)
# Location of the generated SECP256K1 signing vectors, relative to the
# vector root that load_vectors_from_file searches.
vector_path = os.path.join("asymmetric", "ECDSA", "SECP256K1", "SigGen.txt")

secp256k1_vectors = load_vectors_from_file(
    vector_path, load_fips_ecdsa_signing_vectors
)
# Fails loudly (InvalidSignature) if any generated vector is wrong.
verify_vectors(secp256k1_vectors)
| 25.943396 | 79 | 0.744 | from __future__ import absolute_import, print_function
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature,
)
from tests.utils import load_fips_ecdsa_signing_vectors, load_vectors_from_file
CRYPTOGRAPHY_HASH_TYPES = {
"SHA-1": hashes.SHA1,
"SHA-224": hashes.SHA224,
"SHA-256": hashes.SHA256,
"SHA-384": hashes.SHA384,
"SHA-512": hashes.SHA512,
}
def verify_one_vector(vector):
digest_algorithm = vector["digest_algorithm"]
message = vector["message"]
x = vector["x"]
y = vector["y"]
signature = encode_dss_signature(vector["r"], vector["s"])
numbers = ec.EllipticCurvePublicNumbers(x, y, ec.SECP256K1())
key = numbers.public_key(default_backend())
verifier = key.verifier(
signature, ec.ECDSA(CRYPTOGRAPHY_HASH_TYPES[digest_algorithm]())
)
verifier.update(message)
verifier.verify()
def verify_vectors(vectors):
for vector in vectors:
verify_one_vector(vector)
vector_path = os.path.join("asymmetric", "ECDSA", "SECP256K1", "SigGen.txt")
secp256k1_vectors = load_vectors_from_file(
vector_path, load_fips_ecdsa_signing_vectors
)
verify_vectors(secp256k1_vectors)
| true | true |
f721b0aaa3a21ebd95d28ba898211ca8c479b10e | 4,747 | py | Python | mlprodict/onnx_tools/optim/onnx_optimisation_identity.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | mlprodict/onnx_tools/optim/onnx_optimisation_identity.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | mlprodict/onnx_tools/optim/onnx_optimisation_identity.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | """
@file
@brief Optimisation of :epkg:`ONNX` graphs.
"""
from onnx.helper import make_graph
from ._onnx_optimisation_common import ( # pylint: disable=E0611
_rename_node_input,
_rename_node_output,
_apply_optimisation_on_graph,
_apply_remove_node_fct_node
)
def onnx_remove_node_identity(onnx_model, recursive=True, debug_info=None, **options):
    """
    Removes as many *Identity* nodes as possible.
    The function looks into every node (and into subgraphs if
    *recursive* is True) for identity nodes. Unless such a
    node directly connects one graph input to one graph output, it will
    be removed and every other node gets its inputs or
    outputs accordingly renamed.
    @param      onnx_model      onnx model
    @param      recursive       looks into subgraphs
    @param      debug_info      debug information (private)
    @param      options         additional options (unused)
    @return                     new onnx _model
    """
    # Track the chain of visited types; only used to ease debugging of
    # recursive calls into subgraphs.
    if debug_info is None:
        debug_info = [str(type(onnx_model)).rsplit(
            '.', maxsplit=1)[-1].strip("'>")]
    else:
        debug_info = (debug_info +
                      [str(type(onnx_model)).rsplit('.', maxsplit=1)[-1].strip("'>")])
    # A ModelProto wraps a GraphProto: delegate so that the rest of this
    # function only ever deals with a graph.
    if hasattr(onnx_model, 'graph'):
        return _apply_optimisation_on_graph(
            onnx_remove_node_identity, onnx_model,
            recursive=recursive, debug_info=debug_info, **options)
    graph = onnx_model
    # Graph input/output names must be preserved by every rewrite below.
    inputs = set(i.name for i in graph.input)
    outputs = set(o.name for o in graph.output)
    def retrieve_idnodes(graph, existing_nodes):
        # Returns (index, node, input name, output name) for every Identity
        # node still present; None entries mark already-removed nodes.
        idnodes = []
        for i, exnode in enumerate(existing_nodes):
            if exnode is None:
                continue
            if exnode.op_type == 'Identity':
                input = exnode.input[0]
                output = exnode.output[0]
                idnodes.append((i, exnode, input, output))
        return idnodes
    nodes = list(graph.node)
    # Fixed-point iteration: keep rewriting until a full pass removes nothing.
    rem = 1
    while rem > 0:
        rem = 0
        idnodes = retrieve_idnodes(graph, nodes)
        restart = False
        for i, _, inp, out in idnodes:
            if restart:
                # A rename touched another Identity node: the idnodes list
                # is stale, leave the loop and recompute it.
                break  # pragma: no cover
            if nodes[i] is None:
                # Already removed.
                continue  # pragma: no cover
            if inp in inputs and out in outputs:
                # Cannot be removed: it directly bridges a graph input to a
                # graph output and both names have to survive.
                continue
            if not restart and out not in outputs:
                # We cannot change an output name.
                # Safe case: rewire every consumer of *out* to read *inp*
                # instead, then drop the Identity node.
                for j in range(len(nodes)):  # pylint: disable=C0200
                    if nodes[j] is None:
                        continue
                    if out in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], out, inp)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True  # pragma: no cover
                nodes[i] = None
                rem += 1
                continue
            if not restart and inp not in inputs and inp not in outputs:
                # We cannot change an input name or an output name.
                # Here *out* is a graph output, so rename the producer (and
                # any consumer) of *inp* to use *out* directly.
                for j in range(len(nodes)):  # pylint: disable=C0200
                    if nodes[j] is None:
                        continue
                    if inp in nodes[j].output:
                        nodes[j] = _rename_node_output(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True  # pragma: no cover
                    if inp in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                nodes[i] = None
                rem += 1
    if recursive:
        # Handles subgraphs stored in node attributes.
        for i in range(len(nodes)):  # pylint: disable=C0200
            node = nodes[i]
            if node is None or not (node.attribute):  # pylint: disable=C0325
                continue
            nodes[i] = _apply_remove_node_fct_node(
                onnx_remove_node_identity,
                node, recursive=True, debug_info=debug_info + [node.name])
    # Finally create the new graph without the removed (None) entries.
    nodes = list(filter(lambda n: n is not None, nodes))
    graph = make_graph(nodes, onnx_model.name,
                       onnx_model.input, onnx_model.output,
                       onnx_model.initializer)
    graph.value_info.extend(onnx_model.value_info)  # pylint: disable=E1101
    return graph
| 39.231405 | 87 | 0.52391 | from onnx.helper import make_graph
from ._onnx_optimisation_common import (
_rename_node_input,
_rename_node_output,
_apply_optimisation_on_graph,
_apply_remove_node_fct_node
)
def onnx_remove_node_identity(onnx_model, recursive=True, debug_info=None, **options):
    """
    Removes as many *Identity* nodes as possible.

    @param      onnx_model      onnx model or graph
    @param      recursive       looks into subgraphs
    @param      debug_info      debug information (private)
    @param      options         additional options (unused)
    @return                     new onnx graph
    """
    # Track the chain of visited types for debugging of recursive calls.
    if debug_info is None:
        debug_info = [str(type(onnx_model)).rsplit(
            '.', maxsplit=1)[-1].strip("'>")]
    else:
        debug_info = (debug_info +
                      [str(type(onnx_model)).rsplit('.', maxsplit=1)[-1].strip("'>")])
    # A model wraps a graph: delegate so the rest only handles graphs.
    if hasattr(onnx_model, 'graph'):
        return _apply_optimisation_on_graph(
            onnx_remove_node_identity, onnx_model,
            recursive=recursive, debug_info=debug_info, **options)
    graph = onnx_model
    # Graph-level input/output names must be preserved by every rewrite.
    inputs = set(i.name for i in graph.input)
    outputs = set(o.name for o in graph.output)
    def retrieve_idnodes(graph, existing_nodes):
        # (index, node, input name, output name) of remaining Identity nodes;
        # None entries mark already-removed nodes.
        idnodes = []
        for i, exnode in enumerate(existing_nodes):
            if exnode is None:
                continue
            if exnode.op_type == 'Identity':
                input = exnode.input[0]
                output = exnode.output[0]
                idnodes.append((i, exnode, input, output))
        return idnodes
    nodes = list(graph.node)
    # Fixed-point iteration: stop when a full pass removes nothing.
    rem = 1
    while rem > 0:
        rem = 0
        idnodes = retrieve_idnodes(graph, nodes)
        restart = False
        for i, _, inp, out in idnodes:
            if restart:
                # Identity list went stale after a rename: recompute it.
                break
            if nodes[i] is None:
                # Already removed.
                continue
            if inp in inputs and out in outputs:
                # Direct graph-input-to-graph-output bridge: must be kept.
                continue
            if not restart and out not in outputs:
                # Rewire consumers of *out* to read *inp*, then drop the node.
                for j in range(len(nodes)):
                    if nodes[j] is None:
                        continue
                    if out in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], out, inp)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                nodes[i] = None
                rem += 1
                continue
            if not restart and inp not in inputs and inp not in outputs:
                # *out* is a graph output here: rename the producer (and any
                # consumer) of *inp* to use *out*, then drop the node.
                for j in range(len(nodes)):
                    if nodes[j] is None:
                        continue
                    if inp in nodes[j].output:
                        nodes[j] = _rename_node_output(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                    if inp in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                nodes[i] = None
                rem += 1
    if recursive:
        # Optimise graphs stored in node attributes (subgraphs).
        for i in range(len(nodes)):
            node = nodes[i]
            if node is None or not (node.attribute):
                continue
            nodes[i] = _apply_remove_node_fct_node(
                onnx_remove_node_identity,
                node, recursive=True, debug_info=debug_info + [node.name])
    # Build the resulting graph without the removed (None) entries.
    nodes = list(filter(lambda n: n is not None, nodes))
    graph = make_graph(nodes, onnx_model.name,
                       onnx_model.input, onnx_model.output,
                       onnx_model.initializer)
    graph.value_info.extend(onnx_model.value_info)
    return graph
| true | true |
f721b154eb6f80cea86ed321cc3199bcce85024f | 300 | py | Python | 01-code-scripts/example.py | calekochenour/python-formatter-env | 9cc0b484e9b8b8d17a8abe5d2f9f49af953a7790 | [
"BSD-3-Clause"
] | null | null | null | 01-code-scripts/example.py | calekochenour/python-formatter-env | 9cc0b484e9b8b8d17a8abe5d2f9f49af953a7790 | [
"BSD-3-Clause"
] | null | null | null | 01-code-scripts/example.py | calekochenour/python-formatter-env | 9cc0b484e9b8b8d17a8abe5d2f9f49af953a7790 | [
"BSD-3-Clause"
def example_function(first_parameter, second_parameter, third_parameter, fourth_parameter, fifth_parameter):
    """Example function to test the code formatter.

    Accumulates the five parameters with left-to-right addition and
    returns the total (works for any type supporting ``+``).
    """
    running_total = first_parameter
    for next_value in (second_parameter, third_parameter, fourth_parameter, fifth_parameter):
        running_total = running_total + next_value
    return running_total
| 50 | 109 | 0.806667 | def example_function(first_parameter, second_parameter, third_parameter, fourth_parameter, fifth_parameter):
parameter_sum = first_parameter + second_parameter + third_parameter + fourth_parameter + fifth_parameter
return parameter_sum
| true | true |
f721b168bc3ebd2c6a8be74cae0fb14973d58fc0 | 4,618 | py | Python | examples/orcid_app.py | jennur/invenio-oauthclient | 9b8bd7bc8bcbbe178aad3f0f8a2e620749c9980b | [
"MIT"
] | 3 | 2015-08-19T12:50:05.000Z | 2017-10-25T00:58:05.000Z | examples/orcid_app.py | jennur/invenio-oauthclient | 9b8bd7bc8bcbbe178aad3f0f8a2e620749c9980b | [
"MIT"
] | 169 | 2015-08-03T11:25:49.000Z | 2022-02-10T08:06:20.000Z | examples/orcid_app.py | jennur/invenio-oauthclient | 9b8bd7bc8bcbbe178aad3f0f8a2e620749c9980b | [
"MIT"
] | 73 | 2015-08-03T15:16:05.000Z | 2022-03-07T15:34:36.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Minimal Flask application example for development with orcid handler.
SPHINX-START
1. Register an orcid application with `Authorization callback URL` as
`http://localhost:5000/oauth/authorized/orcid/`
2. Install oauthclient:
.. code-block:: console
cdvirtualenv src/invenio-oauthclient
pip install -e .[orcid]
3. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration as `consumer_key` and
`consumer_secret`.
.. code-block:: console
$ export ORCID_APP_CREDENTIALS_KEY=my_orcid_client_id
$ export ORCID_APP_CREDENTIALS_SECRET=my_orcid_client_secret
4. Create database and tables:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ export FLASK_APP=orcid_app.py
$ ./app-setup.sh
You can find the database in `examples/orcid_app.db`.
5. Run the development server:
.. code-block:: console
$ flask -a orcid_app.py run -p 5000 -h '0.0.0.0'
6. Open in a browser the page `http://0.0.0.0:5000/orcid`.
You will be redirected to orcid to authorize the application.
Click on `Authorize application` and you will be redirected back to
`http://0.0.0.0:5000/oauth/authorized/orcid/`, where you will be able to
finalize the local user registration, inserting email address.
Insert e.g. `fuu@bar.it` as email address and send the form.
Now, you will be again in homepage but this time it say: `hello fuu@bar.it`.
You have completed the user registration.
7. To be able to uninstall the example app:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_mail import InvenioMail as Mail
from invenio_userprofiles import InvenioUserProfiles
from invenio_userprofiles.views import \
blueprint_api_init as blueprint_userprofile_api_init
from invenio_userprofiles.views import \
blueprint_ui_init as blueprint_userprofile_ui_init
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import orcid
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug # noqa isort:skip
monkey_patch_werkzeug() # noqa isort:skip
from flask_oauthlib.client import OAuth as FlaskOAuth # noqa isort:skip
# [ Configure application credentials ]
# Client ID/secret issued by ORCID when the application was registered;
# read from the environment so they never land in the repository.
ORCID_APP_CREDENTIALS = dict(
    consumer_key=os.environ.get('ORCID_APP_CREDENTIALS_KEY'),
    consumer_secret=os.environ.get('ORCID_APP_CREDENTIALS_SECRET'),
)
# Create Flask application
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_ECHO=False,
    # Local SQLite file by default; override with SQLALCHEMY_DATABASE_URI.
    SQLALCHEMY_DATABASE_URI=os.environ.get(
        'SQLALCHEMY_DATABASE_URI', 'sqlite:///orcid_app.db'
    ),
    # Use the ORCID *sandbox* endpoints, appropriate for development.
    OAUTHCLIENT_REMOTE_APPS=dict(
        orcid=orcid.REMOTE_SANDBOX_APP,
    ),
    ORCID_APP_CREDENTIALS=ORCID_APP_CREDENTIALS,
    DEBUG=True,
    # Hard-coded secrets below are acceptable only for this example app.
    SECRET_KEY='TEST',
    SECURITY_PASSWORD_SALT='security-password-salt',
    SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
    USERPROFILES_EXTEND_SECURITY_FORMS=True,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    APP_THEME=['semantic-ui'],
    THEME_ICONS={
        'semantic-ui': dict(
            link='linkify icon'
        )
    }
)
# Initialise Flask extensions and Invenio modules on the application.
Babel(app)
FlaskMenu(app)
Mail(app)
InvenioDB(app)
InvenioAccounts(app)
InvenioUserProfiles(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
# Register the views: accounts, OAuth client/settings, user profiles.
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
app.register_blueprint(blueprint_userprofile_api_init)
app.register_blueprint(blueprint_userprofile_ui_init)
@app.route('/')
def index():
    """Homepage, reachable without any authentication."""
    return 'Home page (without any restrictions)'
@app.route('/orcid')
def orcid():
    """Try to print user email or redirect to login with orcid.

    NOTE(review): this view shadows the ``orcid`` module imported at the
    top of the file.  The module is only read earlier (for
    ``REMOTE_SANDBOX_APP``) so this works, but renaming one of the two
    would be safer.
    """
    if not current_user.is_authenticated:
        # Not logged in yet: start the OAuth flow with the ORCID remote app.
        return redirect(url_for('invenio_oauthclient.login',
                                remote_app='orcid'))
    return 'hello {}'.format(current_user.email)
| 28.8625 | 80 | 0.750325 |
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_mail import InvenioMail as Mail
from invenio_userprofiles import InvenioUserProfiles
from invenio_userprofiles.views import \
blueprint_api_init as blueprint_userprofile_api_init
from invenio_userprofiles.views import \
blueprint_ui_init as blueprint_userprofile_ui_init
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import orcid
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug
monkey_patch_werkzeug()
from flask_oauthlib.client import OAuth as FlaskOAuth
# ORCID OAuth client credentials, supplied via environment variables.
ORCID_APP_CREDENTIALS = dict(
    consumer_key=os.environ.get('ORCID_APP_CREDENTIALS_KEY'),
    consumer_secret=os.environ.get('ORCID_APP_CREDENTIALS_SECRET'),
)
# Flask application configured against the ORCID *sandbox* remote app.
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_ECHO=False,
    SQLALCHEMY_DATABASE_URI=os.environ.get(
        'SQLALCHEMY_DATABASE_URI', 'sqlite:///orcid_app.db'
    ),
    OAUTHCLIENT_REMOTE_APPS=dict(
        orcid=orcid.REMOTE_SANDBOX_APP,
    ),
    ORCID_APP_CREDENTIALS=ORCID_APP_CREDENTIALS,
    DEBUG=True,
    # Hard-coded secrets: acceptable only for this example application.
    SECRET_KEY='TEST',
    SECURITY_PASSWORD_SALT='security-password-salt',
    SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
    USERPROFILES_EXTEND_SECURITY_FORMS=True,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    APP_THEME=['semantic-ui'],
    THEME_ICONS={
        'semantic-ui': dict(
            link='linkify icon'
        )
    }
)
# Initialise Flask extensions and Invenio modules.
Babel(app)
FlaskMenu(app)
Mail(app)
InvenioDB(app)
InvenioAccounts(app)
InvenioUserProfiles(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
# Register account, OAuth client/settings and user-profile views.
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
app.register_blueprint(blueprint_userprofile_api_init)
app.register_blueprint(blueprint_userprofile_ui_init)
@app.route('/')
def index():
    """Homepage, reachable without any authentication."""
    return 'Home page (without any restrictions)'
@app.route('/orcid')
def orcid():
    """Greet the logged-in user, or start the ORCID OAuth login flow.

    NOTE(review): shadows the ``orcid`` module imported above; the module
    is only read earlier (``REMOTE_SANDBOX_APP``), so this works, but a
    rename would be clearer.
    """
    if not current_user.is_authenticated:
        return redirect(url_for('invenio_oauthclient.login',
                                remote_app='orcid'))
    return 'hello {}'.format(current_user.email)
| true | true |
f721b1ff207ea23d5cdd699f29d320911240c621 | 743 | py | Python | notes/migrations/0001_initial.py | chalikavanyaa/stu-do-list | b6af2f1072936240a59f1b63cc7fc32999132da4 | [
"Unlicense"
] | 2 | 2021-12-02T07:15:24.000Z | 2021-12-15T06:27:53.000Z | notes/migrations/0001_initial.py | chalikavanyaa/stu-do-list | b6af2f1072936240a59f1b63cc7fc32999132da4 | [
"Unlicense"
] | 1 | 2021-11-05T12:42:12.000Z | 2021-11-05T12:42:12.000Z | notes/migrations/0001_initial.py | chalikavanyaa/stu-do-list | b6af2f1072936240a59f1b63cc7fc32999132da4 | [
"Unlicense"
] | 6 | 2021-10-30T13:44:16.000Z | 2021-12-29T09:14:18.000Z | # Generated by Django 3.2.7 on 2021-11-04 20:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration creating the ``NotesModel`` table."""

    # First migration of the app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='NotesModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Field names are Indonesian: Penulis=author, Matkul=course,
                # Topik=topic, Keterangan=description.
                ('Penulis', models.CharField(max_length=150)),
                ('Matkul', models.CharField(max_length=150)),
                ('Topik', models.CharField(max_length=150)),
                ('Keterangan', models.TextField()),
                ('Link', models.URLField()),
            ],
        ),
    ]
| 27.518519 | 118 | 0.537012 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration creating the ``NotesModel`` table."""

    # First migration of the app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='NotesModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Field names are Indonesian: Penulis=author, Matkul=course,
                # Topik=topic, Keterangan=description.
                ('Penulis', models.CharField(max_length=150)),
                ('Matkul', models.CharField(max_length=150)),
                ('Topik', models.CharField(max_length=150)),
                ('Keterangan', models.TextField()),
                ('Link', models.URLField()),
            ],
        ),
    ]
| true | true |
f721b33e8da5aa1935c59645b23cb35201dfccdd | 340 | py | Python | Hackerrank/swap-case.py | sourav1122/Hacktoberfest | 3e3a6e1a537632b1f2b7af3b3b69c8696355047c | [
"MIT"
] | 1 | 2019-10-13T13:43:18.000Z | 2019-10-13T13:43:18.000Z | Hackerrank/swap-case.py | sourav1122/Hacktoberfest | 3e3a6e1a537632b1f2b7af3b3b69c8696355047c | [
"MIT"
] | null | null | null | Hackerrank/swap-case.py | sourav1122/Hacktoberfest | 3e3a6e1a537632b1f2b7af3b3b69c8696355047c | [
"MIT"
] | null | null | null | #!/bin/python3
# Swaps case of all chars in provided string
def swap_case(s):
    """Return *s* with the case of every cased character inverted.

    Uses the built-in ``str.swapcase()`` instead of hand-mapping a helper
    over every character; non-alphabetic characters are left unchanged.
    """
    return s.swapcase()
def swapChar(char):
    """Return the case-swapped form of a single character."""
    # Conditional expression instead of the if/else statement pair;
    # non-letters fail islower() and fall through .lower() unchanged.
    return char.upper() if char.islower() else char.lower()
# Read one line from stdin and echo it back with every character's case
# swapped.  The single-character branch is redundant (swap_case handles
# length-1 strings too) but is kept as written.
n=input()
if len(n)==1:
    print(swapChar(n))
else:
    print(swap_case(n))
| 17.894737 | 44 | 0.623529 |
def swap_case(s):
formattedStr = "".join(map(swapChar, s))
return formattedStr
def swapChar(char):
if char.islower():
return char.upper()
else:
return char.lower()
n=input()
if len(n)==1:
print(swapChar(n))
else:
print(swap_case(n))
| true | true |
f721b3f846fa3924e1f8ff5e8b545d82d1f3e494 | 205 | py | Python | 1072.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | 3 | 2020-11-25T19:05:31.000Z | 2021-03-29T07:29:36.000Z | 1072.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | null | null | null | 1072.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | null | null | null | qte = int(input())
sim = 0
nao = 0
for i in range(qte):
valor = int(input())
if(valor >= 10 and valor <= 20):
sim += 1
else:
nao += 1
print("%d in" %sim)
print("%d out" %nao) | 14.642857 | 36 | 0.487805 | qte = int(input())
sim = 0
nao = 0
for i in range(qte):
valor = int(input())
if(valor >= 10 and valor <= 20):
sim += 1
else:
nao += 1
print("%d in" %sim)
print("%d out" %nao) | true | true |
f721b4abc95f52800b933cdfce1558f764e48a65 | 1,087 | py | Python | utils/fonts_scanner.py | sunnywalden/oss_management | 4d417801ba0c55493788b356921c4e3ea462a851 | [
"Apache-2.0"
] | null | null | null | utils/fonts_scanner.py | sunnywalden/oss_management | 4d417801ba0c55493788b356921c4e3ea462a851 | [
"Apache-2.0"
] | null | null | null | utils/fonts_scanner.py | sunnywalden/oss_management | 4d417801ba0c55493788b356921c4e3ea462a851 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: sunnywalden@gmail.com
import os
from utils.get_logger import Log
def get_fonts_from_local(fonts_dir='../fonts'):
    """Yield paths of font files (.ttf/.otf) found under *fonts_dir*.

    Generator: walks the directory tree recursively and yields each font
    file path as it is discovered; non-font files are only logged.

    :param fonts_dir: root directory to scan.  The default keeps the
        previously hard-coded ``'../fonts'`` behaviour, so existing
        callers are unaffected.
    """
    log = Log()
    logger = log.logger_generate('font_scanner')
    # Extensions considered fonts; extend here if more formats are needed.
    font_suffixes = ('.ttf', '.otf')
    for root, dirs, files in os.walk(fonts_dir):
        logger.info('File found %s, dirs: %s' % (files, dirs))
        for file in files:
            logger.info('File found %s' % file)
            fonts_file_path = os.path.join(root, file)
            # Single splitext call instead of two identical ones.
            if os.path.splitext(file)[1] in font_suffixes:
                logger.info('Fonts file found: %s' % fonts_file_path)
                yield fonts_file_path
            else:
                logger.info('Files which is not a fonts be ignored: %s' % file)
if __name__ == '__main__':
    # Ad-hoc manual check: print every font file found under ../fonts.
    get_fonts_files = get_fonts_from_local()
    for fonts_file in iter(get_fonts_files):
        print(fonts_file)
| 29.378378 | 90 | 0.601656 |
import os
from utils.get_logger import Log
def get_fonts_from_local(fonts_dir='../fonts'):
    """Yield paths of font files (.ttf/.otf) found under *fonts_dir*.

    :param fonts_dir: root directory to scan; default keeps the old
        hard-coded ``'../fonts'`` behaviour for existing callers.
    """
    log = Log()
    logger = log.logger_generate('font_scanner')
    # Extensions considered fonts.
    font_suffixes = ('.ttf', '.otf')
    for root, dirs, files in os.walk(fonts_dir):
        logger.info('File found %s, dirs: %s' % (files, dirs))
        for file in files:
            logger.info('File found %s' % file)
            fonts_file_path = os.path.join(root, file)
            # Single splitext call instead of two identical ones.
            if os.path.splitext(file)[1] in font_suffixes:
                logger.info('Fonts file found: %s' % fonts_file_path)
                yield fonts_file_path
            else:
                logger.info('Files which is not a fonts be ignored: %s' % file)
if __name__ == '__main__':
    # Ad-hoc manual check: print every font file found under ../fonts.
    get_fonts_files = get_fonts_from_local()
    for fonts_file in iter(get_fonts_files):
        print(fonts_file)
| true | true |
f721b4f5eb357708bf5747da4008cd53e3881f89 | 1,359 | py | Python | pyblas/level1/scnrm2.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | pyblas/level1/scnrm2.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | 1 | 2020-10-10T23:23:06.000Z | 2020-10-10T23:23:06.000Z | pyblas/level1/scnrm2.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ..util import slice_
def scnrm2(N, X, INCX):
    """Computes the Euclidean norm of the vector x
    Parameters
    ----------
    N : int
        Number of elements in input vector
    X : numpy.ndarray
        A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCX`))
    INCX : int
        Storage spacing between elements of `X`
    Returns
    -------
    numpy.single
    See Also
    --------
    snrm2 : Single-precision real euclidean norm
    dnrm2 : Double-precision real euclidean norm
    dznrm2 : Double-precision complex euclidean norm
    Notes
    -----
    Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/scnrm2.ipynb
    Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/scnrm2.f
    Examples
    --------
    >>> x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64)
    >>> N = len(x)
    >>> incx = 1
    >>> print(scnrm2(N, x, incx))
    6.5574384
    """
    # Quick return for empty vectors (BLAS convention).
    if N <= 0:
        return 0
    # Note: This implementation suffers from potential overflow errors for large vector values.
    # More sophisticated implementations can avoid this with appropriate scaling applied before
    # taking the square of large values.
    # norm = sqrt(real(sum(conj(x) * x))) over the strided selection of N elements.
    return np.sqrt((X[slice_(N, INCX)].conj() * X[slice_(N, INCX)]).sum().real)
| 29.543478 | 113 | 0.636497 | import numpy as np
from ..util import slice_
def scnrm2(N, X, INCX):
    """Computes the Euclidean norm of the single-precision complex vector x.

    N is the number of elements, X the array, INCX the storage spacing
    between elements.  Returns 0 for non-positive N (quick return).
    """
    if N <= 0:
        return 0
    # sqrt(real(sum(conj(x) * x))); may overflow for very large magnitudes
    # because no scaling is applied before squaring.
    return np.sqrt((X[slice_(N, INCX)].conj() * X[slice_(N, INCX)]).sum().real)
| true | true |
f721b541788468f6224ba9b4f3e9d2a8b01d2637 | 3,999 | py | Python | tests/manage/monitoring/prometheus/test_deployment_status.py | shivamdurgbuns/ocs-ci | 0fa3a19cab39dcc76843338e4af357c197c08843 | [
"MIT"
] | null | null | null | tests/manage/monitoring/prometheus/test_deployment_status.py | shivamdurgbuns/ocs-ci | 0fa3a19cab39dcc76843338e4af357c197c08843 | [
"MIT"
] | null | null | null | tests/manage/monitoring/prometheus/test_deployment_status.py | shivamdurgbuns/ocs-ci | 0fa3a19cab39dcc76843338e4af357c197c08843 | [
"MIT"
] | null | null | null | import logging
import pytest
from ocs_ci.framework.testlib import tier4, tier4a
from ocs_ci.ocs import constants
from ocs_ci.utility import prometheus
from ocs_ci.ocs.ocp import OCP
log = logging.getLogger(__name__)
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-1052")
def test_ceph_manager_stopped(measure_stop_ceph_mgr):
    """
    Test that there is appropriate alert when ceph manager
    is unavailable and that this alert is cleared when the manager
    is back online.
    """
    api = prometheus.PrometheusAPI()
    # get alerts from time when manager deployment was scaled down
    alerts = measure_stop_ceph_mgr.get("prometheus_alerts")
    target_label = constants.ALERT_MGRISABSENT
    target_msg = "Storage metrics collector service not available anymore."
    states = ["pending", "firing"]
    # The alert must have been raised (in either state) while mgr was down...
    prometheus.check_alert_list(
        label=target_label,
        msg=target_msg,
        alerts=alerts,
        states=states,
        severity="critical",
    )
    # ...and must clear once the mgr deployment is scaled back up.
    api.check_alert_cleared(
        label=target_label, measure_end_time=measure_stop_ceph_mgr.get("stop")
    )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-904")
def test_ceph_monitor_stopped(measure_stop_ceph_mon):
    """
    Test that there is appropriate alert related to ceph monitor quorum
    when there is even number of ceph monitors and that this alert
    is cleared when monitors are back online.
    """
    api = prometheus.PrometheusAPI()
    # get alerts from time when monitor deployment was scaled down
    alerts = measure_stop_ceph_mon.get("prometheus_alerts")
    # Two alerts are expected while a monitor is missing; each is checked
    # with its own expected states and severity.
    for target_label, target_msg, target_states, target_severity in [
        (
            constants.ALERT_MONQUORUMATRISK,
            "Storage quorum at risk",
            ["pending"],
            "error",
        ),
        (
            constants.ALERT_CLUSTERWARNINGSTATE,
            "Storage cluster is in degraded state",
            ["pending"],
            "warning",
        ),
    ]:
        prometheus.check_alert_list(
            label=target_label,
            msg=target_msg,
            alerts=alerts,
            states=target_states,
            severity=target_severity,
        )
        # Each alert must clear after monitors are back online.
        api.check_alert_cleared(
            label=target_label, measure_end_time=measure_stop_ceph_mon.get("stop")
        )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-900")
def test_ceph_osd_stopped(measure_stop_ceph_osd):
    """
    Test that there is appropriate alert related to situation when ceph osd
    is down. Alert is cleared when osd disk is back online.
    """
    api = prometheus.PrometheusAPI()
    # get alerts from time when the osd was stopped
    alerts = measure_stop_ceph_osd.get("prometheus_alerts")
    # Three alerts are expected; ``ignore`` relaxes the occurrence count
    # check (ignore_more_occurences) for the recovery alert.
    for target_label, target_msg, target_states, target_severity, ignore in [
        (
            constants.ALERT_OSDDISKNOTRESPONDING,
            "Disk not responding",
            ["pending", "firing"],
            "error",
            False,
        ),
        (
            constants.ALERT_DATARECOVERYTAKINGTOOLONG,
            "Data recovery is slow",
            ["pending"],
            "warning",
            True,
        ),
        (
            constants.ALERT_CLUSTERWARNINGSTATE,
            "Storage cluster is in degraded state",
            ["pending", "firing"],
            "warning",
            False,
        ),
    ]:
        prometheus.check_alert_list(
            label=target_label,
            msg=target_msg,
            alerts=alerts,
            states=target_states,
            severity=target_severity,
            ignore_more_occurences=ignore,
        )
        # the time to wait is increased because it takes more time for osd pod
        # to be ready than for other pods
        osd_up_wait = 360
        api.check_alert_cleared(
            label=target_label,
            measure_end_time=measure_stop_ceph_osd.get("stop"),
            time_min=osd_up_wait,
        )
def teardown_module():
    """Module-level pytest teardown.

    NOTE(review): presumably restores the service-account login after the
    disruptive measurements in this module — confirm against OCP helper docs.
    """
    ocs_obj = OCP()
    ocs_obj.login_as_sa()
| 29.189781 | 82 | 0.632908 | import logging
import pytest
from ocs_ci.framework.testlib import tier4, tier4a
from ocs_ci.ocs import constants
from ocs_ci.utility import prometheus
from ocs_ci.ocs.ocp import OCP
log = logging.getLogger(__name__)
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-1052")
def test_ceph_manager_stopped(measure_stop_ceph_mgr):
    """Check MGRISABSENT is raised while ceph-mgr is down and clears after."""
    api = prometheus.PrometheusAPI()
    # Alerts gathered while the mgr deployment was scaled down.
    alerts = measure_stop_ceph_mgr.get("prometheus_alerts")
    target_label = constants.ALERT_MGRISABSENT
    target_msg = "Storage metrics collector service not available anymore."
    states = ["pending", "firing"]
    prometheus.check_alert_list(
        label=target_label,
        msg=target_msg,
        alerts=alerts,
        states=states,
        severity="critical",
    )
    # The alert must clear once the mgr is back.
    api.check_alert_cleared(
        label=target_label, measure_end_time=measure_stop_ceph_mgr.get("stop")
    )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-904")
def test_ceph_monitor_stopped(measure_stop_ceph_mon):
    """Check quorum/cluster-warning alerts while a ceph-mon is down."""
    api = prometheus.PrometheusAPI()
    # Alerts gathered while the monitor deployment was scaled down.
    alerts = measure_stop_ceph_mon.get("prometheus_alerts")
    for target_label, target_msg, target_states, target_severity in [
        (
            constants.ALERT_MONQUORUMATRISK,
            "Storage quorum at risk",
            ["pending"],
            "error",
        ),
        (
            constants.ALERT_CLUSTERWARNINGSTATE,
            "Storage cluster is in degraded state",
            ["pending"],
            "warning",
        ),
    ]:
        prometheus.check_alert_list(
            label=target_label,
            msg=target_msg,
            alerts=alerts,
            states=target_states,
            severity=target_severity,
        )
        # Each alert must clear once monitors are back online.
        api.check_alert_cleared(
            label=target_label, measure_end_time=measure_stop_ceph_mon.get("stop")
        )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-900")
def test_ceph_osd_stopped(measure_stop_ceph_osd):
    """Check osd-down alerts are raised and cleared around an osd outage."""
    api = prometheus.PrometheusAPI()
    # Alerts gathered while the osd was stopped.
    alerts = measure_stop_ceph_osd.get("prometheus_alerts")
    # ``ignore`` relaxes the occurrence-count check (ignore_more_occurences).
    for target_label, target_msg, target_states, target_severity, ignore in [
        (
            constants.ALERT_OSDDISKNOTRESPONDING,
            "Disk not responding",
            ["pending", "firing"],
            "error",
            False,
        ),
        (
            constants.ALERT_DATARECOVERYTAKINGTOOLONG,
            "Data recovery is slow",
            ["pending"],
            "warning",
            True,
        ),
        (
            constants.ALERT_CLUSTERWARNINGSTATE,
            "Storage cluster is in degraded state",
            ["pending", "firing"],
            "warning",
            False,
        ),
    ]:
        prometheus.check_alert_list(
            label=target_label,
            msg=target_msg,
            alerts=alerts,
            states=target_states,
            severity=target_severity,
            ignore_more_occurences=ignore,
        )
        # Longer wait: an osd pod takes more time to become ready.
        osd_up_wait = 360
        api.check_alert_cleared(
            label=target_label,
            measure_end_time=measure_stop_ceph_osd.get("stop"),
            time_min=osd_up_wait,
        )
def teardown_module():
    """Module-level pytest teardown.

    NOTE(review): presumably restores the service-account login after the
    disruptive measurements in this module — confirm against OCP helper docs.
    """
    ocs_obj = OCP()
    ocs_obj.login_as_sa()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.