Dataset schema (one row per source file; column: type and observed range):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)
blob_id: 19fda0d42f4945fbaab04e4e5af22b7374bb0c3d | directory_id: 10e03b0f3569bd4b7f81ac3ec5c4ff992db1047c
path: /tests/helpers.py | content_id: 8c78e10293a00fc6e99e5b477296e47e19846b28
detected_licenses: ["BSD-2-Clause"] | license_type: permissive | repo_name: miracle2k/flask-assets
snapshot_id: b751e0879871d77cc7b8ec81ca441cf639a6eede | revision_id: 8433cc30fd4dff046a95dd02e26a3e901595be21 | branch_name: refs/heads/master
visit_date: 2023-08-16T15:30:24.992898 | revision_date: 2020-02-29T00:06:32 | committer_date: 2020-02-29T00:06:32
github_id: 821,291 | star_events_count: 325 | fork_events_count: 75 | gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-07-24T17:22:55 | gha_created_at: 2010-08-06T11:51:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,105 | extension: py | filename: helpers.py
content:
from flask.app import Flask
from webassets.test import TempEnvironmentHelper as BaseTempEnvironmentHelper
from flask_assets import Environment
try:
from flask import Blueprint
Module = None
except ImportError:
# Blueprints only available starting with 0.7,
# fall back to old Modules otherwise.
Blueprint = None
from flask import Module
__all__ = ('TempEnvironmentHelper', 'Module', 'Blueprint')
class TempEnvironmentHelper(BaseTempEnvironmentHelper):
def _create_environment(self, **kwargs):
if not hasattr(self, 'app'):
self.app = Flask(__name__, static_folder=self.tempdir, **kwargs)
self.env = Environment(self.app)
return self.env
try:
from test.test_support import check_warnings
except ImportError:
# Python < 2.6
import contextlib
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
# We cannot reasonably support this; we'd have to copy too much code
# (or write our own). Since this is only testing warnings output,
# we can get by with ignoring it.
yield
|
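The try/except at the top of helpers.py is a feature probe: it exports whichever container type the installed Flask provides (Blueprint on 0.7+, Module before that) and sets the other name to None. A minimal sketch of how a consumer can branch on the probe; the import path is illustrative:

```python
# Hypothetical consumer of the helpers module shown above.
from helpers import Blueprint, Module

if Blueprint is not None:
    container = Blueprint('assets_tests', __name__)  # Flask >= 0.7
else:
    container = Module(__name__)  # pre-0.7 fallback
```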
blob_id: a5df78e2b48da4579205e7645e1d7f1bd94d9aa9 | directory_id: ce1c91c33d9b612e97361527e5a974996208c90d
path: /glue/viewers/profile/tests/test_state.py | content_id: c574558dbee91a2d0a87ef134b2d74e205f364b3
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: glue-viz/glue
snapshot_id: 5f52faaf91e1ca4822d3983b6a4b9b60e8807f38 | revision_id: 1a5c7676c025a1a025068b806f6f90ed53bba543 | branch_name: refs/heads/main
visit_date: 2023-09-04T09:24:00.519833 | revision_date: 2023-08-17T09:40:04 | committer_date: 2023-08-17T09:40:04
github_id: 1,768,238 | star_events_count: 609 | fork_events_count: 149 | gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-13T20:56:14 | gha_created_at: 2011-05-18T20:58:54 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,861 | extension: py | filename: test_state.py
content:
from glue.core.data_collection import DataCollection
import numpy as np
from numpy.testing import assert_allclose
from glue.core import Data, Coordinates
from glue.core.tests.test_state import clone
from ..state import ProfileViewerState, ProfileLayerState
class SimpleCoordinates(Coordinates):
def __init__(self):
super().__init__(pixel_n_dim=3, world_n_dim=3)
def pixel_to_world_values(self, *args):
return tuple([2.0 * p for p in args])
def world_to_pixel_values(self, *args):
return tuple([0.5 * w for w in args])
@property
def axis_correlation_matrix(self):
matrix = np.zeros((self.world_n_dim, self.pixel_n_dim), dtype=bool)
matrix[2, 2] = True
matrix[0:2, 0:2] = True
return matrix
class TestProfileViewerState:
def setup_method(self, method):
self.data = Data(label='d1')
self.data.coords = SimpleCoordinates()
self.data['x'] = np.arange(24).reshape((3, 4, 2)).astype(float)
self.data_collection = DataCollection([self.data])
self.viewer_state = ProfileViewerState()
self.layer_state = ProfileLayerState(viewer_state=self.viewer_state,
layer=self.data)
self.viewer_state.layers.append(self.layer_state)
self.viewer_state.function = 'mean'
def test_basic(self):
x, y = self.layer_state.profile
assert_allclose(x, [0, 2, 4])
assert_allclose(y, [3.5, 11.5, 19.5])
def test_basic_world(self):
self.viewer_state.x_att = self.data.world_component_ids[0]
x, y = self.layer_state.profile
assert_allclose(x, [0, 2, 4])
assert_allclose(y, [3.5, 11.5, 19.5])
def test_x_att(self):
self.viewer_state.x_att = self.data.pixel_component_ids[0]
x, y = self.layer_state.profile
assert_allclose(x, [0, 1, 2])
assert_allclose(y, [3.5, 11.5, 19.5])
self.viewer_state.x_att = self.data.pixel_component_ids[1]
x, y = self.layer_state.profile
assert_allclose(x, [0, 1, 2, 3])
assert_allclose(y, [8.5, 10.5, 12.5, 14.5])
self.viewer_state.x_att = self.data.pixel_component_ids[2]
x, y = self.layer_state.profile
assert_allclose(x, [0, 1])
assert_allclose(y, [11, 12])
def test_function(self):
self.viewer_state.function = 'mean'
x, y = self.layer_state.profile
assert_allclose(y, [3.5, 11.5, 19.5])
self.viewer_state.function = 'minimum'
x, y = self.layer_state.profile
assert_allclose(y, [0, 8, 16])
self.viewer_state.function = 'maximum'
x, y = self.layer_state.profile
assert_allclose(y, [7, 15, 23])
self.viewer_state.function = 'sum'
x, y = self.layer_state.profile
assert_allclose(y, [28, 92, 156])
self.viewer_state.function = 'median'
x, y = self.layer_state.profile
assert_allclose(y, [3.5, 11.5, 19.5])
def test_subset(self):
subset = self.data.new_subset()
subset.subset_state = self.data.id['x'] > 10
self.layer_state.layer = subset
x, y = self.layer_state.profile
assert_allclose(x, [0, 2, 4])
assert_allclose(y, [np.nan, 13., 19.5])
subset.subset_state = self.data.id['x'] > 100
x, y = self.layer_state.profile
assert len(x) == 0
assert len(y) == 0
def test_clone(self):
self.viewer_state.x_att = self.data.pixel_component_ids[1]
self.viewer_state.function = 'median'
self.layer_state.attribute = self.data.id['x']
self.layer_state.linewidth = 3
viewer_state_new = clone(self.viewer_state)
assert viewer_state_new.x_att.label == 'Pixel Axis 1 [y]'
assert viewer_state_new.function == 'median'
assert self.layer_state.attribute.label == 'x'
assert self.layer_state.linewidth == 3
def test_limits(self):
self.viewer_state.x_att = self.data.pixel_component_ids[0]
assert self.viewer_state.x_min == -0.5
assert self.viewer_state.x_max == 2.5
self.viewer_state.flip_x()
assert self.viewer_state.x_min == 2.5
assert self.viewer_state.x_max == -0.5
self.viewer_state.x_min = 1
self.viewer_state.x_max = 1.5
assert self.viewer_state.x_min == 1
assert self.viewer_state.x_max == 1.5
self.viewer_state.reset_limits()
assert self.viewer_state.x_min == -0.5
assert self.viewer_state.x_max == 2.5
def test_visible(self):
self.layer_state.visible = False
assert self.layer_state.profile is None
self.layer_state.visible = True
x, y = self.layer_state.profile
assert_allclose(x, [0, 2, 4])
assert_allclose(y, [3.5, 11.5, 19.5])
|
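The expected values in these tests follow directly from the synthetic data: np.arange(24).reshape((3, 4, 2)) holds three 4x2 slabs of consecutive integers, so the 'mean' profile along the first pixel axis is [3.5, 11.5, 19.5], and SimpleCoordinates doubles pixel indices into world values, giving x = [0, 2, 4]. A standalone check of the arithmetic:

```python
import numpy as np

data = np.arange(24).reshape((3, 4, 2)).astype(float)
# Collapse each 4x2 slab to its mean: (0+...+7)/8, (8+...+15)/8, (16+...+23)/8.
print(data.mean(axis=(1, 2)))   # [ 3.5 11.5 19.5]
print(2.0 * np.arange(3))       # world axis values: [0. 2. 4.]
```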
blob_id: d97a8ee678659d44cb302eb526d0555b01abe955 | directory_id: 847ebadf2b0e7c01ad33ce92b42528a1a5c4846c
path: /tests/test-jsonrpc.py | content_id: 1df5afa221f8084cc88b1d9822fe4cfe9f4e927e
detected_licenses: ["BSD-3-Clause", "ISC", "Apache-2.0", "SISSL", "GPL-2.0-only", "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause"] | license_type: permissive | repo_name: openvswitch/ovs
snapshot_id: 6f782527cf5fde4ccfd25e68d359b91ff41acf8a | revision_id: bc79a7bf033fa4cda8ccfc5481db3cfccd72650c | branch_name: refs/heads/master
visit_date: 2023-09-04T06:31:47.899017 | revision_date: 2023-08-03T16:19:12 | committer_date: 2023-09-01T20:15:05
github_id: 18,383,364 | star_events_count: 3,366 | fork_events_count: 2,259 | gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-17T13:17:13 | gha_created_at: 2014-04-02T22:15:28 | gha_language: C
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,952 | extension: py | filename: test-jsonrpc.py
content:
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import errno
import os
import sys
import ovs.daemon
import ovs.json
import ovs.jsonrpc
import ovs.poller
import ovs.stream
def handle_rpc(rpc, msg):
done = False
reply = None
if msg.type == ovs.jsonrpc.Message.T_REQUEST:
if msg.method == "echo":
reply = ovs.jsonrpc.Message.create_reply(msg.params, msg.id)
else:
reply = ovs.jsonrpc.Message.create_error(
{"error": "unknown method"}, msg.id)
sys.stderr.write("unknown request %s" % msg.method)
elif msg.type == ovs.jsonrpc.Message.T_NOTIFY:
if msg.method == "shutdown":
done = True
else:
rpc.error(errno.ENOTTY)
sys.stderr.write("unknown notification %s" % msg.method)
else:
rpc.error(errno.EPROTO)
sys.stderr.write("unsolicited JSON-RPC reply or error\n")
if reply:
rpc.send(reply)
return done
def do_listen(name):
if sys.platform != 'win32' or (
ovs.daemon._detach and ovs.daemon._detached):
# On Windows the child is a new process created which should be the
# one that creates the PassiveStream. Without this check, the new
# child process will create a new PassiveStream overwriting the one
# that the parent process created.
error, pstream = ovs.stream.PassiveStream.open(name)
if error:
sys.stderr.write("could not listen on \"%s\": %s\n"
% (name, os.strerror(error)))
sys.exit(1)
ovs.daemon.daemonize()
rpcs = []
done = False
while True:
# Accept new connections.
error, stream = pstream.accept()
if stream:
rpcs.append(ovs.jsonrpc.Connection(stream))
elif error != errno.EAGAIN:
sys.stderr.write("PassiveStream.accept() failed\n")
sys.exit(1)
# Service existing connections.
dead_rpcs = []
for rpc in rpcs:
rpc.run()
error = 0
if not rpc.get_backlog():
error, msg = rpc.recv()
if not error:
if handle_rpc(rpc, msg):
done = True
error = rpc.get_status()
if error:
rpc.close()
dead_rpcs.append(rpc)
rpcs = [rpc for rpc in rpcs if rpc not in dead_rpcs]
if done and not rpcs:
break
poller = ovs.poller.Poller()
pstream.wait(poller)
for rpc in rpcs:
rpc.wait(poller)
if not rpc.get_backlog():
rpc.recv_wait(poller)
poller.block()
pstream.close()
def do_request(name, method, params_string):
params = ovs.json.from_string(params_string)
msg = ovs.jsonrpc.Message.create_request(method, params)
s = msg.is_valid()
if s:
sys.stderr.write("not a valid JSON-RPC request: %s\n" % s)
sys.exit(1)
error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
if error:
sys.stderr.write("could not open \"%s\": %s\n"
% (name, os.strerror(error)))
sys.exit(1)
rpc = ovs.jsonrpc.Connection(stream)
error = rpc.send(msg)
if error:
sys.stderr.write("could not send request: %s\n" % os.strerror(error))
sys.exit(1)
error, msg = rpc.recv_block()
if error:
sys.stderr.write("error waiting for reply: %s\n" % os.strerror(error))
sys.exit(1)
print(ovs.json.to_string(msg.to_json()))
rpc.close()
def do_notify(name, method, params_string):
params = ovs.json.from_string(params_string)
msg = ovs.jsonrpc.Message.create_notify(method, params)
s = msg.is_valid()
if s:
sys.stderr.write("not a valid JSON-RPC notification: %s\n" % s)
sys.exit(1)
error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
if error:
sys.stderr.write("could not open \"%s\": %s\n"
% (name, os.strerror(error)))
sys.exit(1)
rpc = ovs.jsonrpc.Connection(stream)
error = rpc.send_block(msg)
if error:
sys.stderr.write("could not send notification: %s\n"
% os.strerror(error))
sys.exit(1)
rpc.close()
def main(argv):
parser = argparse.ArgumentParser(
description="JSON-RPC test utility for Python.",
formatter_class=argparse.RawDescriptionHelpFormatter)
commands = {"listen": (do_listen, 1),
"request": (do_request, 3),
"notify": (do_notify, 3),
"help": (parser.print_help, (0,))}
group_description = """\
listen LOCAL listen for connections on LOCAL
request REMOTE METHOD PARAMS send request, print reply
notify REMOTE METHOD PARAMS send notification and exit
""" + ovs.stream.usage("JSON-RPC")
group = parser.add_argument_group(title="Commands",
description=group_description)
group.add_argument('command', metavar="COMMAND", nargs=1,
choices=commands, help="Command to use.")
group.add_argument('command_args', metavar="ARG", nargs='*',
help="Arguments to COMMAND.")
ovs.daemon.add_args(parser)
args = parser.parse_args()
ovs.daemon.handle_args(args)
command_name = args.command[0]
args = args.command_args
if command_name not in commands:
sys.stderr.write("%s: unknown command \"%s\" "
"(use --help for help)\n" % (argv[0], command_name))
sys.exit(1)
func, n_args = commands[command_name]
if type(n_args) == tuple:
if len(args) < n_args[0]:
sys.stderr.write("%s: \"%s\" requires at least %d arguments but "
"only %d provided\n"
% (argv[0], command_name, n_args[0], len(args)))
sys.exit(1)
elif type(n_args) == int:
if len(args) != n_args:
sys.stderr.write("%s: \"%s\" requires %d arguments but %d "
"provided\n"
% (argv[0], command_name, n_args, len(args)))
sys.exit(1)
else:
assert False
func(*args)
if __name__ == '__main__':
main(sys.argv)
|
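handle_rpc() above implements a tiny protocol: "echo" requests are answered with their own params, unknown methods get an error reply, and a "shutdown" notification ends the loop. A transport-free sketch of the echo exchange, using plain dicts rather than ovs.jsonrpc objects:

```python
import json

# An "echo" request and the reply handle_rpc() would produce for it.
request = {"id": 1, "method": "echo", "params": ["hello"]}
reply = {"id": request["id"], "result": request["params"], "error": None}
print(json.dumps(reply))  # {"id": 1, "result": ["hello"], "error": null}
```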
blob_id: ecf5c815a65e99265e8bdf4e2fb622cef15cf152 | directory_id: dacdebab897f9287f37a2e85c5705a926ddd36aa
path: /tests/conftest.py | content_id: e43b87ba860cbd792ca401143fde446e9fcd3699
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | repo_name: snakemake/snakemake
snapshot_id: 5d4528193d87786d7b372ca7653ece302ff46965 | revision_id: 27b224ed12448df8aebc7d1ff8f25e3bf7622232 | branch_name: refs/heads/main
visit_date: 2023-09-02T08:37:04.323976 | revision_date: 2023-08-11T10:02:34 | committer_date: 2023-08-11T10:02:34
github_id: 212,840,200 | star_events_count: 1,941 | fork_events_count: 536 | gha_license_id: MIT
gha_event_created_at: 2023-09-11T09:51:44 | gha_created_at: 2019-10-04T14:58:11 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,000 | extension: py | filename: conftest.py
content:
import os
import sys
import pytest
from pathlib import Path
from snakemake.common import ON_WINDOWS
from snakemake.utils import find_bash_on_windows
from snakemake.shell import shell
skip_on_windows = pytest.mark.skipif(ON_WINDOWS, reason="Unix stuff")
only_on_windows = pytest.mark.skipif(not ON_WINDOWS, reason="Windows stuff")
needs_strace = pytest.mark.xfail(
os.system("strace -o /dev/null true") != 0, reason="Missing strace"
)
@pytest.fixture(autouse=True)
def reset_paths_between_tests():
"""Ensure that changes to sys.path are reset between tests"""
org_path = sys.path.copy()
yield
sys.path = org_path
bash_cmd = find_bash_on_windows()
if ON_WINDOWS and bash_cmd:
@pytest.fixture(autouse=True)
def prepend_usable_bash_to_path(monkeypatch):
monkeypatch.setenv("PATH", os.path.dirname(bash_cmd), prepend=os.pathsep)
@pytest.fixture(autouse=True)
def reset_shell_exec_on_windows(prepend_usable_bash_to_path):
shell.executable(None)
|
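The reset_paths_between_tests fixture relies on a standard pytest pattern: an autouse fixture wraps every test in its scope, so state saved before the yield can be restored afterwards. The same pattern in isolation, assuming nothing beyond pytest itself:

```python
import sys
import pytest

@pytest.fixture(autouse=True)
def restore_sys_path():
    saved = sys.path.copy()   # snapshot before the test runs
    yield                     # the test body executes here
    sys.path = saved          # any appends the test made are discarded

def test_mutation_is_isolated():
    sys.path.append("/tmp/somewhere")  # visible only inside this test
```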
blob_id: e1292da60d1993aa207b4b63b3098f504d99da91 | directory_id: 78297bc868d588dd7a16cfea059ef7365ba18622
path: /scripts/irods/test/test_imeta_admin_mode.py | content_id: 4623d617c77a183306d2ee372a1e08075a5491dd
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: irods/irods
snapshot_id: ab72a41fdf05a4a905c3e3a97bb7ba3c2a6ae52d | revision_id: f3ccaa842218e477395ebcf553639134433b63ee | branch_name: refs/heads/main
visit_date: 2023-09-01T20:12:33.322002 | revision_date: 2023-08-23T18:22:59 | committer_date: 2023-08-31T13:41:31
github_id: 14,724,975 | star_events_count: 381 | fork_events_count: 167 | gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-11T18:18:14 | gha_created_at: 2013-11-26T18:10:18 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 11,887 | extension: py | filename: test_imeta_admin_mode.py
content:
import os
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from . import session
from .. import test
rodsusers = [('alice', 'apass')]
rodsadmins = [('otherrods', 'rods')]
class Test_Imeta_Admin_Mode(session.make_sessions_mixin(rodsadmins, rodsusers), unittest.TestCase):
# The tests in this file are focused on collections and data objects because they
# are the only entities that permissions apply to. Admin mode ONLY applies to collections
# and data objects.
def setUp(self):
super(Test_Imeta_Admin_Mode, self).setUp()
self.admin = self.admin_sessions[0]
self.user = self.user_sessions[0]
def tearDown(self):
super(Test_Imeta_Admin_Mode, self).tearDown()
def test_subcommands_add_adda_set_mod_and_rm__issue_6124(self):
attr_name = 'issue_6124_attr'
attr_value = 'issue_6124_value'
test_cases = [
{
'name': os.path.join(self.user.session_collection, 'issue_6124_data_object'),
'type': '-d'
},
{
'name': os.path.join(self.user.session_collection, 'issue_6124_collection'),
'type': '-C'
}
]
for tc in test_cases:
object_name = tc['name']
object_type_option = tc['type']
for op in ['add', 'adda', 'set']:
# rError messages do not return to the client after redirect to provider
if test.settings.TOPOLOGY_FROM_RESOURCE_SERVER and op == 'adda':
continue
try:
# Create a data object or collection based on the object type.
self.user.assert_icommand(['itouch' if object_type_option == '-d' else 'imkdir', object_name])
if op == 'adda':
# "adda" automatically enables admin mode, so the administrator is always allowed
# to add metadata.
self.admin.assert_icommand(['imeta', op, object_type_option, object_name, attr_name, attr_value],
'STDERR', ['"adda" is deprecated. Please use "add" with admin mode enabled instead.'])
else:
# Show that without the admin option, the administrator cannot add/set metadata on
# another user's collection or data object.
self.admin.assert_icommand(['imeta', op, object_type_option, object_name, attr_name, attr_value],
'STDERR', ['CAT_NO_ACCESS_PERMISSION'])
# Show that using the admin option allows the administrator to add metadata to
# collections and data objects they do not have permissions on.
self.admin.assert_icommand(['imeta', '-M', op, object_type_option, object_name, attr_name, attr_value])
# As the owner of the collection or data object, verify that the metadata is attached
# to the target object.
self.user.assert_icommand(['imeta', 'ls', object_type_option, object_name],
'STDOUT', ['attribute: ' + attr_name, 'value: ' + attr_value])
# Show that without the admin option, the administrator cannot modify metadata on
# another user's collection or data object.
new_attr_value = attr_value + '_updated'
self.admin.assert_icommand(['imeta', 'mod', object_type_option, object_name, attr_name, attr_value, 'v:' + new_attr_value],
'STDERR', ['CAT_NO_ACCESS_PERMISSION'])
# Show that using the admin option allows the administrator to modify metadata on
# collections and data objects they do not have permissions on.
self.admin.assert_icommand(['imeta', '-M', 'mod', object_type_option, object_name, attr_name, attr_value, 'v:' + new_attr_value])
# Show that the administrator can also remove metadata attached to other user's data.
self.admin.assert_icommand(['imeta', '-M', 'rm', object_type_option, object_name, attr_name, new_attr_value])
# As the owner of the collection or data object, verify that the metadata is no longer
# attached to the target object.
self.user.assert_icommand(['imeta', 'ls', object_type_option, object_name], 'STDOUT', ['None\n'])
finally:
self.user.run_icommand(['irm', '-rf', object_name])
def test_subcommands_addw_and_rmw__issue_6124(self):
# Create data objects at the root of the current collection and
# inside of the child collection.
data_objects = [
os.path.join(self.user.session_collection, 'foo1'),
os.path.join(self.user.session_collection, 'foo2')
]
for data_object in data_objects:
self.user.assert_icommand(['itouch', data_object])
attr_name = 'issue_6124_attr'
attr_value = 'issue_6124_value'
# Show that without the admin option, the administrator cannot attach metadata to
# another user's data objects they do not have permissions on.
self.admin.assert_icommand(['imeta', 'addw', '-d', self.user.session_collection + '/%', attr_name, attr_value],
'STDERR', ['CAT_NO_ACCESS_PERMISSION'])
# Show that using the admin option allows the administrator to attach metadata to
# data objects they do not have permissions on.
self.admin.assert_icommand(['imeta', '-M', 'addw', '-d', self.user.session_collection + '/%', attr_name, attr_value],
'STDOUT', ['AVU added to 2 data-objects'])
for data_object in data_objects:
# Show that without the admin option, the administrator cannot detach metadata from
# another user's data objects they do not have permissions on.
self.admin.assert_icommand(['imeta', 'rmw', '-d', data_object, '%', '%'], 'STDERR', ['CAT_NO_ACCESS_PERMISSION'])
# Show that using the admin option allows the administrator to detach metadata from
# another user's data objects they do not have permissions on.
self.admin.assert_icommand(['imeta', '-M', 'rmw', '-d', data_object, '%', '%'])
# As the owner of the data object, verify that the metadata is no longer attached to it.
self.user.assert_icommand(['imeta', 'ls', '-d', data_object], 'STDOUT', ['None\n'])
def test_subcommand_rmi__issue_6124(self):
attr_name = 'issue_6124_attr'
attr_value = 'issue_6124_value'
# True : indicates that the object is a data object.
# False: indicates that the object is a collection.
test_cases = [True, False]
for is_data_object in test_cases:
object_path = os.path.join(self.user.session_collection, 'foo')
try:
object_type_option = self.create_object(object_path, is_data_object)
# Attach metadata to the newly created data object.
self.user.assert_icommand(['imeta', 'add', object_type_option, object_path, attr_name, attr_value])
# Verify that the metadata is attached to the data object.
self.user.assert_icommand(['imeta', 'ls', object_type_option, object_path], 'STDOUT', ['attribute: ' + attr_name, 'value: ' + attr_value])
# Get the ID of the metadata attribute name.
# This will be used to detach the metadata from the data object.
if is_data_object: gql = "select META_DATA_ATTR_ID where COLL_NAME = '{0}' and DATA_NAME = 'foo'".format(self.user.session_collection)
else: gql = "select META_COLL_ATTR_ID where COLL_NAME = '{0}/foo'".format(self.user.session_collection)
_, out, _ = self.user.assert_icommand(['iquest', '%s', gql], 'STDOUT', ['\n'])
attribute_id = out.strip()
# Show that without the admin option, the administrator is not allowed to detach metadata
# from an object they don't have permissions on.
self.admin.assert_icommand(['imeta', 'rmi', object_type_option, object_path, attribute_id], 'STDERR', ['CAT_NO_ACCESS_PERMISSION'])
# Show that with the admin option, the administrator is allowed to detach the metadata from
# the data object.
self.admin.assert_icommand(['imeta', '-M', 'rmi', object_type_option, object_path, attribute_id])
finally:
self.user.run_icommand(['irm', '-rf', object_path])
def test_subcommand_cp__issue_6124(self):
src_object = os.path.join(self.user.session_collection, 'src_object')
dst_object = os.path.join(self.user.session_collection, 'dst_object')
attr_name = 'issue_6124_attr'
attr_value = 'issue_6124_value'
# True : indicates that the object is a data object.
# False: indicates that the object is a collection.
#
# The following table produces the following tests:
# - data object to data object
# - data object to collection
# - collection to collection
# - collection to data object
test_cases = [
{'src': True, 'dst': True},
{'src': True, 'dst': False},
{'src': False, 'dst': False},
{'src': False, 'dst': True}
]
for tc in test_cases:
try:
# Create the source and destination objects and capture the object type.
# The newly created objects are owned by "self.user".
src_object_type_flag = self.create_object(src_object, tc['src'])
dst_object_type_flag = self.create_object(dst_object, tc['dst'])
# Attach metadata to the source object.
self.user.assert_icommand(['imeta', 'set', src_object_type_flag, src_object, attr_name, attr_value])
self.user.assert_icommand(['imeta', 'ls', src_object_type_flag, src_object], 'STDOUT', ['attribute: ' + attr_name, 'value: ' + attr_value])
# Copy from source object's metadata to the destination object.
#
# First, show that an administrator is not allowed to copy metadata if they do not have
# the appropriate permissions.
#
# Second, show that by using the admin option, an administrator is allowed to copy metadata
# from one object to another, even if they don't have permissions set on the objects.
self.admin.assert_icommand(['imeta', 'cp', src_object_type_flag, dst_object_type_flag, src_object, dst_object], 'STDERR', ['CAT_NO_ACCESS_PERMISSION'])
self.admin.assert_icommand(['imeta', '-M', 'cp', src_object_type_flag, dst_object_type_flag, src_object, dst_object])
# Verify that the metadata was copied from the source object to the destination object.
self.user.assert_icommand(['imeta', 'ls', dst_object_type_flag, dst_object], 'STDOUT', ['attribute: ' + attr_name, 'value: ' + attr_value])
finally:
self.user.run_icommand(['irm', '-rf', src_object, dst_object])
def create_object(self, _object_name, _is_data_object):
if _is_data_object:
self.user.assert_icommand(['itouch', _object_name])
return '-d'
self.user.assert_icommand(['imkdir', _object_name])
return '-C'
|
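These tests are table-driven: each case supplies an object path and the imeta type flag, and a single loop body covers both data objects (-d, created with itouch) and collections (-C, created with imkdir). The dispatch in miniature, with a placeholder collection path:

```python
import os

home_collection = '/tempZone/home/alice'  # hypothetical session collection
test_cases = [
    {'name': os.path.join(home_collection, 'obj'),  'type': '-d'},
    {'name': os.path.join(home_collection, 'coll'), 'type': '-C'},
]
for tc in test_cases:
    create_cmd = 'itouch' if tc['type'] == '-d' else 'imkdir'
    print([create_cmd, tc['name']])  # the icommand each case would run
```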
blob_id: 9d537273d6a3de8858a61d08a74e449ebcda14b6 | directory_id: aae782b6f6037570ceca0925adcaf75d8876b995
path: /tests/backend/topology/test_von_neumann.py | content_id: 7331189bec22bfa839016048c99b90340e49f704
detected_licenses: ["MIT"] | license_type: permissive | repo_name: ljvmiranda921/pyswarms
snapshot_id: af308358cc2a45524d1ab4e74dfc6205ac67e8f8 | revision_id: 70c969d929bb2dab6211765def0431680fc5cb01 | branch_name: refs/heads/master
visit_date: 2023-06-09T08:19:50.000532 | revision_date: 2023-06-06T09:46:40 | committer_date: 2023-06-06T09:46:40
github_id: 97,002,861 | star_events_count: 1,194 | fork_events_count: 401 | gha_license_id: MIT
gha_event_created_at: 2023-06-06T09:41:53 | gha_created_at: 2017-07-12T12:04:45 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,495 | extension: py | filename: test_von_neumann.py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import numpy as np
import pytest
# Import from pyswarms
from pyswarms.backend.topology import VonNeumann
from .abc_test_topology import ABCTestTopology
np.random.seed(4135157)
class TestVonNeumannTopology(ABCTestTopology):
@pytest.fixture
def topology(self):
return VonNeumann
@pytest.fixture
def options(self):
return {"p": 1, "r": 1}
@pytest.mark.parametrize("r", [0, 1])
@pytest.mark.parametrize("p", [1, 2])
def test_update_gbest_neighborhood(self, swarm, topology, p, r):
"""Test if update_gbest_neighborhood gives the expected return values"""
topo = topology()
pos, cost = topo.compute_gbest(swarm, p=p, r=r)
expected_pos = np.array(
[9.90438476e-01, 2.50379538e-03, 1.87405987e-05]
)
expected_pos_2 = np.array(
[9.98033031e-01, 4.97392619e-03, 3.07726256e-03]
)
expected_cost = 1.0002528364353296
assert cost == pytest.approx(expected_cost)
assert (pos[np.argmin(cost)] == pytest.approx(expected_pos)) or (
pos[np.argmin(cost)] == pytest.approx(expected_pos_2)
)
@pytest.mark.parametrize("m", [i for i in range(3)])
@pytest.mark.parametrize("n", [i for i in range(3)])
def test_delannoy_numbers(self, m, n):
expected_values = np.array([1, 3, 5, 7, 9, 11, 13, 15, 17])
assert VonNeumann.delannoy(m, n) in expected_values
|
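test_delannoy_numbers checks VonNeumann.delannoy against known Delannoy numbers, which count lattice paths from (0, 0) to (m, n) using east, north, and northeast steps. A self-contained sketch of the recurrence (not the pyswarms implementation) reproducing values the test expects:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def delannoy(m, n):
    # D(m, 0) = D(0, n) = 1; otherwise sum over the three incoming steps.
    if m == 0 or n == 0:
        return 1
    return delannoy(m - 1, n) + delannoy(m, n - 1) + delannoy(m - 1, n - 1)

assert [delannoy(m, n) for m, n in [(0, 0), (1, 1), (1, 2), (2, 2)]] == [1, 3, 5, 13]
```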
blob_id: 8b71e6175fce031f2bc632113081d19da4b52f46 | directory_id: 8f48cf56bbb19560c8f65a81e0ce42e2a9fc27a6
path: /tools/schemacode/bidsschematools/conftest.py | content_id: 3154e3e5cfc71cc1c83ba957300ecdf5db8fefbb
detected_licenses: ["LicenseRef-scancode-public-domain", "CC-BY-4.0", "LicenseRef-scancode-unknown-license-reference", "MIT"] | license_type: permissive | repo_name: bids-standard/bids-specification
snapshot_id: 7b48af9353a805aa34e90bb669b0a7564fa377b4 | revision_id: e1f853873036e2079d39a4fcb1357a04c8afbb80 | branch_name: refs/heads/master
visit_date: 2023-08-23T09:18:46.250752 | revision_date: 2023-08-22T05:45:00 | committer_date: 2023-08-22T05:45:00
github_id: 150,465,237 | star_events_count: 242 | fork_events_count: 164 | gha_license_id: CC-BY-4.0
gha_event_created_at: 2023-09-13T14:09:30 | gha_created_at: 2018-09-26T17:35:34 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,493 | extension: py | filename: conftest.py
content:
import logging
import tempfile
from subprocess import run
try:
from importlib.resources import as_file, files
except ImportError: # PY<3.9
from importlib_resources import as_file, files
import pytest
lgr = logging.getLogger()
# This selects a subset of the bids-examples collection to run the test suite on.
# Generally it's best to avoid adding large datasets to this list, but ideally a
# good proportion of modalities would be represented, as well as datasets exemplifying
# tricky edge-cases, such as directory pseudo-files.
BIDS_SELECTION = [
"asl003", # anat, perf, _asl, _T1w
"eeg_cbm", # eeg
"hcp_example_bids", # anat, fmap
"micr_SEMzarr", # micr, SEM, OME-ZARR
"micr_SPIM", # micr, SPIM, .ome.tif
"pet003", # pet, anat
"qmri_tb1tfl", # fmap, _TB1TFL
"qmri_vfa", # derivatives
"ds000248", # .bidsignore
]
# Errors are described in the README of the respective datasets:
# https://github.com/bids-standard/bids-error-examples
BIDS_ERROR_SELECTION = [
"invalid_asl003",
"invalid_pet001",
]
def get_gitrepo_fixture(url, whitelist):
@pytest.fixture(scope="session")
def fixture():
archive_name = url.rsplit("/", 1)[-1]
testdata_dir = files("bidsschematools.tests.data") / archive_name
if testdata_dir.is_dir():
lgr.info(
f"Found static testdata archive under `{testdata_dir}`. "
"Not downloading latest data from version control."
)
with as_file(testdata_dir) as path:
yield path
else:
lgr.info(
"No static testdata available under `%s`. "
"Attempting to fetch live data from version control.",
testdata_dir,
)
with tempfile.TemporaryDirectory() as path:
lgr.debug("Cloning %r into %r", url, path)
runout = run(
[
"git",
"clone",
"--depth=1",
"--filter=blob:none",
"--sparse",
url,
path,
],
capture_output=True,
)
if runout.returncode:
raise RuntimeError(f"Failed to clone {url} into {path}")
# cwd specification is VERY important, not only to achieve the correct
# effects, but also to avoid dropping files from your repository if you
# were to run `git sparse-checkout` inside the software repo.
_ = run(["git", "sparse-checkout", "init", "--cone"], cwd=path)
_ = run(["git", "sparse-checkout", "set"] + whitelist, cwd=path)
yield path
return fixture
@pytest.fixture(scope="session")
def schema_dir():
"""Path to the schema housed in the bids-specification repo."""
from bidsschematools import utils
bids_schema = utils.get_bundled_schema_path()
return bids_schema
@pytest.fixture(scope="session")
def schema_obj():
"""Schema object."""
from bidsschematools import schema
return schema.load_schema()
bids_examples = get_gitrepo_fixture(
"https://github.com/bids-standard/bids-examples",
whitelist=BIDS_SELECTION,
)
bids_error_examples = get_gitrepo_fixture(
"https://github.com/bids-standard/bids-error-examples",
whitelist=BIDS_ERROR_SELECTION,
)
|
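The fixture's clone is deliberately cheap: depth 1, no blobs fetched until needed, and a cone-mode sparse checkout restricted to the whitelisted datasets. The same sequence stripped out of the fixture, using one dataset name from BIDS_SELECTION:

```python
import subprocess
import tempfile

url = "https://github.com/bids-standard/bids-examples"
with tempfile.TemporaryDirectory() as path:
    subprocess.run(
        ["git", "clone", "--depth=1", "--filter=blob:none", "--sparse", url, path],
        check=True,
    )
    # cwd=path keeps sparse-checkout confined to the fresh clone.
    subprocess.run(["git", "sparse-checkout", "init", "--cone"], cwd=path, check=True)
    subprocess.run(["git", "sparse-checkout", "set", "eeg_cbm"], cwd=path, check=True)
```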
blob_id: c5dc5adb3cefeab1e016ab38c12fa0e025f77eea | directory_id: db40aac8ebc24d6f2ec2c09605549a4b09e9358b
path: /CountingGridsPy/EngineToBrowseCloudPipeline/browseCloudArtifactGenerator.py | content_id: 1b7077effbc1c16c867ac56b7f176cc6429c38c5
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT"] | license_type: permissive | repo_name: microsoft/browsecloud
snapshot_id: ec9373545222a98d6b56ea54af380d8ced72e097 | revision_id: 434f0d7e071306659cc9d6066149d3f1b0bdcb32 | branch_name: refs/heads/master
visit_date: 2023-09-04T19:09:47.832558 | revision_date: 2021-01-07T00:19:54 | committer_date: 2021-01-07T00:19:54
github_id: 191,622,782 | star_events_count: 163 | fork_events_count: 20 | gha_license_id: MIT
gha_event_created_at: 2023-06-02T17:40:42 | gha_created_at: 2019-06-12T18:15:36 | gha_language: TypeScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,035 | extension: py | filename: browseCloudArtifactGenerator.py
content:
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import scipy.io as io
import pandas as pd
import matplotlib.pyplot as plt
from CountingGridsPy.EngineToBrowseCloudPipeline import MorphologicalTightener
class BrowseCloudArtifactGenerator(object):
def __init__(self, DIRECTORY_DATA):
self.DIRECTORY_DATA = DIRECTORY_DATA
def read(self, LEARNED_GRID_FILE_NAME):
MAT = io.loadmat(self.DIRECTORY_DATA + LEARNED_GRID_FILE_NAME)
self.pi2_idf = MAT['pi2_idf']
self.counts_to_show = MAT['counts_to_show'] # COUNTS MATRIX
if type(self.counts_to_show) is not np.ndarray:
try:
self.counts_to_show = self.counts_to_show.toarray()
except Exception as e:
raise ValueError(e)
# MAPPING AFTER LAYERS CODE Q( Location | Document ) TxE
self.ql2 = MAT['ql2']
# ARRAY WITH THE LAYER NUMBER FOR EACH DOCUMENT, argmax over the layers
self.id_layer = MAT['id_layer'][0]
# LAYERED PI WEIGHTED BY IDF. E1xE2xZxLA
self.pi_la_idf = MAT['pi_la_idf']
try:
# We don't need it for now. Simply 1:T
self.indices_to_show = MAT['indices_to_show'][0]
except Exception as e:
# Not implemented in matlab version
self.indices_to_show = np.array(range(MAT['ql2'].shape[0])) + 1
del MAT
cgsz = np.zeros(2)
cgsz[0], cgsz[1], self.Z = self.pi2_idf.shape
self.cgsz = cgsz.astype(int)
self.indexR = np.arange(0, self.cgsz[0]).astype(int)
self.indexC = np.arange(0, self.cgsz[1]).astype(int)
def write_top_pi(self):
MAXZ = 80
self.pi2_idf = MorphologicalTightener.tighten_pi(self.pi2_idf)
pi_max = np.argsort(-self.pi2_idf, axis=2)[:, :, :MAXZ]
pi_max_vals = -np.sort(-self.pi2_idf, axis=2)[:, :, :MAXZ]
missing_words = set(range(self.Z)).difference(set(pi_max.flatten()))
locations_missing_words = np.zeros([len(missing_words), 4])
for m_id, m in enumerate(missing_words):
loc = np.unravel_index(
np.argmax(self.pi2_idf[:, :, m]), self.cgsz.astype('int'))
locations_missing_words[m_id, :] = [
int(m), self.pi2_idf[loc[0], loc[1], m], loc[0], loc[1]]
with open(self.DIRECTORY_DATA + '/top_pi.txt', 'w') as the_file:
for r in self.indexR:
for c in self.indexC:
tmp = "row:" + ("%1d" % (r+1)) + "\t" + "col:" + ("%1d" % (c+1)) + "\t" + "\t".join(
["%1d" % a + ":" + "%1.3f" % b for a, b in zip(pi_max[r, c, :], pi_max_vals[r, c, :])])
if any((locations_missing_words[:, 2] == r) & (locations_missing_words[:, 3] == c)):
tmp = tmp + "\t" + "\t".join([("%1d" % a) + ":" + ("%1.3f" % b) for a, b in locations_missing_words[(
locations_missing_words[:, 2] == r) & (locations_missing_words[:, 3] == c), :2]])
the_file.write(tmp + "\n")
def write_top_pi_layers(self):
no_layers = self.pi_la_idf.shape[3]
with open(self.DIRECTORY_DATA + '/top_pi_layers.txt', 'w') as the_file:
for layer in range(no_layers):
MAXZ = 80
self.pi_la_idf[:, :, :, layer] = MorphologicalTightener.tighten_pi(
self.pi_la_idf[:, :, :, layer])
pi_max = np.argsort(-self.pi_la_idf[:,
:, :, layer], axis=2)[:, :, :MAXZ]
pi_max_vals = - \
np.sort(-self.pi_la_idf[:, :, :,
layer], axis=2)[:, :, :MAXZ]
missing_words = set(range(self.Z)).difference(
set(pi_max.flatten()))
locations_missing_words = np.zeros([len(missing_words), 4])
for m_id, m in enumerate(missing_words):
loc = np.unravel_index(
np.argmax(self.pi_la_idf[:, :, m, layer]), self.cgsz.astype(int))
locations_missing_words[m_id, :] = [
int(m), self.pi_la_idf[loc[0], loc[1], m, layer], loc[0], loc[1]]
for r in self.indexR:
for c in self.indexC:
tmp = "layer:" + ("%1d" % (layer+1)) + "\t" + "row:" + ("%1d" % (r+1)) + "\t" + "col:" + ("%1d" % (c+1)) + "\t" + "\t".join(
["%1d" % a + ":" + "%1.3f" % b for a, b in zip(pi_max[r, c, :], pi_max_vals[r, c, :])])
if any((locations_missing_words[:, 2] == r) & (locations_missing_words[:, 3] == c)):
tmp = tmp + "\t" + "\t".join([("%1d" % a) + ":" + ("%1.3f" % b) for a, b in locations_missing_words[(
locations_missing_words[:, 2] == r) & (locations_missing_words[:, 3] == c), :2]])
the_file.write(tmp + "\n")
def write_database(self, df, keep):
dfSave = df.copy()
dfSave = dfSave[keep]
dfSave.reset_index(drop=True, inplace=True)
dfSave["id"] = np.arange(len(dfSave)) + 1
dfSave["layer"] = self.id_layer
def format_full_row(row):
row_property_strings = []
for column_name in set(dfSave.columns):
row_property_strings.append(
column_name + ":" + str(row[column_name]))
return str.join('\t', row_property_strings) + '\n'
databaselist = dfSave.apply(format_full_row, axis=1).tolist()
with open(self.DIRECTORY_DATA + '/database.txt', 'w', encoding="utf-8") as the_file:
the_file.writelines(databaselist)
def add_feature_map_to_database(self, feature_map):
file_text = ""
with open(self.DIRECTORY_DATA + '/database.txt', "r") as f:
for index, line in enumerate(f):
file_text += line.strip() + '\tfeature:' + \
str(feature_map[index]) + "\n"
with open(self.DIRECTORY_DATA + '/database.txt', "w") as f:
f.writelines(file_text)
def write_keep(self, keep):
with open(self.DIRECTORY_DATA + '/keep.txt', 'w') as the_file:
the_file.writelines("\n".join([str(int(x)) for x in keep]))
def read_docmap(self, fileName, engine="numpy"):
if not (engine == "numpy" or engine == "matlab"):
raise ValueError("The {} engine does not exist.".format(engine))
self.ql2 = np.zeros(self.ql2.shape)
with open(self.DIRECTORY_DATA + fileName) as f:
for line in f:
arr = line.split("\t")
e1Index = int(arr[0].replace("row:", ""))-1
e2Index = int(arr[1].replace("col:", ""))-1
# since there are layers, the max probability over layers was picked earlier for the i-th entry
i = 2
while i < len(arr):
docId, qVal, layer = arr[i].split(":")
docId = int(docId)
qVal = float(qVal)
layer = int(layer)
t = docId - 1
if engine == "matlab":
self.ql2[e1Index, e2Index, t] = qVal
elif engine == "numpy":
self.ql2[t, e1Index, e2Index] = qVal
i += 1
def write_docmap(self, wd_size, engine="numpy"):
docToGridMapping = np.copy(self.ql2)
if engine == "matlab":
pass
elif engine == "numpy":
docToGridMapping = np.moveaxis(docToGridMapping, 0, -1)
else:
raise ValueError("The {} engine does not exist.".format(engine))
thr = 0.01
mask = np.zeros(self.cgsz)
mask[:wd_size, :wd_size] = 1
qlSmooth = np.real(np.fft.ifft2(np.fft.fft2(docToGridMapping, axes=(
0, 1)) * np.fft.fft2(np.expand_dims(mask, 2), axes=(0, 1)), axes=(0, 1)))
tmp = list()
with open(self.DIRECTORY_DATA + '/docmap.txt', 'w') as f:
for r in self.indexR:
for c in self.indexC:
ids = np.where(qlSmooth[r, c, :] > thr)[0]
vals = qlSmooth[r, c, ids]
lay = self.id_layer[ids]
tmp.append("row:" + ("%1d" % (r+1)) + "\tcol:" + ("%1d" % (c+1)) + "\t" + "\t".join(
[str(theid + 1)+":"+str(val)+":"+str(l) for theid, val, l in zip(ids, vals, lay)]) + "\n")
f.writelines(tmp)
def write_correspondences(self, correspondences, vocabulary):
'''
Correspondences maps the lemmatized words to the original text.
Example correspondences:
{'adopt': ',adopted,adopted,adopted,adopted', 'work': ',work,work,work,work', 'i': ',i,i,i,i,i,i,i,i', 'wish': ',wish,wish,wish,wish'}
'''
li = list()
with open(self.DIRECTORY_DATA + '/correspondences.txt', 'w') as the_file:
for k, v in correspondences.items():
unique_values = list(set([w for w in v.split(",") if w != '']))
N = len(unique_values)
li = li + list(zip(unique_values, [k]*N))
tmp = list()
for w1, w2 in li:
try:
i = vocabulary.index(w2)
tmp.append(w1 + "\t" + w2 + "\t" + str(i+1) + "\n")
except Exception as e:
pass
the_file.writelines(tmp)
def write_cooccurences(self):
raise Exception(
"The cooccurrences function should not be called because it is not guaranteed to produce a correct artifact for BrowseCloud.")
def write_counts(self):
tmp = list()
with open(self.DIRECTORY_DATA + '/words.txt', 'w') as the_file:
for z in range(self.counts_to_show.shape[0]):
docIds = np.where(self.counts_to_show[z, :] != 0)[0]
vals = np.array(self.counts_to_show[z, docIds]).flatten()
tmp.append("id:"+str(z+1) + "\t" + "\t".join(
[str(i + 1) + ":" + "%1d" % v for i, v in zip(docIds, vals)]) + "\n")
the_file.writelines(tmp)
def write_vocabulary(self, vocabulary):
with open(self.DIRECTORY_DATA + '/vocabulary.txt', 'w') as the_file:
the_file.writelines(
[str(id + 1) + "\t" + str(word) + "\n" for id, word in enumerate(vocabulary)])
def write_legends(self, colorTuples=None, labelTuples=None):
if (colorTuples is not None and labelTuples is not None):
with open(self.DIRECTORY_DATA + '/legend.txt', 'w') as the_file:
for ct, lt in zip(colorTuples, labelTuples):
r1, g1, b1 = ct[0]
rm, gm, bm = ct[1]
r2, g2, b2 = ct[2]
l1, l2 = lt
data = [l1, r1, g1, b1, rm, gm, bm, l2, r2, g2, b2]
data = [str(x) for x in data]
the_file.write("\t".join(data)+"\n")
# 0. make sure ql is a distribution over the indices again -
# chose not to do this because the final map used for visualization will be screwed up
# docToGridMapping/np.sum(np.sum(docToGridMapping,axis=0),axis=0)
# 1. Calculate the weighted average between ql and the feature mapping for each index - weights are ql
# 2. Do 0-1 normalization of the result and multiply by 255 to map the range [0,1] to the range [0,255]
# 3. Use this new range to map to the RGB color scale
def mapSentiment(self, docToGridMapping, feature_map, W=[5, 5], doNormalizeQOverGrid=True, stretch_the_truth=False):
normalizedDocToGridMapping = None
if doNormalizeQOverGrid:
normalizedDocToGridMapping = docToGridMapping / \
np.sum(docToGridMapping, axis=(0, 1))
else:
normalizedDocToGridMapping = np.copy(docToGridMapping)
e0, e1, T = docToGridMapping.shape
# toroidal- top, left, and top left
Q = np.pad(normalizedDocToGridMapping, [
(W[0]-1, 0), (W[1]-1, 0), (0, 0)], 'wrap').cumsum(axis=0).cumsum(axis=1)
# sum area table trick
normalizedDocToGridMapping = Q[(
W[0]-1):, (W[1]-1):, :] - Q[(W[0]-1):, :e1, :] - Q[:e0, (W[1]-1):, :] + Q[:e0, :e1, :]
normalizedDocToGridMapping = np.moveaxis(np.moveaxis(
normalizedDocToGridMapping, -1, 0) / np.sum(normalizedDocToGridMapping, axis=-1), 0, -1)
sentimentMapping = np.dot(normalizedDocToGridMapping, feature_map)
weights = None
if stretch_the_truth:
weights = 255*(sentimentMapping - np.min(sentimentMapping.flatten())) / (np.max(
sentimentMapping.flatten()) - np.min(sentimentMapping.flatten())) # weights between 0 and 256
else:
weights = 255*(sentimentMapping)
return (sentimentMapping, weights)
def write_colors(self, colors=None, feature_map=None, engine="numpy", cm=None, stretch_the_truth=False):
def valid_color_comp(c):
return 0.0 < c and c < 1
if colors is not None:
for color in colors:
if len(color) != 3 or not valid_color_comp(color[0]) or not valid_color_comp(color[1]) or not valid_color_comp(color[2]):
raise Exception(
"Invalid RGB color for BrowseCloud input: each component must be strictly between 0 and 1, and exactly 3 components must be given.")
elif feature_map is not None:
colors = [0 for d in range(len(self.indexR)*len(self.indexC))]
docToGridMapping = np.copy(self.ql2)
if engine == "matlab":
pass
elif engine == "numpy":
# move the first axis to the third
docToGridMapping = np.moveaxis(docToGridMapping, 0, -1)
else:
raise ValueError(
"The {} engine does not exist.".format(engine))
W = None
if self.W is None:
W = [5, 5]
else:
W = self.W.copy()
sentimentMapping, weights = self.mapSentiment(
docToGridMapping, feature_map, W, stretch_the_truth=stretch_the_truth)
if cm is None:
cm = plt.get_cmap('PuRd')
colors = [(c[0], c[1], c[2])
for c in cm([int(np.round(w)) for w in weights.flatten()])]
else:
colors = [(1.0, 1.0, 1.0)
for d in range(len(self.indexR)*len(self.indexC))]
with open(self.DIRECTORY_DATA + '/colors_browser.txt', 'w') as the_file:
for r in self.indexR:
for c in self.indexC:
i = len(self.indexR)*r + c
tmp = ("%1d" % (r+1)) + "\t" + ("%1d" % (c+1)) + "\t" + str(
(colors[i][0])) + "\t"+str((colors[i][1])) + "\t" + str((colors[i][2]))
the_file.write(tmp + "\n")
return colors
if __name__ == "__main__":
bcag = BrowseCloudArtifactGenerator("")
# 3 documents each with sentiment[0,.5,1] from negative to positive
# 9x9 grid
doc1Q = [
[0.25, 0.25, 0],
[0.25, 0.25, 0],
[0, 0, 0]
]
doc2Q = [
[0, 0.25, 0.25],
[0, 0.25, 0.25],
[0, 0, 0]
]
doc3Q = [
[0, 0, 0],
[0, 0.25, 0.25],
[0, 0.25, 0.25]
]
q = np.array([doc1Q, doc2Q, doc3Q])
feature_map = np.array([0, 0.5, 1])
W = [1, 1]
qMatlab = np.moveaxis(q, 0, -1)
Q = np.pad(q, [(0, 0), (W[1]-1, 0), (W[0]-1, 0)],
'wrap').cumsum(1).cumsum(2)
normalizedDocToGridMapping = Q[:, (W[0]-1):, (W[1]-1):] - \
Q[:, (W[0]-1):, :3] - Q[:, :3, (W[1]-1):] + Q[:, :3, :3]
print(normalizedDocToGridMapping)
result = bcag.mapSentiment(qMatlab, feature_map)[0]
print('DONE')
print(result)
|
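The "sum area table trick" in mapSentiment computes every WxW window sum from one pass of cumulative sums followed by inclusion-exclusion, instead of re-summing each window. A minimal, non-toroidal sketch of the idea (zero-padded integral image rather than the 'wrap' padding used above):

```python
import numpy as np

def box_sums(a, w):
    """All w-by-w box sums of `a` via an integral image."""
    S = np.zeros((a.shape[0] + 1, a.shape[1] + 1))
    S[1:, 1:] = a.cumsum(axis=0).cumsum(axis=1)  # S[i, j] = sum of a[:i, :j]
    # Inclusion-exclusion recovers each box sum in O(1).
    return S[w:, w:] - S[:-w, w:] - S[w:, :-w] + S[:-w, :-w]

a = np.arange(16.0).reshape(4, 4)
assert box_sums(a, 2)[0, 0] == 0 + 1 + 4 + 5  # top-left 2x2 window
```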
blob_id: 83dad2e11cc4c94614bdff547c2beef2e1b4d848 | directory_id: bbfc9f05efefe29b6ce9832bb3506efb900c1c93
path: /tencentcloud/teo/v20220106/teo_client.py | content_id: 59533f575ee5f236b19089ef5c427beb4eabf200
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: TencentCloud/tencentcloud-sdk-python
snapshot_id: a2fab235926b0a27e9cfdf55e085a8bb15b3f506 | revision_id: 6baf00a5a56ba58b6a1123423e0a1422d17a0201 | branch_name: refs/heads/master
visit_date: 2023-09-04T10:52:28.060438 | revision_date: 2023-09-01T03:09:16 | committer_date: 2023-09-01T03:09:16
github_id: 130,147,399 | star_events_count: 594 | fork_events_count: 300 | gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-06T07:03:24 | gha_created_at: 2018-04-19T02:23:12 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,399 | extension: py | filename: teo_client.py
content:
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.teo.v20220106 import models
class TeoClient(AbstractClient):
_apiVersion = '2022-01-06'
_endpoint = 'teo.tencentcloudapi.com'
_service = 'teo'
def CreatePrefetchTask(self, request):
"""创建预热任务
:param request: Request instance for CreatePrefetchTask.
:type request: :class:`tencentcloud.teo.v20220106.models.CreatePrefetchTaskRequest`
:rtype: :class:`tencentcloud.teo.v20220106.models.CreatePrefetchTaskResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("CreatePrefetchTask", params, headers=headers)
response = json.loads(body)
model = models.CreatePrefetchTaskResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def CreatePurgeTask(self, request):
"""创建清除缓存任务
:param request: Request instance for CreatePurgeTask.
:type request: :class:`tencentcloud.teo.v20220106.models.CreatePurgeTaskRequest`
:rtype: :class:`tencentcloud.teo.v20220106.models.CreatePurgeTaskResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("CreatePurgeTask", params, headers=headers)
response = json.loads(body)
model = models.CreatePurgeTaskResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePrefetchTasks(self, request):
"""查询预热任务状态
:param request: Request instance for DescribePrefetchTasks.
:type request: :class:`tencentcloud.teo.v20220106.models.DescribePrefetchTasksRequest`
:rtype: :class:`tencentcloud.teo.v20220106.models.DescribePrefetchTasksResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePrefetchTasks", params, headers=headers)
response = json.loads(body)
model = models.DescribePrefetchTasksResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePurgeTasks(self, request):
"""查询清除缓存历史记录
:param request: Request instance for DescribePurgeTasks.
:type request: :class:`tencentcloud.teo.v20220106.models.DescribePurgeTasksRequest`
:rtype: :class:`tencentcloud.teo.v20220106.models.DescribePurgeTasksResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePurgeTasks", params, headers=headers)
response = json.loads(body)
model = models.DescribePurgeTasksResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeZones(self, request):
"""用户查询用户站点信息列表,支持分页
:param request: Request instance for DescribeZones.
:type request: :class:`tencentcloud.teo.v20220106.models.DescribeZonesRequest`
:rtype: :class:`tencentcloud.teo.v20220106.models.DescribeZonesResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeZones", params, headers=headers)
response = json.loads(body)
model = models.DescribeZonesResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
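Every method in TeoClient repeats the same serialize, call, deserialize, wrap-exception cycle; only the action name and response class change. A hedged refactoring sketch (the helper is illustrative, not part of the SDK):

```python
import json

def call_and_deserialize(client, action, request, response_cls):
    # `client` is assumed to expose the AbstractClient.call() used above.
    body = client.call(action, request._serialize(), headers=request.headers)
    model = response_cls()
    model._deserialize(json.loads(body)["Response"])
    return model
```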
blob_id: 1202b1d46516077679322edea23b0c06ee4620fb | directory_id: 797c8b85fc5adc6d7d197a904ff0c7c73e6840f9
path: /trackmac/config.py | content_id: cc278210203a8fdeb697df6fd434374f45316e40
detected_licenses: ["MIT"] | license_type: permissive | repo_name: MacLeek/trackmac
snapshot_id: f8f349b3593964bf4c254a64588434907750a4ad | revision_id: 0d99ab5604305d6a572b31b5e5cb33b87fd85d40 | branch_name: refs/heads/master
visit_date: 2020-12-04T13:50:51.951150 | revision_date: 2016-10-05T08:52:40 | committer_date: 2016-10-05T08:52:40
github_id: 67,489,431 | star_events_count: 121 | fork_events_count: 15 | gha_license_id: null
gha_event_created_at: 2019-10-21T16:40:38 | gha_created_at: 2016-09-06T08:32:39 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 704 | extension: py | filename: config.py
content:
# -*- coding: utf-8 -*-
import os
VERSION = '0.0.6'
TRACK_SCRIPT = 'tm'
TRACK_DAEMON = 'trackmac_service'
TRACK_DIR = os.path.expanduser('~/Library/Application Support/com.github.macleek.trackmac/')
TRACK_DB_FILE = TRACK_DIR + 'track.db'
TRACK_LOG_FILE = TRACK_DIR + 'track.log'
TRACK_PLIST_NAME = 'com.github.macleek.trackmac.plist'
USER_LAUNCHAGENTS_DIR = os.path.expanduser('~/Library/LaunchAgents')
BROWSERS = {
'Google Chrome': {
'bundle_id': 'com.google.Chrome',
'tab': 'activeTab',
'title': 'title',
'url': 'URL'
},
'Safari': {
'bundle_id': 'com.apple.Safari',
'tab': 'currentTab',
'title': 'name',
'url': 'URL'
}
}
|
blob_id: 55ad9064741383fe222e6ebec21dbed0eb161d9f | directory_id: 6923f79f1eaaba0ab28b25337ba6cb56be97d32d
path: /A_Primer_on_Scientific_Programming_with_Python/plot/mpl_pylab_examples.py | content_id: 33793944da0db16ead73e2b7687f15317213ed77
detected_licenses: [] | license_type: no_license | repo_name: burakbayramli/books
snapshot_id: 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | revision_id: 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | branch_name: refs/heads/master
visit_date: 2023-08-17T05:31:08.885134 | revision_date: 2023-08-14T10:05:37 | committer_date: 2023-08-14T10:05:37
github_id: 72,460,321 | star_events_count: 223 | fork_events_count: 174 | gha_license_id: null
gha_event_created_at: 2022-10-24T12:15:06 | gha_created_at: 2016-10-31T17:24:00 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,656 | extension: py | filename: mpl_pylab_examples.py
content:
from matplotlib.pylab import *
raw_input=lambda x: None
def f(t):
return t**2*exp(-t**2)
t = linspace(0, 3, 51) # 51 points between 0 and 3
y = zeros(len(t)) # allocate y with float elements
for i in range(len(t)):
y[i] = f(t[i])
plot(t, y)
show()
raw_input('Press the Return key to quit: ')
# vectorized version:
y = f(t) # compute all f values at once
# or
y = t**2*exp(-t**2)
plot(t, y)
savefig('tmp1.eps') # produce PostScript
savefig('tmp1.png') # produce PNG
raw_input('Press the Return key to quit: ')
show()
plot(t, y)
legend(['t^2*exp(-t^2)'])
xlabel('t')
ylabel('y')
axis([0, 3, -0.05, 0.6]) # [tmin, tmax, ymin, ymax]
title('My First Matplotlib Demo')
savefig('tmp2.eps') # produce PostScript
show()
raw_input('Press the Return key to quit: ')
# Multiple curves
def f1(t):
return t**2*exp(-t**2)
def f2(t):
return t**2*f1(t)
t = linspace(0, 3, 51)
y1 = f1(t)
y2 = f2(t)
plot(t, y1, 'r-')
# hold('on') is unnecessary here: Matplotlib keeps existing curves on the
# axes by default (the hold machinery was removed in Matplotlib 3.0).
plot(t, y2, 'bo')
xlabel('t')
ylabel('y')
legend(['t^2*exp(-t^2)', 't^4*exp(-t^2)'])
title('Plotting two curves in the same plot')
savefig('tmp3.eps')
show()
# multiple plots
figure()
subplot(2, 1, 1)
t = linspace(0, 3, 51)
y1 = f1(t)
y2 = f2(t)
plot(t, y1, 'r-', t, y2, 'bo')
xlabel('t')
ylabel('y')
axis([t[0], t[-1], min(y2)-0.05, max(y2)+0.5])
legend(['t^2*exp(-t^2)', 't^4*exp(-t^2)'])
title('Top figure')
subplot(2, 1, 2)
t3 = t[::4]
y3 = f2(t3)
plot(t, y1, 'b-', t3, y3, 'ys')
legend(['t^2*exp(-t^2)', 't^4*exp(-t^2)'])
xlabel('t')
ylabel('y')
axis([0, 4, -0.2, 0.6])
title('Bottom figure')
savefig('tmp4.eps')
show()
raw_input('Press the Return key to quit: ')
|
blob_id: b36fd780df6f60b9b8e399382dc4dbd4393125c6 | directory_id: fdb47aa5092baa4f5ec86b3819961c77731a33e1
path: /darts/ad/scorers/nll_poisson_scorer.py | content_id: df5ee411b84649b56b0b5e08efb1465b91fb6548
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: unit8co/darts
snapshot_id: 6177e273950208b859c9208677a6f2632fc2aa2d | revision_id: 52ac1814a7a21b107d2391598dfc3a4a5bd33ca7 | branch_name: refs/heads/master
visit_date: 2023-09-01T04:12:19.632394 | revision_date: 2023-08-31T12:58:58 | committer_date: 2023-08-31T12:58:58
github_id: 148,657,183 | star_events_count: 6,234 | fork_events_count: 759 | gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T10:18:50 | gha_created_at: 2018-09-13T15:17:28 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 809 | extension: py | filename: nll_poisson_scorer.py
content:
"""
NLL Poisson Scorer
------------------
Poisson distribution negative log-likelihood Scorer.
The anomaly score is the negative log likelihood of the actual series values
under a Poisson distribution estimated from the stochastic prediction.
"""
import numpy as np
from scipy.stats import poisson
from darts.ad.scorers.scorers import NLLScorer
class PoissonNLLScorer(NLLScorer):
def __init__(self, window: int = 1) -> None:
super().__init__(window=window)
def __str__(self):
return "PoissonNLLScorer"
def _score_core_nllikelihood(
self,
deterministic_values: np.ndarray,
probabilistic_estimations: np.ndarray,
) -> np.ndarray:
mu = np.mean(probabilistic_estimations, axis=1)
return -poisson.logpmf(deterministic_values, mu=mu)
|
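_score_core_nllikelihood fits a Poisson rate as the mean of the stochastic samples at each step, then scores the observed value by its negative log-likelihood. The arithmetic in isolation:

```python
import numpy as np
from scipy.stats import poisson

samples = np.array([[2, 3, 4], [9, 10, 11]])  # stochastic predictions per step
mu = samples.mean(axis=1)                     # fitted rates: [3., 10.]
observed = np.array([3, 25])
scores = -poisson.logpmf(observed, mu=mu)
# 25 events under a rate-10 Poisson is far less likely, so it scores higher.
assert scores[1] > scores[0]
```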
blob_id: 662784f50b5732d1f9c82fcc5e94bad6bc3c8d74 | directory_id: 8ed15d43652dbcab332c78923da416b91b139323
path: /python/fedml/core/distributed/crypto/crypto_api.py | content_id: e7283aca5772acfe7a2839dccd6fb56aacd2f729
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: FedML-AI/FedML
snapshot_id: 74d144038c9de4a0621eb328d00987abac35e2d1 | revision_id: b436fbd95cbb62f6c58d2233d7affa0f62cb1817 | branch_name: refs/heads/master
visit_date: 2023-08-31T22:15:39.786371 | revision_date: 2023-08-24T03:41:58 | committer_date: 2023-08-24T03:41:58
github_id: 281,519,510 | star_events_count: 3,197 | fork_events_count: 807 | gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T02:14:20 | gha_created_at: 2020-07-21T22:41:25 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,167 | extension: py | filename: crypto_api.py
content:
import hashlib
from base64 import a85decode, a85encode
from ecies.utils import aes_decrypt, aes_encrypt
from nacl.public import Box, PrivateKey, PublicKey
def export_public_key(private_key_hex: str) -> bytes:
"""Export public key for contract join request.
Args:
private_key_hex: hex string representing the private key
Returns:
32 bytes representing public key
"""
def _hex_to_bytes(hex: str) -> bytes:
return bytes.fromhex(hex[2:] if hex[:2] == "0x" else hex)
return bytes(PrivateKey(_hex_to_bytes(private_key_hex)).public_key)
def encrypt_nacl(public_key: bytes, data: bytes) -> bytes:
"""Encryption function using NaCl box compatible with MetaMask
For implementation used in MetaMask look into: https://github.com/MetaMask/eth-sig-util
Args:
public_key: public key of recipient
data: message data
Returns:
encrypted data
"""
emph_key = PrivateKey.generate()
enc_box = Box(emph_key, PublicKey(public_key))
# Encryption is required to work with MetaMask decryption (requires utf8)
data = a85encode(data)
ciphertext = enc_box.encrypt(data)
return bytes(emph_key.public_key) + ciphertext
def decrypt_nacl(private_key: bytes, data: bytes) -> bytes:
"""Decryption function using NaCl box compatible with MetaMask
For implementation used in MetaMask look into: https://github.com/MetaMask/eth-sig-util
Args:
private_key: private key to decrypt with
data: encrypted message data
Returns:
decrypted data
"""
emph_key, ciphertext = data[:32], data[32:]
box = Box(PrivateKey(private_key), PublicKey(emph_key))
return a85decode(box.decrypt(ciphertext))
def get_current_secret(secret: bytes, entry_key_turn: int, key_turn: int) -> bytes:
"""Calculate shared secret at current state."""
for _ in range(entry_key_turn, key_turn):
secret = hashlib.sha256(secret).digest()
return secret
def encrypt(key: bytes, plain_text: bytes) -> bytes:
return aes_encrypt(key, plain_text)
def decrypt(key: bytes, cipher_text: bytes) -> bytes:
return aes_decrypt(key, cipher_text)
|
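get_current_secret is a hash ratchet: each key turn replaces the secret with its SHA-256 digest, so a holder of an older secret can derive newer ones but cannot walk backwards. The ratchet step in isolation:

```python
import hashlib

def ratchet(secret: bytes, turns: int) -> bytes:
    for _ in range(turns):
        secret = hashlib.sha256(secret).digest()  # one key turn
    return secret

two_turns = hashlib.sha256(hashlib.sha256(b"seed").digest()).digest()
assert ratchet(b"seed", 2) == two_turns
```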
blob_id: 440a9147d254e79d448c1ed46569749dffaa10eb | directory_id: 85ccd32aa73eecf274a937f1fc3b6f4d484b77da
path: /test cases/common/97 find program path/program.py | content_id: 2ebc5641932b103432e4ef7a0e6213fb740bf2fb
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: mesonbuild/meson
snapshot_id: 48321cf4235dfcc0194fed90ff43a57367592bf7 | revision_id: cf5adf0c646474f0259d123fad60ca5ed38ec891 | branch_name: refs/heads/master
visit_date: 2023-09-01T05:58:50.807952 | revision_date: 2023-03-17T20:27:37 | committer_date: 2023-08-31T11:52:41
github_id: 19,784,232 | star_events_count: 5,122 | fork_events_count: 1,848 | gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T15:47:23 | gha_created_at: 2014-05-14T15:08:16 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 39 | extension: py | filename: program.py
content:
#!/usr/bin/env python3
print("Found")
|
blob_id: c3f0f1abda4896c0403fc227a7b82c987040634b | directory_id: bd40507df0632aab5ead2a06a4fe6f0459bda236
path: /data_lineage/server.py | content_id: 8040f15694416cad38be8d564c8728a2fd5d540b
detected_licenses: ["MIT"] | license_type: permissive | repo_name: tokern/data-lineage
snapshot_id: ed8a50f0fb9a82ce628e0c4351ecca020ab60103 | revision_id: 5945542742979fe350d313d906440c93ee3d0f36 | branch_name: refs/heads/master
visit_date: 2023-09-01T11:24:44.698349 | revision_date: 2023-08-04T07:24:15 | committer_date: 2023-08-04T07:24:15
github_id: 247,882,621 | star_events_count: 274 | fork_events_count: 41 | gha_license_id: MIT
gha_event_created_at: 2023-03-23T18:36:35 | gha_created_at: 2020-03-17T04:55:39 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,810 | extension: py | filename: server.py
content:
import datetime
import logging
from typing import Any, Dict, List, Tuple
import flask_restless
import gunicorn.app.base
from dbcat import Catalog, PGCatalog, init_db
from dbcat.catalog import CatColumn
from dbcat.catalog.models import (
CatSchema,
CatSource,
CatTable,
ColumnLineage,
DefaultSchema,
Job,
JobExecution,
JobExecutionStatus,
)
from flask import Flask
from flask_restful import Api, Resource, reqparse
from pglast.parser import ParseError
from rq import Queue
from rq import job as RqJob
from werkzeug.exceptions import NotFound, UnprocessableEntity
from data_lineage import ColumnNotFound, SemanticError, TableNotFound
from data_lineage.parser import (
analyze_dml_query,
extract_lineage,
parse,
parse_dml_query,
)
from data_lineage.worker import scan
class TableNotFoundHTTP(NotFound):
"""Table not found in catalog"""
code = 441
class ColumnNotFoundHTTP(NotFound):
"""Column not found in catalog"""
code = 442
class ParseErrorHTTP(UnprocessableEntity):
"""Parser Error"""
class SemanticErrorHTTP(UnprocessableEntity):
"""Semantic Error"""
code = 443
class Kedro(Resource):
def __init__(self, catalog: Catalog):
self._catalog = catalog
self._parser = reqparse.RequestParser()
self._parser.add_argument(
"job_ids", action="append", help="List of job ids for a sub graph"
)
def get(self):
nodes = []
edges = []
args = self._parser.parse_args()
with self._catalog.managed_session:
column_edges = self._catalog.get_column_lineages(args["job_ids"])
for edge in column_edges:
nodes.append(self._column_info(edge.source))
nodes.append(self._column_info(edge.target))
nodes.append(self._job_info(edge.job_execution.job))
edges.append(
{
"source": "column:{}".format(edge.source_id),
"target": "task:{}".format(edge.job_execution.job_id),
}
)
edges.append(
{
"source": "task:{}".format(edge.job_execution.job_id),
"target": "column:{}".format(edge.target_id),
}
)
return {"nodes": nodes, "edges": edges}
@staticmethod
def _column_info(node: CatColumn):
return {
"id": "column:{}".format(node.id),
"name": ".".join(node.fqdn),
"type": "data",
}
@staticmethod
def _job_info(node: Job):
return {"id": "task:{}".format(node.id), "name": node.name, "type": "task"}
class ScanList(Resource):
def __init__(self, catalog: PGCatalog, queue: Queue):
self._catalog = catalog
self._queue = queue
self._parser = reqparse.RequestParser()
self._parser.add_argument("id", required=True, help="ID of the resource")
def post(self):
args = self._parser.parse_args()
logging.info("Args for scanning: {}".format(args))
job = self._queue.enqueue(
scan,
{
"user": self._catalog.user,
"password": self._catalog.password,
"database": self._catalog.database,
"host": self._catalog.host,
"port": self._catalog.port,
},
int(args["id"]),
)
return {"id": job.id, "status": "queued"}, 200
def get(self):
job_list = []
for job in self._queue.started_job_registry.get_job_ids():
job_list.append({"id": job, "status": "started"})
for job in self._queue.finished_job_registry.get_job_ids():
job_list.append({"id": job, "status": "finished"})
for job in self._queue.failed_job_registry.get_job_ids():
job_list.append({"id": job, "status": "failed"})
return job_list, 200
class Scan(Resource):
def __init__(self, catalog: PGCatalog, queue: Queue):
self._catalog = catalog
self._queue = queue
self._parser = reqparse.RequestParser()
self._parser.add_argument("id", required=True, help="ID of the resource")
def get(self, job_id):
status = RqJob.Job.fetch(job_id, connection=self._queue.connection).get_status()
return {"id": job_id, "status": status}, 200
def put(self, job_id):
RqJob.Job.fetch(job_id, connection=self._queue.connection).cancel()
return {"message": "Job {} cancelled".format(job_id)}, 200
class Parse(Resource):
def __init__(self, catalog: Catalog):
self._catalog = catalog
self._parser = reqparse.RequestParser()
self._parser.add_argument("query", required=True, help="Query to parse")
self._parser.add_argument(
"source_id", help="Source database of the query", required=True
)
def post(self):
args = self._parser.parse_args()
logging.debug("Parse query: {}".format(args["query"]))
try:
parsed = parse(args["query"], "parse_api")
except ParseError as error:
raise ParseErrorHTTP(description=str(error))
try:
with self._catalog.managed_session:
source = self._catalog.get_source_by_id(args["source_id"])
logging.debug("Parsing query for source {}".format(source))
binder = parse_dml_query(
catalog=self._catalog, parsed=parsed, source=source
)
return (
{
"select_tables": [table.name for table in binder.tables],
"select_columns": [context.alias for context in binder.columns],
},
200,
)
except TableNotFound as table_error:
raise TableNotFoundHTTP(description=str(table_error))
except ColumnNotFound as column_error:
raise ColumnNotFoundHTTP(description=str(column_error))
except SemanticError as semantic_error:
raise SemanticErrorHTTP(description=str(semantic_error))
class Analyze(Resource):
def __init__(self, catalog: Catalog):
self._catalog = catalog
self._parser = reqparse.RequestParser()
self._parser.add_argument("query", required=True, help="Query to parse")
self._parser.add_argument("name", help="Name of the ETL job")
self._parser.add_argument(
"start_time", required=True, help="Start time of the task"
)
self._parser.add_argument(
"end_time", required=True, help="End time of the task"
)
self._parser.add_argument(
"source_id", help="Source database of the query", required=True
)
def post(self):
args = self._parser.parse_args()
logging.debug("Parse query: {}".format(args["query"]))
try:
parsed = parse(args["query"], args["name"])
except ParseError as error:
raise ParseErrorHTTP(description=str(error))
try:
with self._catalog.managed_session:
source = self._catalog.get_source_by_id(args["source_id"])
logging.debug("Parsing query for source {}".format(source))
chosen_visitor = analyze_dml_query(self._catalog, parsed, source)
job_execution = extract_lineage(
catalog=self._catalog,
visited_query=chosen_visitor,
source=source,
parsed=parsed,
start_time=datetime.datetime.fromisoformat(args["start_time"]),
end_time=datetime.datetime.fromisoformat(args["end_time"]),
)
return (
{
"data": {
"id": job_execution.id,
"type": "job_executions",
"attributes": {
"job_id": job_execution.job_id,
"started_at": job_execution.started_at.strftime(
"%Y-%m-%d %H:%M:%S"
),
"ended_at": job_execution.ended_at.strftime(
"%Y-%m-%d %H:%M:%S"
),
"status": job_execution.status.name,
},
}
},
200,
)
except TableNotFound as table_error:
raise TableNotFoundHTTP(description=str(table_error))
except ColumnNotFound as column_error:
raise ColumnNotFoundHTTP(description=str(column_error))
except SemanticError as semantic_error:
raise SemanticErrorHTTP(description=str(semantic_error))
class Server(gunicorn.app.base.BaseApplication):
def __init__(self, app):
self.application = app
super().__init__()
def load_config(self):
# parse console args
parser = self.cfg.parser()
env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())
# Load up environment configuration
for k, v in vars(env_args).items():
if v is None:
continue
if k == "args":
continue
self.cfg.set(k.lower(), v)
def load(self):
return self.application
def job_execution_serializer(instance: JobExecution, only: List[str]):
return {
"id": instance.id,
"type": "job_executions",
"attributes": {
"job_id": instance.job_id,
"started_at": instance.started_at.strftime("%Y-%m-%d %H:%M:%S"),
"ended_at": instance.ended_at.strftime("%Y-%m-%d %H:%M:%S"),
"status": instance.status.name,
},
}
def job_execution_deserializer(data: Dict["str", Any]):
attributes = data["data"]["attributes"]
logging.debug(attributes)
job_execution = JobExecution()
job_execution.job_id = int(attributes["job_id"])
job_execution.started_at = datetime.datetime.strptime(
attributes["started_at"], "%Y-%m-%d %H:%M:%S"
)
job_execution.ended_at = datetime.datetime.strptime(
attributes["ended_at"], "%Y-%m-%d %H:%M:%S"
)
    job_execution.status = (
        JobExecutionStatus.SUCCESS
        if attributes["status"] == "SUCCESS"
        # the original returned SUCCESS on both branches, silently discarding
        # failures; FAILURE is assumed to exist on dbcat's JobExecutionStatus
        else JobExecutionStatus.FAILURE
    )
logging.debug(job_execution)
logging.debug(job_execution.status == JobExecutionStatus.SUCCESS)
return job_execution
def create_server(
catalog_options: Dict[str, str], connection, is_production=True
) -> Tuple[Any, PGCatalog]:
logging.debug(catalog_options)
catalog = PGCatalog(
**catalog_options,
connect_args={"application_name": "data-lineage:flask-restless"},
max_overflow=40,
pool_size=20,
pool_pre_ping=True
)
init_db(catalog)
restful_catalog = PGCatalog(
**catalog_options,
connect_args={"application_name": "data-lineage:restful"},
pool_pre_ping=True
)
app = Flask(__name__)
queue = Queue(is_async=is_production, connection=connection)
# Create CRUD APIs
methods = ["DELETE", "GET", "PATCH", "POST"]
url_prefix = "/api/v1/catalog"
api_manager = flask_restless.APIManager(app, catalog.get_scoped_session())
api_manager.create_api(
CatSource,
methods=methods,
url_prefix=url_prefix,
additional_attributes=["fqdn"],
)
api_manager.create_api(
CatSchema,
methods=methods,
url_prefix=url_prefix,
additional_attributes=["fqdn"],
)
api_manager.create_api(
CatTable,
methods=methods,
url_prefix=url_prefix,
additional_attributes=["fqdn"],
)
api_manager.create_api(
CatColumn,
methods=methods,
url_prefix=url_prefix,
additional_attributes=["fqdn"],
)
api_manager.create_api(Job, methods=methods, url_prefix=url_prefix)
api_manager.create_api(
JobExecution,
methods=methods,
url_prefix=url_prefix,
serializer=job_execution_serializer,
deserializer=job_execution_deserializer,
)
api_manager.create_api(
ColumnLineage,
methods=methods,
url_prefix=url_prefix,
collection_name="column_lineage",
)
api_manager.create_api(
DefaultSchema,
methods=methods,
url_prefix=url_prefix,
collection_name="default_schema",
primary_key="source_id",
)
restful_manager = Api(app)
restful_manager.add_resource(
Kedro, "/api/main", resource_class_kwargs={"catalog": restful_catalog}
)
restful_manager.add_resource(
ScanList,
"/api/v1/scan",
resource_class_kwargs={"catalog": restful_catalog, "queue": queue},
)
restful_manager.add_resource(
Scan,
"/api/v1/scan/<job_id>",
resource_class_kwargs={"catalog": restful_catalog, "queue": queue},
)
restful_manager.add_resource(
Analyze, "/api/v1/analyze", resource_class_kwargs={"catalog": restful_catalog}
)
restful_manager.add_resource(
Parse, "/api/v1/parse", resource_class_kwargs={"catalog": restful_catalog}
)
for rule in app.url_map.iter_rules():
rule_methods = ",".join(rule.methods)
logging.debug("{:50s} {:20s} {}".format(rule.endpoint, rule_methods, rule))
if is_production:
return Server(app=app), catalog
else:
return app, catalog
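# Hedged run sketch: the catalog keys and connection values below are purely
# illustrative, and a reachable Postgres catalog plus Redis instance are assumed.
if __name__ == "__main__":
    from redis import Redis
    server, _catalog = create_server(
        {
            "user": "catalog_user",
            "password": "catalog_password",
            "host": "127.0.0.1",
            "port": 5432,
            "database": "tokern",
        },
        connection=Redis(host="127.0.0.1"),
        is_production=True,
    )
    server.run()  # gunicorn BaseApplication entry point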
|
b540ea7e946e545bdf25c5788d7cfa573d07cbee
|
4b15f318ba3332ee946cb0b2838c93e7935b9b89
|
/tests/functional/pyocf/types/io.py
|
0c7d69220ff939ecdbad0b91cddfa744720a840d
|
[
"BSD-3-Clause"
] |
permissive
|
Open-CAS/ocf
|
c4f8a5c9c1b254a905fda75be2c19bd7c8ebd450
|
016d7a8ee2822d672c308264e79bae4081e7930e
|
refs/heads/master
| 2023-05-28T08:40:51.328181
| 2023-05-11T08:11:57
| 2023-05-11T08:11:57
| 152,160,836
| 168
| 94
|
BSD-3-Clause
| 2023-09-14T08:01:50
| 2018-10-08T23:46:10
|
C
|
UTF-8
|
Python
| false
| false
| 3,275
|
py
|
io.py
|
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from ctypes import (
c_void_p,
c_int,
c_uint32,
c_uint64,
CFUNCTYPE,
Structure,
POINTER,
byref,
cast,
)
from enum import IntEnum
from ..ocf import OcfLib
from .data import Data
class IoDir(IntEnum):
READ = 0
WRITE = 1
class IoOps(Structure):
pass
class Io(Structure):
START = CFUNCTYPE(None, c_void_p)
HANDLE = CFUNCTYPE(None, c_void_p, c_void_p)
END = CFUNCTYPE(None, c_void_p, c_int)
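    # START/HANDLE/END mirror the C callback signatures used by ocf_io; Python
    # objects are cached in _instances_ so the raw pointer handed back by the
    # C layer can be mapped to its owning Io (see from_pointer/get_instance).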
_instances_ = {}
_fields_ = [
("_addr", c_uint64),
("_flags", c_uint64),
("_bytes", c_uint32),
("_class", c_uint32),
("_dir", c_uint32),
("_io_queue", c_void_p),
("_start", START),
("_priv1", c_void_p),
("_priv2", c_void_p),
("_handle", HANDLE),
("_end", END),
]
@classmethod
def from_pointer(cls, ref):
c = cls.from_address(ref)
cls._instances_[ref] = c
OcfLib.getInstance().ocf_io_set_cmpl_wrapper(byref(c), None, None, c.c_end)
return c
@classmethod
def get_instance(cls, ref):
return cls._instances_[cast(ref, c_void_p).value]
def del_object(self):
del type(self)._instances_[cast(byref(self), c_void_p).value]
def put(self):
OcfLib.getInstance().ocf_io_put(byref(self))
def get(self):
OcfLib.getInstance().ocf_io_get(byref(self))
@staticmethod
@END
def c_end(io, err):
Io.get_instance(io).end(err)
@staticmethod
@START
def c_start(io):
Io.get_instance(io).start()
@staticmethod
@HANDLE
def c_handle(io, opaque):
Io.get_instance(io).handle(opaque)
def end(self, err):
try:
self.callback(err)
except: # noqa E722
pass
self.del_object()
self.put()
def submit(self):
return OcfLib.getInstance().ocf_volume_submit_io(byref(self))
def submit_flush(self):
return OcfLib.getInstance().ocf_volume_submit_flush(byref(self))
def submit_discard(self):
return OcfLib.getInstance().ocf_volume_submit_discard(byref(self))
def set_data(self, data: Data, offset: int = 0):
self.data = data
OcfLib.getInstance().ocf_io_set_data(byref(self), data, offset)
IoOps.SET_DATA = CFUNCTYPE(c_int, POINTER(Io), c_void_p, c_uint32)
IoOps.GET_DATA = CFUNCTYPE(c_void_p, POINTER(Io))
IoOps._fields_ = [("_set_data", IoOps.SET_DATA), ("_get_data", IoOps.GET_DATA)]
lib = OcfLib.getInstance()
lib.ocf_io_set_cmpl_wrapper.argtypes = [POINTER(Io), c_void_p, c_void_p, Io.END]
lib.ocf_io_set_data.argtypes = [POINTER(Io), c_void_p, c_uint32]
lib.ocf_io_set_data.restype = c_int
lib.ocf_volume_submit_io.argtypes = [POINTER(Io)]
lib.ocf_volume_submit_io.restype = None
lib.ocf_volume_submit_flush.argtypes = [POINTER(Io)]
lib.ocf_volume_submit_flush.restype = None
lib.ocf_volume_submit_discard.argtypes = [POINTER(Io)]
lib.ocf_volume_submit_discard.restype = None
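# Hedged usage sketch (comments only): assumes an Io obtained from a pyocf
# Volume in this test framework, e.g. via vol.new_io(...), plus a Data buffer.
#
#   data = Data.from_bytes(b"\x00" * 4096)
#   io = vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
#   io.set_data(data)
#   io.callback = lambda err: print("I/O completed, error =", err)
#   io.submit()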
|
b25c6bf10e7f5dd96703db220c9ab07bbe8eb640
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/ai/CrashedLeaderBoardDecorator.py
|
e55b20c92b4f01fd8cfd6f147ca295ad083d3675
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,928
|
py
|
CrashedLeaderBoardDecorator.py
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from . import HolidayDecorator
from toontown.toonbase import ToontownGlobals
from panda3d.core import Vec4, CSDefault, TransformState, NodePath, TransparencyAttrib
from panda3d.toontown import loadDNAFile
from toontown.hood import GSHood
class CrashedLeaderBoardDecorator(HolidayDecorator.HolidayDecorator):
notify = DirectNotifyGlobal.directNotify.newCategory('CrashedLeaderBoardDecorator')
def __init__(self):
HolidayDecorator.HolidayDecorator.__init__(self)
def decorate(self):
self.updateHoodDNAStore()
self.swapIval = self.getSwapVisibleIval()
if self.swapIval:
self.swapIval.start()
holidayIds = base.cr.newsManager.getDecorationHolidayId()
if ToontownGlobals.CRASHED_LEADERBOARD not in holidayIds:
return
if base.config.GetBool('want-crashedLeaderBoard-Smoke', 1):
self.startSmokeEffect()
def startSmokeEffect(self):
if isinstance(base.cr.playGame.getPlace().loader.hood, GSHood.GSHood):
base.cr.playGame.getPlace().loader.startSmokeEffect()
def stopSmokeEffect(self):
if isinstance(base.cr.playGame.getPlace().loader.hood, GSHood.GSHood):
base.cr.playGame.getPlace().loader.stopSmokeEffect()
def undecorate(self):
if base.config.GetBool('want-crashedLeaderBoard-Smoke', 1):
self.stopSmokeEffect()
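        # If other decoration holidays are still active, rebuild their visuals
        # rather than reverting straight to the base storage DNA.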
holidayIds = base.cr.newsManager.getDecorationHolidayId()
if len(holidayIds) > 0:
self.decorate()
return
storageFile = base.cr.playGame.hood.storageDNAFile
if storageFile:
loadDNAFile(self.dnaStore, storageFile, CSDefault)
self.swapIval = self.getSwapVisibleIval()
if self.swapIval:
self.swapIval.start()
|
9cdb299351fea3b328d608c04a57bcbee80baf2f
|
dbd37114c5e3b39d0ce4d6135144f5a7a3b6d4f5
|
/pytorch_tools/fit_wrapper/utils.py
|
613319eb6e16417b59a2b44fe148d3cf33d7f23c
|
[
"MIT"
] |
permissive
|
bonlime/pytorch-tools
|
6633fe8969ef51450589670c2393838131963773
|
74cd3dcef5ce4f81302be4061414c1d1113dafb8
|
refs/heads/master
| 2023-09-03T20:18:16.718561
| 2023-08-29T12:36:35
| 2023-08-29T12:36:35
| 201,936,229
| 190
| 16
|
MIT
| 2023-08-04T22:42:27
| 2019-08-12T13:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,669
|
py
|
utils.py
|
"""Utils used inside fit wrapper. Moved here to make it easily separable
Some functions may duplicate, this is expected
"""
import os
import time
import torch
from collections.abc import Iterable
import numpy as np
import torch.distributed as dist
from typing import Any
def listify(p: Any) -> Iterable:
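    # Note: str is itself Iterable, so a bare string passes through unchanged.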
if p is None:
p = []
elif not isinstance(p, Iterable):
p = [p]
return p
def to_numpy(x: Any) -> np.ndarray:
"""Convert whatever to numpy array"""
if isinstance(x, np.ndarray):
return x
elif isinstance(x, torch.Tensor):
return x.detach().cpu().numpy()
elif isinstance(x, (list, tuple, int, float)):
return np.array(x)
else:
raise ValueError("Unsupported type")
def to_tensor(x: Any, dtype=None) -> torch.Tensor:
"""Convert whatever to torch Tensor"""
if isinstance(x, torch.Tensor):
if dtype is not None:
x = x.type(dtype)
return x
elif isinstance(x, np.ndarray):
x = torch.from_numpy(x)
if dtype is not None:
x = x.type(dtype)
return x
elif isinstance(x, (list, tuple)):
x = np.array(x)
x = torch.from_numpy(x)
if dtype is not None:
x = x.type(dtype)
return x
else:
raise ValueError("Unsupported input type" + str(type(x)))
def env_world_size() -> int:
return int(os.environ.get("WORLD_SIZE", 1))
def env_rank() -> int:
return int(os.environ.get("RANK", 0))
def reduce_tensor(tensor: torch.Tensor) -> torch.Tensor:
return sum_tensor(tensor) / env_world_size()
def sum_tensor(tensor: torch.Tensor) -> torch.Tensor:
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt
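# reduce_tensor() averages across ranks (all-reduced sum / world size), while
# sum_tensor() keeps the raw sum; both assume torch.distributed is initialized.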
class AverageMeter:
    """Computes and stores the average and current value.
    Attributes:
        val - last value seen
        avg - true running average
        avg_smooth - exponentially smoothed average (momentum = avg_mom)"""
def __init__(self, name="Meter", avg_mom=0.95):
self.avg_mom = avg_mom
self.name = name
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.avg_smooth = 0
self.count = 0
def update(self, val):
self.val = val
if self.count == 0:
self.avg_smooth = self.avg_smooth + val
else:
self.avg_smooth = self.avg_smooth * self.avg_mom + val * (1 - self.avg_mom)
self.count += 1
self.avg *= (self.count - 1) / self.count
self.avg += val / self.count
def __call__(self, val):
return self.update(val)
def __repr__(self):
return f"AverageMeter(name={self.name}, avg={self.avg:.3f}, count={self.count})"
# return f"{self.name}: {self.avg:.3f}" # maybe use this version for easier printing?
class TimeMeter:
def __init__(self):
self.reset()
def reset(self):
self.batch_time = AverageMeter()
self.data_time = AverageMeter()
self.start = time.time()
def batch_start(self):
self.data_time.update(time.time() - self.start)
def batch_end(self):
self.batch_time.update(time.time() - self.start)
self.start = time.time()
def reduce_meter(meter: AverageMeter) -> AverageMeter:
"""Args: meter (AverageMeter): meter to reduce"""
if env_world_size() == 1:
return meter
# can't reduce AverageMeter so need to reduce every attribute separately
reduce_attributes = ["val", "avg", "avg_smooth", "count"]
for attr in reduce_attributes:
old_value = to_tensor([getattr(meter, attr)]).float().cuda()
setattr(meter, attr, reduce_tensor(old_value).cpu().numpy()[0])
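if __name__ == "__main__":
    # Quick self-contained sketch of AverageMeter; the values are illustrative.
    meter = AverageMeter("loss")
    for v in (1.0, 0.5, 0.25):
        meter.update(v)
    print(meter)  # -> AverageMeter(name=loss, avg=0.583, count=3)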
|
f9c1c4aad485494a8be50932105a61a8de8d4e9a
|
a22139a4b99042def74629f0a648673e7b618648
|
/code/FINDER_ND_cost/FINDER.pyx
|
3f6ff88e088bdaf6e590b204c85775bdd4a03add
|
[
"MIT"
] |
permissive
|
FFrankyy/FINDER
|
32ded2d6b9a8f9389e0221217b4086f61fb586f5
|
dde6f724abca765a2b8eba418cb7f40ccefb4629
|
refs/heads/master
| 2023-04-14T02:09:11.428610
| 2020-07-07T08:25:33
| 2020-07-07T08:25:33
| 245,740,191
| 144
| 42
|
MIT
| 2023-03-24T22:15:42
| 2020-03-08T02:38:54
|
Python
|
UTF-8
|
Python
| false
| false
| 42,386
|
pyx
|
FINDER.pyx
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 00:33:33 2017
@author: fanchangjun
"""
from __future__ import print_function, division
import tensorflow as tf
import numpy as np
import networkx as nx
import random
import time
import pickle as cp
import sys
from tqdm import tqdm
import PrepareBatchGraph
import graph
import nstep_replay_mem
import nstep_replay_mem_prioritized
import mvc_env
import utils
import heapq
import scipy.linalg as linalg
import os
import pandas as pd
# from gurobipy import *
# Hyper Parameters:
cdef double GAMMA = 1 # decay rate of past observations
cdef int UPDATE_TIME = 1000
cdef int EMBEDDING_SIZE = 64
cdef int MAX_ITERATION = 1000000
cdef double LEARNING_RATE = 0.0001 #dai
cdef int MEMORY_SIZE = 500000
cdef double Alpha = 0.001 ## weight of reconstruction loss
########################### hyperparameters for priority(start)#########################################
cdef double epsilon = 0.0000001 # small amount to avoid zero priority
cdef double alpha = 0.6 # [0~1] convert the importance of TD error to priority
cdef double beta = 0.4 # importance-sampling, from initial value increasing to 1
cdef double beta_increment_per_sampling = 0.001
cdef double TD_err_upper = 1. # clipped abs error
########################## hyperparameters for priority(end)#########################################
cdef int N_STEP = 5
cdef int NUM_MIN = 30
cdef int NUM_MAX = 50
cdef int REG_HIDDEN = 32
cdef int M = 4 # how many edges selected each time for BA model
cdef int BATCH_SIZE = 64
cdef double initialization_stddev = 0.01  # std-dev used for weight initialization
cdef int n_valid = 200
cdef int aux_dim = 4
cdef int num_env = 1
cdef double inf = 2147483647/2
######################### embedding method ##########################################################
cdef int max_bp_iter = 3
cdef int aggregatorID = 0 #0:sum; 1:mean; 2:GCN
cdef int embeddingMethod = 1 #0:structure2vec; 1:graphsage
class FINDER:
def __init__(self):
# init some parameters
self.embedding_size = EMBEDDING_SIZE
self.learning_rate = LEARNING_RATE
        self.g_type = 'barabasi_albert'  # options: 'erdos_renyi', 'powerlaw', 'small-world', 'barabasi_albert'
self.training_type = 'degree'
self.TrainSet = graph.py_GSet()
self.TestSet = graph.py_GSet()
self.inputs = dict()
self.reg_hidden = REG_HIDDEN
self.utils = utils.py_Utils()
############----------------------------- variants of DQN(start) ------------------- ###################################
self.IsHuberloss = False
self.IsDoubleDQN = False
self.IsPrioritizedSampling = False
self.IsDuelingDQN = False
        self.IsMultiStepDQN = True  ## if IsMultiStepDQN is False, N_STEP should be 1
self.IsDistributionalDQN = False
self.IsNoisyNetDQN = False
self.Rainbow = False
############----------------------------- variants of DQN(end) ------------------- ###################################
#Simulator
self.ngraph_train = 0
self.ngraph_test = 0
self.env_list=[]
self.g_list=[]
# self.covered=[]
self.pred=[]
if self.IsPrioritizedSampling:
self.nStepReplayMem = nstep_replay_mem_prioritized.py_Memory(epsilon,alpha,beta,beta_increment_per_sampling,TD_err_upper,MEMORY_SIZE)
else:
self.nStepReplayMem = nstep_replay_mem.py_NStepReplayMem(MEMORY_SIZE)
for i in range(num_env):
self.env_list.append(mvc_env.py_MvcEnv(NUM_MAX))
self.g_list.append(graph.py_Graph())
self.test_env = mvc_env.py_MvcEnv(NUM_MAX)
# [batch_size, node_cnt]
self.action_select = tf.sparse_placeholder(tf.float32, name="action_select")
# [node_cnt, batch_size]
self.rep_global = tf.sparse_placeholder(tf.float32, name="rep_global")
# [node_cnt, node_cnt]
self.n2nsum_param = tf.sparse_placeholder(tf.float32, name="n2nsum_param")
# [node_cnt, node_cnt]
self.laplacian_param = tf.sparse_placeholder(tf.float32, name="laplacian_param")
# [batch_size, node_cnt]
self.subgsum_param = tf.sparse_placeholder(tf.float32, name="subgsum_param")
# [batch_size,1]
self.target = tf.placeholder(tf.float32, [BATCH_SIZE,1], name="target")
# [batch_size, aux_dim]
self.aux_input = tf.placeholder(tf.float32, name="aux_input")
# [node_cnt, 2]
self.node_input = tf.placeholder(tf.float32, name="node_input")
#[batch_size, 1]
if self.IsPrioritizedSampling:
self.ISWeights = tf.placeholder(tf.float32, [BATCH_SIZE, 1], name='IS_weights')
# init Q network
self.loss,self.trainStep,self.q_pred, self.q_on_all,self.Q_param_list = self.BuildNet() #[loss,trainStep,q_pred, q_on_all, ...]
#init Target Q Network
self.lossT,self.trainStepT,self.q_predT, self.q_on_allT,self.Q_param_listT = self.BuildNet()
        # take a snapshot: copy the online Q-network weights into the target network
self.copyTargetQNetworkOperation = [a.assign(b) for a,b in zip(self.Q_param_listT,self.Q_param_list)]
self.UpdateTargetQNetwork = tf.group(*self.copyTargetQNetworkOperation)
# saving and loading networks
self.saver = tf.train.Saver(max_to_keep=None)
#self.session = tf.InteractiveSession()
config = tf.ConfigProto(device_count={"CPU": 8}, # limit to num_cpu_core CPU usage
inter_op_parallelism_threads=100,
intra_op_parallelism_threads=100,
log_device_placement=False)
config.gpu_options.allow_growth = True
self.session = tf.Session(config = config)
# self.session = tf_debug.LocalCLIDebugWrapperSession(self.session)
self.session.run(tf.global_variables_initializer())
#################################################New code for FINDER#####################################
def BuildNet(self):
# [2, embed_dim]
w_n2l = tf.Variable(tf.truncated_normal([2, self.embedding_size], stddev=initialization_stddev), tf.float32)
# [embed_dim, embed_dim]
p_node_conv = tf.Variable(tf.truncated_normal([self.embedding_size, self.embedding_size], stddev=initialization_stddev), tf.float32)
if embeddingMethod == 1: #'graphsage'
# [embed_dim, embed_dim]
p_node_conv2 = tf.Variable(tf.truncated_normal([self.embedding_size, self.embedding_size], stddev=initialization_stddev), tf.float32)
# [2*embed_dim, embed_dim]
p_node_conv3 = tf.Variable(tf.truncated_normal([2*self.embedding_size, self.embedding_size], stddev=initialization_stddev), tf.float32)
#[reg_hidden+aux_dim, 1]
if self.reg_hidden > 0:
#[2*embed_dim, reg_hidden]
# h1_weight = tf.Variable(tf.truncated_normal([2 * self.embedding_size, self.reg_hidden], stddev=initialization_stddev), tf.float32)
# [embed_dim, reg_hidden]
h1_weight = tf.Variable(tf.truncated_normal([self.embedding_size, self.reg_hidden], stddev=initialization_stddev), tf.float32)
#[reg_hidden1, reg_hidden2]
# h2_weight = tf.Variable(tf.truncated_normal([self.reg_hidden1, self.reg_hidden2], stddev=initialization_stddev), tf.float32)
#[reg_hidden+aux_dim, 1]
h2_weight = tf.Variable(tf.truncated_normal([self.reg_hidden + aux_dim, 1], stddev=initialization_stddev), tf.float32)
#[reg_hidden2 + aux_dim, 1]
last_w = h2_weight
else:
#[2*embed_dim, reg_hidden]
h1_weight = tf.Variable(tf.truncated_normal([2 * self.embedding_size, self.reg_hidden], stddev=initialization_stddev), tf.float32)
# [embed_dim, reg_hidden]
# h1_weight = tf.Variable(tf.truncated_normal([self.embedding_size, self.reg_hidden], stddev=initialization_stddev), tf.float32)
#[2*embed_dim, reg_hidden]
last_w = h1_weight
## [embed_dim, 1]
cross_product = tf.Variable(tf.truncated_normal([self.embedding_size, 1], stddev=initialization_stddev), tf.float32)
# #[node_cnt, 2]
# nodes_size = tf.shape(self.n2nsum_param)[0]
# node_input = tf.ones((nodes_size,2))
y_nodes_size = tf.shape(self.subgsum_param)[0]
# [batch_size, 2]
y_node_input = tf.ones((y_nodes_size,2))
#[node_cnt, 2] * [2, embed_dim] = [node_cnt, embed_dim]
# no sparse
input_message = tf.matmul(tf.cast(self.node_input,tf.float32), w_n2l)
#[node_cnt, embed_dim] # no sparse
input_potential_layer = tf.nn.relu(input_message)
# # no sparse
# [batch_size, embed_dim]
y_input_message = tf.matmul(tf.cast(y_node_input,tf.float32), w_n2l)
#[batch_size, embed_dim] # no sparse
y_input_potential_layer = tf.nn.relu(y_input_message)
#input_potential_layer = input_message
cdef int lv = 0
#[node_cnt, embed_dim], no sparse
cur_message_layer = input_potential_layer
cur_message_layer = tf.nn.l2_normalize(cur_message_layer, axis=1)
#[batch_size, embed_dim], no sparse
y_cur_message_layer = y_input_potential_layer
# [batch_size, embed_dim]
y_cur_message_layer = tf.nn.l2_normalize(y_cur_message_layer, axis=1)
while lv < max_bp_iter:
lv = lv + 1
#[node_cnt, node_cnt] * [node_cnt, embed_dim] = [node_cnt, embed_dim], dense
n2npool = tf.sparse_tensor_dense_matmul(tf.cast(self.n2nsum_param,tf.float32), cur_message_layer)
#[node_cnt, embed_dim] * [embed_dim, embed_dim] = [node_cnt, embed_dim], dense
node_linear = tf.matmul(n2npool, p_node_conv)
# [batch_size, node_cnt] * [node_cnt, embed_dim] = [batch_size, embed_dim]
y_n2npool = tf.sparse_tensor_dense_matmul(tf.cast(self.subgsum_param,tf.float32), cur_message_layer)
#[batch_size, embed_dim] * [embed_dim, embed_dim] = [batch_size, embed_dim], dense
y_node_linear = tf.matmul(y_n2npool, p_node_conv)
if embeddingMethod == 0: # 'structure2vec'
#[node_cnt, embed_dim] + [node_cnt, embed_dim] = [node_cnt, embed_dim], return tensed matrix
merged_linear = tf.add(node_linear,input_message)
#[node_cnt, embed_dim]
cur_message_layer = tf.nn.relu(merged_linear)
#[batch_size, embed_dim] + [batch_size, embed_dim] = [batch_size, embed_dim], return tensed matrix
y_merged_linear = tf.add(y_node_linear, y_input_message)
#[batch_size, embed_dim]
y_cur_message_layer = tf.nn.relu(y_merged_linear)
else: # 'graphsage'
#[node_cnt, embed_dim] * [embed_dim, embed_dim] = [node_cnt, embed_dim], dense
cur_message_layer_linear = tf.matmul(tf.cast(cur_message_layer, tf.float32), p_node_conv2)
#[[node_cnt, embed_dim] [node_cnt, embed_dim]] = [node_cnt, 2*embed_dim], return tensed matrix
merged_linear = tf.concat([node_linear, cur_message_layer_linear], 1)
#[node_cnt, 2*embed_dim]*[2*embed_dim, embed_dim] = [node_cnt, embed_dim]
cur_message_layer = tf.nn.relu(tf.matmul(merged_linear, p_node_conv3))
#[batch_size, embed_dim] * [embed_dim, embed_dim] = [batch_size, embed_dim], dense
y_cur_message_layer_linear = tf.matmul(tf.cast(y_cur_message_layer, tf.float32), p_node_conv2)
#[[batch_size, embed_dim] [batch_size, embed_dim]] = [batch_size, 2*embed_dim], return tensed matrix
y_merged_linear = tf.concat([y_node_linear, y_cur_message_layer_linear], 1)
#[batch_size, 2*embed_dim]*[2*embed_dim, embed_dim] = [batch_size, embed_dim]
y_cur_message_layer = tf.nn.relu(tf.matmul(y_merged_linear, p_node_conv3))
cur_message_layer = tf.nn.l2_normalize(cur_message_layer, axis=1)
y_cur_message_layer = tf.nn.l2_normalize(y_cur_message_layer, axis=1)
self.node_embedding = cur_message_layer
#[batch_size, node_cnt] * [node_cnt, embed_dim] = [batch_size, embed_dim], dense
# y_potential = tf.sparse_tensor_dense_matmul(tf.cast(self.subgsum_param,tf.float32), cur_message_layer)
y_potential = y_cur_message_layer
#[batch_size, node_cnt] * [node_cnt, embed_dim] = [batch_size, embed_dim]
action_embed = tf.sparse_tensor_dense_matmul(tf.cast(self.action_select, tf.float32), cur_message_layer)
#[[batch_size, embed_dim], [batch_size, embed_dim]] = [batch_size, 2*embed_dim], dense
# embed_s_a = tf.concat([action_embed,y_potential],1)
# # [batch_size, embed_dim, embed_dim]
temp = tf.matmul(tf.expand_dims(action_embed, axis=2),tf.expand_dims(y_potential, axis=1))
# [batch_size, embed_dim]
Shape = tf.shape(action_embed)
# [batch_size, embed_dim], first transform
embed_s_a = tf.reshape(tf.matmul(temp, tf.reshape(tf.tile(cross_product,[Shape[0],1]),[Shape[0],Shape[1],1])),Shape)
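        # The expand_dims/matmul pair above scores each state-action pair with
        # a bilinear form: outer(action_embed, y_potential) contracted against
        # the learned cross_product vector, instead of plain concatenation.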
#[batch_size, 2 * embed_dim]
last_output = embed_s_a
if self.reg_hidden > 0:
#[batch_size, 2*embed_dim] * [2*embed_dim, reg_hidden] = [batch_size, reg_hidden], dense
hidden = tf.matmul(embed_s_a, h1_weight)
#[batch_size, reg_hidden]
last_output = tf.nn.relu(hidden)
#[batch_size, reg_hidden1] * [reg_hidden1, reg_hidden2] = [batch_size, reg_hidden2]
# last_output_hidden = tf.matmul(last_output1, h2_weight)
# last_output = tf.nn.relu(last_output_hidden)
# if reg_hidden == 0: ,[[batch_size, 2*embed_dim], [batch_size, aux_dim]] = [batch_size, 2*embed_dim+aux_dim]
# if reg_hidden > 0: ,[[batch_size, reg_hidden], [batch_size, aux_dim]] = [batch_size, reg_hidden+aux_dim]
last_output = tf.concat([last_output, self.aux_input], 1)
#if reg_hidden == 0: ,[batch_size, 2*embed_dim+aux_dim] * [2*embed_dim+aux_dim, 1] = [batch_size, 1]
#if reg_hidden > 0: ,[batch_size, reg_hidden+aux_dim] * [reg_hidden+aux_dim, 1] = [batch_size, 1]
q_pred = tf.matmul(last_output, last_w)
## first order reconstruction loss
loss_recons = 2 * tf.trace(tf.matmul(tf.transpose(cur_message_layer), tf.sparse_tensor_dense_matmul(tf.cast(self.laplacian_param,tf.float32), cur_message_layer)))
edge_num = tf.sparse_reduce_sum(self.n2nsum_param)
loss_recons = tf.divide(loss_recons, edge_num)
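        # tr(H^T L H) grows with the embedding distance between adjacent nodes,
        # so this term pulls connected nodes toward similar embeddings
        # (normalized above by the summed adjacency weight).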
if self.IsPrioritizedSampling:
self.TD_errors = tf.reduce_sum(tf.abs(self.target - q_pred), axis=1) # for updating Sumtree
if self.IsHuberloss:
loss_rl = tf.losses.huber_loss(self.ISWeights * self.target, self.ISWeights * q_pred)
else:
loss_rl = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target, q_pred))
else:
if self.IsHuberloss:
loss_rl = tf.losses.huber_loss(self.target, q_pred)
else:
loss_rl = tf.losses.mean_squared_error(self.target, q_pred)
loss = loss_rl + Alpha * loss_recons
#
# loss = loss_rl
# self.lossRecons = loss_recons
# self.lossRL = loss_rl
trainStep = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#[node_cnt, batch_size] * [batch_size, embed_dim] = [node_cnt, embed_dim]
rep_y = tf.sparse_tensor_dense_matmul(tf.cast(self.rep_global, tf.float32), y_potential)
#[[node_cnt, embed_dim], [node_cnt, embed_dim]] = [node_cnt, 2*embed_dim]
# embed_s_a_all = tf.concat([cur_message_layer,rep_y],1)
# # [node_cnt, embed_dim, embed_dim]
temp1 = tf.matmul(tf.expand_dims(cur_message_layer, axis=2),tf.expand_dims(rep_y, axis=1))
# [node_cnt embed_dim]
Shape1 = tf.shape(cur_message_layer)
# [batch_size, embed_dim], first transform
embed_s_a_all = tf.reshape(tf.matmul(temp1, tf.reshape(tf.tile(cross_product,[Shape1[0],1]),[Shape1[0],Shape1[1],1])),Shape1)
#[node_cnt, 2 * embed_dim]
last_output = embed_s_a_all
if self.reg_hidden > 0:
#[node_cnt, 2 * embed_dim] * [2 * embed_dim, reg_hidden] = [node_cnt, reg_hidden1]
hidden = tf.matmul(embed_s_a_all, h1_weight)
#Relu, [node_cnt, reg_hidden1]
last_output = tf.nn.relu(hidden)
#[node_cnt, reg_hidden1] * [reg_hidden1, reg_hidden2] = [node_cnt, reg_hidden2]
# last_output_hidden = tf.matmul(last_output1, h2_weight)
# last_output = tf.nn.relu(last_output_hidden)
#[node_cnt, batch_size] * [batch_size, aux_dim] = [node_cnt, aux_dim]
rep_aux = tf.sparse_tensor_dense_matmul(tf.cast(self.rep_global, tf.float32), self.aux_input)
#if reg_hidden == 0: , [[node_cnt, 2 * embed_dim], [node_cnt, aux_dim]] = [node_cnt, 2*embed_dim + aux_dim]
#if reg_hidden > 0: , [[node_cnt, reg_hidden], [node_cnt, aux_dim]] = [node_cnt, reg_hidden + aux_dim]
last_output = tf.concat([last_output,rep_aux],1)
#if reg_hidden == 0: , [node_cnt, 2 * embed_dim + aux_dim] * [2 * embed_dim + aux_dim, 1] = [node_cnt,1]
        #if reg_hidden > 0: , [node_cnt, reg_hidden + aux_dim] * [reg_hidden + aux_dim, 1] = [node_cnt,1]
q_on_all = tf.matmul(last_output, last_w)
return loss,trainStep,q_pred,q_on_all,tf.trainable_variables()
def gen_graph(self, num_min, num_max):
cdef int max_n = num_max
cdef int min_n = num_min
cdef int cur_n = np.random.randint(max_n - min_n + 1) + min_n
if self.g_type == 'erdos_renyi':
g = nx.erdos_renyi_graph(n=cur_n, p=0.15)
elif self.g_type == 'powerlaw':
g = nx.powerlaw_cluster_graph(n=cur_n, m=4, p=0.05)
elif self.g_type == 'small-world':
g = nx.connected_watts_strogatz_graph(n=cur_n, k=8, p=0.1)
elif self.g_type == 'barabasi_albert':
g = nx.barabasi_albert_graph(n=cur_n, m=4)
### random weight
if self.training_type == 'random':
weights = {}
for node in g.nodes():
weights[node] = random.uniform(0,1)
# ### degree weight
elif self.training_type == 'degree':
degree = nx.degree(g)
maxDegree = max(dict(degree).values())
weights = {}
for node in g.nodes():
weights[node] = degree[node]/maxDegree
        elif self.training_type == 'degree_noise':
            degree = nx.degree(g)
            mu = np.mean(list(dict(degree).values()))
            std = np.std(list(dict(degree).values()))
            weights = {}
            for node in g.nodes():
                # perturb the degree-based weight with Gaussian noise drawn
                # from the degree distribution's own mean/std
                noise = np.random.normal(mu, std, 1)[0]
                weights[node] = abs(0.5 * degree[node] + noise)
            maxDegree = max(weights.values())
            for node in g.nodes():
                weights[node] = weights[node] / maxDegree
nx.set_node_attributes(g, weights,'weight')
return g
def gen_new_graphs(self, num_min, num_max):
print('\ngenerating new training graphs...')
sys.stdout.flush()
self.ClearTrainGraphs()
cdef int i
for i in tqdm(range(1000)):
g = self.gen_graph(num_min, num_max)
self.InsertGraph(g, is_test=False)
def ClearTrainGraphs(self):
self.ngraph_train = 0
self.TrainSet.Clear()
def ClearTestGraphs(self):
self.ngraph_test = 0
self.TestSet.Clear()
def InsertGraph(self,g,is_test):
cdef int t
if is_test:
t = self.ngraph_test
self.ngraph_test += 1
self.TestSet.InsertGraph(t, self.GenNetwork(g))
else:
t = self.ngraph_train
self.ngraph_train += 1
self.TrainSet.InsertGraph(t, self.GenNetwork(g))
#pass
def PrepareValidData(self):
print('\ngenerating validation graphs...')
sys.stdout.flush()
cdef double result_degree = 0.0
cdef double result_betweeness = 0.0
for i in tqdm(range(n_valid)):
g = self.gen_graph(NUM_MIN, NUM_MAX)
g_degree = g.copy()
g_betweenness = g.copy()
result_degree += self.HXA(g_degree,'HDA')
result_betweeness += self.HXA(g_betweenness,'HBA')
self.InsertGraph(g, is_test=True)
print ('Validation of HDA: %.16f'%(result_degree / n_valid))
print ('Validation of HBA: %.16f'%(result_betweeness / n_valid))
def Run_simulator(self, int num_seq, double eps, TrainSet, int n_step):
cdef int num_env = len(self.env_list)
cdef int n = 0
cdef int i
while n < num_seq:
for i in range(num_env):
if self.env_list[i].graph.num_nodes == 0 or self.env_list[i].isTerminal():
if self.env_list[i].graph.num_nodes > 0 and self.env_list[i].isTerminal():
n = n + 1
self.nStepReplayMem.Add(self.env_list[i], n_step)
g_sample= TrainSet.Sample()
self.env_list[i].s0(g_sample)
self.g_list[i] = self.env_list[i].graph
if n >= num_seq:
break
Random = False
if random.uniform(0,1) >= eps:
pred = self.PredictWithCurrentQNet(self.g_list, [env.action_list for env in self.env_list])
else:
Random = True
for i in range(num_env):
if (Random):
a_t = self.env_list[i].randomAction()
else:
a_t = self.argMax(pred[i])
self.env_list[i].step(a_t)
#pass
def PlayGame(self,int n_traj, double eps):
self.Run_simulator(n_traj, eps, self.TrainSet, N_STEP)
def SetupTrain(self, idxes, g_list, covered, actions, target):
self.m_y = target
self.inputs['target'] = self.m_y
prepareBatchGraph = PrepareBatchGraph.py_PrepareBatchGraph(aggregatorID)
prepareBatchGraph.SetupTrain(idxes, g_list, covered, actions)
self.inputs['action_select'] = prepareBatchGraph.act_select
self.inputs['rep_global'] = prepareBatchGraph.rep_global
self.inputs['n2nsum_param'] = prepareBatchGraph.n2nsum_param
self.inputs['laplacian_param'] = prepareBatchGraph.laplacian_param
self.inputs['subgsum_param'] = prepareBatchGraph.subgsum_param
self.inputs['node_input'] = prepareBatchGraph.node_feat
self.inputs['aux_input'] = prepareBatchGraph.aux_feat
def SetupPredAll(self, idxes, g_list, covered):
prepareBatchGraph = PrepareBatchGraph.py_PrepareBatchGraph(aggregatorID)
prepareBatchGraph.SetupPredAll(idxes, g_list, covered)
self.inputs['rep_global'] = prepareBatchGraph.rep_global
self.inputs['n2nsum_param'] = prepareBatchGraph.n2nsum_param
# self.inputs['laplacian_param'] = prepareBatchGraph.laplacian_param
self.inputs['subgsum_param'] = prepareBatchGraph.subgsum_param
self.inputs['node_input'] = prepareBatchGraph.node_feat
self.inputs['aux_input'] = prepareBatchGraph.aux_feat
return prepareBatchGraph.idx_map_list
def Predict(self,g_list,covered,isSnapSnot):
cdef int n_graphs = len(g_list)
cdef int i, j, k, bsize
for i in range(0, n_graphs, BATCH_SIZE):
bsize = BATCH_SIZE
if (i + BATCH_SIZE) > n_graphs:
bsize = n_graphs - i
batch_idxes = np.zeros(bsize)
for j in range(i, i + bsize):
batch_idxes[j - i] = j
batch_idxes = np.int32(batch_idxes)
idx_map_list = self.SetupPredAll(batch_idxes, g_list, covered)
if isSnapSnot:
result = self.session.run([self.q_on_allT], feed_dict={
self.rep_global: self.inputs['rep_global'],
self.n2nsum_param: self.inputs['n2nsum_param'],
self.subgsum_param: self.inputs['subgsum_param'],
self.node_input: self.inputs['node_input'],
self.aux_input: np.array(self.inputs['aux_input'])
})
else:
result = self.session.run([self.q_on_all], feed_dict={
self.rep_global: self.inputs['rep_global'],
self.n2nsum_param: self.inputs['n2nsum_param'],
self.subgsum_param: self.inputs['subgsum_param'],
self.node_input: self.inputs['node_input'],
self.aux_input: np.array(self.inputs['aux_input'])
})
raw_output = result[0]
pos = 0
pred = []
for j in range(i, i + bsize):
idx_map = idx_map_list[j-i]
cur_pred = np.zeros(len(idx_map))
for k in range(len(idx_map)):
if idx_map[k] < 0:
cur_pred[k] = -inf
else:
cur_pred[k] = raw_output[pos]
pos += 1
for k in covered[j]:
cur_pred[k] = -inf
pred.append(cur_pred)
assert (pos == len(raw_output))
return pred
def PredictWithCurrentQNet(self,g_list,covered):
result = self.Predict(g_list,covered,False)
return result
def PredictWithSnapshot(self,g_list,covered):
result = self.Predict(g_list,covered,True)
return result
#pass
def TakeSnapShot(self):
self.session.run(self.UpdateTargetQNetwork)
def Fit(self):
sample = self.nStepReplayMem.Sampling(BATCH_SIZE)
ness = False
cdef int i
for i in range(BATCH_SIZE):
if (not sample.list_term[i]):
ness = True
break
if ness:
if self.IsDoubleDQN:
double_list_pred = self.PredictWithCurrentQNet(sample.g_list, sample.list_s_primes)
double_list_predT = self.PredictWithSnapshot(sample.g_list, sample.list_s_primes)
list_pred = [a[self.argMax(b)] for a, b in zip(double_list_predT, double_list_pred)]
else:
list_pred = self.PredictWithSnapshot(sample.g_list, sample.list_s_primes)
list_target = np.zeros([BATCH_SIZE, 1])
for i in range(BATCH_SIZE):
q_rhs = 0
if (not sample.list_term[i]):
if self.IsDoubleDQN:
q_rhs=GAMMA * list_pred[i]
else:
q_rhs=GAMMA * self.Max(list_pred[i])
q_rhs += sample.list_rt[i]
list_target[i] = q_rhs
# list_target.append(q_rhs)
if self.IsPrioritizedSampling:
return self.fit_with_prioritized(sample.b_idx,sample.ISWeights,sample.g_list, sample.list_st, sample.list_at,list_target)
else:
return self.fit(sample.g_list, sample.list_st, sample.list_at,list_target)
def fit_with_prioritized(self,tree_idx,ISWeights,g_list,covered,actions,list_target):
cdef double loss = 0.0
cdef int n_graphs = len(g_list)
cdef int i, j, bsize
for i in range(0,n_graphs,BATCH_SIZE):
bsize = BATCH_SIZE
if (i + BATCH_SIZE) > n_graphs:
bsize = n_graphs - i
batch_idxes = np.zeros(bsize)
# batch_idxes = []
for j in range(i, i + bsize):
batch_idxes[j-i] = j
# batch_idxes.append(j)
batch_idxes = np.int32(batch_idxes)
self.SetupTrain(batch_idxes, g_list, covered, actions,list_target)
result = self.session.run([self.trainStep,self.TD_errors,self.loss],feed_dict={
self.action_select : self.inputs['action_select'],
self.rep_global : self.inputs['rep_global'],
self.n2nsum_param : self.inputs['n2nsum_param'],
self.laplacian_param : self.inputs['laplacian_param'],
self.subgsum_param : self.inputs['subgsum_param'],
self.node_input: self.inputs['node_input'],
self.aux_input : np.array(self.inputs['aux_input']),
self.ISWeights : np.mat(ISWeights).T,
self.target : self.inputs['target']})
self.nStepReplayMem.batch_update(tree_idx, result[1])
loss += result[2]*bsize
return loss / len(g_list)
def fit(self,g_list,covered,actions,list_target):
cdef double loss = 0.0
cdef int n_graphs = len(g_list)
cdef int i, j, bsize
for i in range(0,n_graphs,BATCH_SIZE):
bsize = BATCH_SIZE
if (i + BATCH_SIZE) > n_graphs:
bsize = n_graphs - i
batch_idxes = np.zeros(bsize)
# batch_idxes = []
for j in range(i, i + bsize):
batch_idxes[j-i] = j
# batch_idxes.append(j)
batch_idxes = np.int32(batch_idxes)
self.SetupTrain(batch_idxes, g_list, covered, actions,list_target)
result = self.session.run([self.loss,self.trainStep],feed_dict={
self.action_select : self.inputs['action_select'],
self.rep_global : self.inputs['rep_global'],
self.n2nsum_param : self.inputs['n2nsum_param'],
self.laplacian_param : self.inputs['laplacian_param'],
self.subgsum_param : self.inputs['subgsum_param'],
self.node_input: self.inputs['node_input'],
self.aux_input: np.array(self.inputs['aux_input']),
self.target : self.inputs['target']})
loss += result[0]*bsize
return loss / len(g_list)
#pass
def Train(self):
self.PrepareValidData()
self.gen_new_graphs(NUM_MIN, NUM_MAX)
cdef int i, iter, idx
for i in range(10):
self.PlayGame(100, 1)
self.TakeSnapShot()
cdef double eps_start = 1.0
cdef double eps_end = 0.05
cdef double eps_step = 10000.0
cdef int loss = 0
cdef double frac, start, end
save_dir = './models/Model_%s'%(self.g_type)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
VCFile = '%s/ModelVC_%d_%d.csv'%(save_dir, NUM_MIN, NUM_MAX)
f_out = open(VCFile, 'w')
for iter in range(MAX_ITERATION):
start = time.clock()
###########-----------------------normal training data setup(start) -----------------##############################
if iter and iter % 5000 == 0:
self.gen_new_graphs(NUM_MIN, NUM_MAX)
eps = eps_end + max(0., (eps_start - eps_end) * (eps_step - iter) / eps_step)
if iter % 10 == 0:
self.PlayGame(10, eps)
if iter % 300 == 0:
if(iter == 0):
N_start = start
else:
N_start = N_end
frac = 0.0
# n_valid = 1
test_start = time.time()
for idx in range(n_valid):
frac += self.Test(idx)
test_end = time.time()
f_out.write('%.16f\n'%(frac/n_valid)) #write vc into the file
f_out.flush()
print('iter', iter, 'eps', eps, 'average size of vc: ', frac / n_valid)
print ('testing 100 graphs time: %.8fs'%(test_end-test_start))
N_end = time.clock()
print ('300 iterations total time: %.8fs'%(N_end-N_start))
sys.stdout.flush()
model_path = '%s/nrange_%d_%d_iter_%d.ckpt' % (save_dir, NUM_MIN, NUM_MAX, iter)
self.SaveModel(model_path)
if iter % UPDATE_TIME == 0:
self.TakeSnapShot()
self.Fit()
f_out.close()
def findModel(self):
VCFile = './models/ModelVC_%d_%d.csv'%(NUM_MIN, NUM_MAX)
vc_list = []
for line in open(VCFile):
vc_list.append(float(line))
        start_loc = 33  # only consider checkpoints from this index onward when picking the best model
min_vc = start_loc + np.argmin(vc_list[start_loc:])
best_model_iter = 300 * min_vc
best_model = './models/nrange_%d_%d_iter_%d.ckpt' % (NUM_MIN, NUM_MAX, best_model_iter)
return best_model
    def Evaluate(self, data_test, model_file=None):
        if model_file is None:  # if the user does not specify a model file
            model_file = self.findModel()
            print('The best model is: %s' % model_file)
sys.stdout.flush()
self.LoadModel(model_file)
cdef int n_test = 100
cdef int i
result_list_score = []
result_list_time = []
sys.stdout.flush()
for i in tqdm(range(n_test)):
g_path = '%s/'%data_test + 'g_%d'%i
g = nx.read_gml(g_path)
self.InsertGraph(g, is_test=True)
t1 = time.time()
val, sol = self.GetSol(i)
t2 = time.time()
result_list_score.append(val)
result_list_time.append(t2-t1)
self.ClearTestGraphs()
score_mean = np.mean(result_list_score)
score_std = np.std(result_list_score)
time_mean = np.mean(result_list_time)
time_std = np.std(result_list_time)
return score_mean, score_std, time_mean, time_std
    def EvaluateRealData(self, model_file, data_test, save_dir, stepRatio=0.0025):  # evaluate on real-world data
cdef double solution_time = 0.0
test_name = data_test.split('/')[-1].replace('.gml','.txt')
save_dir_local = save_dir+'/StepRatio_%.4f'%stepRatio
if not os.path.exists(save_dir_local):#make dir
os.mkdir(save_dir_local)
result_file = '%s/%s' %(save_dir_local, test_name)
# g = nx.read_edgelist(data_test)
g = nx.read_gml(data_test)
# g = self.Real2networkx(g_temp)
with open(result_file, 'w') as f_out:
print ('testing')
sys.stdout.flush()
print ('number of nodes:%d'%(nx.number_of_nodes(g)))
print ('number of edges:%d'%(nx.number_of_edges(g)))
if stepRatio > 0:
step = int(stepRatio*nx.number_of_nodes(g)) #step size
else:
step = 1
self.InsertGraph(g, is_test=True)
t1 = time.time()
solution = self.GetSolution(0,step)
t2 = time.time()
solution_time = (t2 - t1)
for i in range(len(solution)):
f_out.write('%d\n' % solution[i])
self.ClearTestGraphs()
return solution, solution_time
def GetSolution(self, int gid, int step=1):
g_list = []
self.test_env.s0(self.TestSet.Get(gid))
g_list.append(self.test_env.graph)
sol = []
start = time.time()
cdef int iter = 0
cdef int new_action
while (not self.test_env.isTerminal()):
print ('Iteration:%d'%iter)
iter += 1
list_pred = self.PredictWithCurrentQNet(g_list, [self.test_env.action_list])
batchSol = np.argsort(-list_pred[0])[:step]
for new_action in batchSol:
if not self.test_env.isTerminal():
self.test_env.stepWithoutReward(new_action)
sol.append(new_action)
else:
break
return sol
    def EvaluateSol(self, data_test, sol_file, strategyID=0, reInsertStep=20):
        # Evaluate robustness given a solution; strategyID selects the
        # reinsertion heuristic (original note: 0: count, 2: rank, 3: multiply;
        # only strategyID > 0 actually triggers reinsertion below).
        sys.stdout.flush()
# g = nx.read_weighted_edgelist(data_test)
g = nx.read_gml(data_test)
g_inner = self.GenNetwork(g)
# print ('number of nodes:%d'%nx.number_of_nodes(g))
# print ('number of edges:%d'%nx.number_of_edges(g))
nodes = list(range(nx.number_of_nodes(g)))
sol = []
for line in open(sol_file):
sol.append(int(line))
sol_left = list(set(nodes)^set(sol))
if strategyID > 0:
start = time.time()
sol_reinsert = self.utils.reInsert(g_inner, sol, sol_left, strategyID, reInsertStep)
end = time.time()
print ('reInsert time:%.6f'%(end-start))
else:
sol_reinsert = sol
solution = sol_reinsert + sol_left
# print ('number of solution nodes:%d'%len(solution))
Robustness = self.utils.getRobustness(g_inner, solution)
MaxCCList = self.utils.MaxWccSzList
return Robustness, MaxCCList, solution
def Test(self,int gid):
g_list = []
self.test_env.s0(self.TestSet.Get(gid))
g_list.append(self.test_env.graph)
cdef double cost = 0.0
cdef int i
sol = []
while (not self.test_env.isTerminal()):
# cost += 1
list_pred = self.PredictWithCurrentQNet(g_list, [self.test_env.action_list])
new_action = self.argMax(list_pred[0])
self.test_env.stepWithoutReward(new_action)
sol.append(new_action)
nodes = list(range(g_list[0].num_nodes))
solution = sol + list(set(nodes)^set(sol))
Robustness = self.utils.getRobustness(g_list[0], solution)
return Robustness
def GetSol(self, int gid, int step=1):
g_list = []
self.test_env.s0(self.TestSet.Get(gid))
g_list.append(self.test_env.graph)
cdef double cost = 0.0
sol = []
# start = time.time()
cdef int new_action
while (not self.test_env.isTerminal()):
list_pred = self.PredictWithCurrentQNet(g_list, [self.test_env.action_list])
batchSol = np.argsort(-list_pred[0])[:step]
for new_action in batchSol:
if not self.test_env.isTerminal():
self.test_env.stepWithoutReward(new_action)
sol.append(new_action)
else:
break
# end = time.time()
# print ('solution obtained time is:%.8f'%(end-start))
nodes = list(range(g_list[0].num_nodes))
solution = sol + list(set(nodes)^set(sol))
Robustness = self.utils.getRobustness(g_list[0], solution)
# print ('Robustness is:%.8f'%(Robustness))
return Robustness, sol
    def SaveModel(self, model_path):
        self.saver.save(self.session, model_path)
        print('model saved successfully!')
    def LoadModel(self, model_path):
        self.saver.restore(self.session, model_path)
        print('model restored from file successfully')
    def GenNetwork(self, g):  # convert a networkx graph to the internal graph representation
nodes = g.nodes()
edges = g.edges()
weights = []
for i in range(len(nodes)):
weights.append(g.node[i]['weight'])
if len(edges) > 0:
a, b = zip(*edges)
A = np.array(a)
B = np.array(b)
W = np.array(weights)
else:
A = np.array([0])
B = np.array([0])
W = np.array([0])
return graph.py_Graph(len(nodes), len(edges), A, B, W)
def HXA(self, g, method):
# 'HDA', 'HBA', 'HPRA', 'HCA'
sol = []
G = g.copy()
while (nx.number_of_edges(G)>0):
if method == 'HDA':
dc = nx.degree_centrality(G)
elif method == 'HBA':
dc = nx.betweenness_centrality(G)
elif method == 'HCA':
dc = nx.closeness_centrality(G)
elif method == 'HPRA':
dc = nx.pagerank(G)
keys = list(dc.keys())
values = list(dc.values())
maxTag = np.argmax(values)
node = keys[maxTag]
sol.append(node)
G.remove_node(node)
solution = sol + list(set(g.nodes())^set(sol))
solutions = [int(i) for i in solution]
Robustness = self.utils.getRobustness(self.GenNetwork(g), solutions)
return Robustness
def argMax(self, scores):
cdef int n = len(scores)
cdef int pos = -1
cdef double best = -10000000
cdef int i
for i in range(n):
if pos == -1 or scores[i] > best:
pos = i
best = scores[i]
return pos
def Max(self, scores):
cdef int n = len(scores)
cdef int pos = -1
cdef double best = -10000000
cdef int i
for i in range(n):
if pos == -1 or scores[i] > best:
pos = i
best = scores[i]
return best
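# Hedged usage sketch (comments only; the module must first be built with
# Cython, and the data path below is illustrative):
#
#   dqn = FINDER()
#   dqn.Train()  # trains from scratch, writing checkpoints under ./models/
#   # or evaluate a trained model on 100 synthetic test graphs:
#   mean, std, t_mean, t_std = dqn.Evaluate('./data/synthetic_30_50')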
|
e5ba12545e1f7bdce6a8332a16bd4ab64017c8f6
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/Focus-DETR/config/Focus_DETR/coco_transformer.py
|
064dc4f443bf983c6376a3acc2ca1a42e5e83b5e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
coco_transformer.py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
data_aug_scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
data_aug_max_size = 1333
data_aug_scales2_resize = [400, 500, 600]
data_aug_scales2_crop = [384, 600]
data_aug_scale_overlap = None
|
92fbd9a2886157790eb3345969ac97a69e66d66f
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/tests/render/test_inline_renderer.py
|
10c1df373e8a188edf6bcdabaf5fab6edbb11cf7
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 30,838
|
py
|
test_inline_renderer.py
|
from typing import List
import pytest
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.render import (
AtomicDiagnosticRendererType,
AtomicPrescriptiveRendererType,
RenderedAtomicContent,
)
from great_expectations.render.exceptions import InlineRendererError
from great_expectations.render.renderer.inline_renderer import InlineRenderer
from great_expectations.render.renderer_configuration import MetaNotesFormat
# module level markers
pytestmark = pytest.mark.unit
def clean_serialized_rendered_atomic_content_graphs(
serialized_rendered_atomic_content: List[dict],
) -> List[dict]:
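    # Graph blocks embed generated Vega-Lite payloads ($schema, data, datasets)
    # that can vary across environments, so strip them before comparison.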
for content_block in serialized_rendered_atomic_content:
if content_block["value_type"] == "GraphType":
content_block["value"]["graph"].pop("$schema")
content_block["value"]["graph"].pop("data")
content_block["value"]["graph"].pop("datasets")
return serialized_rendered_atomic_content
def test_inline_renderer_instantiation_error_message(
basic_expectation_suite: ExpectationSuite,
):
expectation_suite: ExpectationSuite = basic_expectation_suite
with pytest.raises(InlineRendererError) as e:
InlineRenderer(render_object=expectation_suite) # type: ignore
assert (
str(e.value)
== "InlineRenderer can only be used with an ExpectationConfiguration or ExpectationValidationResult, but <class 'great_expectations.core.expectation_suite.ExpectationSuite'> was used."
)
@pytest.mark.parametrize(
"expectation_configuration,fake_result,expected_serialized_expectation_validation_result_rendered_atomic_content",
[
pytest.param(
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 3},
),
{"observed_value": 3},
[
{
"name": AtomicDiagnosticRendererType.OBSERVED_VALUE,
"value": {
"params": {},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "3",
},
"value_type": "StringValueType",
},
{
"value_type": "StringValueType",
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"template": "Must have exactly $value rows.",
"schema": {"type": "com.superconductive.rendered.string"},
"params": {
"value": {"schema": {"type": "number"}, "value": 3},
},
},
},
],
id="equal",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={"column": "event_type", "min_value": 3, "max_value": 20},
),
{"observed_value": 19},
[
{
"name": AtomicDiagnosticRendererType.OBSERVED_VALUE,
"value": {
"params": {},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "19",
},
"value_type": "StringValueType",
},
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "event_type",
},
"max_value": {"schema": {"type": "number"}, "value": 20},
"min_value": {"schema": {"type": "number"}, "value": 3},
},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "$column minimum value must be greater than or equal "
"to $min_value and less than or equal to $max_value.",
},
"value_type": "StringValueType",
},
],
id="between",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_quantile_values_to_be_between",
kwargs={
"column": "user_id",
"quantile_ranges": {
"quantiles": [0.0, 0.5, 1.0],
"value_ranges": [
[300000, 400000],
[2000000, 4000000],
[4000000, 10000000],
],
},
},
),
{
"observed_value": {
"quantiles": [0.0, 0.5, 1.0],
"values": [397433, 2388055, 9488404],
},
"details": {"success_details": [True, True, True]},
},
[
{
"name": AtomicDiagnosticRendererType.OBSERVED_VALUE,
"value": {
"header_row": [
{"schema": {"type": "string"}, "value": "Quantile"},
{"schema": {"type": "string"}, "value": "Value"},
],
"schema": {"type": "TableType"},
"table": [
[
{"schema": {"type": "string"}, "value": "0.00"},
{"schema": {"type": "number"}, "value": 397433},
],
[
{"schema": {"type": "string"}, "value": "Median"},
{"schema": {"type": "number"}, "value": 2388055},
],
[
{"schema": {"type": "string"}, "value": "1.00"},
{"schema": {"type": "number"}, "value": 9488404},
],
],
},
"value_type": "TableType",
},
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"header": {
"schema": {"type": "StringValueType"},
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "user_id",
},
},
"template": "$column quantiles must be within "
"the following value ranges.",
},
},
"header_row": [
{"schema": {"type": "string"}, "value": "Quantile"},
{"schema": {"type": "string"}, "value": "Min Value"},
{"schema": {"type": "string"}, "value": "Max Value"},
],
"schema": {"type": "TableType"},
"table": [
[
{"schema": {"type": "string"}, "value": "0.00"},
{"schema": {"type": "number"}, "value": 300000},
{"schema": {"type": "number"}, "value": 400000},
],
[
{"schema": {"type": "string"}, "value": "Median"},
{"schema": {"type": "number"}, "value": 2000000},
{"schema": {"type": "number"}, "value": 4000000},
],
[
{"schema": {"type": "string"}, "value": "1.00"},
{"schema": {"type": "number"}, "value": 4000000},
{"schema": {"type": "number"}, "value": 10000000},
],
],
},
"value_type": "TableType",
},
],
id="table",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "event_type", "value_set": [19, 22, 73]},
),
{
"element_count": 3,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0,
},
[
{
"name": AtomicDiagnosticRendererType.OBSERVED_VALUE,
"value": {
"params": {},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "0% unexpected",
},
"value_type": "StringValueType",
},
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "event_type",
},
"v__0": {"schema": {"type": "number"}, "value": 19},
"v__1": {"schema": {"type": "number"}, "value": 22},
"v__2": {"schema": {"type": "number"}, "value": 73},
"value_set": {
"schema": {"type": "array"},
"value": [19, 22, 73],
},
},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "$column values must belong to this set: $v__0 $v__1 "
"$v__2.",
},
"value_type": "StringValueType",
},
],
id="set",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_kl_divergence_to_be_less_than",
kwargs={
"column": "user_id",
"partition_object": {
"values": [2000000, 6000000],
"weights": [0.3, 0.7],
},
},
),
{
"observed_value": None,
"details": {
"observed_partition": {
"values": [2000000, 6000000, 397433, 2388055, 9488404],
"weights": [
0.0,
0.0,
0.3333333333333333,
0.3333333333333333,
0.3333333333333333,
],
},
"expected_partition": {
"values": [2000000, 6000000, 397433, 2388055, 9488404],
"weights": [0.3, 0.7, 0.0, 0.0, 0.0],
},
},
},
[
{
"name": AtomicDiagnosticRendererType.OBSERVED_VALUE,
"value": {
"graph": {
"autosize": "fit",
"config": {
"view": {
"continuousHeight": 300,
"continuousWidth": 400,
}
},
"encoding": {
"tooltip": [
{"field": "values", "type": "quantitative"},
{"field": "fraction", "type": "quantitative"},
],
"x": {"field": "values", "type": "nominal"},
"y": {"field": "fraction", "type": "quantitative"},
},
"height": 400,
"mark": "bar",
"width": 250,
},
"header": {
"schema": {"type": "StringValueType"},
"value": {
"params": {
"observed_value": {
"schema": {"type": "string"},
"value": "None (-infinity, infinity, or NaN)",
}
},
"template": "KL Divergence: $observed_value",
},
},
"schema": {"type": "GraphType"},
},
"value_type": "GraphType",
},
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"graph": {
"autosize": "fit",
"config": {
"view": {
"continuousHeight": 300,
"continuousWidth": 400,
}
},
"encoding": {
"tooltip": [
{"field": "values", "type": "quantitative"},
{"field": "fraction", "type": "quantitative"},
],
"x": {"field": "values", "type": "nominal"},
"y": {"field": "fraction", "type": "quantitative"},
},
"height": 400,
"mark": "bar",
"width": 250,
},
"header": {
"schema": {"type": "StringValueType"},
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "user_id",
},
},
"template": "$column Kullback-Leibler (KL) "
"divergence with respect to the "
"following distribution must be "
"lower than $threshold.",
},
},
"schema": {"type": "GraphType"},
},
"value_type": "GraphType",
},
],
id="graph",
),
],
)
def test_inline_renderer_expectation_validation_result_serialization(
expectation_configuration: ExpectationConfiguration,
fake_result: dict,
expected_serialized_expectation_validation_result_rendered_atomic_content: dict,
):
expectation_validation_result = ExpectationValidationResult(
exception_info={
"raised_exception": False,
"exception_traceback": None,
"exception_message": None,
},
expectation_config=expectation_configuration,
result=fake_result,
success=True,
)
inline_renderer: InlineRenderer = InlineRenderer(
render_object=expectation_validation_result
)
expectation_validation_result_rendered_atomic_content: List[
RenderedAtomicContent
] = inline_renderer.get_rendered_content()
actual_serialized_expectation_validation_result_rendered_atomic_content: List[
dict
] = clean_serialized_rendered_atomic_content_graphs(
serialized_rendered_atomic_content=[
rendered_atomic_content.to_json_dict()
for rendered_atomic_content in expectation_validation_result_rendered_atomic_content
]
)
assert (
actual_serialized_expectation_validation_result_rendered_atomic_content
== expected_serialized_expectation_validation_result_rendered_atomic_content
)
@pytest.mark.parametrize(
"expectation_configuration,expected_serialized_expectation_configuration_rendered_atomic_content",
[
pytest.param(
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 3},
),
[
{
"value_type": "StringValueType",
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"template": "Must have exactly $value rows.",
"schema": {"type": "com.superconductive.rendered.string"},
"params": {
"value": {"schema": {"type": "number"}, "value": 3},
},
},
}
],
id="equal",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={"column": "event_type", "min_value": 3, "max_value": 20},
),
[
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "event_type",
},
"max_value": {"schema": {"type": "number"}, "value": 20},
"min_value": {"schema": {"type": "number"}, "value": 3},
},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "$column minimum value must be greater than or equal "
"to $min_value and less than or equal to $max_value.",
},
"value_type": "StringValueType",
}
],
id="between",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_quantile_values_to_be_between",
kwargs={
"column": "user_id",
"quantile_ranges": {
"quantiles": [0.0, 0.5, 1.0],
"value_ranges": [
[300000, 400000],
[2000000, 4000000],
[4000000, 10000000],
],
},
},
),
[
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"header": {
"schema": {"type": "StringValueType"},
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "user_id",
},
},
"template": "$column quantiles must be within "
"the following value ranges.",
},
},
"header_row": [
{"schema": {"type": "string"}, "value": "Quantile"},
{"schema": {"type": "string"}, "value": "Min Value"},
{"schema": {"type": "string"}, "value": "Max Value"},
],
"schema": {"type": "TableType"},
"table": [
[
{"schema": {"type": "string"}, "value": "0.00"},
{"schema": {"type": "number"}, "value": 300000},
{"schema": {"type": "number"}, "value": 400000},
],
[
{"schema": {"type": "string"}, "value": "Median"},
{"schema": {"type": "number"}, "value": 2000000},
{"schema": {"type": "number"}, "value": 4000000},
],
[
{"schema": {"type": "string"}, "value": "1.00"},
{"schema": {"type": "number"}, "value": 4000000},
{"schema": {"type": "number"}, "value": 10000000},
],
],
},
"value_type": "TableType",
}
],
id="table",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={"column": "event_type", "value_set": [19, 22, 73]},
),
[
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "event_type",
},
"v__0": {"schema": {"type": "number"}, "value": 19},
"v__1": {"schema": {"type": "number"}, "value": 22},
"v__2": {"schema": {"type": "number"}, "value": 73},
"value_set": {
"schema": {"type": "array"},
"value": [19, 22, 73],
},
},
"schema": {"type": "com.superconductive.rendered.string"},
"template": "$column values must belong to this set: $v__0 $v__1 "
"$v__2.",
},
"value_type": "StringValueType",
}
],
id="set",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_column_kl_divergence_to_be_less_than",
kwargs={
"column": "user_id",
"partition_object": {
"values": [2000000, 6000000],
"weights": [0.3, 0.7],
},
},
),
[
{
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"graph": {
"autosize": "fit",
"config": {
"view": {
"continuousHeight": 300,
"continuousWidth": 400,
}
},
"encoding": {
"tooltip": [
{"field": "values", "type": "quantitative"},
{"field": "fraction", "type": "quantitative"},
],
"x": {"field": "values", "type": "nominal"},
"y": {"field": "fraction", "type": "quantitative"},
},
"height": 400,
"mark": "bar",
"width": 250,
},
"header": {
"schema": {"type": "StringValueType"},
"value": {
"params": {
"column": {
"schema": {"type": "string"},
"value": "user_id",
},
},
"template": "$column Kullback-Leibler (KL) "
"divergence with respect to the "
"following distribution must be "
"lower than $threshold.",
},
},
"schema": {"type": "GraphType"},
},
"value_type": "GraphType",
}
],
id="graph",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 3},
meta={
"notes": {
"format": MetaNotesFormat.STRING,
"content": ["This is the most important Expectation!!"],
}
},
),
[
{
"value_type": "StringValueType",
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"template": "Must have exactly $value rows.",
"schema": {"type": "com.superconductive.rendered.string"},
"params": {
"value": {"schema": {"type": "number"}, "value": 3},
},
"meta_notes": {
"content": ["This is the most important Expectation!!"],
"format": MetaNotesFormat.STRING,
},
},
}
],
id="meta_notes content list",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 3},
meta={
"notes": {
"format": MetaNotesFormat.STRING,
"content": "This is the most important Expectation!!",
}
},
),
[
{
"value_type": "StringValueType",
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"template": "Must have exactly $value rows.",
"schema": {"type": "com.superconductive.rendered.string"},
"params": {
"value": {"schema": {"type": "number"}, "value": 3},
},
"meta_notes": {
"content": ["This is the most important Expectation!!"],
"format": MetaNotesFormat.STRING,
},
},
}
],
id="meta_notes content string",
),
pytest.param(
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 3},
meta={
"notes": "This is the most important Expectation!!",
},
),
[
{
"value_type": "StringValueType",
"name": AtomicPrescriptiveRendererType.SUMMARY,
"value": {
"template": "Must have exactly $value rows.",
"schema": {"type": "com.superconductive.rendered.string"},
"params": {
"value": {"schema": {"type": "number"}, "value": 3},
},
"meta_notes": {
"content": ["This is the most important Expectation!!"],
"format": MetaNotesFormat.STRING,
},
},
}
],
id="meta_notes string",
),
],
)
def test_inline_renderer_expectation_configuration_serialization(
expectation_configuration: ExpectationConfiguration,
expected_serialized_expectation_configuration_rendered_atomic_content: dict,
):
inline_renderer: InlineRenderer = InlineRenderer(
render_object=expectation_configuration
)
expectation_configuration_rendered_atomic_content: List[
RenderedAtomicContent
] = inline_renderer.get_rendered_content()
actual_serialized_expectation_configuration_rendered_atomic_content: List[
dict
] = clean_serialized_rendered_atomic_content_graphs(
serialized_rendered_atomic_content=[
rendered_atomic_content.to_json_dict()
for rendered_atomic_content in expectation_configuration_rendered_atomic_content
]
)
assert (
actual_serialized_expectation_configuration_rendered_atomic_content
== expected_serialized_expectation_configuration_rendered_atomic_content
)
|
c43ee4f097aa71f1eec4010eb4277eef013217d5
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/customization/customization_carousel.py
|
0ed410c7fe93cb82aceadd649127ab69e4f2d93e
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 29,408
|
py
|
customization_carousel.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/customization/customization_carousel.py
import logging
from collections import defaultdict, namedtuple, OrderedDict
from itertools import chain
import typing
from CurrentVehicle import g_currentVehicle
from cache import cached_property
from gui.Scaleform.daapi.view.lobby.customization.shared import CustomizationTabs, TYPES_ORDER, isItemLimitReached, isItemUsedUp, vehicleHasSlot, ITEM_TYPE_TO_TAB
from gui.Scaleform.framework.entities.DAAPIDataProvider import SortableDAAPIDataProvider
from gui.customization.constants import CustomizationModes
from gui.customization.shared import getBaseStyleItems, createCustomizationBaseRequestCriteria, C11N_ITEM_TYPE_MAP, getInheritors, getAncestors, getGroupHelper
from gui.impl import backport
from gui.impl.gen import R
from gui.shared.gui_items import GUI_ITEM_TYPE
from gui.shared.utils.requesters import REQ_CRITERIA, RequestCriteria
from helpers import dependency
from items.components.c11n_constants import SeasonType, ProjectionDecalFormTags, ItemTags, EMPTY_ITEM_ID
from items import vehicles
from skeletons.gui.customization import ICustomizationService
from skeletons.gui.server_events import IEventsCache
from skeletons.gui.shared import IItemsCache
if typing.TYPE_CHECKING:
from gui.shared.gui_items.customization.c11n_items import Customization
_logger = logging.getLogger(__name__)
def comparisonKey(item):
typeOrder = TYPES_ORDER.index(item.itemTypeID)
isNationalEmblem = ItemTags.NATIONAL_EMBLEM in item.tags
formfactorId = ProjectionDecalFormTags.ALL.index(item.formfactor) if hasattr(item, 'formfactor') and item.formfactor else 0
return (typeOrder,
not isNationalEmblem,
not item.isRare(),
item.groupID,
formfactorId,
item.id)
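# The sort key above orders carousel items by item-type order first; because
# False sorts before True, national emblems and rare items are then placed
# ahead of the rest, with groupID, projection-decal form factor and item id
# as the final tie-breakers.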
CustomizationBookmarkVO = namedtuple('CustomizationBookmarkVO', ('bookmarkName', 'bookmarkIndex'))
CustomizationArrowVO = namedtuple('CustomizationArrowVO', ('index', 'enabled'))
SelectedItem = namedtuple('SelectedItem', ('intCD', 'idx'))
SelectedItem.__new__.__defaults__ = (-1, -1)
class ItemsData(object):
def __init__(self, items=None, groups=None):
self.items = items or []
self.groups = groups or OrderedDict()
@cached_property
def hasUsedUpItems(self):
return any((isItemUsedUp(item) for item in self.items))
@cached_property
def hasProgressiveItems(self):
return any((item.isProgressive for item in self.items))
@cached_property
def hasQuestProgressItems(self):
return any((item.isQuestsProgression for item in self.items))
class CarouselData(object):
__slots__ = ('items', 'sizes', 'bookmarks', 'arrows', 'showSeparators')
def __init__(self):
self.items = []
self.sizes = []
self.bookmarks = []
self.arrows = []
self.showSeparators = False
class CarouselCache(object):
__itemsCache = dependency.descriptor(IItemsCache)
__service = dependency.descriptor(ICustomizationService)
__eventsCache = dependency.descriptor(IEventsCache)
def __init__(self, createFilterCriteria, createSortCriteria):
self.__itemsData = defaultdict(lambda : defaultdict(OrderedDict))
self.__carouselData = {}
self.__createFilterCriteria = createFilterCriteria
self.__createSortCriteria = createSortCriteria
self.__cachedEditableStyleId = 0
self.__ctx = self.__service.getCtx()
def fini(self):
self.invalidateItemsData()
self.invalidateCarouselData()
self.__invalidateEditableStyleCache()
self.__createFilterCriteria = None
self.__createSortCriteria = None
self.__ctx = None
return
def getVisibleTabs(self):
season, modeId = self.__ctx.season, self.__ctx.modeId
self.__invalidateEditableStyleCache()
self.__initCache()
visibleTabs = self.__itemsData[modeId][season].keys()
return visibleTabs
def getItemsData(self, season=None, modeId=None, tabId=None):
season = season or self.__ctx.season
modeId = modeId or self.__ctx.modeId
tabId = tabId or self.__ctx.mode.tabId
self.__invalidateEditableStyleCache()
self.__initCache()
itemsData = self.__itemsData[modeId][season].get(tabId, ItemsData())
return itemsData
def getCarouselData(self, season=None, modeId=None, tabId=None):
season = season or self.__ctx.season
modeId = modeId or self.__ctx.modeId
tabId = tabId or self.__ctx.mode.tabId
self.__invalidateEditableStyleCache()
carouselData = self.__carouselData.get(modeId, {}).get(season, {}).get(tabId)
if carouselData is None:
carouselData = self.__getCarouselData(season, modeId, tabId)
self.__carouselData.setdefault(modeId, {}).setdefault(season, {})[tabId] = carouselData
return carouselData
def invalidateItemsData(self):
self.__itemsData.clear()
self.__cachedEditableStyleId = 0
def invalidateCarouselData(self):
self.__carouselData.clear()
def __initCache(self):
if not self.__itemsData:
self.__initItemsData()
if self.__ctx.modeId == CustomizationModes.EDITABLE_STYLE and not self.__cachedEditableStyleId:
self.__initEditableStyleItemsData()
def __getCarouselData(self, season=None, modeId=None, tabId=None):
itemsData = self.getItemsData(season, modeId, tabId)
filteredItems = filter(self.__createFilterCriteria(), itemsData.items)
sortCriteria = self.__createSortCriteria()
showBookmarks = True
if sortCriteria:
filteredItems.sort(key=sortCriteria)
showBookmarks = False
carouselData = CarouselData()
lastGroupID = None
carouselData.showSeparators = itemsData.hasQuestProgressItems and self.__ctx.mode.modeId == CustomizationModes.EDITABLE_STYLE
for idx, item in enumerate(filteredItems):
helper = getGroupHelper(item)
groupID = helper.getGroupID()
groupUserName = helper.getGroupName()
if showBookmarks and groupID != lastGroupID:
lastGroupID = groupID
bookmarkVO = CustomizationBookmarkVO(groupUserName, len(carouselData.items))
carouselData.bookmarks.append(bookmarkVO._asdict())
isLastItem = idx == len(filteredItems) - 1
if item.isQuestsProgression and not isLastItem:
nextItem = filteredItems[idx + 1]
nextGroupID = getGroupHelper(nextItem).getGroupID()
if nextItem and nextGroupID == groupID and item.descriptor.requiredTokenCount != nextItem.descriptor.requiredTokenCount:
arrowVO = CustomizationArrowVO(idx, item.isUnlockedByToken())
carouselData.arrows.append(arrowVO._asdict())
carouselData.items.append(item.intCD)
carouselData.sizes.append(item.isWide())
return carouselData
def __initItemsData(self):
self.__itemsData.clear()
requirement = createCustomizationBaseRequestCriteria(g_currentVehicle.item, self.__eventsCache.questsProgress, self.__ctx.mode.getAppliedItems())
requirement |= REQ_CRITERIA.CUSTOM(lambda item: not item.isHiddenInUI())
itemTypes = []
for tabId, slotType in CustomizationTabs.SLOT_TYPES.iteritems():
if vehicleHasSlot(slotType):
itemTypes.extend(CustomizationTabs.ITEM_TYPES[tabId])
allItems = []
customizationCache = vehicles.g_cache.customization20().itemTypes
cTypes = set((C11N_ITEM_TYPE_MAP[iType] for iType in itemTypes if iType in C11N_ITEM_TYPE_MAP))
for cType in cTypes:
for itemID in customizationCache[cType]:
if itemID == EMPTY_ITEM_ID:
continue
intCD = vehicles.makeIntCompactDescrByID('customizationItem', cType, itemID)
item = self.__service.getItemByCD(intCD)
if requirement(item):
allItems.append(item)
sortedItems = sorted(allItems, key=comparisonKey)
customModeTabs = CustomizationTabs.MODES[CustomizationModes.CUSTOM]
for item in sortedItems:
tabId = ITEM_TYPE_TO_TAB[item.itemTypeID]
modeId = CustomizationModes.CUSTOM if tabId in customModeTabs else CustomizationModes.STYLED
for season in SeasonType.COMMON_SEASONS:
if not item.season & season:
continue
itemsDataStorage = self.__itemsData[modeId][season]
if not itemsDataStorage or tabId != itemsDataStorage.keys()[-1]:
itemsDataStorage[tabId] = ItemsData()
itemsData = itemsDataStorage.values()[-1]
if not itemsData.groups or item.groupID != itemsData.groups.keys()[-1]:
itemsData.groups[item.groupID] = item.groupUserName
itemsData.items.append(item)
def __initEditableStyleItemsData(self):
style = self.__ctx.mode.style
if CustomizationModes.EDITABLE_STYLE in self.__itemsData:
self.__itemsData[CustomizationModes.EDITABLE_STYLE].clear()
vehicleCD = g_currentVehicle.item.descriptor.makeCompactDescr()
itemsFilter = style.descriptor.isItemInstallable
for season in SeasonType.COMMON_SEASONS:
itemsDataStorage = self.__itemsData[CustomizationModes.CUSTOM][season]
styleBaseOutfit = style.getOutfit(season, vehicleCD)
styleBaseItems = [ self.__service.getItemByCD(intCD) for intCD in styleBaseOutfit.items() ]
for tabId, itemsData in itemsDataStorage.iteritems():
itemTypes = CustomizationTabs.ITEM_TYPES[tabId]
questItems = []
questItemsIDs = []
if style.isQuestsProgression:
qProg = style.descriptor.questsProgression
for token in sorted(qProg.getGroupTokens()):
groupItems = qProg.getItemsForGroup(token)
for itemsForLevel in groupItems:
for itemType in itemTypes:
c11nType = C11N_ITEM_TYPE_MAP[itemType]
itemsIdsForType = itemsForLevel.get(c11nType, ())
buf = [ self.__service.getItemByID(itemType, itemId) for itemId in itemsIdsForType ]
for item in buf:
if item.itemTypeID in itemTypes and item.season & season:
questItems.append(item)
questItemsIDs.append(item.id)
filteredItems = [ item for item in itemsData.items if itemsFilter(item.descriptor) and item.id not in questItemsIDs ]
alternateItems = []
for itemType in itemTypes:
c11nType = C11N_ITEM_TYPE_MAP[itemType]
alternateItemIds = style.descriptor.alternateItems.get(c11nType, ())
buf = [ self.__service.getItemByID(itemType, itemId) for itemId in alternateItemIds if itemId not in questItemsIDs ]
alternateItems.extend([ i for i in buf if i.itemTypeID in itemTypes and i.season & season ])
if not any((questItems, alternateItems, filteredItems)):
continue
baseItems = [ item for item in styleBaseItems if item.itemTypeID in itemTypes and item.season & season and item.id not in questItemsIDs ]
items = questItems + sorted(set(chain(alternateItems, filteredItems, baseItems)), key=comparisonKey)
groups = OrderedDict()
for item in items:
helper = getGroupHelper(item)
groupID = helper.getGroupID()
groupUserName = helper.getGroupName()
if not groups or groupID != groups.keys()[-1]:
groups[groupID] = groupUserName
self.__itemsData[CustomizationModes.EDITABLE_STYLE][season][tabId] = ItemsData(items, groups)
self.__cachedEditableStyleId = style.id
def __invalidateEditableStyleCache(self):
if self.__ctx.modeId != CustomizationModes.EDITABLE_STYLE:
return
if self.__cachedEditableStyleId == self.__ctx.mode.style.id:
return
self.__cachedEditableStyleId = 0
if CustomizationModes.EDITABLE_STYLE in self.__itemsData:
self.__itemsData[CustomizationModes.EDITABLE_STYLE].clear()
self.__carouselData.get(self.__ctx.modeId, {}).clear()
class CustomizationCarouselDataProvider(SortableDAAPIDataProvider):
__service = dependency.descriptor(ICustomizationService)
def __init__(self, carouselItemWrapper):
super(CustomizationCarouselDataProvider, self).__init__()
self.__ctx = self.__service.getCtx()
self.__selectedItem = SelectedItem()
self.__selectedGroup = {}
self.__carouselFilters = {}
self.__appliedItems = set()
self.__baseStyleItems = set()
self.__dependentItems = tuple()
self.__carouselData = CarouselData()
self.__carouselCache = CarouselCache(createFilterCriteria=self.__createFilterCriteria, createSortCriteria=self.__createSortCriteria)
self.setItemWrapper(carouselItemWrapper)
self.__initFilters()
def _dispose(self):
self.__carouselCache.fini()
self.__carouselCache = None
self.__ctx = None
self.__carouselData = None
super(CustomizationCarouselDataProvider, self)._dispose()
return
@property
def collection(self):
return self.__carouselData.items
@property
def itemCount(self):
return len(self.__carouselData.items)
@property
def totalItemCount(self):
itemsData = self.__carouselCache.getItemsData()
return len(itemsData.items)
def pyGetSelectedIdx(self):
return self.__selectedItem.idx
def emptyItem(self):
return None
def refresh(self):
if not g_currentVehicle.isPresent():
return
super(CustomizationCarouselDataProvider, self).refresh()
self.__baseStyleItems = getBaseStyleItems()
def buildList(self):
self.__appliedItems = self.__ctx.mode.getAppliedItems(isOriginal=False)
for camoIntCD, dependentItems in self.__ctx.mode.getDependenciesData().iteritems():
if camoIntCD in self.__appliedItems:
self.__dependentItems = dependentItems
break
else:
self.__dependentItems = tuple()
self.__updateCarouselData()
def getVisibleTabs(self):
return self.__carouselCache.getVisibleTabs()
def getItemsData(self, season=None, modeId=None, tabId=None):
return self.__carouselCache.getItemsData(season, modeId, tabId).items
def getCarouselData(self, season=None, modeId=None, tabId=None):
return self.__carouselCache.getCarouselData(season, modeId, tabId).items
def getAppliedItems(self):
return self.__appliedItems
def getBaseStyleItems(self):
return self.__baseStyleItems
def getItemSizeData(self):
return self.__carouselData.sizes
def getBookmarskData(self):
return self.__carouselData.bookmarks
def getArrowsData(self):
return self.__carouselData.arrows
def getShowSeparatorsData(self):
return self.__carouselData.showSeparators
def getDependentItems(self):
return self.__dependentItems
def processDependentParams(self, item):
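        # Returns an (isMarkedAsDependent, isUnsuitable) pair describing how
        # the item relates to the current style's camouflage dependency data.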
isMarkedAsDependent = False
isUnsuitable = False
styleDependencies = self.__ctx.mode.getDependenciesData()
if styleDependencies:
itemCD = item.intCD
isApplied = itemCD in self.getAppliedItems()
if item.itemTypeID == GUI_ITEM_TYPE.CAMOUFLAGE:
if isApplied:
isMarkedAsDependent = bool(getInheritors(itemCD, styleDependencies))
else:
selectedDependentItems = self.getDependentItems()
if selectedDependentItems:
if itemCD in selectedDependentItems:
isMarkedAsDependent = isApplied
elif getAncestors(itemCD, styleDependencies):
isUnsuitable = True
return (isMarkedAsDependent, isUnsuitable)
def onModeChanged(self, modeId, prevModeId):
visibleTabs = self.getVisibleTabs()
if not visibleTabs:
return
tabId = visibleTabs[0]
if CustomizationModes.EDITABLE_STYLE in (modeId, prevModeId):
self.clearFilter()
self.__selectedGroup.clear()
self.invalidateFilteredItems()
if self.__ctx.mode.getDependenciesData():
if CustomizationTabs.CAMOUFLAGES in visibleTabs:
tabId = CustomizationTabs.CAMOUFLAGES
else:
                _logger.warning('A style with dependencies has to open the Camouflages tab, but this tab was not found!')
self.__ctx.mode.changeTab(tabId)
def hasAppliedFilter(self):
isGroupSelected = self.__getSelectedGroupIdx() is not None
isAnyFilterApplied = any((carouselFilter.isApplied() for carouselFilter in self.__carouselFilters.itervalues()))
return isAnyFilterApplied or isGroupSelected
def selectItem(self, item=None):
prevSelectedItem = self.__selectedItem
intCD = item.intCD if item is not None else -1
self.__updateSelection(intCD)
if prevSelectedItem != self.__selectedItem:
self.refresh()
return
def getNextItem(self, reverse):
if self.__selectedItem.idx == -1:
return None
else:
outfits = self.__ctx.mode.getModifiedOutfits()
shift = -1 if reverse else 1
itemsCount = len(self.collection)
idx = self.__selectedItem.idx + shift
while 0 <= idx < itemsCount:
intCD = self.collection[idx]
item = self.__service.getItemByCD(intCD)
if not isItemLimitReached(item, outfits) or item.isStyleOnly and not self.processDependentParams(item)[1]:
return item
idx += shift
return None
def getFilterData(self):
itemsData = self.__carouselCache.getItemsData()
groups = itemsData.groups.values()
if len(groups) > 1:
groups.append(backport.text(R.strings.vehicle_customization.customization.filter.allGroups()))
groupCount = len(groups)
selectedGroup = self.__getSelectedGroupIdx()
if selectedGroup is None:
selectedGroup = groupCount - 1
else:
groups = []
groupCount = 0
selectedGroup = 0
formfactors = []
if self.__ctx.mode.tabId == CustomizationTabs.PROJECTION_DECALS:
formfactorsFilter = self.__carouselFilters[FilterTypes.FORMFACTORS]
formfactors = [ formfactor in formfactorsFilter.formfactors for formfactor in ProjectionDecalFormTags.ALL ]
return {'purchasedEnabled': self.isFilterApplied(FilterTypes.INVENTORY),
'historicEnabled': self.isFilterApplied(FilterTypes.HISTORIC, FilterAliases.HISTORIC),
'nonHistoricEnabled': self.isFilterApplied(FilterTypes.HISTORIC, FilterAliases.NON_HISTORIC),
'fantasticalEnabled': self.isFilterApplied(FilterTypes.HISTORIC, FilterAliases.FANTASTICAL),
'appliedEnabled': self.isFilterApplied(FilterTypes.APPLIED),
'groups': groups,
'selectedGroup': selectedGroup,
'groupCount': groupCount,
'formfactorGroups': formfactors,
'hideOnAnotherVehEnabled': self.isFilterApplied(FilterTypes.USED_UP),
'showOnlyProgressionDecalsEnabled': self.isFilterApplied(FilterTypes.PROGRESSION),
'showOnlyEditableStylesEnabled': self.isFilterApplied(FilterTypes.EDITABLE_STYLES, FilterAliases.EDITABLE_STYLES),
'showOnlyNonEditableStylesEnabled': self.isFilterApplied(FilterTypes.EDITABLE_STYLES, FilterAliases.NON_EDITABLE_STYLES)}
def clearFilter(self):
for carouselFilter in self.__carouselFilters.itervalues():
carouselFilter.clear()
self.__setSelectedGroupIdx(None)
return
def invalidateItems(self):
self.__carouselCache.invalidateItemsData()
self.invalidateFilteredItems()
def invalidateFilteredItems(self):
self.__carouselCache.invalidateCarouselData()
self.selectItem()
def updateSelectedGroup(self, index):
self.__setSelectedGroupIdx(index)
def updateCarouselFilter(self, filterType, value, *alias):
if filterType not in self.__carouselFilters:
_logger.error('Invalid filterType: %s', filterType)
self.__carouselFilters[filterType].update(value, *alias)
def isFilterApplied(self, filterType, *alias):
if filterType not in self.__carouselFilters:
_logger.error('Invalid filterType: %s', filterType)
return False
return self.__carouselFilters[filterType].isApplied(*alias)
def __initFilters(self):
self.__carouselFilters[FilterTypes.HISTORIC] = DisjunctionCarouselFilter(criteria={FilterAliases.HISTORIC: REQ_CRITERIA.CUSTOMIZATION.HISTORICAL,
FilterAliases.NON_HISTORIC: REQ_CRITERIA.CUSTOMIZATION.NON_HISTORICAL,
FilterAliases.FANTASTICAL: REQ_CRITERIA.CUSTOMIZATION.FANTASTICAL})
self.__carouselFilters[FilterTypes.INVENTORY] = SimpleCarouselFilter(criteria=REQ_CRITERIA.CUSTOM(lambda item: self.__ctx.mode.getItemInventoryCount(item) > 0 and item.isUnlockedByToken()))
self.__carouselFilters[FilterTypes.APPLIED] = SimpleCarouselFilter(criteria=REQ_CRITERIA.CUSTOM(lambda item: item.intCD in self.__ctx.mode.getAppliedItems(isOriginal=False)))
self.__carouselFilters[FilterTypes.USED_UP] = SimpleCarouselFilter(criteria=REQ_CRITERIA.CUSTOM(lambda item: not isItemUsedUp(item)), requirements=lambda : self.__ctx.isItemsOnAnotherVeh, inverse=True)
self.__carouselFilters[FilterTypes.EDITABLE_STYLES] = DisjunctionCarouselFilter(criteria={FilterAliases.EDITABLE_STYLES: REQ_CRITERIA.CUSTOM(lambda item: item.canBeEditedForVehicle(g_currentVehicle.item.intCD)),
FilterAliases.NON_EDITABLE_STYLES: REQ_CRITERIA.CUSTOM(lambda item: not item.canBeEditedForVehicle(g_currentVehicle.item.intCD))}, requirements=lambda : self.__ctx.mode.tabId == CustomizationTabs.STYLES)
self.__carouselFilters[FilterTypes.PROGRESSION] = SimpleCarouselFilter(criteria=REQ_CRITERIA.CUSTOM(lambda item: item.isProgressive), requirements=lambda : self.__ctx.isProgressiveItemsExist)
self.__carouselFilters[FilterTypes.FORMFACTORS] = FormfactorsCarouselFilter(requirements=lambda : self.__ctx.mode.tabId == CustomizationTabs.PROJECTION_DECALS)
def __getSelectedGroupIdx(self):
season, modeId, tabId = self.__ctx.season, self.__ctx.modeId, self.__ctx.mode.tabId
selectedGroup = self.__selectedGroup.get(modeId, {}).get(season, {}).get(tabId)
return selectedGroup
def __setSelectedGroupIdx(self, index=None):
season, modeId, tabId = self.__ctx.season, self.__ctx.modeId, self.__ctx.mode.tabId
itemsData = self.__carouselCache.getItemsData()
if index is not None and index >= len(itemsData.groups):
index = None
self.__selectedGroup.setdefault(modeId, {}).setdefault(season, {})[tabId] = index
return
def __createFilterCriteria(self):
requirement = REQ_CRITERIA.EMPTY
groupIdx = self.__getSelectedGroupIdx()
if groupIdx is not None and groupIdx != -1:
itemsData = self.__carouselCache.getItemsData()
groupId = itemsData.groups.keys()[groupIdx]
groupName = itemsData.groups[groupId]
requirement |= REQ_CRITERIA.CUSTOM(lambda item: getGroupHelper(item).getGroupName() == groupName)
for carouselFilter in self.__carouselFilters.itervalues():
if carouselFilter.isEnabled():
requirement |= carouselFilter.criteria
slotId = self.__ctx.mode.selectedSlot
if slotId is not None and slotId.slotType == GUI_ITEM_TYPE.PROJECTION_DECAL:
slot = g_currentVehicle.item.getAnchorBySlotId(slotId.slotType, slotId.areaId, slotId.regionIdx)
requirement |= REQ_CRITERIA.CUSTOM(lambda item: item.formfactor in slot.formfactors)
if self.__dependentItems:
requirement |= REQ_CRITERIA.CUSTOM(lambda item: not (ItemTags.HIDE_IF_INCOMPATIBLE in item.tags and item.intCD not in self.__dependentItems))
if self.__ctx.mode.modeId == CustomizationModes.CUSTOM:
requirement |= REQ_CRITERIA.CUSTOM(lambda item: not item.isStyleOnly)
if self.__ctx.mode.modeId == CustomizationModes.EDITABLE_STYLE and self.__ctx.mode.tabId == CustomizationTabs.PROJECTION_DECALS:
baseOutfit = self.__ctx.mode.baseOutfits.get(self.__ctx.mode.season)
if baseOutfit:
baseComponent = baseOutfit.pack()
taggedDecals = [ decal.id for decal in baseComponent.projection_decals if decal.matchingTag ]
requirement |= REQ_CRITERIA.CUSTOM(lambda item: item.id not in taggedDecals)
return requirement
def __createSortCriteria(self):
return (lambda item: self.processDependentParams(item)[1]) if self.__dependentItems else None
def __updateCarouselData(self):
itemsData = self.__carouselCache.getItemsData()
self.__ctx.setIsItemsOnAnotherVeh(itemsData.hasUsedUpItems)
self.__ctx.setIsProgressiveItemsExist(itemsData.hasProgressiveItems)
self.__carouselData = self.__carouselCache.getCarouselData()
self.__ctx.setCarouselItems(self.__carouselData.items)
def __updateSwitchers(self):
left = self.getNextItem(reverse=True) is not None
right = self.getNextItem(reverse=False) is not None
self.__ctx.events.onUpdateSwitchers(left, right)
return
def __updateSelection(self, intCD):
idx = self.collection.index(intCD) if intCD in self.collection else -1
self.__selectedItem = SelectedItem(intCD, idx)
self.__updateSwitchers()
class FilterTypes(object):
HISTORIC = 1
INVENTORY = 2
APPLIED = 3
USED_UP = 4
EDITABLE_STYLES = 5
PROGRESSION = 6
FORMFACTORS = 7
class FilterAliases(object):
HISTORIC = 'historic'
NON_HISTORIC = 'nonHistoric'
FANTASTICAL = 'fantastical'
EDITABLE_STYLES = 'editableStyles'
NON_EDITABLE_STYLES = 'nonEditableStyles'
class SimpleCarouselFilter(object):
def __init__(self, criteria, requirements=None, inverse=False):
self.__applied = False
self.__criteria = criteria
self.__inverse = inverse
self.__requirements = requirements
@property
def isAvailable(self):
return self.__requirements is None or self.__requirements()
@property
def isInverse(self):
return self.__inverse
@property
def criteria(self):
return self.__criteria
def isApplied(self):
return self.isAvailable and self.__applied
def isEnabled(self):
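        # XOR with isInverse: an inverse filter (such as the USED_UP filter
        # created above) contributes its criteria exactly when it is NOT
        # currently applied.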
return self.isApplied() ^ self.isInverse
def update(self, value):
self.__applied = value
def clear(self):
self.__applied = False
class DisjunctionCarouselFilter(object):
def __init__(self, criteria, requirements=None):
self.__applied = set()
self.__criteria = criteria
self.__requirements = requirements
@property
def isAvailable(self):
return self.__requirements is None or self.__requirements()
@property
def criteria(self):
return reduce(RequestCriteria.__xor__, (self.__criteria[alias] for alias in self.__applied), REQ_CRITERIA.NONE)
def isApplied(self, alias=None):
if not self.isAvailable:
return False
else:
return alias in self.__applied if alias is not None else bool(self.__applied)
def isEnabled(self, alias=None):
return self.isApplied(alias)
def update(self, value, alias):
if value:
self.__applied.add(alias)
else:
self.__applied.discard(alias)
def clear(self):
self.__applied.clear()
class FormfactorsCarouselFilter(SimpleCarouselFilter):
def __init__(self, requirements=None):
self.__formfactors = set()
criteria = REQ_CRITERIA.CUSTOM(lambda item: item.formfactor in self.formfactors)
super(FormfactorsCarouselFilter, self).__init__(criteria, requirements)
@property
def formfactors(self):
return self.__formfactors
def update(self, value):
self.__formfactors = set((formfactor for formfactor, isApplied in value.iteritems() if isApplied))
super(FormfactorsCarouselFilter, self).update(bool(self.__formfactors))
def clear(self):
self.__formfactors = set()
super(FormfactorsCarouselFilter, self).clear()
|
b7e18fee32f17ec7e6aea3a04dfa4ede543d712d
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowUsers/cli/equal/golden_output_5_expected.py
|
8b38320d2c82720d0d4f1ea66a92679916010e91
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
golden_output_5_expected.py
|
expected_output = {
'line': {
'0 con 0': {
'host': 'idle',
'idle': '00:00:00',
'active': True
}
},
'connection_details': {
1: {
'intf': 'Vi2.1',
'u_name': 'lns@cisco.com',
'mode': 'PPPoVPDN',
'idle_time': '-',
'peer_address': '21.21.21.3'
}
}
}
|
eb08ff688af6a141bff8827eed3ae9b82197fe21
|
2bb96128322c0307742c6809f0d8c7eddb09c523
|
/pytest_tutorial/demo2_function_test.py
|
7e3574dfc235c71cc1e98a09c2da133b245fbbc3
|
[
"MIT"
] |
permissive
|
twtrubiks/python-notes
|
2390c66d25b39f5a73968829702d4fd02ccf1b28
|
042a651aaa04c07f76c52bc52bace665d0df99d7
|
refs/heads/master
| 2023-08-17T11:50:13.122573
| 2023-08-05T04:14:56
| 2023-08-05T04:14:56
| 90,001,387
| 122
| 49
|
MIT
| 2021-12-21T07:18:06
| 2017-05-02T06:55:21
|
Python
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
demo2_function_test.py
|
import pytest
def setup_function():
print("setup...")
def teardown_function():
print("teardown...")
def test_case1():
print("run test_case1")
assert 2 + 2 == 4
def test_case2():
print("run test_case2")
assert 1 + 12 == 13
def test_case3():
print("run test_case3")
assert 199 + 1 == 200
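# A minimal sketch of running this module (assuming pytest is installed);
# "-s" disables output capture so the print() calls above stay visible:
#
#     $ pytest -s demo2_function_test.py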
|
5c90a3e9ab9d4b7608906fc0a803da594cef4684
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-CoreServices/PyObjCTest/test_dictionaryservices.py
|
5c62bd961fe93fd0a59cf73bc3cf6ab0ed09f614
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 752
|
py
|
test_dictionaryservices.py
|
import CoreServices
from PyObjCTools.TestSupport import TestCase, os_release, skipUnless
class TestDictionaryServices(TestCase):
def testClasses(self):
self.assertIsCFType(CoreServices.DCSDictionaryRef)
@skipUnless(
os_release().rsplit(".", 1)[0] not in ("10.12", "10.13"), "buggy os release"
)
def testFunctions(self):
txt = "the hello world program"
r = CoreServices.DCSGetTermRangeInString(None, txt, 5)
self.assertIsInstance(r, CoreServices.CFRange)
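        # Offset 5 falls inside the word "hello"; the returned CFRange
        # (location=4, length=5) therefore spans exactly that word.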
self.assertEqual(r, (4, 5))
r = CoreServices.DCSCopyTextDefinition(None, txt, r)
self.assertIsInstance(r, (str, type(None)))
v = CoreServices.DCSDictionaryGetTypeID()
self.assertIsInstance(v, int)
|
ef216e72b585f8e9bfd8b092371234fc56241a24
|
66a9c25cf0c53e2c3029b423018b856103d709d4
|
/sleekxmpp/stanza/htmlim.py
|
c43178f2f0f800721b20ebd18c6942ac08d2e60f
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
fritzy/SleekXMPP
|
1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf
|
cc1d470397de768ffcc41d2ed5ac3118d19f09f5
|
refs/heads/develop
| 2020-05-22T04:14:58.568822
| 2020-02-18T22:54:57
| 2020-02-18T22:54:57
| 463,405
| 658
| 254
|
NOASSERTION
| 2023-06-27T20:05:54
| 2010-01-08T05:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
htmlim.py
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import Message
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins.xep_0071 import XHTML_IM as HTMLIM
register_stanza_plugin(Message, HTMLIM)
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility.
HTMLIM.setBody = HTMLIM.set_body
HTMLIM.getBody = HTMLIM.get_body
HTMLIM.delBody = HTMLIM.del_body
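# A minimal usage sketch (assuming, as is conventional for XEP-0071 in
# SleekXMPP, that the plugin is exposed on Message under the 'html' key):
#
#     msg = Message()
#     msg['html'].set_body('<p>Hello, <b>world</b>!</p>')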
|
ffa6bec7348c3c61cacd502e058fdaf087b89753
|
b7be705ab774860f8a5a49d756f50994a37e307d
|
/equipment-activity-monitor/python/iot_equipment_activity_monitor/hardware/dfrobot.py
|
520cc0327ac3b60e13bbeed76b3430da0f88d334
|
[
"MIT"
] |
permissive
|
intel-iot-devkit/how-to-code-samples
|
b87916f7e919d859a5f4084f40e8bd62007a8469
|
821de0727b999391131e6947868371b1424c1d39
|
refs/heads/master
| 2023-01-05T04:30:54.650902
| 2023-01-03T22:56:31
| 2023-01-03T22:56:31
| 45,066,968
| 204
| 161
| null | 2017-11-13T20:57:42
| 2015-10-27T20:08:02
|
C++
|
UTF-8
|
Python
| false
| false
| 4,221
|
py
|
dfrobot.py
|
# Copyright (c) 2015 - 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from upm.pyupm_lcdks import LCDKS
from upm.pyupm_button import Button
from upm.pyupm_mic import Microphone, thresholdContext, uint16Array
from mraa import addSubplatform, GENERIC_FIRMATA
from ..config import HARDWARE_CONFIG, KNOWN_PLATFORMS
from .board import Board, PinMappings
from .events import VIBRATION_SAMPLE, NOISE_SAMPLE
class DfrobotBoard(Board):
"""
    Board class for DFRobot hardware.
"""
def __init__(self):
super(DfrobotBoard, self).__init__()
# pin mappings
self.pin_mappings = PinMappings(
sound_pin=2,
vibration_pin=15,
screen_register_select_pin=8,
screen_enable_pin=9,
screen_data_0_pin=4,
screen_data_1_pin=5,
screen_data_2_pin=6,
screen_data_3_pin=7,
screen_analog_input_pin=0
)
if HARDWARE_CONFIG.platform == KNOWN_PLATFORMS.firmata:
addSubplatform(GENERIC_FIRMATA, "/dev/ttyACM0")
self.pin_mappings += 512
self.screen = LCDKS(
self.pin_mappings.screen_register_select_pin,
self.pin_mappings.screen_enable_pin,
self.pin_mappings.screen_data_0_pin,
self.pin_mappings.screen_data_1_pin,
self.pin_mappings.screen_data_2_pin,
self.pin_mappings.screen_data_3_pin,
self.pin_mappings.screen_analog_input_pin
)
self.sound = Microphone(self.pin_mappings.sound_pin)
self.sound_ctx = thresholdContext()
self.sound_ctx.averageReading = 0
self.sound_ctx.runningAverage = 0
self.sound_ctx.averagedOver = 2
self.vibration = Button(self.pin_mappings.vibration_pin)
self.sample_time = 2
self.sample_number = 128
def update_hardware_state(self):
"""
Update hardware state.
"""
vibration_sample = self.measure_vibration()
self.trigger_hardware_event(VIBRATION_SAMPLE, vibration_sample)
noise_sample = self.measure_sound()
self.trigger_hardware_event(NOISE_SAMPLE, noise_sample)
# hardware functions
def measure_vibration(self):
"""
Measure average vibration.
"""
vibration = self.vibration.value()
return 200 if vibration else 0
def measure_sound(self):
"""
Measure average volume.
"""
        # Use the configured buffer size instead of repeating the literal 128.
        samples = uint16Array(self.sample_number)
        length = self.sound.getSampledWindow(self.sample_time, self.sample_number, samples)
        if not length:
            return 0
        # Derive a noise figure from the sampled window and scale it down.
        noise = self.sound.findThreshold(self.sound_ctx, 30, samples, length)
        average = noise / 100
        return average
def write_message(self, message, line=0):
"""
Write message to LCD screen.
"""
message = message.ljust(16)
self.screen.setCursor(line, 0)
self.screen.write(message)
def change_background(self, color):
"""
Change LCD screen background color.
No effect on the dfrobot.
"""
pass
|
271b3a185195948f64e531c3eb069fc2012138e1
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/prompt-toolkit/py3/prompt_toolkit/shortcuts/prompt.py
|
ed56adc94e78afb42bf869fd59093bdc34714fd1
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 59,364
|
py
|
prompt.py
|
"""
Line editing functionality.
---------------------------
This provides a UI for a line input, similar to GNU Readline, libedit and
linenoise.
Either call the `prompt` function for every line input. Or create an instance
of the :class:`.PromptSession` class and call the `prompt` method from that
class. In the second case, we'll have a 'session' that keeps all the state like
the history in between several calls.
There is a lot of overlap between the arguments taken by the `prompt` function
and the `PromptSession` (like `completer`, `style`, etcetera). There we have
the freedom to decide which settings we want for the whole 'session', and which
we want for an individual `prompt`.
Example::
# Simple `prompt` call.
result = prompt('Say something: ')
# Using a 'session'.
s = PromptSession()
result = s.prompt('Say something: ')
"""
from __future__ import annotations
from asyncio import get_running_loop
from contextlib import contextmanager
from enum import Enum
from functools import partial
from typing import TYPE_CHECKING, Callable, Generic, Iterator, TypeVar, Union, cast
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.auto_suggest import AutoSuggest, DynamicAutoSuggest
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.clipboard import Clipboard, DynamicClipboard, InMemoryClipboard
from prompt_toolkit.completion import Completer, DynamicCompleter, ThreadedCompleter
from prompt_toolkit.cursor_shapes import (
AnyCursorShapeConfig,
CursorShapeConfig,
DynamicCursorShapeConfig,
)
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER, EditingMode
from prompt_toolkit.filters import (
Condition,
FilterOrBool,
has_arg,
has_focus,
is_done,
is_true,
renderer_height_is_known,
to_filter,
)
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
fragment_list_to_text,
merge_formatted_text,
to_formatted_text,
)
from prompt_toolkit.history import History, InMemoryHistory
from prompt_toolkit.input.base import Input
from prompt_toolkit.key_binding.bindings.auto_suggest import load_auto_suggest_bindings
from prompt_toolkit.key_binding.bindings.completion import (
display_completions_like_readline,
)
from prompt_toolkit.key_binding.bindings.open_in_editor import (
load_open_in_editor_bindings,
)
from prompt_toolkit.key_binding.key_bindings import (
ConditionalKeyBindings,
DynamicKeyBindings,
KeyBindings,
KeyBindingsBase,
merge_key_bindings,
)
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout import Float, FloatContainer, HSplit, Window
from prompt_toolkit.layout.containers import ConditionalContainer, WindowAlign
from prompt_toolkit.layout.controls import (
BufferControl,
FormattedTextControl,
SearchBufferControl,
)
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.menus import CompletionsMenu, MultiColumnCompletionsMenu
from prompt_toolkit.layout.processors import (
AfterInput,
AppendAutoSuggestion,
ConditionalProcessor,
DisplayMultipleCursors,
DynamicProcessor,
HighlightIncrementalSearchProcessor,
HighlightSelectionProcessor,
PasswordProcessor,
Processor,
ReverseSearchProcessor,
merge_processors,
)
from prompt_toolkit.layout.utils import explode_text_fragments
from prompt_toolkit.lexers import DynamicLexer, Lexer
from prompt_toolkit.output import ColorDepth, DummyOutput, Output
from prompt_toolkit.styles import (
BaseStyle,
ConditionalStyleTransformation,
DynamicStyle,
DynamicStyleTransformation,
StyleTransformation,
SwapLightAndDarkStyleTransformation,
merge_style_transformations,
)
from prompt_toolkit.utils import (
get_cwidth,
is_dumb_terminal,
suspend_to_background_supported,
to_str,
)
from prompt_toolkit.validation import DynamicValidator, Validator
from prompt_toolkit.widgets.toolbars import (
SearchToolbar,
SystemToolbar,
ValidationToolbar,
)
if TYPE_CHECKING:
from prompt_toolkit.formatted_text.base import MagicFormattedText
__all__ = [
"PromptSession",
"prompt",
"confirm",
"create_confirm_session", # Used by '_display_completions_like_readline'.
"CompleteStyle",
]
_StyleAndTextTuplesCallable = Callable[[], StyleAndTextTuples]
E = KeyPressEvent
def _split_multiline_prompt(
get_prompt_text: _StyleAndTextTuplesCallable,
) -> tuple[
Callable[[], bool], _StyleAndTextTuplesCallable, _StyleAndTextTuplesCallable
]:
"""
Take a `get_prompt_text` function and return three new functions instead.
One that tells whether this prompt consists of multiple lines; one that
returns the fragments to be shown on the lines above the input; and another
one with the fragments to be shown at the first line of the input.
"""
def has_before_fragments() -> bool:
for fragment, char, *_ in get_prompt_text():
if "\n" in char:
return True
return False
def before() -> StyleAndTextTuples:
result: StyleAndTextTuples = []
found_nl = False
for fragment, char, *_ in reversed(explode_text_fragments(get_prompt_text())):
if found_nl:
result.insert(0, (fragment, char))
elif char == "\n":
found_nl = True
return result
def first_input_line() -> StyleAndTextTuples:
result: StyleAndTextTuples = []
for fragment, char, *_ in reversed(explode_text_fragments(get_prompt_text())):
if char == "\n":
break
else:
result.insert(0, (fragment, char))
return result
return has_before_fragments, before, first_input_line
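# Example: for a prompt whose fragments render as "line1\nline2> ",
# has_before_fragments() is True, before() yields the fragments up to (but
# not including) the last newline ("line1"), and first_input_line() yields
# those after it ("line2> ").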
class _RPrompt(Window):
"""
The prompt that is displayed on the right side of the Window.
"""
def __init__(self, text: AnyFormattedText) -> None:
super().__init__(
FormattedTextControl(text=text),
align=WindowAlign.RIGHT,
style="class:rprompt",
)
class CompleteStyle(str, Enum):
"""
How to display autocompletions for the prompt.
"""
value: str
COLUMN = "COLUMN"
MULTI_COLUMN = "MULTI_COLUMN"
READLINE_LIKE = "READLINE_LIKE"
# Formatted text for the continuation prompt. It's the same as other
# formatted text, except that if it's a callable, it takes three arguments.
PromptContinuationText = Union[
str,
"MagicFormattedText",
StyleAndTextTuples,
# (prompt_width, line_number, wrap_count) -> AnyFormattedText.
Callable[[int, int, int], AnyFormattedText],
]
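# A sketch of a callable continuation prompt (signature as described in the
# comment above; the dotted-line style is only an illustration):
#
#     def continuation(prompt_width: int, line_number: int, wrap_count: int):
#         return "." * (prompt_width - 1) + " "
#
#     PromptSession(message=">>> ", multiline=True,
#                   prompt_continuation=continuation)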
_T = TypeVar("_T")
class PromptSession(Generic[_T]):
"""
PromptSession for a prompt application, which can be used as a GNU Readline
replacement.
This is a wrapper around a lot of ``prompt_toolkit`` functionality and can
be a replacement for `raw_input`.
All parameters that expect "formatted text" can take either just plain text
(a unicode object), a list of ``(style_str, text)`` tuples or an HTML object.
Example usage::
s = PromptSession(message='>')
text = s.prompt()
:param message: Plain text or formatted text to be shown before the prompt.
This can also be a callable that returns formatted text.
:param multiline: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True, prefer a layout that is more adapted for multiline input.
Text after newlines is automatically indented, and search/arg input is
shown below the input, instead of replacing the prompt.
:param wrap_lines: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True (the default), automatically wrap long lines instead of
scrolling horizontally.
:param is_password: Show asterisks instead of the actual typed characters.
:param editing_mode: ``EditingMode.VI`` or ``EditingMode.EMACS``.
    :param vi_mode: `bool`, if True, identical to ``editing_mode=EditingMode.VI``.
:param complete_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable autocompletion while
typing.
:param validate_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable input validation while
typing.
:param enable_history_search: `bool` or
        :class:`~prompt_toolkit.filters.Filter`. Enable up-arrow partial
        string matching.
:param search_ignore_case:
:class:`~prompt_toolkit.filters.Filter`. Search case insensitive.
:param lexer: :class:`~prompt_toolkit.lexers.Lexer` to be used for the
syntax highlighting.
:param validator: :class:`~prompt_toolkit.validation.Validator` instance
for input validation.
:param completer: :class:`~prompt_toolkit.completion.Completer` instance
for input completion.
:param complete_in_thread: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Run the completer code in a
background thread in order to avoid blocking the user interface.
For ``CompleteStyle.READLINE_LIKE``, this setting has no effect. There
we always run the completions in the main thread.
:param reserve_space_for_menu: Space to be reserved for displaying the menu.
(0 means that no space needs to be reserved.)
:param auto_suggest: :class:`~prompt_toolkit.auto_suggest.AutoSuggest`
instance for input suggestions.
:param style: :class:`.Style` instance for the color scheme.
:param include_default_pygments_style: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Tell whether the default
styling for Pygments lexers has to be included. By default, this is
true, but it is recommended to be disabled if another Pygments style is
passed as the `style` argument, otherwise, two Pygments styles will be
merged.
:param style_transformation:
:class:`~prompt_toolkit.style.StyleTransformation` instance.
:param swap_light_and_dark_colors: `bool` or
:class:`~prompt_toolkit.filters.Filter`. When enabled, apply
:class:`~prompt_toolkit.style.SwapLightAndDarkStyleTransformation`.
This is useful for switching between dark and light terminal
backgrounds.
:param enable_system_prompt: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing Meta+'!' will show
a system prompt.
:param enable_suspend: `bool` or :class:`~prompt_toolkit.filters.Filter`.
Enable Control-Z style suspension.
:param enable_open_in_editor: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing 'v' in Vi mode or
C-X C-E in emacs mode will open an external editor.
:param history: :class:`~prompt_toolkit.history.History` instance.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` instance.
(e.g. :class:`~prompt_toolkit.clipboard.InMemoryClipboard`)
:param rprompt: Text or formatted text to be displayed on the right side.
This can also be a callable that returns (formatted) text.
:param bottom_toolbar: Formatted text or callable which is supposed to
return formatted text.
:param prompt_continuation: Text that needs to be displayed for a multiline
prompt continuation. This can either be formatted text or a callable
that takes a `prompt_width`, `line_number` and `wrap_count` as input
and returns formatted text. When this is `None` (the default), then
`prompt_width` spaces will be used.
:param complete_style: ``CompleteStyle.COLUMN``,
``CompleteStyle.MULTI_COLUMN`` or ``CompleteStyle.READLINE_LIKE``.
:param mouse_support: `bool` or :class:`~prompt_toolkit.filters.Filter`
to enable mouse support.
:param placeholder: Text to be displayed when no input has been given
yet. Unlike the `default` parameter, this won't be returned as part of
the output ever. This can be formatted text or a callable that returns
formatted text.
:param refresh_interval: (number; in seconds) When given, refresh the UI
every so many seconds.
:param input: `Input` object. (Note that the preferred way to change the
input/output is by creating an `AppSession`.)
:param output: `Output` object.
"""
_fields = (
"message",
"lexer",
"completer",
"complete_in_thread",
"is_password",
"editing_mode",
"key_bindings",
"is_password",
"bottom_toolbar",
"style",
"style_transformation",
"swap_light_and_dark_colors",
"color_depth",
"cursor",
"include_default_pygments_style",
"rprompt",
"multiline",
"prompt_continuation",
"wrap_lines",
"enable_history_search",
"search_ignore_case",
"complete_while_typing",
"validate_while_typing",
"complete_style",
"mouse_support",
"auto_suggest",
"clipboard",
"validator",
"refresh_interval",
"input_processors",
"placeholder",
"enable_system_prompt",
"enable_suspend",
"enable_open_in_editor",
"reserve_space_for_menu",
"tempfile_suffix",
"tempfile",
)
def __init__(
self,
message: AnyFormattedText = "",
*,
multiline: FilterOrBool = False,
wrap_lines: FilterOrBool = True,
is_password: FilterOrBool = False,
vi_mode: bool = False,
editing_mode: EditingMode = EditingMode.EMACS,
complete_while_typing: FilterOrBool = True,
validate_while_typing: FilterOrBool = True,
enable_history_search: FilterOrBool = False,
search_ignore_case: FilterOrBool = False,
lexer: Lexer | None = None,
enable_system_prompt: FilterOrBool = False,
enable_suspend: FilterOrBool = False,
enable_open_in_editor: FilterOrBool = False,
validator: Validator | None = None,
completer: Completer | None = None,
complete_in_thread: bool = False,
reserve_space_for_menu: int = 8,
complete_style: CompleteStyle = CompleteStyle.COLUMN,
auto_suggest: AutoSuggest | None = None,
style: BaseStyle | None = None,
style_transformation: StyleTransformation | None = None,
swap_light_and_dark_colors: FilterOrBool = False,
color_depth: ColorDepth | None = None,
cursor: AnyCursorShapeConfig = None,
include_default_pygments_style: FilterOrBool = True,
history: History | None = None,
clipboard: Clipboard | None = None,
prompt_continuation: PromptContinuationText | None = None,
rprompt: AnyFormattedText = None,
bottom_toolbar: AnyFormattedText = None,
mouse_support: FilterOrBool = False,
input_processors: list[Processor] | None = None,
placeholder: AnyFormattedText | None = None,
key_bindings: KeyBindingsBase | None = None,
erase_when_done: bool = False,
tempfile_suffix: str | Callable[[], str] | None = ".txt",
tempfile: str | Callable[[], str] | None = None,
refresh_interval: float = 0,
input: Input | None = None,
output: Output | None = None,
) -> None:
history = history or InMemoryHistory()
clipboard = clipboard or InMemoryClipboard()
# Ensure backwards-compatibility, when `vi_mode` is passed.
if vi_mode:
editing_mode = EditingMode.VI
# Store all settings in this class.
self._input = input
self._output = output
# Store attributes.
# (All except 'editing_mode'.)
self.message = message
self.lexer = lexer
self.completer = completer
self.complete_in_thread = complete_in_thread
self.is_password = is_password
self.key_bindings = key_bindings
self.bottom_toolbar = bottom_toolbar
self.style = style
self.style_transformation = style_transformation
self.swap_light_and_dark_colors = swap_light_and_dark_colors
self.color_depth = color_depth
self.cursor = cursor
self.include_default_pygments_style = include_default_pygments_style
self.rprompt = rprompt
self.multiline = multiline
self.prompt_continuation = prompt_continuation
self.wrap_lines = wrap_lines
self.enable_history_search = enable_history_search
self.search_ignore_case = search_ignore_case
self.complete_while_typing = complete_while_typing
self.validate_while_typing = validate_while_typing
self.complete_style = complete_style
self.mouse_support = mouse_support
self.auto_suggest = auto_suggest
self.clipboard = clipboard
self.validator = validator
self.refresh_interval = refresh_interval
self.input_processors = input_processors
self.placeholder = placeholder
self.enable_system_prompt = enable_system_prompt
self.enable_suspend = enable_suspend
self.enable_open_in_editor = enable_open_in_editor
self.reserve_space_for_menu = reserve_space_for_menu
self.tempfile_suffix = tempfile_suffix
self.tempfile = tempfile
# Create buffers, layout and Application.
self.history = history
self.default_buffer = self._create_default_buffer()
self.search_buffer = self._create_search_buffer()
self.layout = self._create_layout()
self.app = self._create_application(editing_mode, erase_when_done)
def _dyncond(self, attr_name: str) -> Condition:
"""
Dynamically take this setting from this 'PromptSession' class.
`attr_name` represents an attribute name of this class. Its value
can either be a boolean or a `Filter`.
        This returns a `Condition` that can be used wherever a `Filter` is
        expected, and that re-reads the attribute on every evaluation.
"""
@Condition
def dynamic() -> bool:
value = cast(FilterOrBool, getattr(self, attr_name))
return to_filter(value)()
return dynamic
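    # Illustrative sketch (not part of the library): because the filter reads
    # the attribute on every evaluation, flipping a session attribute at
    # runtime is picked up on the next render, e.g.:
    #
    #     session = PromptSession(multiline=False)
    #     session.multiline = True   # honoured without rebuilding the layout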
def _create_default_buffer(self) -> Buffer:
"""
Create and return the default input buffer.
"""
dyncond = self._dyncond
# Create buffers list.
def accept(buff: Buffer) -> bool:
"""Accept the content of the default buffer. This is called when
the validation succeeds."""
cast(Application[str], get_app()).exit(result=buff.document.text)
return True # Keep text, we call 'reset' later on.
return Buffer(
name=DEFAULT_BUFFER,
# Make sure that complete_while_typing is disabled when
# enable_history_search is enabled. (First convert to Filter,
# to avoid doing bitwise operations on bool objects.)
complete_while_typing=Condition(
lambda: is_true(self.complete_while_typing)
and not is_true(self.enable_history_search)
and not self.complete_style == CompleteStyle.READLINE_LIKE
),
validate_while_typing=dyncond("validate_while_typing"),
enable_history_search=dyncond("enable_history_search"),
validator=DynamicValidator(lambda: self.validator),
completer=DynamicCompleter(
lambda: ThreadedCompleter(self.completer)
if self.complete_in_thread and self.completer
else self.completer
),
history=self.history,
auto_suggest=DynamicAutoSuggest(lambda: self.auto_suggest),
accept_handler=accept,
tempfile_suffix=lambda: to_str(self.tempfile_suffix or ""),
tempfile=lambda: to_str(self.tempfile or ""),
)
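    # Note (illustrative, not part of the library): the `complete_while_typing`
    # condition above means that a session such as
    #
    #     PromptSession(completer=my_completer, enable_history_search=True)
    #
    # still completes on Tab, but not automatically while typing, because
    # automatic completion and up-arrow history search would conflict.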
def _create_search_buffer(self) -> Buffer:
return Buffer(name=SEARCH_BUFFER)
def _create_layout(self) -> Layout:
"""
Create `Layout` for this prompt.
"""
dyncond = self._dyncond
# Create functions that will dynamically split the prompt. (If we have
# a multiline prompt.)
(
has_before_fragments,
get_prompt_text_1,
get_prompt_text_2,
) = _split_multiline_prompt(self._get_prompt)
default_buffer = self.default_buffer
search_buffer = self.search_buffer
# Create processors list.
@Condition
def display_placeholder() -> bool:
return self.placeholder is not None and self.default_buffer.text == ""
all_input_processors = [
HighlightIncrementalSearchProcessor(),
HighlightSelectionProcessor(),
ConditionalProcessor(
AppendAutoSuggestion(), has_focus(default_buffer) & ~is_done
),
ConditionalProcessor(PasswordProcessor(), dyncond("is_password")),
DisplayMultipleCursors(),
# Users can insert processors here.
DynamicProcessor(lambda: merge_processors(self.input_processors or [])),
ConditionalProcessor(
AfterInput(lambda: self.placeholder),
filter=display_placeholder,
),
]
# Create bottom toolbars.
bottom_toolbar = ConditionalContainer(
Window(
FormattedTextControl(
lambda: self.bottom_toolbar, style="class:bottom-toolbar.text"
),
style="class:bottom-toolbar",
dont_extend_height=True,
height=Dimension(min=1),
),
filter=Condition(lambda: self.bottom_toolbar is not None)
& ~is_done
& renderer_height_is_known,
)
search_toolbar = SearchToolbar(
search_buffer, ignore_case=dyncond("search_ignore_case")
)
search_buffer_control = SearchBufferControl(
buffer=search_buffer,
input_processors=[ReverseSearchProcessor()],
ignore_case=dyncond("search_ignore_case"),
)
system_toolbar = SystemToolbar(
enable_global_bindings=dyncond("enable_system_prompt")
)
def get_search_buffer_control() -> SearchBufferControl:
"Return the UIControl to be focused when searching start."
if is_true(self.multiline):
return search_toolbar.control
else:
return search_buffer_control
default_buffer_control = BufferControl(
buffer=default_buffer,
search_buffer_control=get_search_buffer_control,
input_processors=all_input_processors,
include_default_input_processors=False,
lexer=DynamicLexer(lambda: self.lexer),
preview_search=True,
)
default_buffer_window = Window(
default_buffer_control,
height=self._get_default_buffer_control_height,
get_line_prefix=partial(
self._get_line_prefix, get_prompt_text_2=get_prompt_text_2
),
wrap_lines=dyncond("wrap_lines"),
)
@Condition
def multi_column_complete_style() -> bool:
return self.complete_style == CompleteStyle.MULTI_COLUMN
# Build the layout.
layout = HSplit(
[
# The main input, with completion menus floating on top of it.
FloatContainer(
HSplit(
[
ConditionalContainer(
Window(
FormattedTextControl(get_prompt_text_1),
dont_extend_height=True,
),
Condition(has_before_fragments),
),
ConditionalContainer(
default_buffer_window,
Condition(
lambda: get_app().layout.current_control
!= search_buffer_control
),
),
ConditionalContainer(
Window(search_buffer_control),
Condition(
lambda: get_app().layout.current_control
== search_buffer_control
),
),
]
),
[
# Completion menus.
# NOTE: Especially the multi-column menu needs to be
# transparent, because the shape is not always
# rectangular due to the meta-text below the menu.
Float(
xcursor=True,
ycursor=True,
transparent=True,
content=CompletionsMenu(
max_height=16,
scroll_offset=1,
extra_filter=has_focus(default_buffer)
& ~multi_column_complete_style,
),
),
Float(
xcursor=True,
ycursor=True,
transparent=True,
content=MultiColumnCompletionsMenu(
show_meta=True,
extra_filter=has_focus(default_buffer)
& multi_column_complete_style,
),
),
# The right prompt.
Float(
right=0,
top=0,
hide_when_covering_content=True,
content=_RPrompt(lambda: self.rprompt),
),
],
),
ConditionalContainer(ValidationToolbar(), filter=~is_done),
ConditionalContainer(
system_toolbar, dyncond("enable_system_prompt") & ~is_done
),
# In multiline mode, we use two toolbars for 'arg' and 'search'.
ConditionalContainer(
Window(FormattedTextControl(self._get_arg_text), height=1),
dyncond("multiline") & has_arg,
),
ConditionalContainer(search_toolbar, dyncond("multiline") & ~is_done),
bottom_toolbar,
]
)
return Layout(layout, default_buffer_window)
def _create_application(
self, editing_mode: EditingMode, erase_when_done: bool
) -> Application[_T]:
"""
Create the `Application` object.
"""
dyncond = self._dyncond
# Default key bindings.
auto_suggest_bindings = load_auto_suggest_bindings()
open_in_editor_bindings = load_open_in_editor_bindings()
prompt_bindings = self._create_prompt_bindings()
# Create application
application: Application[_T] = Application(
layout=self.layout,
style=DynamicStyle(lambda: self.style),
style_transformation=merge_style_transformations(
[
DynamicStyleTransformation(lambda: self.style_transformation),
ConditionalStyleTransformation(
SwapLightAndDarkStyleTransformation(),
dyncond("swap_light_and_dark_colors"),
),
]
),
include_default_pygments_style=dyncond("include_default_pygments_style"),
clipboard=DynamicClipboard(lambda: self.clipboard),
key_bindings=merge_key_bindings(
[
merge_key_bindings(
[
auto_suggest_bindings,
ConditionalKeyBindings(
open_in_editor_bindings,
dyncond("enable_open_in_editor")
& has_focus(DEFAULT_BUFFER),
),
prompt_bindings,
]
),
DynamicKeyBindings(lambda: self.key_bindings),
]
),
mouse_support=dyncond("mouse_support"),
editing_mode=editing_mode,
erase_when_done=erase_when_done,
reverse_vi_search_direction=True,
color_depth=lambda: self.color_depth,
cursor=DynamicCursorShapeConfig(lambda: self.cursor),
refresh_interval=self.refresh_interval,
input=self._input,
output=self._output,
)
# During render time, make sure that we focus the right search control
# (if we are searching). - This could be useful if people make the
# 'multiline' property dynamic.
"""
def on_render(app):
multiline = is_true(self.multiline)
current_control = app.layout.current_control
if multiline:
if current_control == search_buffer_control:
app.layout.current_control = search_toolbar.control
app.invalidate()
else:
if current_control == search_toolbar.control:
app.layout.current_control = search_buffer_control
app.invalidate()
app.on_render += on_render
"""
return application
def _create_prompt_bindings(self) -> KeyBindings:
"""
Create the KeyBindings for a prompt application.
"""
kb = KeyBindings()
handle = kb.add
default_focused = has_focus(DEFAULT_BUFFER)
@Condition
def do_accept() -> bool:
return not is_true(self.multiline) and self.app.layout.has_focus(
DEFAULT_BUFFER
)
@handle("enter", filter=do_accept & default_focused)
def _accept_input(event: E) -> None:
"Accept input when enter has been pressed."
self.default_buffer.validate_and_handle()
@Condition
def readline_complete_style() -> bool:
return self.complete_style == CompleteStyle.READLINE_LIKE
@handle("tab", filter=readline_complete_style & default_focused)
def _complete_like_readline(event: E) -> None:
"Display completions (like Readline)."
display_completions_like_readline(event)
@handle("c-c", filter=default_focused)
@handle("<sigint>")
def _keyboard_interrupt(event: E) -> None:
"Abort when Control-C has been pressed."
event.app.exit(exception=KeyboardInterrupt, style="class:aborting")
@Condition
def ctrl_d_condition() -> bool:
"""Ctrl-D binding is only active when the default buffer is selected
and empty."""
app = get_app()
return (
app.current_buffer.name == DEFAULT_BUFFER
and not app.current_buffer.text
)
@handle("c-d", filter=ctrl_d_condition & default_focused)
def _eof(event: E) -> None:
"Exit when Control-D has been pressed."
event.app.exit(exception=EOFError, style="class:exiting")
suspend_supported = Condition(suspend_to_background_supported)
@Condition
def enable_suspend() -> bool:
return to_filter(self.enable_suspend)()
@handle("c-z", filter=suspend_supported & enable_suspend)
def _suspend(event: E) -> None:
"""
Suspend process to background.
"""
event.app.suspend_to_background()
return kb
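    # Illustrative sketch: bindings passed via `key_bindings=` are merged with
    # the defaults created here (see `_create_application`), e.g.:
    #
    #     kb = KeyBindings()
    #
    #     @kb.add("c-t")
    #     def _(event):
    #         event.app.current_buffer.insert_text("!")
    #
    #     PromptSession("> ", key_bindings=kb)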
def prompt(
self,
# When any of these arguments are passed, this value is overwritten
# in this PromptSession.
message: AnyFormattedText | None = None,
# `message` should go first, because people call it as
# positional argument.
*,
editing_mode: EditingMode | None = None,
refresh_interval: float | None = None,
vi_mode: bool | None = None,
lexer: Lexer | None = None,
completer: Completer | None = None,
complete_in_thread: bool | None = None,
is_password: bool | None = None,
key_bindings: KeyBindingsBase | None = None,
bottom_toolbar: AnyFormattedText | None = None,
style: BaseStyle | None = None,
color_depth: ColorDepth | None = None,
cursor: AnyCursorShapeConfig | None = None,
include_default_pygments_style: FilterOrBool | None = None,
style_transformation: StyleTransformation | None = None,
swap_light_and_dark_colors: FilterOrBool | None = None,
rprompt: AnyFormattedText | None = None,
multiline: FilterOrBool | None = None,
prompt_continuation: PromptContinuationText | None = None,
wrap_lines: FilterOrBool | None = None,
enable_history_search: FilterOrBool | None = None,
search_ignore_case: FilterOrBool | None = None,
complete_while_typing: FilterOrBool | None = None,
validate_while_typing: FilterOrBool | None = None,
complete_style: CompleteStyle | None = None,
auto_suggest: AutoSuggest | None = None,
validator: Validator | None = None,
clipboard: Clipboard | None = None,
mouse_support: FilterOrBool | None = None,
input_processors: list[Processor] | None = None,
placeholder: AnyFormattedText | None = None,
reserve_space_for_menu: int | None = None,
enable_system_prompt: FilterOrBool | None = None,
enable_suspend: FilterOrBool | None = None,
enable_open_in_editor: FilterOrBool | None = None,
tempfile_suffix: str | Callable[[], str] | None = None,
tempfile: str | Callable[[], str] | None = None,
# Following arguments are specific to the current `prompt()` call.
default: str | Document = "",
accept_default: bool = False,
pre_run: Callable[[], None] | None = None,
set_exception_handler: bool = True,
handle_sigint: bool = True,
in_thread: bool = False,
) -> _T:
"""
Display the prompt.
The first set of arguments is a subset of the :class:`~.PromptSession`
class itself. For these, passing in ``None`` will keep the current
values that are active in the session. Passing in a value will set the
attribute for the session, which means that it applies to the current,
but also to the next prompts.
Note that in order to erase a ``Completer``, ``Validator`` or
``AutoSuggest``, you can't use ``None``. Instead pass in a
``DummyCompleter``, ``DummyValidator`` or ``DummyAutoSuggest`` instance
respectively. For a ``Lexer`` you can pass in an empty ``SimpleLexer``.
Additional arguments, specific for this prompt:
:param default: The default input text to be shown. (This can be edited
by the user).
:param accept_default: When `True`, automatically accept the default
value without allowing the user to edit the input.
:param pre_run: Callable, called at the start of `Application.run`.
:param in_thread: Run the prompt in a background thread; block the
current thread. This avoids interference with an event loop in the
current thread. Like `Application.run(in_thread=True)`.
This method will raise ``KeyboardInterrupt`` when control-c has been
pressed (for abort) and ``EOFError`` when control-d has been pressed
(for exit).
"""
# NOTE: We used to create a backup of the PromptSession attributes and
# restore them after exiting the prompt. This code has been
# removed, because it was confusing and didn't really serve a use
# case. (People were changing `Application.editing_mode`
# dynamically and surprised that it was reset after every call.)
        # NOTE 2: Yes, there is a lot of repetition below...
        #          However, it is very convenient for a user to accept all
        #          these parameters in this `prompt` method as well. We could
        #          use `locals()` and `setattr` to avoid the repetition, but
        #          then we lose the advantage of mypy and pyflakes being able
        #          to verify the code.
if message is not None:
self.message = message
if editing_mode is not None:
self.editing_mode = editing_mode
if refresh_interval is not None:
self.refresh_interval = refresh_interval
if vi_mode:
self.editing_mode = EditingMode.VI
if lexer is not None:
self.lexer = lexer
if completer is not None:
self.completer = completer
if complete_in_thread is not None:
self.complete_in_thread = complete_in_thread
if is_password is not None:
self.is_password = is_password
if key_bindings is not None:
self.key_bindings = key_bindings
if bottom_toolbar is not None:
self.bottom_toolbar = bottom_toolbar
if style is not None:
self.style = style
if color_depth is not None:
self.color_depth = color_depth
if cursor is not None:
self.cursor = cursor
if include_default_pygments_style is not None:
self.include_default_pygments_style = include_default_pygments_style
if style_transformation is not None:
self.style_transformation = style_transformation
if swap_light_and_dark_colors is not None:
self.swap_light_and_dark_colors = swap_light_and_dark_colors
if rprompt is not None:
self.rprompt = rprompt
if multiline is not None:
self.multiline = multiline
if prompt_continuation is not None:
self.prompt_continuation = prompt_continuation
if wrap_lines is not None:
self.wrap_lines = wrap_lines
if enable_history_search is not None:
self.enable_history_search = enable_history_search
if search_ignore_case is not None:
self.search_ignore_case = search_ignore_case
if complete_while_typing is not None:
self.complete_while_typing = complete_while_typing
if validate_while_typing is not None:
self.validate_while_typing = validate_while_typing
if complete_style is not None:
self.complete_style = complete_style
if auto_suggest is not None:
self.auto_suggest = auto_suggest
if validator is not None:
self.validator = validator
if clipboard is not None:
self.clipboard = clipboard
if mouse_support is not None:
self.mouse_support = mouse_support
if input_processors is not None:
self.input_processors = input_processors
if placeholder is not None:
self.placeholder = placeholder
if reserve_space_for_menu is not None:
self.reserve_space_for_menu = reserve_space_for_menu
if enable_system_prompt is not None:
self.enable_system_prompt = enable_system_prompt
if enable_suspend is not None:
self.enable_suspend = enable_suspend
if enable_open_in_editor is not None:
self.enable_open_in_editor = enable_open_in_editor
if tempfile_suffix is not None:
self.tempfile_suffix = tempfile_suffix
if tempfile is not None:
self.tempfile = tempfile
self._add_pre_run_callables(pre_run, accept_default)
self.default_buffer.reset(
default if isinstance(default, Document) else Document(default)
)
self.app.refresh_interval = self.refresh_interval # This is not reactive.
        # If we are using the default output and have a dumb terminal, use
        # the dumb prompt.
        if self._output is None and is_dumb_terminal():
            with self._dumb_prompt(self.message) as dumb_app:
                return dumb_app.run(in_thread=in_thread, handle_sigint=handle_sigint)
return self.app.run(
set_exception_handler=set_exception_handler,
in_thread=in_thread,
handle_sigint=handle_sigint,
)
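    # Illustrative usage sketch (values invented): keyword overrides persist on
    # the session, while `default`, `accept_default` and `pre_run` apply only
    # to the current call:
    #
    #     session = PromptSession()
    #     name = session.prompt("Name: ", default="guest")
    #     token = session.prompt("Token: ", is_password=True)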
@contextmanager
def _dumb_prompt(self, message: AnyFormattedText = "") -> Iterator[Application[_T]]:
"""
Create prompt `Application` for prompt function for dumb terminals.
Dumb terminals have minimum rendering capabilities. We can only print
text to the screen. We can't use colors, and we can't do cursor
movements. The Emacs inferior shell is an example of a dumb terminal.
We will show the prompt, and wait for the input. We still handle arrow
keys, and all custom key bindings, but we don't really render the
cursor movements. Instead we only print the typed character that's
right before the cursor.
"""
# Send prompt to output.
self.output.write(fragment_list_to_text(to_formatted_text(self.message)))
self.output.flush()
# Key bindings for the dumb prompt: mostly the same as the full prompt.
key_bindings: KeyBindingsBase = self._create_prompt_bindings()
if self.key_bindings:
key_bindings = merge_key_bindings([self.key_bindings, key_bindings])
# Create and run application.
application = cast(
Application[_T],
Application(
input=self.input,
output=DummyOutput(),
layout=self.layout,
key_bindings=key_bindings,
),
)
def on_text_changed(_: object) -> None:
self.output.write(self.default_buffer.document.text_before_cursor[-1:])
self.output.flush()
self.default_buffer.on_text_changed += on_text_changed
try:
yield application
finally:
# Render line ending.
self.output.write("\r\n")
self.output.flush()
self.default_buffer.on_text_changed -= on_text_changed
async def prompt_async(
self,
# When any of these arguments are passed, this value is overwritten
# in this PromptSession.
message: AnyFormattedText | None = None,
        # `message` should go first, because people call it as a
        # positional argument.
*,
editing_mode: EditingMode | None = None,
refresh_interval: float | None = None,
vi_mode: bool | None = None,
lexer: Lexer | None = None,
completer: Completer | None = None,
complete_in_thread: bool | None = None,
is_password: bool | None = None,
key_bindings: KeyBindingsBase | None = None,
bottom_toolbar: AnyFormattedText | None = None,
style: BaseStyle | None = None,
color_depth: ColorDepth | None = None,
cursor: CursorShapeConfig | None = None,
include_default_pygments_style: FilterOrBool | None = None,
style_transformation: StyleTransformation | None = None,
swap_light_and_dark_colors: FilterOrBool | None = None,
rprompt: AnyFormattedText | None = None,
multiline: FilterOrBool | None = None,
prompt_continuation: PromptContinuationText | None = None,
wrap_lines: FilterOrBool | None = None,
enable_history_search: FilterOrBool | None = None,
search_ignore_case: FilterOrBool | None = None,
complete_while_typing: FilterOrBool | None = None,
validate_while_typing: FilterOrBool | None = None,
complete_style: CompleteStyle | None = None,
auto_suggest: AutoSuggest | None = None,
validator: Validator | None = None,
clipboard: Clipboard | None = None,
mouse_support: FilterOrBool | None = None,
input_processors: list[Processor] | None = None,
placeholder: AnyFormattedText | None = None,
reserve_space_for_menu: int | None = None,
enable_system_prompt: FilterOrBool | None = None,
enable_suspend: FilterOrBool | None = None,
enable_open_in_editor: FilterOrBool | None = None,
tempfile_suffix: str | Callable[[], str] | None = None,
tempfile: str | Callable[[], str] | None = None,
# Following arguments are specific to the current `prompt()` call.
default: str | Document = "",
accept_default: bool = False,
pre_run: Callable[[], None] | None = None,
set_exception_handler: bool = True,
handle_sigint: bool = True,
) -> _T:
if message is not None:
self.message = message
if editing_mode is not None:
self.editing_mode = editing_mode
if refresh_interval is not None:
self.refresh_interval = refresh_interval
if vi_mode:
self.editing_mode = EditingMode.VI
if lexer is not None:
self.lexer = lexer
if completer is not None:
self.completer = completer
if complete_in_thread is not None:
self.complete_in_thread = complete_in_thread
if is_password is not None:
self.is_password = is_password
if key_bindings is not None:
self.key_bindings = key_bindings
if bottom_toolbar is not None:
self.bottom_toolbar = bottom_toolbar
if style is not None:
self.style = style
if color_depth is not None:
self.color_depth = color_depth
if cursor is not None:
self.cursor = cursor
if include_default_pygments_style is not None:
self.include_default_pygments_style = include_default_pygments_style
if style_transformation is not None:
self.style_transformation = style_transformation
if swap_light_and_dark_colors is not None:
self.swap_light_and_dark_colors = swap_light_and_dark_colors
if rprompt is not None:
self.rprompt = rprompt
if multiline is not None:
self.multiline = multiline
if prompt_continuation is not None:
self.prompt_continuation = prompt_continuation
if wrap_lines is not None:
self.wrap_lines = wrap_lines
if enable_history_search is not None:
self.enable_history_search = enable_history_search
if search_ignore_case is not None:
self.search_ignore_case = search_ignore_case
if complete_while_typing is not None:
self.complete_while_typing = complete_while_typing
if validate_while_typing is not None:
self.validate_while_typing = validate_while_typing
if complete_style is not None:
self.complete_style = complete_style
if auto_suggest is not None:
self.auto_suggest = auto_suggest
if validator is not None:
self.validator = validator
if clipboard is not None:
self.clipboard = clipboard
if mouse_support is not None:
self.mouse_support = mouse_support
if input_processors is not None:
self.input_processors = input_processors
if placeholder is not None:
self.placeholder = placeholder
if reserve_space_for_menu is not None:
self.reserve_space_for_menu = reserve_space_for_menu
if enable_system_prompt is not None:
self.enable_system_prompt = enable_system_prompt
if enable_suspend is not None:
self.enable_suspend = enable_suspend
if enable_open_in_editor is not None:
self.enable_open_in_editor = enable_open_in_editor
if tempfile_suffix is not None:
self.tempfile_suffix = tempfile_suffix
if tempfile is not None:
self.tempfile = tempfile
self._add_pre_run_callables(pre_run, accept_default)
self.default_buffer.reset(
default if isinstance(default, Document) else Document(default)
)
self.app.refresh_interval = self.refresh_interval # This is not reactive.
        # If we are using the default output and have a dumb terminal, use
        # the dumb prompt.
        if self._output is None and is_dumb_terminal():
            with self._dumb_prompt(self.message) as dumb_app:
                return await dumb_app.run_async(handle_sigint=handle_sigint)
return await self.app.run_async(
set_exception_handler=set_exception_handler, handle_sigint=handle_sigint
)
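    # Illustrative sketch: the awaitable variant, for use inside a running
    # event loop (assuming `import asyncio`):
    #
    #     async def main():
    #         session = PromptSession()
    #         answer = await session.prompt_async("Say something: ")
    #
    #     asyncio.run(main())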
def _add_pre_run_callables(
self, pre_run: Callable[[], None] | None, accept_default: bool
) -> None:
def pre_run2() -> None:
if pre_run:
pre_run()
if accept_default:
                # Validate and handle input. We use `call_soon` in order to
                # run it "soon" (during the next iteration of the event
                # loop), instead of right now. Otherwise, it won't display
                # the default value.
get_running_loop().call_soon(self.default_buffer.validate_and_handle)
self.app.pre_run_callables.append(pre_run2)
@property
def editing_mode(self) -> EditingMode:
return self.app.editing_mode
@editing_mode.setter
def editing_mode(self, value: EditingMode) -> None:
self.app.editing_mode = value
def _get_default_buffer_control_height(self) -> Dimension:
# If there is an autocompletion menu to be shown, make sure that our
# layout has at least a minimal height in order to display it.
if (
self.completer is not None
and self.complete_style != CompleteStyle.READLINE_LIKE
):
space = self.reserve_space_for_menu
else:
space = 0
if space and not get_app().is_done:
buff = self.default_buffer
# Reserve the space, either when there are completions, or when
# `complete_while_typing` is true and we expect completions very
# soon.
if buff.complete_while_typing() or buff.complete_state is not None:
return Dimension(min=space)
return Dimension()
def _get_prompt(self) -> StyleAndTextTuples:
return to_formatted_text(self.message, style="class:prompt")
def _get_continuation(
self, width: int, line_number: int, wrap_count: int
) -> StyleAndTextTuples:
"""
Insert the prompt continuation.
:param width: The width that was used for the prompt. (more or less can
be used.)
:param line_number:
:param wrap_count: Amount of times that the line has been wrapped.
"""
prompt_continuation = self.prompt_continuation
if callable(prompt_continuation):
continuation: AnyFormattedText = prompt_continuation(
width, line_number, wrap_count
)
else:
continuation = prompt_continuation
# When the continuation prompt is not given, choose the same width as
# the actual prompt.
if continuation is None and is_true(self.multiline):
continuation = " " * width
return to_formatted_text(continuation, style="class:prompt-continuation")
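    # Illustrative sketch: a callable continuation receives the prompt width,
    # line number and wrap count, as documented on `prompt_continuation`:
    #
    #     def continuation(width, line_number, wrap_count):
    #         return "." * max(width - 1, 0) + " "
    #
    #     PromptSession("multi> ", multiline=True, prompt_continuation=continuation)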
def _get_line_prefix(
self,
line_number: int,
wrap_count: int,
get_prompt_text_2: _StyleAndTextTuplesCallable,
) -> StyleAndTextTuples:
"""
Return whatever needs to be inserted before every line.
(the prompt, or a line continuation.)
"""
# First line: display the "arg" or the prompt.
if line_number == 0 and wrap_count == 0:
if not is_true(self.multiline) and get_app().key_processor.arg is not None:
return self._inline_arg()
else:
return get_prompt_text_2()
# For the next lines, display the appropriate continuation.
prompt_width = get_cwidth(fragment_list_to_text(get_prompt_text_2()))
return self._get_continuation(prompt_width, line_number, wrap_count)
def _get_arg_text(self) -> StyleAndTextTuples:
"'arg' toolbar, for in multiline mode."
arg = self.app.key_processor.arg
if arg is None:
# Should not happen because of the `has_arg` filter in the layout.
return []
if arg == "-":
arg = "-1"
return [("class:arg-toolbar", "Repeat: "), ("class:arg-toolbar.text", arg)]
def _inline_arg(self) -> StyleAndTextTuples:
"'arg' prefix, for in single line mode."
app = get_app()
if app.key_processor.arg is None:
return []
else:
arg = app.key_processor.arg
return [
("class:prompt.arg", "(arg: "),
("class:prompt.arg.text", str(arg)),
("class:prompt.arg", ") "),
]
# Expose the Input and Output objects as attributes, mainly for
# backward-compatibility.
@property
def input(self) -> Input:
return self.app.input
@property
def output(self) -> Output:
return self.app.output
def prompt(
message: AnyFormattedText | None = None,
*,
history: History | None = None,
editing_mode: EditingMode | None = None,
refresh_interval: float | None = None,
vi_mode: bool | None = None,
lexer: Lexer | None = None,
completer: Completer | None = None,
complete_in_thread: bool | None = None,
is_password: bool | None = None,
key_bindings: KeyBindingsBase | None = None,
bottom_toolbar: AnyFormattedText | None = None,
style: BaseStyle | None = None,
color_depth: ColorDepth | None = None,
cursor: AnyCursorShapeConfig = None,
include_default_pygments_style: FilterOrBool | None = None,
style_transformation: StyleTransformation | None = None,
swap_light_and_dark_colors: FilterOrBool | None = None,
rprompt: AnyFormattedText | None = None,
multiline: FilterOrBool | None = None,
prompt_continuation: PromptContinuationText | None = None,
wrap_lines: FilterOrBool | None = None,
enable_history_search: FilterOrBool | None = None,
search_ignore_case: FilterOrBool | None = None,
complete_while_typing: FilterOrBool | None = None,
validate_while_typing: FilterOrBool | None = None,
complete_style: CompleteStyle | None = None,
auto_suggest: AutoSuggest | None = None,
validator: Validator | None = None,
clipboard: Clipboard | None = None,
mouse_support: FilterOrBool | None = None,
input_processors: list[Processor] | None = None,
placeholder: AnyFormattedText | None = None,
reserve_space_for_menu: int | None = None,
enable_system_prompt: FilterOrBool | None = None,
enable_suspend: FilterOrBool | None = None,
enable_open_in_editor: FilterOrBool | None = None,
tempfile_suffix: str | Callable[[], str] | None = None,
tempfile: str | Callable[[], str] | None = None,
in_thread: bool = False,
# Following arguments are specific to the current `prompt()` call.
default: str = "",
accept_default: bool = False,
pre_run: Callable[[], None] | None = None,
) -> str:
"""
The global `prompt` function. This will create a new `PromptSession`
instance for every call.
"""
    # The history is the only attribute that has to be passed to the
    # `PromptSession`; it can't be passed into the `prompt()` method.
session: PromptSession[str] = PromptSession(history=history)
return session.prompt(
message,
editing_mode=editing_mode,
refresh_interval=refresh_interval,
vi_mode=vi_mode,
lexer=lexer,
completer=completer,
complete_in_thread=complete_in_thread,
is_password=is_password,
key_bindings=key_bindings,
bottom_toolbar=bottom_toolbar,
style=style,
color_depth=color_depth,
cursor=cursor,
include_default_pygments_style=include_default_pygments_style,
style_transformation=style_transformation,
swap_light_and_dark_colors=swap_light_and_dark_colors,
rprompt=rprompt,
multiline=multiline,
prompt_continuation=prompt_continuation,
wrap_lines=wrap_lines,
enable_history_search=enable_history_search,
search_ignore_case=search_ignore_case,
complete_while_typing=complete_while_typing,
validate_while_typing=validate_while_typing,
complete_style=complete_style,
auto_suggest=auto_suggest,
validator=validator,
clipboard=clipboard,
mouse_support=mouse_support,
input_processors=input_processors,
placeholder=placeholder,
reserve_space_for_menu=reserve_space_for_menu,
enable_system_prompt=enable_system_prompt,
enable_suspend=enable_suspend,
enable_open_in_editor=enable_open_in_editor,
tempfile_suffix=tempfile_suffix,
tempfile=tempfile,
default=default,
accept_default=accept_default,
pre_run=pre_run,
in_thread=in_thread,
)
prompt.__doc__ = PromptSession.prompt.__doc__
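# Illustrative sketch: this helper creates a throwaway session per call, so
# history only persists when passed in explicitly:
#
#     from prompt_toolkit.history import InMemoryHistory
#
#     history = InMemoryHistory()
#     first = prompt("> ", history=history)
#     second = prompt("> ", history=history)  # up-arrow recalls `first`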
def create_confirm_session(
message: str, suffix: str = " (y/n) "
) -> PromptSession[bool]:
"""
Create a `PromptSession` object for the 'confirm' function.
"""
bindings = KeyBindings()
@bindings.add("y")
@bindings.add("Y")
def yes(event: E) -> None:
session.default_buffer.text = "y"
event.app.exit(result=True)
@bindings.add("n")
@bindings.add("N")
def no(event: E) -> None:
session.default_buffer.text = "n"
event.app.exit(result=False)
@bindings.add(Keys.Any)
def _(event: E) -> None:
"Disallow inserting other text."
pass
complete_message = merge_formatted_text([message, suffix])
session: PromptSession[bool] = PromptSession(
complete_message, key_bindings=bindings
)
return session
def confirm(message: str = "Confirm?", suffix: str = " (y/n) ") -> bool:
"""
Display a confirmation prompt that returns True/False.
"""
session = create_confirm_session(message, suffix)
return session.prompt()
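# Illustrative usage sketch:
#
#     if confirm("Really delete?"):
#         ...  # 'y'/'Y' returns True, 'n'/'N' returns False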
|
324c996e460460978d37195f92ca524e994b0694
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/solr/tests/common.py
|
91dec8b6c5c30609fd12d2b0d145f2285ab83741
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
common.py
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from datadog_checks.dev import get_docker_hostname
HOST = get_docker_hostname()
UI_PORT = "8983"
SOLR_METRICS = [
"solr.document_cache.evictions",
"solr.document_cache.hits",
"solr.document_cache.inserts",
"solr.document_cache.lookups",
"solr.filter_cache.evictions",
"solr.filter_cache.hits",
"solr.filter_cache.inserts",
"solr.filter_cache.lookups",
"solr.query_result_cache.evictions",
"solr.query_result_cache.hits",
"solr.query_result_cache.inserts",
"solr.query_result_cache.lookups",
"solr.search_handler.errors",
"solr.search_handler.request_times.50percentile",
"solr.search_handler.request_times.75percentile",
"solr.search_handler.request_times.95percentile",
"solr.search_handler.request_times.98percentile",
"solr.search_handler.request_times.99percentile",
"solr.search_handler.request_times.999percentile",
"solr.search_handler.request_times.mean",
"solr.search_handler.request_times.mean_rate",
"solr.search_handler.request_times.one_minute_rate",
"solr.search_handler.requests",
"solr.search_handler.time",
"solr.search_handler.timeouts",
"solr.searcher.maxdocs",
"solr.searcher.numdocs",
"solr.searcher.warmup",
]
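# Illustrative sketch (the `aggregator` fixture comes from datadog_checks test
# conventions and is not defined in this module): the list above is typically
# used to assert metric coverage, e.g.:
#
#     for metric in SOLR_METRICS:
#         aggregator.assert_metric(metric)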
|
d2b038a0d1c4d4af3b674ffa265792acefa6b5a9
|
ef0c1514e9af6de3ba4a20e0d01de7cc3a915188
|
/sdk/resourcemanagerhybrid/update_root_pom.py
|
13e86ab9b076de19d884a2c979ae50cb78bd6869
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"CC0-1.0",
"BSD-3-Clause",
"UPL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-sdk-for-java
|
0902d584b42d3654b4ce65b1dad8409f18ddf4bc
|
789bdc6c065dc44ce9b8b630e2f2e5896b2a7616
|
refs/heads/main
| 2023-09-04T09:36:35.821969
| 2023-09-02T01:53:56
| 2023-09-02T01:53:56
| 2,928,948
| 2,027
| 2,084
|
MIT
| 2023-09-14T21:37:15
| 2011-12-06T23:33:56
|
Java
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
update_root_pom.py
|
with open('pom.xml', 'r') as file:
filedata = file.read()
filedata = filedata.replace('<module>sdk/resourcemanager</module>', '<module>sdk/resourcemanagerhybrid</module>')
with open('pom.xml', 'w') as file:
file.write(filedata)
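# Illustrative note (assumption): the script opens 'pom.xml' via a relative
# path, so it is presumably run from the repository root; afterwards the
# aggregator build points at sdk/resourcemanagerhybrid instead of
# sdk/resourcemanager.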
|
54f232400a4c04d5e117a93717d4fe34a1bbc853
|
07e810873aa0134ba5017ccfef641d1038ca9b92
|
/theme/migrations/0007_auto_20170427_1553.py
|
af8e08ea4a6905598a2978c71ebbd096580943b3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
hydroshare/hydroshare
|
9093e6dce047a30d4b2b7720257a7841d209353f
|
69855813052243c702c9b0108d2eac3f4f1a768f
|
refs/heads/develop
| 2023-09-04T12:52:30.816709
| 2023-08-30T16:46:20
| 2023-08-30T16:46:20
| 24,703,136
| 207
| 57
|
BSD-3-Clause
| 2023-09-14T20:20:16
| 2014-10-02T02:19:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
0007_auto_20170427_1553.py
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('theme', '0006_auto_20170309_1516'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='message_end_date',
field=models.DateField(help_text='Date on which the message will no more be displayed', null=True),
),
migrations.AddField(
model_name='homepage',
name='message_start_date',
field=models.DateField(help_text='Date from which the message will be displayed', null=True),
),
migrations.AddField(
model_name='homepage',
name='message_type',
field=models.CharField(default='Information', max_length=100, choices=[('warning', 'Warning'), ('information', 'Information')]),
),
migrations.AddField(
model_name='homepage',
name='show_message',
field=models.BooleanField(default=False, help_text='Check to show message'),
),
]
|
69204b72e4ef822047c55d375adee2568717328f
|
749780a767bb719f6333d406d477b1fc0ec2c80a
|
/utils/cbl_installed_software.py
|
f1055a3b2f3cd65e31162fe1f6bd57a270f888c1
|
[
"MIT"
] |
permissive
|
chapmanb/cloudbiolinux
|
4482a271d4b8ea5e37075170452076c9886ae0d7
|
39eb5ac3d45654404d703ffb450a67947458df37
|
refs/heads/master
| 2023-09-06T03:52:42.206531
| 2023-03-27T04:37:38
| 2023-03-27T04:37:38
| 1,591,413
| 167
| 135
|
MIT
| 2023-03-21T22:10:36
| 2011-04-09T13:09:58
|
Python
|
UTF-8
|
Python
| false
| false
| 382
|
py
|
cbl_installed_software.py
|
#!/usr/bin/env python
"""Provide dump of software and libraries installed on CloudBioLinux image.
Run from the top level of the cloudbiolinux source directory:
python utils/cbl_installed_software.py
"""
import os
from cloudbio import manifest
def main():
out_dir = os.path.join(os.getcwd(), "manifest")
manifest.create(out_dir)
if __name__ == "__main__":
main()
|
04b779d939762037f54c994b96f53666ea9a7897
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/app_manager/exceptions.py
|
eaf56aca01b42a99afb9125060235ceddf103a19
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,488
|
py
|
exceptions.py
|
import couchdbkit
from corehq.apps.app_manager.const import APP_V2
class AppManagerException(Exception):
pass
class VersioningError(AppManagerException):
"""For errors that violate the principles of versioning in ApplicationBase"""
pass
class ModuleNotFoundException(AppManagerException, IndexError):
pass
class FormNotFoundException(AppManagerException, IndexError):
pass
class IncompatibleFormTypeException(AppManagerException):
pass
class AddOnNotFoundException(AppManagerException, IndexError):
pass
class AppEditingError(AppManagerException):
pass
class ModuleIdMissingException(AppManagerException):
pass
class RearrangeError(AppEditingError):
pass
class XFormException(AppManagerException):
pass
class CaseError(XFormException):
pass
class ScheduleError(XFormException):
pass
class XFormValidationFailed(XFormException):
"""Unable to communicate with validation service or validation service errored"""
pass
class XFormValidationError(XFormException):
def __init__(self, fatal_error, version=APP_V2, validation_problems=None):
self.fatal_error = fatal_error
self.version = version
self.validation_problems = validation_problems or []
def __str__(self):
fatal_error_text = self.format_v1(self.fatal_error)
ret = "Validation Error%s" % (': %s' % fatal_error_text if fatal_error_text else '')
problems = [problem for problem in self.validation_problems if problem['message'] != self.fatal_error]
if problems:
ret += "\n\nMore information:"
for problem in problems:
ret += "\n{type}: {msg}".format(type=problem['type'].title(), msg=problem['message'])
return ret
def format_v1(self, msg):
if self.version != '1.0':
return msg
# Don't display the first two lines which say "Parsing form..." and 'Title: "{form_name}"'
#
# ... and if possible split the third line that looks like e.g. "org.javarosa.xform.parse.XFormParseException: Select question has no choices"
# and just return the undecorated string
#
# ... unless the first line says
message_lines = str(msg).split('\n')[2:]
if len(message_lines) > 0 and ':' in message_lines[0] and 'XPath Dependency Cycle' not in str(msg):
message = ' '.join(message_lines[0].split(':')[1:])
else:
message = '\n'.join(message_lines)
return message
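# Illustrative sketch (values invented) of how `XFormValidationError.__str__`
# renders:
#
#     err = XFormValidationError(
#         "Select question has no choices",
#         validation_problems=[{"type": "error", "message": "Missing bind"}],
#     )
#     str(err)
#     # 'Validation Error: Select question has no choices'
#     # '\n\nMore information:\nError: Missing bind'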
class BindNotFound(XFormException):
pass
class SuiteError(AppManagerException):
pass
class MediaResourceError(SuiteError):
pass
class ResourceOverrideError(SuiteError):
pass
class ParentModuleReferenceError(SuiteError):
pass
class SuiteValidationError(SuiteError):
pass
class LocationXPathValidationError(AppManagerException):
pass
class UnknownInstanceError(SuiteError):
pass
class DuplicateInstanceIdError(SuiteError):
pass
class ConfigurableReportException(AppManagerException):
pass
class NoMatchingFilterException(ConfigurableReportException):
pass
class XPathValidationError(SuiteValidationError):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.form = kwargs.pop('form', None)
super(XPathValidationError, self).__init__(*args, **kwargs)
class CaseXPathValidationError(XPathValidationError):
pass
class UsercaseXPathValidationError(XPathValidationError):
pass
class PracticeUserException(AppManagerException):
""" For errors related to misconfiguration of app.practice_mobile_worker_id """
def __init__(self, *args, **kwargs):
self.build_profile_id = kwargs.pop('build_profile_id', None)
super(PracticeUserException, self).__init__(*args, **kwargs)
class AppLinkError(AppManagerException):
pass
class CaseSearchConfigError(AppManagerException):
pass
class SavedAppBuildException(AppManagerException):
pass
class MultimediaMissingError(AppManagerException):
pass
class BuildNotFoundException(AppManagerException):
pass
class BuildConflictException(Exception):
pass
class AppValidationError(AppManagerException):
def __init__(self, errors):
self.errors = errors
class DangerousXmlException(Exception):
pass
class AppMisconfigurationError(AppManagerException):
"""Errors in app configuration that are the user's responsibility"""
|
2641b0b846b6b16c88c0c70d2ef9fe00c878ce25
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/geometry/section.py
|
aa7cf78db92a10df4cb0659bdcb066a2332c0f70
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 6,831
|
py
|
section.py
|
"""
Geometry / Hull Section
=======================
Defines various objects to compute hull sections:
- :func:`colour.geometry.hull_section`
"""
from __future__ import annotations
import numpy as np
from colour.algebra import linear_conversion
from colour.constants import DEFAULT_FLOAT_DTYPE
from colour.hints import Any, ArrayLike, Literal, NDArrayFloat, cast
from colour.utilities import (
as_float_array,
as_float_scalar,
required,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"edges_to_chord",
"unique_vertices",
"close_chord",
"hull_section",
]
def edges_to_chord(edges: ArrayLike, index: int = 0) -> NDArrayFloat:
"""
Convert given edges to a chord, starting at given index.
Parameters
----------
edges
Edges to convert to a chord.
index
Index to start forming the chord at.
Returns
-------
:class:`numpy.ndarray`
Chord.
Examples
--------
>>> edges = np.array(
... [
... [[-0.0, -0.5, 0.0], [0.5, -0.5, 0.0]],
... [[-0.5, -0.5, 0.0], [-0.0, -0.5, 0.0]],
... [[0.5, 0.5, 0.0], [-0.0, 0.5, 0.0]],
... [[-0.0, 0.5, 0.0], [-0.5, 0.5, 0.0]],
... [[-0.5, 0.0, -0.0], [-0.5, -0.5, -0.0]],
... [[-0.5, 0.5, -0.0], [-0.5, 0.0, -0.0]],
... [[0.5, -0.5, -0.0], [0.5, 0.0, -0.0]],
... [[0.5, 0.0, -0.0], [0.5, 0.5, -0.0]],
... ]
... )
>>> edges_to_chord(edges)
array([[-0. , -0.5, 0. ],
[ 0.5, -0.5, 0. ],
[ 0.5, -0.5, -0. ],
[ 0.5, 0. , -0. ],
[ 0.5, 0. , -0. ],
[ 0.5, 0.5, -0. ],
[ 0.5, 0.5, 0. ],
[-0. , 0.5, 0. ],
[-0. , 0.5, 0. ],
[-0.5, 0.5, 0. ],
[-0.5, 0.5, -0. ],
[-0.5, 0. , -0. ],
[-0.5, 0. , -0. ],
[-0.5, -0.5, -0. ],
[-0.5, -0.5, 0. ],
[-0. , -0.5, 0. ]])
"""
edge_list = as_float_array(edges).tolist()
edges_ordered = [edge_list.pop(index)]
segment = np.array(edges_ordered[0][1])
while len(edge_list) > 0:
edges_array = np.array(edge_list)
d_0 = np.linalg.norm(edges_array[:, 0, :] - segment, axis=1)
d_1 = np.linalg.norm(edges_array[:, 1, :] - segment, axis=1)
d_0_argmin, d_1_argmin = d_0.argmin(), d_1.argmin()
if d_0[d_0_argmin] < d_1[d_1_argmin]:
edges_ordered.append(edge_list.pop(d_0_argmin))
segment = np.array(edges_ordered[-1][1])
else:
edges_ordered.append(edge_list.pop(d_1_argmin))
segment = np.array(edges_ordered[-1][0])
return as_float_array(edges_ordered).reshape([-1, segment.shape[-1]])
def close_chord(vertices: ArrayLike) -> NDArrayFloat:
"""
Close the chord.
Parameters
----------
vertices
Vertices of the chord to close.
Returns
-------
:class:`numpy.ndarray`
Closed chord.
Examples
--------
>>> close_chord(np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]))
array([[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5],
[ 0. , 0.5, 0. ]])
"""
vertices = as_float_array(vertices)
return np.vstack([vertices, vertices[0]])
def unique_vertices(
vertices: ArrayLike,
decimals: int = np.finfo(cast(Any, DEFAULT_FLOAT_DTYPE)).precision - 1,
) -> NDArrayFloat:
"""
Return the unique vertices from given vertices.
Parameters
----------
vertices
Vertices to return the unique vertices from.
decimals
Decimals used when rounding the vertices prior to comparison.
Returns
-------
:class:`numpy.ndarray`
Unique vertices.
Notes
-----
- The vertices are rounded at given ``decimals``.
Examples
--------
>>> unique_vertices(
... np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5], [0.0, 0.5, 0.0]])
... )
array([[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5]])
"""
vertices = as_float_array(vertices)
unique, indexes = np.unique(
vertices.round(decimals=decimals), axis=0, return_index=True
)
return unique[np.argsort(indexes)]
@required("trimesh")
def hull_section(
hull: trimesh.Trimesh, # pyright: ignore # noqa: F821
axis: Literal["+z", "+x", "+y"] | str = "+z",
origin: float = 0.5,
normalise: bool = False,
) -> NDArrayFloat:
"""
Compute the hull section for given axis at given origin.
Parameters
----------
hull
*Trimesh* hull.
axis
Axis the hull section will be normal to.
origin
Coordinate along ``axis`` at which to plot the hull section.
normalise
Whether to normalise ``axis`` to the extent of the hull along it.
Returns
-------
:class:`numpy.ndarray`
Hull section vertices.
Examples
--------
>>> from colour.geometry import primitive_cube
>>> from colour.utilities import is_trimesh_installed
>>> vertices, faces, outline = primitive_cube(1, 1, 1, 2, 2, 2)
>>> if is_trimesh_installed:
... import trimesh
...
... hull = trimesh.Trimesh(vertices["position"], faces, process=False)
... hull_section(hull, origin=0)
...
array([[-0. , -0.5, 0. ],
[ 0.5, -0.5, 0. ],
[ 0.5, 0. , -0. ],
[ 0.5, 0.5, -0. ],
[-0. , 0.5, 0. ],
[-0.5, 0.5, 0. ],
[-0.5, 0. , -0. ],
[-0.5, -0.5, -0. ],
[-0. , -0.5, 0. ]])
"""
import trimesh.intersections
axis = validate_method(
axis,
("+z", "+x", "+y"),
'"{0}" axis is invalid, it must be one of {1}!',
)
if axis == "+x":
normal, plane = np.array([1, 0, 0]), np.array([origin, 0, 0])
elif axis == "+y":
normal, plane = np.array([0, 1, 0]), np.array([0, origin, 0])
elif axis == "+z":
normal, plane = np.array([0, 0, 1]), np.array([0, 0, origin])
if normalise:
vertices = hull.vertices * normal
origin = as_float_scalar(
linear_conversion(
origin, [0, 1], [np.min(vertices), np.max(vertices)]
)
)
plane[plane != 0] = origin
section = trimesh.intersections.mesh_plane(hull, normal, plane)
if len(section) == 0:
raise ValueError(
f'No section exists on "{axis}" axis at {origin} origin!'
)
section = close_chord(unique_vertices(edges_to_chord(section)))
return section
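# Illustrative sketch, building on the doctest above: with ``normalise=True``
# the origin is interpreted as a relative position along the axis, so 0.5 cuts
# through the middle of the hull regardless of its extent:
#
#     hull_section(hull, axis="+z", origin=0.5, normalise=True)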
|
387e4ce10faf6b74f047d95c0ae17b06b101a1bd
|
0893d8cdfbce152bedba671cbc77f2210137e966
|
/docs/conf.py
|
370a4669a1eb2d5dc7de440cf107b18ac4b623cc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
elastic/rally
|
ff66be39505ae93d1d3fb125a3ee1cc30d8ef4cb
|
3398bb861e6f9e15ff4887be8e9cede3966930fe
|
refs/heads/master
| 2023-08-31T20:37:05.610611
| 2023-08-31T06:58:58
| 2023-08-31T06:58:58
| 47,140,131
| 1,849
| 346
|
Apache-2.0
| 2023-09-14T12:32:46
| 2015-11-30T19:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,859
|
py
|
conf.py
|
#!/usr/bin/env python3
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
from datetime import date
from os.path import dirname, join
from sphinx.config import ConfigError
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.ifconfig",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames. You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
root_doc = "index"
language = "en"
CI_VARS = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".ci", "variables.json")
def read_min_python_version():
try:
with open(CI_VARS) as fp:
return json.load(fp)["python_versions"]["MIN_PY_VER"]
except KeyError as e:
raise ConfigError(f"Failed building docs as required key [{e}] couldn't be found in the file [{CI_VARS}].")
def get_es_client_version():
import elasticsearch
return ".".join(map(str, elasticsearch.__version__))
GLOBAL_SUBSTITUTIONS = {
"{MIN_PY_VER}": read_min_python_version(),
"{ES_CLIENT_VER}": get_es_client_version(),
}
# inspiration from https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229
def replace_globals(app, docname, source):
tmp_source = source[0]
for k, v in GLOBAL_SUBSTITUTIONS.items():
tmp_source = tmp_source.replace(k, v)
source[0] = tmp_source
def setup(app):
app.connect("source-read", replace_globals)
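# Illustrative sketch: with GLOBAL_SUBSTITUTIONS above, an .rst source line
# such as "Rally requires Python {MIN_PY_VER}." is rewritten at read time to,
# e.g., "Rally requires Python 3.8." (the concrete value comes from
# .ci/variables.json).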
def read_min_es_version():
with open(join(dirname(__file__), os.pardir, "esrally/min-es-version.txt")) as f:
return f.read().strip()
year = date.today().year
rst_prolog = f"""
.. |year| replace:: {year}
.. |MIN_PY_VER| replace:: {read_min_python_version()}
.. |min_es_version| replace:: {read_min_es_version()}
.. |ES_CLIENT_VER| replace:: {get_es_client_version()}
"""
# General information about the project.
project = "Rally"
copyright = "%i, Elasticsearch B.V." % year
author = "Daniel Mitterdorfer"
import esrally
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Technically the short X.Y version, but let's use the full version, including .dev0
version = esrally.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "architecture"]
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
html_static_path = ["."]
html_logo = "rally-logo.svg"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# Output file base name for HTML help builder.
htmlhelp_basename = "Rallydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [
(
root_doc,
"Rally.tex",
"Rally Documentation",
"Daniel Mitterdorfer",
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"esrally",
"Rally Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(
root_doc,
"Rally",
"Rally Documentation",
author,
"Rally",
"Macrobenchmarking framework for Elasticsearch.",
"Miscellaneous",
),
]
|
d6c62d74442de3641905784beebca650cb155199
|
32809f6f425bf5665fc19de2bc929bacc3eeb469
|
/src/1281-Subtract-the-Product-and-Sum-of-Digits-of-an-Integer/1281.py
|
4a0304bc511862815f6353c0a2c7788944ce058d
|
[] |
no_license
|
luliyucoordinate/Leetcode
|
9f6bf01f79aa680e2dff11e73e4d10993467f113
|
bcc04d49969654cb44f79218a7ef2fd5c1e5449a
|
refs/heads/master
| 2023-05-25T04:58:45.046772
| 2023-05-24T11:57:20
| 2023-05-24T11:57:20
| 132,753,892
| 1,575
| 569
| null | 2023-05-24T11:57:22
| 2018-05-09T12:30:59
|
C++
|
UTF-8
|
Python
| false
| false
| 179
|
py
|
1281.py
|
import operator
from functools import reduce
class Solution:
def subtractProductAndSum(self, n: int) -> int:
A = list(map(int, str(n)))
return reduce(operator.mul, A) - sum(A)
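# Worked example: n = 234 -> digits [2, 3, 4]; product 2*3*4 = 24, sum 2+3+4 = 9,
# so the answer is 24 - 9 = 15.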
|
8445413be24680f60faba791d280b5fd6dbf5d69
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/framework/type_utils.py
|
0eebdae2be9800651bd6478c98f1fc1e88888bc3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 8,356
|
py
|
type_utils.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for types information, incuding full type information."""
from typing import List
from tensorflow.core.framework import full_type_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import type_spec
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensorSpec
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# TODO(b/226455884) A python binding for DT_TO_FT or map_dtype_to_tensor() from
# tensorflow/core/framework/types.cc to avoid duplication here
_DT_TO_FT = {
types_pb2.DT_FLOAT: full_type_pb2.TFT_FLOAT,
types_pb2.DT_DOUBLE: full_type_pb2.TFT_DOUBLE,
types_pb2.DT_INT32: full_type_pb2.TFT_INT32,
types_pb2.DT_UINT8: full_type_pb2.TFT_UINT8,
types_pb2.DT_INT16: full_type_pb2.TFT_INT16,
types_pb2.DT_INT8: full_type_pb2.TFT_INT8,
types_pb2.DT_STRING: full_type_pb2.TFT_STRING,
types_pb2.DT_COMPLEX64: full_type_pb2.TFT_COMPLEX64,
types_pb2.DT_INT64: full_type_pb2.TFT_INT64,
types_pb2.DT_BOOL: full_type_pb2.TFT_BOOL,
types_pb2.DT_UINT16: full_type_pb2.TFT_UINT16,
types_pb2.DT_COMPLEX128: full_type_pb2.TFT_COMPLEX128,
types_pb2.DT_HALF: full_type_pb2.TFT_HALF,
types_pb2.DT_UINT32: full_type_pb2.TFT_UINT32,
types_pb2.DT_UINT64: full_type_pb2.TFT_UINT64,
types_pb2.DT_VARIANT: full_type_pb2.TFT_LEGACY_VARIANT,
}
def _translate_to_fulltype_for_flat_tensors(
spec: type_spec.TypeSpec) -> List[full_type_pb2.FullTypeDef]:
"""Convert a TypeSec to a list of FullTypeDef.
The FullTypeDef created corresponds to the encoding used with datasets
(and map_fn) that uses variants (and not FullTypeDef corresponding to the
default "component" encoding).
Currently, the only use of this is for information about the contents of
ragged tensors, so only ragged tensors return useful full type information
and other types return TFT_UNSET. While this could be improved in the future,
this function is intended for temporary use and expected to be removed
when type inference support is sufficient.
Args:
spec: A TypeSpec for one element of a dataset or map_fn.
Returns:
A list of FullTypeDef corresponding to SPEC. The length of this list
is always the same as the length of spec._flat_tensor_specs.
"""
if isinstance(spec, RaggedTensorSpec):
dt = spec.dtype
elem_t = _DT_TO_FT.get(dt)
if elem_t is None:
logging.vlog(1, "dtype %s that has no conversion to fulltype.", dt)
elif elem_t == full_type_pb2.TFT_LEGACY_VARIANT:
logging.vlog(1, "Ragged tensors containing variants are not supported.",
dt)
else:
assert len(spec._flat_tensor_specs) == 1 # pylint: disable=protected-access
return [
full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_RAGGED,
args=[full_type_pb2.FullTypeDef(type_id=elem_t)])
]
return [
full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET)
for t in spec._flat_tensor_specs # pylint: disable=protected-access
]
# LINT.IfChange(_specs_for_flat_tensors)
def _specs_for_flat_tensors(element_spec):
"""Return a flat list of type specs for element_spec.
Note that "flat" in this function and in `_flat_tensor_specs` is a nickname
for the "batchable tensor list" encoding used by datasets and map_fn
internally (in C++/graphs). The ability to batch, unbatch and change
batch size is one important characteristic of this encoding. A second
  important characteristic is that it represents a ragged tensor or sparse
tensor as a single tensor of type variant (and this encoding uses special
ops to encode/decode to/from variants).
  (In contrast, the more typical encoding, e.g. the C++/graph
representation when calling a tf.function, is "component encoding" which
represents sparse and ragged tensors as multiple dense tensors and does
not use variants or special ops for encoding/decoding.)
Args:
element_spec: A nest of TypeSpec describing the elements of a dataset (or
map_fn).
Returns:
A non-nested list of TypeSpec used by the encoding of tensors by
datasets and map_fn for ELEMENT_SPEC. The items
in this list correspond to the items in `_flat_tensor_specs`.
"""
if isinstance(element_spec, StructuredTensor.Spec):
specs = []
for _, field_spec in sorted(
element_spec._field_specs.items(), key=lambda t: t[0]): # pylint: disable=protected-access
specs.extend(_specs_for_flat_tensors(field_spec))
elif isinstance(element_spec, type_spec.BatchableTypeSpec) and (
element_spec.__class__._flat_tensor_specs is # pylint: disable=protected-access
type_spec.BatchableTypeSpec._flat_tensor_specs): # pylint: disable=protected-access
    # Classes that use the default `_flat_tensor_specs` from `BatchableTypeSpec`
    # (i.e. derived classes that do not override `_flat_tensor_specs`) are
    # encoded using `component_specs`.
specs = nest.flatten(
element_spec._component_specs, # pylint: disable=protected-access
expand_composites=False)
else:
    # In addition to flattening any nesting in Python,
# this default case covers things that are encoded by one tensor,
# such as dense tensors which are unchanged by encoding and
# ragged tensors and sparse tensors which are encoded by a variant tensor.
specs = nest.flatten(element_spec, expand_composites=False)
return specs
# LINT.ThenChange()
# Note that _specs_for_flat_tensors must correspond to _flat_tensor_specs
def fulltypes_for_flat_tensors(element_spec):
"""Convert the element_spec for a dataset to a list of FullType Def.
Note that "flat" in this function and in `_flat_tensor_specs` is a nickname
for the "batchable tensor list" encoding used by datasets and map_fn.
The FullTypeDef created corresponds to this encoding (e.g. that uses variants
and not the FullTypeDef corresponding to the default "component" encoding).
This is intended for temporary internal use and expected to be removed
when type inference support is sufficient. See limitations of
`_translate_to_fulltype_for_flat_tensors`.
Args:
element_spec: A nest of TypeSpec describing the elements of a dataset (or
map_fn).
Returns:
    A list of FullTypeDef corresponding to ELEMENT_SPEC. The items
in this list correspond to the items in `_flat_tensor_specs`.
"""
specs = _specs_for_flat_tensors(element_spec)
full_types_lists = [_translate_to_fulltype_for_flat_tensors(s) for s in specs]
rval = nest.flatten(full_types_lists) # flattens list-of-list to flat list.
return rval
def fulltype_list_to_product(fulltype_list):
"""Convert a list of FullType Def into a single FullType Def."""
return full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_PRODUCT, args=fulltype_list)
def iterator_full_type_from_spec(element_spec):
"""Returns a FullTypeDef for an iterator for the elements.
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
Returns:
A FullTypeDef for an iterator for the element tensor representation.
"""
args = fulltypes_for_flat_tensors(element_spec)
return full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_PRODUCT,
args=[
full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_ITERATOR,
args=[
full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_PRODUCT, args=args)
])
])
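# Illustrative usage sketch (not part of the original module; assumes a
# TensorFlow build with ragged tensor support):
#
#     import tensorflow as tf
#     spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)
#     fulltypes_for_flat_tensors(spec)
#     # -> [FullTypeDef(type_id=TFT_RAGGED, args=[FullTypeDef(type_id=TFT_INT32)])]
#
# Specs without a known conversion (e.g. a plain tf.TensorSpec) instead yield
# FullTypeDef(type_id=TFT_UNSET), as described in the docstrings above.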
|
65c3989e7fd36bb0a79a174c971a0bafa942e754
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/api/usecases/adapters/model_adapter.py
|
837f7ef6f6275325b8c912409fba42f608b9a634
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
model_adapter.py
|
"""This module define a module to adapt model weights from a data source."""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import abc
from typing import Union
class IDataSource:
"""Class that holds a combination of both a repo and a URL which can be used to fetch data."""
@property
@abc.abstractmethod
def data(self):
"""Returns the data of the source."""
raise NotImplementedError
class ModelAdapter(metaclass=abc.ABCMeta):
"""The ModelAdapter is an adapter is intended to lazily fetch its binary data from a given data source."""
def __init__(self, data_source: Union[IDataSource, bytes]):
self.__data_source = data_source
@property
def data_source(self):
"""Returns the data source of the adapter."""
return self.__data_source
@data_source.setter
def data_source(self, value: Union[IDataSource, bytes]):
self.__data_source = value
@property
def data(self):
"""Returns the data of the Model."""
if isinstance(self.__data_source, IDataSource):
return self.__data_source.data
if isinstance(self.__data_source, bytes):
return self.__data_source
raise ValueError("This model adapter is not properly initialized with a source of data")
@property
def from_file_storage(self) -> bool:
"""Returns if the ModelAdapters data comes from the file storage or not.
This is used in the model repo to know if the data of the model should be saved or not.
"""
if isinstance(self.data_source, bytes):
return False
return True
class ExportableCodeAdapter(ModelAdapter):
"""Adapter intended to lazily fetch raw exportable code data from a given data source."""
|
7965c1e6dbf51961817f450533483a439c636005
|
53ab063373d7c405bf6edd29d5fb2dfadeff874e
|
/tester.py
|
6febcd64bf52f3c05d5c038cf7fdb44aa1b1a9aa
|
[
"BSD-2-Clause-Views"
] |
permissive
|
dano/aioprocessing
|
2e5d10e4b25ed1df6575eb351950c9470794f3aa
|
39e84ac016aa21d93835b44ca134b0791c8c13ba
|
refs/heads/master
| 2023-07-31T20:26:37.158038
| 2022-09-16T02:29:57
| 2022-09-16T02:29:57
| 22,361,744
| 645
| 37
|
NOASSERTION
| 2022-09-09T16:25:39
| 2014-07-28T23:48:40
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
tester.py
|
#!/usr/bin/python3
import asyncio
from aioprocessing import AioManager
from concurrent.futures import ProcessPoolExecutor
async def _do_coro_proc_work(q, val, val2):
ok = val + val2
# await asyncio.sleep(4)
print("Passing {} to parent".format(ok))
    await q.coro_put(ok)
    item = q.get()  # blocking get; acceptable here since nothing else runs on this child loop
    print("got {} back from parent".format(item))
def do_coro_proc_work(q, val, val2):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(_do_coro_proc_work(q, val, val2))
async def do_work(q):
    print("hi")
    # Fire-and-forget: results come back through the shared AioQueue rather
    # than through the executor future, so the future is not awaited.
    loop.run_in_executor(ProcessPoolExecutor(),
                         do_coro_proc_work, q, 1, 2)
item = await q.coro_get()
print("Got {} from worker".format(item))
item = item + 25
q.put(item)
if __name__ == "__main__":
m = AioManager()
q = m.AioQueue()
loop = asyncio.get_event_loop()
loop.run_until_complete(do_work(q))
|
bf94b79cfc2873c169518e36d9e4c9215efdd6e7
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/lib/galaxy/webapps/galaxy/services/visualizations.py
|
557d4cd650822ba757ac022405735bc2a3846721
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
visualizations.py
|
import logging
from galaxy.managers.notification import NotificationManager
from galaxy.managers.visualizations import (
VisualizationManager,
VisualizationSerializer,
)
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.webapps.galaxy.services.base import ServiceBase
from galaxy.webapps.galaxy.services.sharable import ShareableService
log = logging.getLogger(__name__)
class VisualizationsService(ServiceBase):
"""Common interface/service logic for interactions with visualizations in the context of the API.
Provides the logic of the actions invoked by API controllers and uses type definitions
and pydantic models to declare its parameters and return types.
"""
def __init__(
self,
security: IdEncodingHelper,
manager: VisualizationManager,
serializer: VisualizationSerializer,
notification_manager: NotificationManager,
):
super().__init__(security)
self.manager = manager
self.serializer = serializer
self.shareable_service = ShareableService(self.manager, self.serializer, notification_manager)
# TODO: add the rest of the API actions here and call them directly from the API controller
|
fecd53f2c86a943fa094eb592b8d6e4a6d179e45
|
187414dcb264fb49d82507a099fd5fdca6e55e38
|
/python/pyspark/sql/connect/readwriter.py
|
cfcbcede34873257b17b3b42d030cc63b4481bbe
|
[
"BSD-3-Clause",
"CC0-1.0",
"CDDL-1.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
apache/spark
|
8aeba2d80465a262acc95781ede105a5b5886f6d
|
60d8fc49bec5dae1b8cf39a0670cb640b430f520
|
refs/heads/master
| 2023-09-04T04:33:36.058199
| 2023-09-04T03:48:52
| 2023-09-04T03:48:52
| 17,165,658
| 39,983
| 32,449
|
Apache-2.0
| 2023-09-14T19:46:24
| 2014-02-25T08:00:08
|
Scala
|
UTF-8
|
Python
| false
| false
| 30,448
|
py
|
readwriter.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.connect.utils import check_dependencies
check_dependencies(__name__)
from typing import Dict
from typing import Optional, Union, List, overload, Tuple, cast
from typing import TYPE_CHECKING
from pyspark.sql.connect.plan import Read, DataSource, LogicalPlan, WriteOperation, WriteOperationV2
from pyspark.sql.types import StructType
from pyspark.sql.utils import to_str
from pyspark.sql.readwriter import (
DataFrameWriter as PySparkDataFrameWriter,
DataFrameReader as PySparkDataFrameReader,
DataFrameWriterV2 as PySparkDataFrameWriterV2,
)
from pyspark.errors import PySparkAttributeError, PySparkTypeError, PySparkValueError
if TYPE_CHECKING:
from pyspark.sql.connect.dataframe import DataFrame
from pyspark.sql.connect._typing import ColumnOrName, OptionalPrimitiveType
from pyspark.sql.connect.session import SparkSession
__all__ = ["DataFrameReader", "DataFrameWriter"]
PathOrPaths = Union[str, List[str]]
TupleOrListOfString = Union[List[str], Tuple[str, ...]]
class OptionUtils:
def _set_opts(
self,
schema: Optional[Union[StructType, str]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""
        Set named options (filter out those whose value is None).
"""
if schema is not None:
self.schema(schema) # type: ignore[attr-defined]
for k, v in options.items():
if v is not None:
self.option(k, v) # type: ignore[attr-defined]
class DataFrameReader(OptionUtils):
# TODO(SPARK-40539) Achieve parity with PySpark.
def __init__(self, client: "SparkSession"):
self._client = client
self._format: Optional[str] = None
self._schema = ""
self._options: Dict[str, str] = {}
def format(self, source: str) -> "DataFrameReader":
self._format = source
return self
format.__doc__ = PySparkDataFrameReader.format.__doc__
def schema(self, schema: Union[StructType, str]) -> "DataFrameReader":
if isinstance(schema, StructType):
self._schema = schema.json()
elif isinstance(schema, str):
self._schema = schema
else:
raise PySparkTypeError(
error_class="NOT_STR_OR_STRUCT",
message_parameters={
"arg_name": "schema",
"arg_type": type(schema).__name__,
},
)
return self
schema.__doc__ = PySparkDataFrameReader.schema.__doc__
def option(self, key: str, value: "OptionalPrimitiveType") -> "DataFrameReader":
self._options[key] = str(value)
return self
option.__doc__ = PySparkDataFrameReader.option.__doc__
def options(self, **options: "OptionalPrimitiveType") -> "DataFrameReader":
for k in options:
self.option(k, to_str(options[k]))
return self
options.__doc__ = PySparkDataFrameReader.options.__doc__
def load(
self,
path: Optional[PathOrPaths] = None,
format: Optional[str] = None,
schema: Optional[Union[StructType, str]] = None,
**options: "OptionalPrimitiveType",
) -> "DataFrame":
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
paths = path
if isinstance(path, str):
paths = [path]
plan = DataSource(
format=self._format,
schema=self._schema,
options=self._options,
paths=paths, # type: ignore[arg-type]
)
return self._df(plan)
load.__doc__ = PySparkDataFrameReader.load.__doc__
def _df(self, plan: LogicalPlan) -> "DataFrame":
from pyspark.sql.connect.dataframe import DataFrame
return DataFrame.withPlan(plan, self._client)
def table(self, tableName: str) -> "DataFrame":
return self._df(Read(tableName, self._options))
table.__doc__ = PySparkDataFrameReader.table.__doc__
def json(
self,
path: PathOrPaths,
schema: Optional[Union[StructType, str]] = None,
primitivesAsString: Optional[Union[bool, str]] = None,
prefersDecimal: Optional[Union[bool, str]] = None,
allowComments: Optional[Union[bool, str]] = None,
allowUnquotedFieldNames: Optional[Union[bool, str]] = None,
allowSingleQuotes: Optional[Union[bool, str]] = None,
allowNumericLeadingZero: Optional[Union[bool, str]] = None,
allowBackslashEscapingAnyCharacter: Optional[Union[bool, str]] = None,
mode: Optional[str] = None,
columnNameOfCorruptRecord: Optional[str] = None,
dateFormat: Optional[str] = None,
timestampFormat: Optional[str] = None,
multiLine: Optional[Union[bool, str]] = None,
allowUnquotedControlChars: Optional[Union[bool, str]] = None,
lineSep: Optional[str] = None,
samplingRatio: Optional[Union[float, str]] = None,
dropFieldIfAllNull: Optional[Union[bool, str]] = None,
encoding: Optional[str] = None,
locale: Optional[str] = None,
pathGlobFilter: Optional[Union[bool, str]] = None,
recursiveFileLookup: Optional[Union[bool, str]] = None,
modifiedBefore: Optional[Union[bool, str]] = None,
modifiedAfter: Optional[Union[bool, str]] = None,
allowNonNumericNumbers: Optional[Union[bool, str]] = None,
) -> "DataFrame":
self._set_opts(
primitivesAsString=primitivesAsString,
prefersDecimal=prefersDecimal,
allowComments=allowComments,
allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes,
allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord,
dateFormat=dateFormat,
timestampFormat=timestampFormat,
multiLine=multiLine,
allowUnquotedControlChars=allowUnquotedControlChars,
lineSep=lineSep,
samplingRatio=samplingRatio,
dropFieldIfAllNull=dropFieldIfAllNull,
encoding=encoding,
locale=locale,
pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup,
modifiedBefore=modifiedBefore,
modifiedAfter=modifiedAfter,
allowNonNumericNumbers=allowNonNumericNumbers,
)
if isinstance(path, str):
path = [path]
return self.load(path=path, format="json", schema=schema)
json.__doc__ = PySparkDataFrameReader.json.__doc__
def parquet(self, *paths: str, **options: "OptionalPrimitiveType") -> "DataFrame":
mergeSchema = options.get("mergeSchema", None)
pathGlobFilter = options.get("pathGlobFilter", None)
modifiedBefore = options.get("modifiedBefore", None)
modifiedAfter = options.get("modifiedAfter", None)
recursiveFileLookup = options.get("recursiveFileLookup", None)
datetimeRebaseMode = options.get("datetimeRebaseMode", None)
int96RebaseMode = options.get("int96RebaseMode", None)
self._set_opts(
mergeSchema=mergeSchema,
pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup,
modifiedBefore=modifiedBefore,
modifiedAfter=modifiedAfter,
datetimeRebaseMode=datetimeRebaseMode,
int96RebaseMode=int96RebaseMode,
)
return self.load(path=list(paths), format="parquet")
parquet.__doc__ = PySparkDataFrameReader.parquet.__doc__
def text(
self,
paths: PathOrPaths,
wholetext: Optional[bool] = None,
lineSep: Optional[str] = None,
pathGlobFilter: Optional[Union[bool, str]] = None,
recursiveFileLookup: Optional[Union[bool, str]] = None,
modifiedBefore: Optional[Union[bool, str]] = None,
modifiedAfter: Optional[Union[bool, str]] = None,
) -> "DataFrame":
self._set_opts(
wholetext=wholetext,
lineSep=lineSep,
pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup,
modifiedBefore=modifiedBefore,
modifiedAfter=modifiedAfter,
)
if isinstance(paths, str):
paths = [paths]
return self.load(path=paths, format="text")
text.__doc__ = PySparkDataFrameReader.text.__doc__
def csv(
self,
path: PathOrPaths,
schema: Optional[Union[StructType, str]] = None,
sep: Optional[str] = None,
encoding: Optional[str] = None,
quote: Optional[str] = None,
escape: Optional[str] = None,
comment: Optional[str] = None,
header: Optional[Union[bool, str]] = None,
inferSchema: Optional[Union[bool, str]] = None,
ignoreLeadingWhiteSpace: Optional[Union[bool, str]] = None,
ignoreTrailingWhiteSpace: Optional[Union[bool, str]] = None,
nullValue: Optional[str] = None,
nanValue: Optional[str] = None,
positiveInf: Optional[str] = None,
negativeInf: Optional[str] = None,
dateFormat: Optional[str] = None,
timestampFormat: Optional[str] = None,
maxColumns: Optional[Union[int, str]] = None,
maxCharsPerColumn: Optional[Union[int, str]] = None,
maxMalformedLogPerPartition: Optional[Union[int, str]] = None,
mode: Optional[str] = None,
columnNameOfCorruptRecord: Optional[str] = None,
multiLine: Optional[Union[bool, str]] = None,
charToEscapeQuoteEscaping: Optional[str] = None,
samplingRatio: Optional[Union[float, str]] = None,
enforceSchema: Optional[Union[bool, str]] = None,
emptyValue: Optional[str] = None,
locale: Optional[str] = None,
lineSep: Optional[str] = None,
pathGlobFilter: Optional[Union[bool, str]] = None,
recursiveFileLookup: Optional[Union[bool, str]] = None,
modifiedBefore: Optional[Union[bool, str]] = None,
modifiedAfter: Optional[Union[bool, str]] = None,
unescapedQuoteHandling: Optional[str] = None,
) -> "DataFrame":
self._set_opts(
sep=sep,
encoding=encoding,
quote=quote,
escape=escape,
comment=comment,
header=header,
inferSchema=inferSchema,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
nullValue=nullValue,
nanValue=nanValue,
positiveInf=positiveInf,
negativeInf=negativeInf,
dateFormat=dateFormat,
timestampFormat=timestampFormat,
maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition,
mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord,
multiLine=multiLine,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
samplingRatio=samplingRatio,
enforceSchema=enforceSchema,
emptyValue=emptyValue,
locale=locale,
lineSep=lineSep,
pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup,
modifiedBefore=modifiedBefore,
modifiedAfter=modifiedAfter,
unescapedQuoteHandling=unescapedQuoteHandling,
)
if isinstance(path, str):
path = [path]
return self.load(path=path, format="csv", schema=schema)
csv.__doc__ = PySparkDataFrameReader.csv.__doc__
def orc(
self,
path: PathOrPaths,
mergeSchema: Optional[bool] = None,
pathGlobFilter: Optional[Union[bool, str]] = None,
recursiveFileLookup: Optional[Union[bool, str]] = None,
modifiedBefore: Optional[Union[bool, str]] = None,
modifiedAfter: Optional[Union[bool, str]] = None,
) -> "DataFrame":
self._set_opts(
mergeSchema=mergeSchema,
pathGlobFilter=pathGlobFilter,
modifiedBefore=modifiedBefore,
modifiedAfter=modifiedAfter,
recursiveFileLookup=recursiveFileLookup,
)
if isinstance(path, str):
path = [path]
return self.load(path=path, format="orc")
orc.__doc__ = PySparkDataFrameReader.orc.__doc__
@overload
def jdbc(
self, url: str, table: str, *, properties: Optional[Dict[str, str]] = None
) -> "DataFrame":
...
@overload
def jdbc(
self,
url: str,
table: str,
column: str,
lowerBound: Union[int, str],
upperBound: Union[int, str],
numPartitions: int,
*,
properties: Optional[Dict[str, str]] = None,
) -> "DataFrame":
...
@overload
def jdbc(
self,
url: str,
table: str,
*,
predicates: List[str],
properties: Optional[Dict[str, str]] = None,
) -> "DataFrame":
...
def jdbc(
self,
url: str,
table: str,
column: Optional[str] = None,
lowerBound: Optional[Union[int, str]] = None,
upperBound: Optional[Union[int, str]] = None,
numPartitions: Optional[int] = None,
predicates: Optional[List[str]] = None,
properties: Optional[Dict[str, str]] = None,
) -> "DataFrame":
if properties is None:
properties = dict()
self.format("jdbc")
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert (
numPartitions is not None
), "numPartitions can not be None when ``column`` is specified"
self.options(
partitionColumn=column,
lowerBound=lowerBound,
upperBound=upperBound,
numPartitions=numPartitions,
)
self.options(**properties)
self.options(url=url, dbtable=table)
return self.load()
else:
self.options(**properties)
self.options(url=url, dbtable=table)
if predicates is not None:
plan = DataSource(
format=self._format,
schema=self._schema,
options=self._options,
predicates=predicates,
)
return self._df(plan)
else:
return self.load()
jdbc.__doc__ = PySparkDataFrameReader.jdbc.__doc__
@property
def _jreader(self) -> None:
raise PySparkAttributeError(
error_class="JVM_ATTRIBUTE_NOT_SUPPORTED", message_parameters={"attr_name": "_jreader"}
)
DataFrameReader.__doc__ = PySparkDataFrameReader.__doc__
class DataFrameWriter(OptionUtils):
def __init__(self, plan: "LogicalPlan", session: "SparkSession"):
self._df: "LogicalPlan" = plan
self._spark: "SparkSession" = session
self._write: "WriteOperation" = WriteOperation(self._df)
def mode(self, saveMode: Optional[str]) -> "DataFrameWriter":
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._write.mode = saveMode
return self
mode.__doc__ = PySparkDataFrameWriter.mode.__doc__
def format(self, source: str) -> "DataFrameWriter":
self._write.source = source
return self
format.__doc__ = PySparkDataFrameWriter.format.__doc__
def option(self, key: str, value: "OptionalPrimitiveType") -> "DataFrameWriter":
self._write.options[key] = to_str(value)
return self
option.__doc__ = PySparkDataFrameWriter.option.__doc__
def options(self, **options: "OptionalPrimitiveType") -> "DataFrameWriter":
for k in options:
self._write.options[k] = to_str(options[k])
return self
options.__doc__ = PySparkDataFrameWriter.options.__doc__
@overload
def partitionBy(self, *cols: str) -> "DataFrameWriter":
...
@overload
def partitionBy(self, *cols: List[str]) -> "DataFrameWriter":
...
def partitionBy(self, *cols: Union[str, List[str]]) -> "DataFrameWriter":
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0] # type: ignore[assignment]
self._write.partitioning_cols = cast(List[str], cols)
return self
partitionBy.__doc__ = PySparkDataFrameWriter.partitionBy.__doc__
@overload
def bucketBy(self, numBuckets: int, col: str, *cols: str) -> "DataFrameWriter":
...
@overload
def bucketBy(self, numBuckets: int, col: TupleOrListOfString) -> "DataFrameWriter":
...
def bucketBy(
self, numBuckets: int, col: Union[str, TupleOrListOfString], *cols: Optional[str]
) -> "DataFrameWriter":
        if not isinstance(numBuckets, int):
            raise PySparkTypeError(
                error_class="NOT_INT",
                message_parameters={
                    "arg_name": "numBuckets",
                    "arg_type": type(numBuckets).__name__,
                },
            )
        if isinstance(col, (list, tuple)):
            if cols:
                raise PySparkValueError(
                    error_class="CANNOT_SET_TOGETHER",
                    message_parameters={
                        "arg_list": f"`col` of type {type(col).__name__} and `cols`",
                    },
                )
col, cols = col[0], col[1:] # type: ignore[assignment]
for c in cols:
if not isinstance(c, str):
raise PySparkTypeError(
error_class="NOT_LIST_OF_STR",
message_parameters={
"arg_name": "cols",
"arg_type": type(c).__name__,
},
)
if not isinstance(col, str):
raise PySparkTypeError(
error_class="NOT_LIST_OF_STR",
message_parameters={
"arg_name": "col",
"arg_type": type(col).__name__,
},
)
self._write.num_buckets = numBuckets
self._write.bucket_cols = cast(List[str], [col, *cols])
return self
bucketBy.__doc__ = PySparkDataFrameWriter.bucketBy.__doc__
@overload
def sortBy(self, col: str, *cols: str) -> "DataFrameWriter":
...
@overload
def sortBy(self, col: TupleOrListOfString) -> "DataFrameWriter":
...
def sortBy(
self, col: Union[str, TupleOrListOfString], *cols: Optional[str]
) -> "DataFrameWriter":
if isinstance(col, (list, tuple)):
if cols:
raise PySparkValueError(
error_class="CANNOT_SET_TOGETHER",
message_parameters={
"arg_list": f"`col` of type {type(col).__name__} and `cols`",
},
)
col, cols = col[0], col[1:] # type: ignore[assignment]
for c in cols:
if not isinstance(c, str):
raise PySparkTypeError(
error_class="NOT_LIST_OF_STR",
message_parameters={
"arg_name": "cols",
"arg_type": type(c).__name__,
},
)
if not isinstance(col, str):
raise PySparkTypeError(
error_class="NOT_LIST_OF_STR",
message_parameters={
"arg_name": "col",
"arg_type": type(col).__name__,
},
)
self._write.sort_cols = cast(List[str], [col, *cols])
return self
sortBy.__doc__ = PySparkDataFrameWriter.sortBy.__doc__
def save(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: Optional[str] = None,
partitionBy: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._write.path = path
self._spark.client.execute_command(self._write.command(self._spark.client))
save.__doc__ = PySparkDataFrameWriter.save.__doc__
def insertInto(self, tableName: str, overwrite: Optional[bool] = None) -> None:
if overwrite is not None:
self.mode("overwrite" if overwrite else "append")
self._write.table_name = tableName
self._write.table_save_method = "insert_into"
self._spark.client.execute_command(self._write.command(self._spark.client))
insertInto.__doc__ = PySparkDataFrameWriter.insertInto.__doc__
def saveAsTable(
self,
name: str,
format: Optional[str] = None,
mode: Optional[str] = None,
partitionBy: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._write.table_name = name
self._write.table_save_method = "save_as_table"
self._spark.client.execute_command(self._write.command(self._spark.client))
saveAsTable.__doc__ = PySparkDataFrameWriter.saveAsTable.__doc__
def json(
self,
path: str,
mode: Optional[str] = None,
compression: Optional[str] = None,
dateFormat: Optional[str] = None,
timestampFormat: Optional[str] = None,
lineSep: Optional[str] = None,
encoding: Optional[str] = None,
ignoreNullFields: Optional[Union[bool, str]] = None,
) -> None:
self.mode(mode)
self._set_opts(
compression=compression,
dateFormat=dateFormat,
timestampFormat=timestampFormat,
lineSep=lineSep,
encoding=encoding,
ignoreNullFields=ignoreNullFields,
)
self.format("json").save(path)
json.__doc__ = PySparkDataFrameWriter.json.__doc__
def parquet(
self,
path: str,
mode: Optional[str] = None,
partitionBy: Optional[Union[str, List[str]]] = None,
compression: Optional[str] = None,
) -> None:
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self.option("compression", compression)
self.format("parquet").save(path)
parquet.__doc__ = PySparkDataFrameWriter.parquet.__doc__
def text(
self, path: str, compression: Optional[str] = None, lineSep: Optional[str] = None
) -> None:
self._set_opts(compression=compression, lineSep=lineSep)
self.format("text").save(path)
text.__doc__ = PySparkDataFrameWriter.text.__doc__
def csv(
self,
path: str,
mode: Optional[str] = None,
compression: Optional[str] = None,
sep: Optional[str] = None,
quote: Optional[str] = None,
escape: Optional[str] = None,
header: Optional[Union[bool, str]] = None,
nullValue: Optional[str] = None,
escapeQuotes: Optional[Union[bool, str]] = None,
quoteAll: Optional[Union[bool, str]] = None,
dateFormat: Optional[str] = None,
timestampFormat: Optional[str] = None,
ignoreLeadingWhiteSpace: Optional[Union[bool, str]] = None,
ignoreTrailingWhiteSpace: Optional[Union[bool, str]] = None,
charToEscapeQuoteEscaping: Optional[str] = None,
encoding: Optional[str] = None,
emptyValue: Optional[str] = None,
lineSep: Optional[str] = None,
) -> None:
self.mode(mode)
self._set_opts(
compression=compression,
sep=sep,
quote=quote,
escape=escape,
header=header,
nullValue=nullValue,
escapeQuotes=escapeQuotes,
quoteAll=quoteAll,
dateFormat=dateFormat,
timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
encoding=encoding,
emptyValue=emptyValue,
lineSep=lineSep,
)
self.format("csv").save(path)
csv.__doc__ = PySparkDataFrameWriter.csv.__doc__
def orc(
self,
path: str,
mode: Optional[str] = None,
partitionBy: Optional[Union[str, List[str]]] = None,
compression: Optional[str] = None,
) -> None:
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self.format("orc").save(path)
orc.__doc__ = PySparkDataFrameWriter.orc.__doc__
def jdbc(
self,
url: str,
table: str,
mode: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
) -> None:
if properties is None:
properties = dict()
self.format("jdbc").mode(mode).options(**properties).options(url=url, dbtable=table).save()
jdbc.__doc__ = PySparkDataFrameWriter.jdbc.__doc__
class DataFrameWriterV2(OptionUtils):
def __init__(self, plan: "LogicalPlan", session: "SparkSession", table: str):
self._df: "LogicalPlan" = plan
self._spark: "SparkSession" = session
self._table_name: str = table
self._write: "WriteOperationV2" = WriteOperationV2(self._df, self._table_name)
def using(self, provider: str) -> "DataFrameWriterV2":
self._write.provider = provider
return self
using.__doc__ = PySparkDataFrameWriterV2.using.__doc__
def option(self, key: str, value: "OptionalPrimitiveType") -> "DataFrameWriterV2":
self._write.options[key] = to_str(value)
return self
option.__doc__ = PySparkDataFrameWriterV2.option.__doc__
def options(self, **options: "OptionalPrimitiveType") -> "DataFrameWriterV2":
for k in options:
self._write.options[k] = to_str(options[k])
return self
options.__doc__ = PySparkDataFrameWriterV2.options.__doc__
def tableProperty(self, property: str, value: str) -> "DataFrameWriterV2":
self._write.table_properties[property] = value
return self
tableProperty.__doc__ = PySparkDataFrameWriterV2.tableProperty.__doc__
def partitionedBy(self, col: "ColumnOrName", *cols: "ColumnOrName") -> "DataFrameWriterV2":
self._write.partitioning_columns = [col]
self._write.partitioning_columns.extend(cols)
return self
partitionedBy.__doc__ = PySparkDataFrameWriterV2.partitionedBy.__doc__
def create(self) -> None:
self._write.mode = "create"
self._spark.client.execute_command(self._write.command(self._spark.client))
create.__doc__ = PySparkDataFrameWriterV2.create.__doc__
def replace(self) -> None:
self._write.mode = "replace"
self._spark.client.execute_command(self._write.command(self._spark.client))
replace.__doc__ = PySparkDataFrameWriterV2.replace.__doc__
def createOrReplace(self) -> None:
self._write.mode = "create_or_replace"
self._spark.client.execute_command(self._write.command(self._spark.client))
createOrReplace.__doc__ = PySparkDataFrameWriterV2.createOrReplace.__doc__
def append(self) -> None:
self._write.mode = "append"
self._spark.client.execute_command(self._write.command(self._spark.client))
append.__doc__ = PySparkDataFrameWriterV2.append.__doc__
def overwrite(self, condition: "ColumnOrName") -> None:
self._write.mode = "overwrite"
self._write.overwrite_condition = condition
self._spark.client.execute_command(self._write.command(self._spark.client))
overwrite.__doc__ = PySparkDataFrameWriterV2.overwrite.__doc__
def overwritePartitions(self) -> None:
self._write.mode = "overwrite_partitions"
self._spark.client.execute_command(self._write.command(self._spark.client))
overwritePartitions.__doc__ = PySparkDataFrameWriterV2.overwritePartitions.__doc__
def _test() -> None:
import sys
import doctest
from pyspark.sql import SparkSession as PySparkSession
import pyspark.sql.connect.readwriter
globs = pyspark.sql.connect.readwriter.__dict__.copy()
globs["spark"] = (
PySparkSession.builder.appName("sql.connect.readwriter tests")
.remote("local[4]")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.connect.readwriter,
globs=globs,
optionflags=doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL,
)
globs["spark"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
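# Illustrative usage sketch (not part of the original module; the session,
# paths, table name, and schema below are hypothetical):
#
#     df = spark.read.schema("age INT, name STRING").option("header", True).csv("/tmp/people")
#     df.write.mode("overwrite").partitionBy("age").parquet("/tmp/people_parquet")
#     df.writeTo("catalog.db.people").using("parquet").createOrReplace()
#
# For reads, load()/csv()/... only build a lazy plan on the client; for writes,
# save(), insertInto(), and the V2 actions (create(), append(), ...) send the
# write command to the Spark Connect server for execution.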
|
25316fbf72848737ee365bf235321f2025389076
|
db43055180800e54526affb7941fc77e0226c4b3
|
/city_scrapers/mixins/__init__.py
|
254a84cf7bc620c6d64e9b9bf021ccffaf3e87a5
|
[
"MIT"
] |
permissive
|
City-Bureau/city-scrapers
|
53724d4e973d71ec331cd9749e350c460aa6fb9d
|
611fce6a2705446e25a2fc33e32090a571eb35d1
|
refs/heads/main
| 2023-08-15T01:06:49.336847
| 2023-07-01T20:39:28
| 2023-07-01T20:39:28
| 97,890,025
| 308
| 366
|
MIT
| 2023-09-07T07:30:12
| 2017-07-21T00:45:13
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
__init__.py
|
from .chi_mayors_advisory_councils import ChiMayorsAdvisoryCouncilsMixin # noqa
from .chi_rogers_park_ssa import ChiRogersParkSsaMixin # noqa
|
00da1021cd1fe8892fac35d533601a058ce1a710
|
9f84d91a8ae3df53b07fe3267992fba00a99ac9e
|
/test/explain/algorithm/test_explain_algorithm_utils.py
|
71a3768892348981b5fb5601f2baa198d4a4a400
|
[
"MIT"
] |
permissive
|
pyg-team/pytorch_geometric
|
ebea601eae228f3905465b5c2349d3fb3bb5cb26
|
a52af694b8ce6a80811e20966fe6d08a3e7511fe
|
refs/heads/master
| 2023-08-31T04:13:40.943308
| 2023-08-30T12:48:42
| 2023-08-30T12:48:42
| 106,024,057
| 6,775
| 1,563
|
MIT
| 2023-09-14T17:10:18
| 2017-10-06T16:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,674
|
py
|
test_explain_algorithm_utils.py
|
import torch
from torch_geometric.explain.algorithm.utils import (
clear_masks,
set_hetero_masks,
)
from torch_geometric.nn import GCNConv, HeteroConv, SAGEConv, to_hetero
class HeteroModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = HeteroConv({
('paper', 'to', 'paper'):
GCNConv(-1, 32),
('author', 'to', 'paper'):
SAGEConv((-1, -1), 32),
('paper', 'to', 'author'):
SAGEConv((-1, -1), 32),
})
self.conv2 = HeteroConv({
('paper', 'to', 'paper'):
GCNConv(-1, 32),
('author', 'to', 'paper'):
SAGEConv((-1, -1), 32),
('paper', 'to', 'author'):
SAGEConv((-1, -1), 32),
})
class GraphSAGE(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = SAGEConv((-1, -1), 32)
self.conv2 = SAGEConv((-1, -1), 32)
def forward(self, x, edge_index):
x = self.conv1(x, edge_index).relu()
return self.conv2(x, edge_index)
def test_set_clear_mask(hetero_data):
edge_mask_dict = {
('paper', 'to', 'paper'): torch.ones(200),
('author', 'to', 'paper'): torch.ones(100),
('paper', 'to', 'author'): torch.ones(100),
}
model = HeteroModel()
set_hetero_masks(model, edge_mask_dict, hetero_data.edge_index_dict)
for edge_type in hetero_data.edge_types:
# Check that masks are correctly set:
str_edge_type = '__'.join(edge_type)
assert torch.allclose(model.conv1.convs[str_edge_type]._edge_mask,
edge_mask_dict[edge_type])
assert model.conv1.convs[str_edge_type].explain
clear_masks(model)
for edge_type in hetero_data.edge_types:
str_edge_type = '__'.join(edge_type)
assert model.conv1.convs[str_edge_type]._edge_mask is None
assert not model.conv1.convs[str_edge_type].explain
model = to_hetero(GraphSAGE(), hetero_data.metadata(), debug=False)
set_hetero_masks(model, edge_mask_dict, hetero_data.edge_index_dict)
for edge_type in hetero_data.edge_types:
# Check that masks are correctly set:
str_edge_type = '__'.join(edge_type)
assert torch.allclose(model.conv1[str_edge_type]._edge_mask,
edge_mask_dict[edge_type])
assert model.conv1[str_edge_type].explain
clear_masks(model)
for edge_type in hetero_data.edge_types:
str_edge_type = '__'.join(edge_type)
assert model.conv1[str_edge_type]._edge_mask is None
assert not model.conv1[str_edge_type].explain
|
0e1198d2fd96e53bea0068e6059f7ac39c0041b2
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/analyses/decompiler/peephole_optimizations/bitwise_or_to_logical_or.py
|
a33c9360fe1975e3c4b6c0f34c3391955da947ef
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
bitwise_or_to_logical_or.py
|
from ailment.expression import BinaryOp, Const
from .base import PeepholeOptimizationExprBase
class BitwiseOrToLogicalOr(PeepholeOptimizationExprBase):
"""
Perform the following two simplifications:
1. (a | b) == 0 ==> (a == 0) && (b == 0)
2. (a | b) != 0 ==> (a != 0) || (b != 0)
"""
__slots__ = ()
NAME = "(a | b) == 0 => (a == 0) && (b == 0) ; (a | b) != 0 => (a != 0) || (b != 0)"
    expr_classes = (BinaryOp,)  # only binary expressions are examined
def optimize(self, expr: BinaryOp):
if (
expr.op in {"CmpEQ", "CmpNE"}
and isinstance(expr.operands[0], BinaryOp)
and expr.operands[0].op == "Or"
and isinstance(expr.operands[1], Const)
and expr.operands[1].value == 0
):
inner = expr.operands[0]
new_left = BinaryOp(inner.idx, expr.op, (inner.operands[0], expr.operands[1]), False, bits=1, **inner.tags)
new_right = BinaryOp(inner.idx, expr.op, (inner.operands[1], expr.operands[1]), False, bits=1, **inner.tags)
op = "LogicalOr" if expr.op == "CmpNE" else "LogicalAnd"
new_expr = BinaryOp(expr.idx, op, (new_left, new_right), False, bits=expr.bits, **expr.tags)
return new_expr
return expr
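# Worked example (illustrative; ``a`` and ``b`` stand for arbitrary 64-bit AIL
# expressions, and the constructor calls mirror the ones used in optimize()
# above -- the exact Const signature is an assumption):
#
#     zero = Const(None, None, 0, 64)
#     disjunction = BinaryOp(None, "Or", (a, b), False, bits=64)
#     expr = BinaryOp(None, "CmpEQ", (disjunction, zero), False, bits=1)
#     # optimize(expr) returns the equivalent of:
#     #   (a == 0) LogicalAnd (b == 0)
#     # while a "CmpNE" comparison would instead produce
#     #   (a != 0) LogicalOr (b != 0)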
|
65da192e19f02a88e259c1ee9535631dc464d85e
|
e986b7650d2317f740d65f44b19e663cb646d85e
|
/jdaviz/configs/default/plugins/viewer_creator/__init__.py
|
b1c67ad265fcf655f59d9f148d0ccfef3116a555
|
[
"BSD-3-Clause"
] |
permissive
|
spacetelescope/jdaviz
|
a223230d2296f7fdee17a43ae1a4bee45452ec13
|
17a864ed7d64cece18fbc29f3561c137e6bf0942
|
refs/heads/main
| 2023-08-17T05:59:55.109052
| 2023-08-15T19:46:49
| 2023-08-15T19:46:49
| 185,452,341
| 105
| 70
|
BSD-3-Clause
| 2023-09-14T15:20:16
| 2019-05-07T17:54:06
|
Python
|
UTF-8
|
Python
| false
| false
| 38
|
py
|
__init__.py
|
from .viewer_creator import * # noqa
|
d0e16d44b1405228dcbc68610d12238172e9da05
|
2ad93a1cf25a580fe980482d2d17a657de3b2523
|
/django-stubs/core/serializers/python.pyi
|
c25c910edb8da8bafb0525eaa1fce04561534d71
|
[
"MIT"
] |
permissive
|
typeddjango/django-stubs
|
f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32
|
0117348c3c7713f25f96b46e53ebdeed7bdba544
|
refs/heads/master
| 2023-08-25T19:42:52.707151
| 2023-08-23T15:13:25
| 2023-08-23T15:13:25
| 142,779,680
| 1,133
| 376
|
MIT
| 2023-09-13T19:05:06
| 2018-07-29T17:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 490
|
pyi
|
python.pyi
|
from collections.abc import Iterator
from typing import Any
from django.core.serializers import base
from django.core.serializers.base import DeserializedObject
from django.db.models.base import Model
class Serializer(base.Serializer):
objects: list[Any]
def get_dump_object(self, obj: Model) -> dict[str, Any]: ...
def Deserializer(
object_list: list[dict[str, Any]], *, using: str = ..., ignorenonexistent: bool = ..., **options: Any
) -> Iterator[DeserializedObject]: ...
|
19f3827bd92739fb037af6ea045fb4908abd68f4
|
c6a101547c2b7f36fe83a725974a8a7f02cf176d
|
/data_structures/array/three_largest_elements.py
|
5a8cd9e473ec587efa782622919073019921689a
|
[
"MIT"
] |
permissive
|
prabhupant/python-ds
|
737cc35574de5c2ece0f0813cf00775324a8dbe7
|
f7d6d78fedaf84b7527965bb1798b7a8da989474
|
refs/heads/master
| 2023-08-22T05:04:22.937675
| 2022-10-04T01:29:39
| 2022-10-04T01:29:39
| 199,366,418
| 2,325
| 704
|
MIT
| 2022-10-10T13:01:10
| 2019-07-29T02:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
three_largest_elements.py
|
import sys
def three_largest(arr):
    # Track the three largest values seen so far in a single pass.
    first = second = third = -sys.maxsize
    for i in arr:
        if i > first:
            # New maximum: shift the previous top two down.
            third = second
            second = first
            first = i
        elif i > second:
            # Between first and second: shift second down.
            third = second
            second = i
        elif i > third:
            third = i
    print(first, second, third)
arr = [10,45,3,7,4,6,8,9,4,6,4,23,45,56,47,25,34,67,634]
three_largest(arr)
|
b4c18de1f37bb4f1e6b2181d6235e2963ad8c964
|
aaf572d39319e4400ae0c2655ea2cfa52845a429
|
/trio/_tests/test_highlevel_generic.py
|
38bcedee25a77408c3f1ae307408032097d25858
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
python-trio/trio
|
04cbde76313c26abdea15317cea3980dd5bc475b
|
e97bcb61b8b02523c82435b5408ff46efca5dfc3
|
refs/heads/master
| 2023-08-30T23:22:05.599646
| 2023-08-30T06:11:28
| 2023-08-30T06:11:28
| 79,083,614
| 5,651
| 369
|
NOASSERTION
| 2023-09-13T14:14:21
| 2017-01-16T04:45:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
test_highlevel_generic.py
|
import attr
import pytest
from .._highlevel_generic import StapledStream
from ..abc import ReceiveStream, SendStream
@attr.s
class RecordSendStream(SendStream):
record = attr.ib(factory=list)
async def send_all(self, data):
self.record.append(("send_all", data))
async def wait_send_all_might_not_block(self):
self.record.append("wait_send_all_might_not_block")
async def aclose(self):
self.record.append("aclose")
@attr.s
class RecordReceiveStream(ReceiveStream):
record = attr.ib(factory=list)
async def receive_some(self, max_bytes=None):
self.record.append(("receive_some", max_bytes))
async def aclose(self):
self.record.append("aclose")
async def test_StapledStream():
send_stream = RecordSendStream()
receive_stream = RecordReceiveStream()
stapled = StapledStream(send_stream, receive_stream)
assert stapled.send_stream is send_stream
assert stapled.receive_stream is receive_stream
await stapled.send_all(b"foo")
await stapled.wait_send_all_might_not_block()
assert send_stream.record == [
("send_all", b"foo"),
"wait_send_all_might_not_block",
]
send_stream.record.clear()
await stapled.send_eof()
assert send_stream.record == ["aclose"]
send_stream.record.clear()
async def fake_send_eof():
send_stream.record.append("send_eof")
send_stream.send_eof = fake_send_eof
await stapled.send_eof()
assert send_stream.record == ["send_eof"]
send_stream.record.clear()
assert receive_stream.record == []
await stapled.receive_some(1234)
assert receive_stream.record == [("receive_some", 1234)]
assert send_stream.record == []
receive_stream.record.clear()
await stapled.aclose()
assert receive_stream.record == ["aclose"]
assert send_stream.record == ["aclose"]
async def test_StapledStream_with_erroring_close():
# Make sure that if one of the aclose methods errors out, then the other
# one still gets called.
class BrokenSendStream(RecordSendStream):
async def aclose(self):
await super().aclose()
raise ValueError
class BrokenReceiveStream(RecordReceiveStream):
async def aclose(self):
await super().aclose()
raise ValueError
stapled = StapledStream(BrokenSendStream(), BrokenReceiveStream())
with pytest.raises(ValueError) as excinfo:
await stapled.aclose()
assert isinstance(excinfo.value.__context__, ValueError)
assert stapled.send_stream.record == ["aclose"]
assert stapled.receive_stream.record == ["aclose"]
|
330b663983381535d7b55d7fadb2f20971877213
|
8355bc4e1ad1a863124c1d80d4a00b28ef587b48
|
/src/probnum/randvars/_scipy_stats.py
|
a8ae6359ddd7ef44a940e63059f5463ffc90dc1c
|
[
"MIT"
] |
permissive
|
probabilistic-numerics/probnum
|
af62f04253a08da71174e5c1b7d733deb1914eee
|
af410278783069542610d16b10ba12d2940a05a6
|
refs/heads/main
| 2023-08-31T05:12:08.877238
| 2023-06-19T20:34:15
| 2023-06-19T20:34:15
| 218,856,084
| 384
| 56
|
MIT
| 2023-09-10T18:52:24
| 2019-10-31T20:29:11
|
Python
|
UTF-8
|
Python
| false
| false
| 8,125
|
py
|
_scipy_stats.py
|
"""Wrapper classes for SciPy random variables."""
from typing import Any, Dict, Union
import numpy as np
import scipy.stats
from probnum import utils as _utils
from . import _normal, _random_variable
ValueType = Union[np.generic, np.ndarray]
# pylint: disable=protected-access
class _SciPyRandomVariableMixin:
"""Mix-in class for SciPy random variable wrappers."""
@property
def scipy_rv(self):
"""SciPy random variable."""
return self._scipy_rv
class WrappedSciPyRandomVariable(
_SciPyRandomVariableMixin, _random_variable.RandomVariable[ValueType]
):
"""Wrapper for SciPy random variable objects.
Parameters
----------
scipy_rv
SciPy random variable.
"""
def __init__(
self,
scipy_rv: Union[
scipy.stats._distn_infrastructure.rv_frozen,
scipy.stats._multivariate.multi_rv_frozen,
],
):
self._scipy_rv = scipy_rv
super().__init__(**_rv_init_kwargs_from_scipy_rv(scipy_rv))
class WrappedSciPyDiscreteRandomVariable(
_SciPyRandomVariableMixin, _random_variable.DiscreteRandomVariable[ValueType]
):
"""Wrapper for discrete SciPy random variable objects.
Parameters
----------
scipy_rv
Discrete SciPy random variable.
"""
def __init__(
self,
scipy_rv: Union[
scipy.stats._distn_infrastructure.rv_frozen,
scipy.stats._multivariate.multi_rv_frozen,
],
):
if isinstance(scipy_rv, scipy.stats._distn_infrastructure.rv_frozen):
if not isinstance(scipy_rv.dist, scipy.stats.rv_discrete):
raise ValueError("The given SciPy random variable is not discrete.")
self._scipy_rv = scipy_rv
rv_kwargs = _rv_init_kwargs_from_scipy_rv(scipy_rv)
rv_kwargs["pmf"] = _return_numpy(
getattr(scipy_rv, "pmf", None),
dtype=np.float_,
)
rv_kwargs["logpmf"] = _return_numpy(
getattr(scipy_rv, "logpmf", None),
dtype=np.float_,
)
super().__init__(**rv_kwargs)
class WrappedSciPyContinuousRandomVariable(
_SciPyRandomVariableMixin, _random_variable.ContinuousRandomVariable[ValueType]
):
"""Wrapper for continuous SciPy random variable objects.
Parameters
----------
scipy_rv
Continuous SciPy random variable.
"""
def __init__(
self,
scipy_rv: Union[
scipy.stats._distn_infrastructure.rv_frozen,
scipy.stats._multivariate.multi_rv_frozen,
],
):
if isinstance(scipy_rv, scipy.stats._distn_infrastructure.rv_frozen):
if not isinstance(scipy_rv.dist, scipy.stats.rv_continuous):
raise ValueError("The given SciPy random variable is not continuous.")
self._scipy_rv = scipy_rv
rv_kwargs = _rv_init_kwargs_from_scipy_rv(scipy_rv)
rv_kwargs["pdf"] = _return_numpy(
getattr(scipy_rv, "pdf", None),
dtype=np.float_,
)
rv_kwargs["logpdf"] = _return_numpy(
getattr(scipy_rv, "logpdf", None),
dtype=np.float_,
)
super().__init__(**rv_kwargs)
def wrap_scipy_rv(
scipy_rv: Union[
scipy.stats._distn_infrastructure.rv_frozen,
scipy.stats._multivariate.multi_rv_frozen,
]
) -> _random_variable.RandomVariable:
"""Transform SciPy distributions to ProbNum :class:`RandomVariable`s.
Parameters
----------
scipy_rv :
SciPy random variable.
"""
# pylint: disable=too-many-return-statements
# Random variables with concrete implementations in ProbNum
if isinstance(scipy_rv, scipy.stats._distn_infrastructure.rv_frozen):
# Univariate distributions
if scipy_rv.dist.name == "norm":
# Normal distribution
return _normal.Normal(
mean=scipy_rv.mean(),
cov=scipy_rv.var(),
)
elif isinstance(scipy_rv, scipy.stats._multivariate.multi_rv_frozen):
# Multivariate distributions
if scipy_rv.__class__.__name__ == "multivariate_normal_frozen":
# Multivariate normal distribution
try:
cov_explicit = scipy_rv.cov
except AttributeError:
                # As of SciPy 1.10.0, multivariate normal rvs carry a Covariance object.
                # See https://scipy.github.io/devdocs/release.1.10.0.html#scipy-stats-improvements
cov_explicit = scipy_rv.cov_object.covariance
return _normal.Normal(
mean=scipy_rv.mean,
cov=cov_explicit,
)
# Generic random variables
if isinstance(scipy_rv, scipy.stats._distn_infrastructure.rv_frozen):
if isinstance(scipy_rv.dist, scipy.stats.rv_discrete):
return WrappedSciPyDiscreteRandomVariable(scipy_rv)
if isinstance(scipy_rv.dist, scipy.stats.rv_continuous):
return WrappedSciPyContinuousRandomVariable(scipy_rv)
assert isinstance(scipy_rv.dist, scipy.stats.rv_generic)
return WrappedSciPyRandomVariable(scipy_rv)
if isinstance(scipy_rv, scipy.stats._multivariate.multi_rv_frozen):
has_pmf = hasattr(scipy_rv, "pmf") or hasattr(scipy_rv, "logpmf")
has_pdf = hasattr(scipy_rv, "pdf") or hasattr(scipy_rv, "logpdf")
if has_pdf and has_pmf:
return WrappedSciPyRandomVariable(scipy_rv)
if has_pmf:
return WrappedSciPyDiscreteRandomVariable(scipy_rv)
if has_pdf:
return WrappedSciPyContinuousRandomVariable(scipy_rv)
assert not has_pmf and not has_pdf
return WrappedSciPyRandomVariable(scipy_rv)
raise ValueError(f"Unsupported argument type {type(scipy_rv)}")
def _rv_init_kwargs_from_scipy_rv(
scipy_rv: Union[
scipy.stats._distn_infrastructure.rv_frozen,
scipy.stats._multivariate.multi_rv_frozen,
],
) -> Dict[str, Any]:
"""Create dictionary of random variable properties from a Scipy random variable.
Parameters
----------
scipy_rv
SciPy random variable.
"""
# Infer shape and dtype
sample = _return_numpy(scipy_rv.rvs)()
shape = sample.shape
dtype = sample.dtype
median_dtype = np.promote_types(dtype, np.float_)
moments_dtype = np.promote_types(dtype, np.float_)
# Support of univariate random variables
if isinstance(scipy_rv, scipy.stats._distn_infrastructure.rv_frozen):
def in_support(x):
low, high = scipy_rv.support()
return bool(low <= x <= high)
else:
in_support = None
def sample_from_scipy_rv(rng, size):
return scipy_rv.rvs(size=size, random_state=rng)
if hasattr(scipy_rv, "rvs"):
sample_wrapper = sample_from_scipy_rv
else:
sample_wrapper = None
return {
"shape": shape,
"dtype": dtype,
"sample": _return_numpy(sample_wrapper, dtype),
"in_support": in_support,
"cdf": _return_numpy(getattr(scipy_rv, "cdf", None), np.float_),
"logcdf": _return_numpy(getattr(scipy_rv, "logcdf", None), np.float_),
"quantile": _return_numpy(getattr(scipy_rv, "ppf", None), dtype),
"mode": None, # not offered by scipy.stats
"median": _return_numpy(getattr(scipy_rv, "median", None), median_dtype),
"mean": _return_numpy(getattr(scipy_rv, "mean", None), moments_dtype),
"cov": _return_numpy(getattr(scipy_rv, "cov", None), moments_dtype),
"var": _return_numpy(getattr(scipy_rv, "var", None), moments_dtype),
"std": _return_numpy(getattr(scipy_rv, "std", None), moments_dtype),
"entropy": _return_numpy(getattr(scipy_rv, "entropy", None), np.float_),
}
def _return_numpy(fun, dtype=None):
if fun is None:
return None
def _wrapper(*args, **kwargs):
res = fun(*args, **kwargs)
if np.isscalar(res):
return _utils.as_numpy_scalar(res, dtype=dtype)
return np.asarray(res, dtype=dtype)
return _wrapper
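# Example usage (a minimal illustrative sketch; ``scipy.stats.norm`` and
# ``scipy.stats.poisson`` are ordinary SciPy distributions chosen here only
# for demonstration):
#
# >>> import scipy.stats
# >>> wrap_scipy_rv(scipy.stats.norm(loc=0.0, scale=1.0))
# ...  # frozen normal distributions map to the concrete _normal.Normal class
# >>> wrap_scipy_rv(scipy.stats.poisson(mu=3.0))
# ...  # other univariate discrete distributions are wrapped generically as
# ...  # WrappedSciPyDiscreteRandomVariable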
|
46d6d111e393262bda52b2f2b752ff11a93bd93f
|
7378aaee27ef676db95dce7702c48f8643c63313
|
/grow/routing/path_filter.py
|
d04f88ab6007deebb9c9e7eed7e72b073e1b1d4d
|
[
"MIT"
] |
permissive
|
grow/grow
|
323fa25c7690643bf170cc4558fffdfbd406ac76
|
17471c436621ebfd978b51225fa4de05367a53e1
|
refs/heads/main
| 2023-06-15T09:51:08.288251
| 2022-07-21T16:19:33
| 2022-07-21T16:19:33
| 12,899,663
| 352
| 56
|
MIT
| 2023-02-08T02:35:36
| 2013-09-17T15:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
path_filter.py
|
"""Filter paths to control ignores and includes by path."""
import re
DEFAULT_IGNORED = [
re.compile(r'^.*/\.[^/]*$'), # Dot files.
]
DEFAULT_INCLUDED = []
class PathFilter:
"""Filter for testing paths against a set of filter criteria."""
def __init__(self, ignored=None, included=None):
self._ignored = []
self._included = []
if ignored:
for item in ignored:
self.add_ignored(item)
if included:
for item in included:
self.add_included(item)
def __repr__(self):
return '<PathFilter ignored={} included={}>'.format(
[i.pattern for i in self.ignored], [i.pattern for i in self.included])
def _is_ignored(self, path):
"""Test for ignored pattern match."""
for pattern in self.ignored:
if pattern.search(path):
return True
return False
def _is_included(self, path):
"""Test for include pattern match."""
for pattern in self.included:
if pattern.search(path):
return True
return False
@property
def ignored(self):
"""All ignored patterns."""
if not self._ignored and DEFAULT_IGNORED:
self._ignored = DEFAULT_IGNORED
for item in self._ignored:
yield item
@property
def included(self):
"""All included patterns."""
if not self._included and DEFAULT_INCLUDED:
self._included = DEFAULT_INCLUDED
for item in self._included:
yield item
def add_ignored(self, raw_pattern):
"""Add a new ignored pattern."""
self._ignored.append(re.compile(raw_pattern))
def add_included(self, raw_pattern):
"""Add a new included pattern."""
self._included.append(re.compile(raw_pattern))
def export(self):
"""Export for serialization."""
return {
'ignored': [item.pattern for item in self._ignored],
'included': [item.pattern for item in self._included],
}
def is_valid(self, path):
"""Tests if the path is valid according to the known filters."""
if self._is_ignored(path):
            # Includes override ignores.
return self._is_included(path)
return True
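# Example (illustrative only; the regex patterns below are made up for this
# sketch):
#
# >>> pf = PathFilter(ignored=[r'\.bak$'], included=[r'keep\.bak$'])
# >>> pf.is_valid('/site/page.html')   # not ignored -> valid
# True
# >>> pf.is_valid('/site/old.bak')     # ignored, not included -> invalid
# False
# >>> pf.is_valid('/site/keep.bak')    # ignored, but includes override ignores
# True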
|
bc8645631705cfe0e15e5e4d190308b67895942f
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stdlib/trace.pyi
|
1f0de1d4d96442dc5101ceb4a1fbebc5f9734af4
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 2,697
|
pyi
|
trace.pyi
|
import sys
import types
from _typeshed import StrPath, TraceFunction
from collections.abc import Callable, Mapping, Sequence
from typing import Any, TypeVar
from typing_extensions import ParamSpec, TypeAlias
__all__ = ["Trace", "CoverageResults"]
_T = TypeVar("_T")
_P = ParamSpec("_P")
_FileModuleFunction: TypeAlias = tuple[str, str | None, str]
class CoverageResults:
def __init__(
self,
counts: dict[tuple[str, int], int] | None = ...,
calledfuncs: dict[_FileModuleFunction, int] | None = ...,
infile: StrPath | None = ...,
callers: dict[tuple[_FileModuleFunction, _FileModuleFunction], int] | None = ...,
outfile: StrPath | None = ...,
) -> None: ... # undocumented
def update(self, other: CoverageResults) -> None: ...
def write_results(self, show_missing: bool = ..., summary: bool = ..., coverdir: StrPath | None = ...) -> None: ...
def write_results_file(
self, path: StrPath, lines: Sequence[str], lnotab: Any, lines_hit: Mapping[int, int], encoding: str | None = ...
) -> tuple[int, int]: ...
def is_ignored_filename(self, filename: str) -> bool: ... # undocumented
class Trace:
def __init__(
self,
count: int = ...,
trace: int = ...,
countfuncs: int = ...,
countcallers: int = ...,
ignoremods: Sequence[str] = ...,
ignoredirs: Sequence[str] = ...,
infile: StrPath | None = ...,
outfile: StrPath | None = ...,
timing: bool = ...,
) -> None: ...
def run(self, cmd: str | types.CodeType) -> None: ...
def runctx(
self, cmd: str | types.CodeType, globals: Mapping[str, Any] | None = ..., locals: Mapping[str, Any] | None = ...
) -> None: ...
if sys.version_info >= (3, 9):
def runfunc(self, __func: Callable[_P, _T], *args: _P.args, **kw: _P.kwargs) -> _T: ...
else:
def runfunc(self, func: Callable[_P, _T], *args: _P.args, **kw: _P.kwargs) -> _T: ...
def file_module_function_of(self, frame: types.FrameType) -> _FileModuleFunction: ...
def globaltrace_trackcallers(self, frame: types.FrameType, why: str, arg: Any) -> None: ...
def globaltrace_countfuncs(self, frame: types.FrameType, why: str, arg: Any) -> None: ...
def globaltrace_lt(self, frame: types.FrameType, why: str, arg: Any) -> None: ...
def localtrace_trace_and_count(self, frame: types.FrameType, why: str, arg: Any) -> TraceFunction: ...
def localtrace_trace(self, frame: types.FrameType, why: str, arg: Any) -> TraceFunction: ...
def localtrace_count(self, frame: types.FrameType, why: str, arg: Any) -> TraceFunction: ...
def results(self) -> CoverageResults: ...
|
27f825d55b48ea0303c4f9521bab27902150aca1
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/examples/inverse/dics_source_power.py
|
68925202b175128bb18602e55934aee23b406b7c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,461
|
py
|
dics_source_power.py
|
"""
.. _ex-inverse-source-power:
==========================================
Compute source power using DICS beamformer
==========================================
Compute a Dynamic Imaging of Coherent Sources (DICS) :footcite:`GrossEtAl2001`
filter from single-trial activity to estimate source power across a frequency
band. This example demonstrates how to source localize the event-related
synchronization (ERS) of beta band activity in the
:ref:`somato dataset <somato-dataset>`.
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Roman Goj <roman.goj@gmail.com>
# Denis Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
# %%
import numpy as np
import mne
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
print(__doc__)
# %%
# Reading the raw data and creating epochs:
data_path = somato.data_path()
subject = "01"
task = "somato"
raw_fname = data_path / f"sub-{subject}" / "meg" / f"sub-{subject}_task-{task}_meg.fif"
# Use a shorter segment of raw just for speed here
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(0, 120)  # two minutes for speed (looks similar to using all ~800 s)
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, preload=True)
del raw
# Paths to forward operator and FreeSurfer subject directory
fname_fwd = (
data_path / "derivatives" / f"sub-{subject}" / f"sub-{subject}_task-{task}-fwd.fif"
)
subjects_dir = data_path / "derivatives" / "freesurfer" / "subjects"
# %%
# We are interested in the beta band. Define a range of frequencies, using a
# log scale, from 12 to 30 Hz.
freqs = np.logspace(np.log10(12), np.log10(30), 9)
# %%
# Computing the cross-spectral density matrix for the beta frequency band, for
# different time intervals. We use a decim value of 20 to speed up the
# computation in this example, at the cost of some accuracy.
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)
info = epochs.info
del epochs
# %%
# To compute the source power for a frequency band, rather than each frequency
# separately, we average the CSD objects across frequencies.
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
# %%
# Computing DICS spatial filters using the CSD that was computed on the entire
# timecourse.
fwd = mne.read_forward_solution(fname_fwd)
filters = make_dics(
info,
fwd,
csd,
noise_csd=csd_baseline,
pick_ori="max-power",
reduce_rank=True,
real_filter=True,
)
del fwd
# %%
# Applying DICS spatial filters separately to the CSD computed using the
# baseline and the CSD computed during the ERS activity.
baseline_source_power, freqs = apply_dics_csd(csd_baseline, filters)
beta_source_power, freqs = apply_dics_csd(csd_ers, filters)
# %%
# Visualizing source power during ERS activity relative to the baseline power.
stc = beta_source_power / baseline_source_power
message = "DICS source power in the 12-30 Hz frequency band"
brain = stc.plot(
hemi="both",
views="axial",
subjects_dir=subjects_dir,
subject=subject,
time_label=message,
)
# %%
# References
# ----------
# .. footbibliography::
|
1bd87bacfa0a3462b8f84395ab820d3a79f1c4a4
|
fdb47aa5092baa4f5ec86b3819961c77731a33e1
|
/darts/tests/dataprocessing/transformers/test_window_transformations.py
|
65bc70d00116e49ae3c0dbde399749be99966cf8
|
[
"Apache-2.0"
] |
permissive
|
unit8co/darts
|
6177e273950208b859c9208677a6f2632fc2aa2d
|
52ac1814a7a21b107d2391598dfc3a4a5bd33ca7
|
refs/heads/master
| 2023-09-01T04:12:19.632394
| 2023-08-31T12:58:58
| 2023-08-31T12:58:58
| 148,657,183
| 6,234
| 759
|
Apache-2.0
| 2023-09-14T10:18:50
| 2018-09-13T15:17:28
|
Python
|
UTF-8
|
Python
| false
| false
| 21,292
|
py
|
test_window_transformations.py
|
import itertools
import numpy as np
import pandas as pd
import pytest
from darts import TimeSeries
from darts.dataprocessing.pipeline import Pipeline
from darts.dataprocessing.transformers import Mapper, WindowTransformer
class TestTimeSeriesWindowTransform:
times = pd.date_range("20130101", "20130110")
series_from_values = TimeSeries.from_values(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
)
target = TimeSeries.from_times_and_values(times, np.array(range(1, 11)))
series_multi_prob = (
(target + 10)
.stack(target + 20)
.concatenate((target + 100).stack(target + 200), axis=2)
) # 2 comps, 2 samples
series_multi_det = (
(target + 10).stack(target + 20).stack((target + 30).stack(target + 40))
) # 4 comps, 1 sample
series_univ_det = target + 50 # 1 comp, 1 sample
series_univ_prob = (target + 50).concatenate(
target + 500, axis=2
) # 1 comp, 2 samples
def test_ts_windowtransf_input_dictionary(self):
"""
        Test that malformed window transformer dictionary inputs raise the appropriate errors
"""
with pytest.raises(TypeError):
window_transformations = None # None input
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = [] # empty list
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(KeyError):
window_transformations = {} # empty dictionary
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = [1, 2, 3] # list of not dictionaries
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(KeyError):
window_transformations = {"random_fn_name": "mean"} # no 'function' key
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(AttributeError):
window_transformations = {
"function": "wild_fn"
} # not valid pandas built-in function
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": 1
            } # not a valid pandas built-in function nor a callable
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {"function": None} # None function value
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(TypeError):
window_transformations = {
"function": "quantile",
"window": [3],
} # not enough mandatory arguments for quantile
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": "mean",
"mode": "rolling",
"window": -3,
} # negative window
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": "mean",
"mode": "rolling",
"window": None,
} # None window
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": "mean",
"mode": "rolling",
"window": [5],
} # window list
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": "mean",
"window": 3,
"mode": "rolling",
"step": -2,
} # Negative step
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": "mean",
"window": 3,
"mode": "rnd",
} # invalid mode
self.series_univ_det.window_transform(transforms=window_transformations)
with pytest.raises(ValueError):
window_transformations = {
"function": "mean",
"mode": "rolling",
"window": 3,
"center": "True",
            } # forecasting_safe=True vs center=True
self.series_univ_det.window_transform(transforms=window_transformations)
def test_ts_windowtransf_output_series(self):
# univariate deterministic input
transforms = {"function": "sum", "mode": "rolling", "window": 1}
transformed_ts = self.series_univ_det.window_transform(transforms=transforms)
assert list(itertools.chain(*transformed_ts.values().tolist())) == list(
itertools.chain(*self.series_univ_det.values().tolist())
)
assert transformed_ts.components.to_list() == [
f"{transforms['mode']}_{transforms['function']}_{str(transforms['window'])}_{comp}"
for comp in self.series_univ_det.components
]
        # test that a customized function name overrides the pandas built-in transformation name
transforms = {
"function": "sum",
"mode": "rolling",
"window": 1,
"function_name": "customized_name",
}
transformed_ts = self.series_univ_det.window_transform(transforms=transforms)
assert transformed_ts.components.to_list() == [
f"{transforms['mode']}_{transforms['function_name']}_{str(transforms['window'])}_{comp}"
for comp in self.series_univ_det.components
]
del transforms["function_name"]
# multivariate deterministic input
# transform one component
transforms.update({"components": "0"})
transformed_ts = self.series_multi_det.window_transform(transforms=transforms)
assert transformed_ts.components.to_list() == [
f"{transforms['mode']}_{transforms['function']}_{str(transforms['window'])}_{comp}"
for comp in transforms["components"]
]
transformed_ts = self.series_multi_det.window_transform(
transforms=transforms, keep_non_transformed=True
)
assert (
transformed_ts.components.to_list()
== [
f"{transforms['mode']}_{transforms['function']}_{str(transforms['window'])}_{comp}"
for comp in transforms["components"]
]
+ self.series_multi_det.components.to_list()
)
# transform multiple components
transforms = {
"function": "sum",
"mode": "rolling",
"window": 1,
"components": ["0", "0_1"],
}
transformed_ts = self.series_multi_det.window_transform(transforms=transforms)
assert transformed_ts.components.to_list() == [
f"{transforms['mode']}_{transforms['function']}_{str(transforms['window'])}_{comp}"
for comp in transforms["components"]
]
transformed_ts = self.series_multi_det.window_transform(
transforms=transforms, keep_non_transformed=True
)
assert (
transformed_ts.components.to_list()
== [
f"{transforms['mode']}_{transforms['function']}_{str(transforms['window'])}_{comp}"
for comp in transforms["components"]
]
+ self.series_multi_det.components.to_list()
)
# multiple transformations
transforms = [transforms] + [
{
"function": "mean",
"mode": "rolling",
"window": 1,
"components": ["0", "0_1"],
}
]
transformed_ts = self.series_multi_det.window_transform(transforms=transforms)
assert transformed_ts.components.to_list() == [
f"{transformation['mode']}_{transformation['function']}_{str(transformation['window'])}_{comp}"
for transformation in transforms
for comp in transformation["components"]
]
transformed_ts = self.series_multi_det.window_transform(
transforms=transforms, keep_non_transformed=True
)
assert (
transformed_ts.components.to_list()
== [
f"{transformation['mode']}_{transformation['function']}_{str(transformation['window'])}_{comp}"
for transformation in transforms
for comp in transformation["components"]
]
+ self.series_multi_det.components.to_list()
)
# multivariate probabilistic input
transformed_ts = self.series_multi_prob.window_transform(transforms=transforms)
assert transformed_ts.n_samples == 2
def test_user_defined_function_behavior(self):
def count_above_mean(array):
mean = np.mean(array)
return np.where(array > mean)[0].size
transformation = {
"function": count_above_mean,
"mode": "rolling",
"window": 5,
}
transformed_ts = self.target.window_transform(
transformation,
)
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 2]),
columns=["rolling_udf_5_0"],
)
assert transformed_ts == expected_transformed_series
# test if a customized function name is provided
transformation.update({"function_name": "count_above_mean"})
transformed_ts = self.target.window_transform(
transformation,
)
assert transformed_ts.components.to_list() == [
f"{transformation['mode']}_{transformation['function_name']}_{str(transformation['window'])}_{comp}"
for comp in self.target.components
]
def test_ts_windowtransf_output_nabehavior(self):
window_transformations = {
"function": "sum",
"mode": "rolling",
"window": 3,
"min_periods": 2,
}
# fill na with a specific value
transformed_ts = self.target.window_transform(
window_transformations, treat_na=100
)
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array([100, 3, 6, 9, 12, 15, 18, 21, 24, 27]),
columns=["rolling_sum_3_2_0"],
)
assert transformed_ts == expected_transformed_series
# dropna
transformed_ts = self.target.window_transform(
window_transformations, treat_na="dropna"
)
expected_transformed_series = TimeSeries.from_times_and_values(
self.times[1:],
np.array([3, 6, 9, 12, 15, 18, 21, 24, 27]),
columns=["rolling_sum_3_2_0"],
)
assert transformed_ts == expected_transformed_series
# backfill na
transformed_ts = self.target.window_transform(
window_transformations, treat_na="bfill", forecasting_safe=False
)
# backfill works only with forecasting_safe=False
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array([3, 3, 6, 9, 12, 15, 18, 21, 24, 27]),
columns=["rolling_sum_3_2_0"],
)
assert transformed_ts == expected_transformed_series
with pytest.raises(ValueError):
            # unknown treat_na
self.target.window_transform(
window_transformations, treat_na="fillrnd", forecasting_safe=False
)
with pytest.raises(ValueError):
            # unauthorized treat_na=bfill with forecasting_safe=True
self.target.window_transform(window_transformations, treat_na="bfill")
    def test_transformed_ts_index(self):
# DateTimeIndex
transformed_series = self.target.window_transform({"function": "sum"})
assert (
self.target._time_index.__class__
== transformed_series._time_index.__class__
)
# length index should not change for default transformation configurations
assert len(self.target._time_index) == len(transformed_series._time_index)
# RangeIndex
transformed_series = self.series_from_values.window_transform(
{"function": "sum"}
)
assert (
self.series_from_values._time_index.__class__
== transformed_series._time_index.__class__
)
assert len(self.series_from_values._time_index) == len(
transformed_series._time_index
)
def test_include_current(self):
# if "closed"="left" should not shift the index
transformation = {
"function": "sum",
"mode": "rolling",
"window": 1,
"closed": "left",
}
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array(["NaN", 1, 2, 3, 4, 5, 6, 7, 8, 9]),
columns=["rolling_sum_1_0"],
)
transformed_ts = self.target.window_transform(
transformation, include_current=False
)
assert transformed_ts == expected_transformed_series
# shift the index
transformation = {"function": "sum", "mode": "rolling", "window": 1}
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array(["NaN", 1, 2, 3, 4, 5, 6, 7, 8, 9]),
columns=["rolling_sum_1_0"],
)
transformed_ts = self.target.window_transform(
transformation, include_current=False
)
assert transformed_ts == expected_transformed_series
transformation = [
{"function": "sum", "mode": "rolling", "window": 1, "closed": "left"},
{"function": "sum", "mode": "ewm", "span": 1},
]
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array(
[
["NaN", "NaN"],
[1, 1],
[2, 2],
[3, 3],
[4, 4],
[5, 5],
[6, 6],
[7, 7],
[8, 8],
[9, 9],
]
),
columns=["rolling_sum_1_0", "ewm_sum_0"],
)
transformed_ts = self.target.window_transform(
transformation, include_current=False
)
assert transformed_ts == expected_transformed_series
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array(
[
[1, 1],
[1, 1],
[2, 2],
[3, 3],
[4, 4],
[5, 5],
[6, 6],
[7, 7],
[8, 8],
[9, 9],
]
),
columns=["rolling_sum_1_0", "ewm_sum_0"],
)
transformed_ts = self.target.window_transform(
transformation,
include_current=False,
forecasting_safe=False,
treat_na="bfill",
)
assert transformed_ts == expected_transformed_series
transformation = [
{
"function": "sum",
"mode": "rolling",
"window": 2,
"closed": "left",
"min_periods": 2,
},
{"function": "sum", "mode": "ewm", "span": 1, "min_periods": 2},
]
expected_transformed_series = TimeSeries.from_times_and_values(
self.times,
np.array(
[
["NaN", "NaN"],
["NaN", "NaN"],
[3, 2],
[5, 3],
[7, 4],
[9, 5],
[11, 6],
[13, 7],
[15, 8],
[17, 9],
]
),
columns=["rolling_sum_2_2_0", "ewm_sum_2_0"],
)
transformed_ts = self.target.window_transform(
transformation, include_current=False
)
assert transformed_ts == expected_transformed_series
class TestWindowTransformer:
times = pd.date_range("20130101", "20130110")
target = TimeSeries.from_times_and_values(times, np.array(range(1, 11)))
times_hourly = pd.date_range(start="20130101", freq="1H", periods=10)
target_hourly = TimeSeries.from_times_and_values(
times_hourly, np.array(range(1, 11))
)
series_multi_prob = (
(target + 10)
.stack(target + 20)
.concatenate((target + 100).stack(target + 200), axis=2)
) # 2 comps, 2 samples
series_multi_det = (
(target + 10).stack(target + 20).stack((target + 30).stack(target + 40))
) # 4 comps, 1 sample
series_univ_det = target + 50 # 1 comp, 1 sample
series_univ_prob = (target + 50).concatenate(
target + 500, axis=2
) # 1 comp, 2 samples
sequence_det = [series_univ_det, series_multi_det]
sequence_prob = [series_univ_prob, series_multi_prob]
def test_window_transformer_output(self):
window_transformations = {
"function": "sum",
"components": ["0"],
}
transformer = WindowTransformer(
transforms=window_transformations,
treat_na=100,
keep_non_transformed=True,
forecasting_safe=True,
)
transformed_ts_list = transformer.transform(self.sequence_det)
assert len(transformed_ts_list) == 2
assert transformed_ts_list[0].n_components == 2
assert transformed_ts_list[0].n_timesteps == self.series_multi_det.n_timesteps
assert transformed_ts_list[1].n_components == 5
assert transformed_ts_list[1].n_timesteps == self.series_multi_det.n_timesteps
def test_window_transformer_offset_parameter(self):
"""
        Test that the window parameter supports a pandas.Timedelta offset
"""
base_parameters = {
"function": "mean",
"components": ["0"],
"mode": "rolling",
}
offset_parameters = base_parameters.copy()
offset_parameters.update({"window": pd.Timedelta(hours=4)})
offset_transformer = WindowTransformer(
transforms=offset_parameters,
)
offset_transformed = offset_transformer.transform(self.target_hourly)
integer_parameters = base_parameters.copy()
integer_parameters.update({"window": 4})
integer_transformer = WindowTransformer(
transforms=integer_parameters,
)
integer_transformed = integer_transformer.transform(self.target_hourly)
np.testing.assert_equal(
integer_transformed.values(), offset_transformed.values()
)
assert offset_transformed.components[0] == "rolling_mean_0 days 04:00:00_0"
assert integer_transformed.components[0] == "rolling_mean_4_0"
invalid_parameters = base_parameters.copy()
invalid_parameters.update({"window": pd.DateOffset(hours=4)})
invalid_transformer = WindowTransformer(
transforms=invalid_parameters,
)
# if pd.DateOffset, raise ValueError of non-fixed frequency
with pytest.raises(ValueError):
invalid_transformer.transform(self.target_hourly)
def test_transformers_pipeline(self):
"""
Test that the forecasting window transformer can be used in a pipeline
"""
times1 = pd.date_range("20130101", "20130110")
series_1 = TimeSeries.from_times_and_values(times1, np.array(range(1, 11)))
expected_transformed_series = TimeSeries.from_times_and_values(
times1,
np.array([100, 15, 30, 45, 60, 75, 90, 105, 120, 135]),
columns=["rolling_sum_3_2_0"],
)
# adds NaNs
window_transformations = [
{"function": "sum", "mode": "rolling", "window": 3, "min_periods": 2}
]
def times_five(x):
return x * 5
mapper = Mapper(fn=times_five)
window_transformer = WindowTransformer(
transforms=window_transformations, treat_na=100
)
pipeline = Pipeline([mapper, window_transformer])
transformed_series = pipeline.fit_transform(series_1)
assert transformed_series == expected_transformed_series
|
4fbf0bc2eead15384195cbac9dd2acbe998af068
|
fd47751e91f8bd43d6223033ce947ff720038f87
|
/tests/test_main.py
|
1f9fe85b3d777ace7880eab825cfde2074e92de6
|
[
"BSD-3-Clause"
] |
permissive
|
dbcli/litecli
|
bde48b747c4bfbbfcf203c37420c0438524b7d73
|
5975d2010278fda42aa224be5770113fc15ee28f
|
refs/heads/main
| 2023-09-01T11:53:11.932344
| 2023-08-25T14:43:02
| 2023-08-25T14:43:02
| 133,243,075
| 1,908
| 75
|
BSD-3-Clause
| 2023-08-25T14:43:03
| 2018-05-13T14:11:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,282
|
py
|
test_main.py
|
import os
from collections import namedtuple
from textwrap import dedent
from tempfile import NamedTemporaryFile
import shutil
import click
from click.testing import CliRunner
from litecli.main import cli, LiteCli
from litecli.packages.special.main import COMMANDS as SPECIAL_COMMANDS
from utils import dbtest, run
test_dir = os.path.abspath(os.path.dirname(__file__))
project_dir = os.path.dirname(test_dir)
default_config_file = os.path.join(project_dir, "tests", "liteclirc")
CLI_ARGS = ["--liteclirc", default_config_file, "_test_db"]
@dbtest
def test_execute_arg(executor):
run(executor, "create table test (a text)")
run(executor, 'insert into test values("abc")')
sql = "select * from test;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS + ["-e", sql])
assert result.exit_code == 0
assert "abc" in result.output
result = runner.invoke(cli, args=CLI_ARGS + ["--execute", sql])
assert result.exit_code == 0
assert "abc" in result.output
expected = "a\nabc\n"
assert expected in result.output
@dbtest
def test_execute_arg_with_table(executor):
run(executor, "create table test (a text)")
run(executor, 'insert into test values("abc")')
sql = "select * from test;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS + ["-e", sql] + ["--table"])
expected = "+-----+\n| a |\n+-----+\n| abc |\n+-----+\n"
assert result.exit_code == 0
assert expected in result.output
@dbtest
def test_execute_arg_with_csv(executor):
run(executor, "create table test (a text)")
run(executor, 'insert into test values("abc")')
sql = "select * from test;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS + ["-e", sql] + ["--csv"])
expected = '"a"\n"abc"\n'
assert result.exit_code == 0
assert expected in "".join(result.output)
@dbtest
def test_batch_mode(executor):
run(executor, """create table test(a text)""")
run(executor, """insert into test values('abc'), ('def'), ('ghi')""")
sql = "select count(*) from test;\n" "select * from test limit 1;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS, input=sql)
assert result.exit_code == 0
assert "count(*)\n3\na\nabc\n" in "".join(result.output)
@dbtest
def test_batch_mode_table(executor):
run(executor, """create table test(a text)""")
run(executor, """insert into test values('abc'), ('def'), ('ghi')""")
sql = "select count(*) from test;\n" "select * from test limit 1;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS + ["-t"], input=sql)
expected = dedent(
"""\
+----------+
| count(*) |
+----------+
| 3 |
+----------+
+-----+
| a |
+-----+
| abc |
+-----+"""
)
assert result.exit_code == 0
assert expected in result.output
@dbtest
def test_batch_mode_csv(executor):
run(executor, """create table test(a text, b text)""")
run(executor, """insert into test (a, b) values('abc', 'de\nf'), ('ghi', 'jkl')""")
sql = "select * from test;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS + ["--csv"], input=sql)
expected = '"a","b"\n"abc","de\nf"\n"ghi","jkl"\n'
assert result.exit_code == 0
assert expected in "".join(result.output)
def test_help_strings_end_with_periods():
"""Make sure click options have help text that end with a period."""
for param in cli.params:
if isinstance(param, click.core.Option):
assert hasattr(param, "help")
assert param.help.endswith(".")
def output(monkeypatch, terminal_size, testdata, explicit_pager, expect_pager):
global clickoutput
clickoutput = ""
m = LiteCli(liteclirc=default_config_file)
class TestOutput:
def get_size(self):
size = namedtuple("Size", "rows columns")
size.columns, size.rows = terminal_size
return size
class TestExecute:
host = "test"
user = "test"
dbname = "test"
port = 0
def server_type(self):
return ["test"]
class PromptBuffer:
output = TestOutput()
m.prompt_app = PromptBuffer()
m.sqlexecute = TestExecute()
m.explicit_pager = explicit_pager
def echo_via_pager(s):
assert expect_pager
global clickoutput
clickoutput += s
def secho(s):
assert not expect_pager
global clickoutput
clickoutput += s + "\n"
monkeypatch.setattr(click, "echo_via_pager", echo_via_pager)
monkeypatch.setattr(click, "secho", secho)
m.output(testdata)
if clickoutput.endswith("\n"):
clickoutput = clickoutput[:-1]
assert clickoutput == "\n".join(testdata)
def test_conditional_pager(monkeypatch):
testdata = "Lorem ipsum dolor sit amet consectetur adipiscing elit sed do".split(
" "
)
# User didn't set pager, output doesn't fit screen -> pager
output(
monkeypatch,
terminal_size=(5, 10),
testdata=testdata,
explicit_pager=False,
expect_pager=True,
)
# User didn't set pager, output fits screen -> no pager
output(
monkeypatch,
terminal_size=(20, 20),
testdata=testdata,
explicit_pager=False,
expect_pager=False,
)
# User manually configured pager, output doesn't fit screen -> pager
output(
monkeypatch,
terminal_size=(5, 10),
testdata=testdata,
explicit_pager=True,
expect_pager=True,
)
    # User manually configured pager, output fits screen -> pager
output(
monkeypatch,
terminal_size=(20, 20),
testdata=testdata,
explicit_pager=True,
expect_pager=True,
)
SPECIAL_COMMANDS["nopager"].handler()
output(
monkeypatch,
terminal_size=(5, 10),
testdata=testdata,
explicit_pager=False,
expect_pager=False,
)
SPECIAL_COMMANDS["pager"].handler("")
def test_reserved_space_is_integer():
"""Make sure that reserved space is returned as an integer."""
def stub_terminal_size():
return (5, 5)
old_func = shutil.get_terminal_size
shutil.get_terminal_size = stub_terminal_size
lc = LiteCli()
assert isinstance(lc.get_reserved_space(), int)
shutil.get_terminal_size = old_func
@dbtest
def test_import_command(executor):
data_file = os.path.join(project_dir, "tests", "data", "import_data.csv")
run(executor, """create table tbl1(one varchar(10), two smallint)""")
# execute
run(executor, """.import %s tbl1""" % data_file)
# verify
sql = "select * from tbl1;"
runner = CliRunner()
result = runner.invoke(cli, args=CLI_ARGS + ["--csv"], input=sql)
expected = """one","two"
"t1","11"
"t2","22"
"""
assert result.exit_code == 0
assert expected in "".join(result.output)
def test_startup_commands(executor):
m = LiteCli(liteclirc=default_config_file)
    assert m.startup_commands['commands'] == [
        "create table startupcommands(a text)",
        "insert into startupcommands values('abc')",
    ]
    # TODO: implement tests on execution of the startup commands
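    # A minimal sketch of such a test (hypothetical; it assumes the startup
    # commands have already been executed against the test database, which is
    # not verified here):
    #
    #     sql = "select * from startupcommands;"
    #     runner = CliRunner()
    #     result = runner.invoke(cli, args=CLI_ARGS, input=sql)
    #     assert "abc" in result.output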
|
c870ce951a49eed006d8f921cf04b3db25ac4261
|
8a2474f61a49b0e24812456b34f59948b756a94e
|
/autotest/test_gwf_pertim.py
|
d3adcccc35892783dcec8f7824615c2f1f5cff16
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
MODFLOW-USGS/modflow6
|
6e913abcab5c23686ed05b1cceac85f90282955d
|
43f6198125867c487eedc64b17e9adaceb73f5ab
|
refs/heads/master
| 2023-09-01T20:51:30.970467
| 2023-06-28T20:17:59
| 2023-06-28T20:17:59
| 116,149,490
| 158
| 111
|
NOASSERTION
| 2023-09-14T17:02:58
| 2018-01-03T15:00:55
|
Fortran
|
UTF-8
|
Python
| false
| false
| 3,157
|
py
|
test_gwf_pertim.py
|
import os
import flopy
import numpy as np
import pytest
from framework import TestFramework
from simulation import TestSimulation
ex = [
"gwf_pertim",
]
# static model data
# temporal discretization
nper = 1
perlen = [0.0]
nstp = [1]
tsmult = [1.0]
tdis_rc = []
for idx in range(nper):
tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
# spatial discretization data
nlay, nrow, ncol = 3, 21, 20
shape3d = (nlay, nrow, ncol)
size3d = nlay * nrow * ncol
delr, delc = 500.0, 500.0
top = 330.0
botm = [220.0, 200.0, 0.0]
strt = 330.0
# calculate hk
hk = [50.0, 0.01, 200.0]
k33 = [10.0, 0.01, 20.0]
# chd data
canal_spd = [(0, i, 0, 330.0, "canal") for i in range(nrow)]
river_spd = [(0, i, ncol - 1, 320.0, "river") for i in range(nrow)]
def build_model(idx, dir):
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
ims = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
complexity="simple",
)
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
gwf = flopy.mf6.ModflowGwf(
sim,
modelname=name,
)
# create iterative model solution and register the gwf model with it
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(
gwf,
strt=strt,
)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
save_flows=False,
k=hk,
k33=k33,
)
# chd files
chd_canal = flopy.mf6.ModflowGwfchd(
gwf,
boundnames=True,
stress_period_data=canal_spd,
pname="CHD-CANAL",
filename=f"{name}_canal.chd",
)
chd_river = flopy.mf6.ModflowGwfchd(
gwf,
boundnames=True,
stress_period_data=river_spd,
pname="CHD-RIVER",
filename=f"{name}_river.chd",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
printrecord=[
(("BUDGET", "ALL")),
],
)
return sim, None
def eval_model(sim):
print("evaluating results...")
fpth = os.path.join(sim.simpath, f"{sim.name}.lst")
mflist = flopy.utils.Mf6ListBudget(fpth)
inc = mflist.get_incremental()
q_in = 99928.4941
q_out = 99928.5036
q_in_sim = inc["CHD_IN"]
q_out_sim = inc["CHD2_OUT"]
assert np.allclose([q_in_sim], [q_in]), f"CHD_IN <> {q_in} ({q_in_sim})"
assert np.allclose(
[q_out_sim], [q_out]
), f"CHD2_OUT <> {q_out} ({q_out_sim})"
@pytest.mark.parametrize(
"idx, name",
list(enumerate(ex)),
)
def test_mf6model(idx, name, function_tmpdir, targets):
ws = str(function_tmpdir)
test = TestFramework()
test.build(build_model, idx, ws)
test.run(
TestSimulation(
name=name,
exe_dict=targets,
exfunc=eval_model,
idxsim=idx,
),
ws,
)
|
4bc9aa575b7c29fb6519d64ac39bb49d2c85d593
|
8294fbe3389bebe56c42f958c87e0482c412887d
|
/February/FenrisWolf123.py
|
056ad705dbacba5af87066ce39bf8d64648ea8e3
|
[] |
no_license
|
py-study-group/challenges
|
8717376d1f44c4846d05052e8446b647f84c26f6
|
9129f48fb7c61fb5610c16e8da427149f0a9ed0a
|
refs/heads/master
| 2021-01-20T08:37:14.224261
| 2018-02-28T16:27:52
| 2018-02-28T16:27:52
| 90,166,445
| 133
| 70
| null | 2018-02-28T10:22:04
| 2017-05-03T15:48:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
FenrisWolf123.py
|
import argparse
def caesar_cipher(mode, message, key):
    """
    Encode/decode a Caesar cipher.
    If decoding, the key is negated.
    """
    if key > 26:
        key = key % 26
    if mode == 'd':
        key = -key
    output = ''
    for i in message:
        if i.isalpha():
            num = ord(i) + key
            # wrap around within the upper- or lower-case alphabet
            if i.isupper():
                if num > ord('Z'):
                    num -= 26
                elif num < ord('A'):
                    num += 26
            elif i.islower():
                if num > ord('z'):
                    num -= 26
                elif num < ord('a'):
                    num += 26
            output += chr(num)
        else:
            output += i
    return output
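# Quick sanity check (illustrative):
#   caesar_cipher('e', 'Attack at dawn!', 3)  -> 'Dwwdfn dw gdzq!'
#   caesar_cipher('d', 'Dwwdfn dw gdzq!', 3)  -> 'Attack at dawn!'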
def main():
parser = argparse.ArgumentParser()
parser.add_argument('action', help='encode/decode')
parser.add_argument('shift', help='the amount of shift to be applied')
    parser.add_argument('-f', '--file', help='file to be opened')
    parser.add_argument('-t', '--text', help='text to be encoded/decoded')
args = parser.parse_args()
if args.action == 'encode':
if args.file:
try:
with open(args.file,'r') as file_obj:
print(caesar_cipher('e', file_obj.read(), int(args.shift)))
except FileNotFoundError:
print('File not found.')
elif args.text:
print(caesar_cipher('e', args.text, int(args.shift)))
else:
text = input('Enter the string to be encoded: ')
print(caesar_cipher('e', text, int(args.shift)))
elif args.action == 'decode':
if args.file:
try:
with open(args.file,'r') as file_obj:
print(caesar_cipher('d', file_obj.read(), int(args.shift)))
except FileNotFoundError:
print('File not found.')
elif args.text:
print(caesar_cipher('d', args.text, int(args.shift)))
else:
text = input('Enter the string to be decoded: ')
print(caesar_cipher('d', text, int(args.shift)))
if __name__ == '__main__':
main()
|
ab860df41c907b10dbeb95dfbdb7a19dde286299
|
f4095ef092092399102bb21d1198e324f10f53ed
|
/pontoon/actionlog/models.py
|
458a52d34fd1eb675f6b6177635b59a7b34cd392
|
[
"BSD-3-Clause"
] |
permissive
|
mozilla/pontoon
|
2c53227570099ca666467d4e3d78e929bf456c9c
|
0c4f74e15b1e442a9cee9b1cd636214b24f5352b
|
refs/heads/master
| 2023-09-06T04:15:41.009180
| 2023-09-01T14:23:51
| 2023-09-01T14:23:51
| 1,385,890
| 1,367
| 713
|
BSD-3-Clause
| 2023-09-13T18:04:41
| 2011-02-19T11:25:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,360
|
py
|
models.py
|
from django.core.exceptions import ValidationError
from django.db import models
class ActionLog(models.Model):
class ActionType(models.TextChoices):
# A translation has been created.
TRANSLATION_CREATED = "translation:created", "Translation created"
# A translation has been deleted.
TRANSLATION_DELETED = "translation:deleted", "Translation deleted"
# A translation has been approved.
TRANSLATION_APPROVED = "translation:approved", "Translation approved"
# A translation has been unapproved.
TRANSLATION_UNAPPROVED = "translation:unapproved", "Translation unapproved"
# A translation has been rejected.
TRANSLATION_REJECTED = "translation:rejected", "Translation rejected"
# A translation has been unrejected.
TRANSLATION_UNREJECTED = "translation:unrejected", "Translation unrejected"
# A comment has been added.
COMMENT_ADDED = "comment:added", "Comment added"
action_type = models.CharField(max_length=50, choices=ActionType.choices)
created_at = models.DateTimeField(auto_now_add=True)
performed_by = models.ForeignKey(
"auth.User", models.SET_NULL, related_name="actions", null=True
)
    # Used to track the translation that translation-related actions apply to.
translation = models.ForeignKey(
"base.Translation",
models.CASCADE,
blank=True,
null=True,
)
# Used when a translation has been deleted or a team comment has been added.
entity = models.ForeignKey(
"base.Entity",
models.CASCADE,
blank=True,
null=True,
)
locale = models.ForeignKey(
"base.Locale",
models.CASCADE,
blank=True,
null=True,
)
def validate_action_type_choice(self):
valid_types = self.ActionType.values
if self.action_type not in valid_types:
raise ValidationError(
'Action type "{}" is not one of the permitted values: {}'.format(
self.action_type, ", ".join(valid_types)
)
)
def validate_foreign_keys_per_action(self):
if self.action_type == self.ActionType.TRANSLATION_DELETED and (
self.translation or not self.entity or not self.locale
):
raise ValidationError(
f'For action type "{self.action_type}", `entity` and `locale` are required'
)
if self.action_type == self.ActionType.COMMENT_ADDED and not (
(self.translation and not self.locale and not self.entity)
or (not self.translation and self.locale and self.entity)
):
raise ValidationError(
f'For action type "{self.action_type}", either `translation` or `entity` and `locale` are required'
)
if (
self.action_type != self.ActionType.TRANSLATION_DELETED
and self.action_type != self.ActionType.COMMENT_ADDED
) and (not self.translation or self.entity or self.locale):
raise ValidationError(
f'For action type "{self.action_type}", only `translation` is accepted'
)
def save(self, *args, **kwargs):
self.validate_action_type_choice()
self.validate_foreign_keys_per_action()
super().save(*args, **kwargs)
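# Illustrative usage (a sketch, not part of the module; `user` and
# `translation` stand in for real model instances):
#
#   ActionLog.objects.create(
#       action_type=ActionLog.ActionType.TRANSLATION_APPROVED,
#       performed_by=user,
#       translation=translation,
#   )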
|
278b944cd47758711f8ab2195252efb32a944650
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Numerical_Methods_Lecture_Notes_Physics_801_Creighton/decayerr.py
|
a9520c8dab6034eec4a96d31511d8ca471677531
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
decayerr.py
|
import math, pylab
nuclei0 = float(input('initial number of nuclei -> '))
tau = float(input('decay time constant -> '))
dtlow = float(input('lowest resolution time step -> '))
nres = int(input('number of resolution refinements -> '))
tmax = float(input('time to end of simulation -> '))
for n in range(nres):
refine = 10**n
dt = dtlow/refine
nsteps = int(tmax/dt)
nuclei = nuclei0
err = [0.0]*nsteps
t = [0.0]*nsteps
    # use Euler's method to integrate the equation for radioactive decay and
    # compute the error relative to the exact solution
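    #   N_{i+1} = N_i - (N_i / tau) * dt     (forward Euler for dN/dt = -N/tau)
    #   exact:   N(t) = N_0 * exp(-t / tau)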
for i in range(nsteps-1):
t[i+1] = t[i]+dt
nuclei = nuclei-nuclei/tau*dt
exact = nuclei0*math.exp(-t[i+1]/tau)
err[i+1] = abs((nuclei-exact)/exact)
# plot the error at this resolution
pylab.loglog(t[refine::refine], err[refine::refine], '.-', label='dt = '
+str(dt))
pylab.legend(loc=4)
pylab.xlabel('time')
pylab.ylabel('fractional error')
pylab.title('radioactive decay integration error')
pylab.grid(linestyle='-', which='major')
pylab.grid(which='minor')
pylab.show()
|
577b676012112b0afb4ed2ee44361ea3a07ae1f8
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Geometry/ForwardGeometry/python/ZdcGeometryDBWriter_cfi.py
|
1e9b00c3c91592820d14bb76b2d584fcb95eb0ca
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 475
|
py
|
ZdcGeometryDBWriter_cfi.py
|
import FWCore.ParameterSet.Config as cms
ZdcHardcodeGeometryEP = cms.ESProducer( "ZdcHardcodeGeometryEP",
appendToDataLabel = cms.string("_master")
)
ZdcGeometryToDBEP = cms.ESProducer( "ZdcGeometryToDBEP" ,
applyAlignment = cms.bool(False) ,
appendToDataLabel = cms.string("_toDB")
)
|
6da29f47f7cf52883ed9073ed3ae1321ad6dba85
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/mmtbx/scaling/tst_scaling.py
|
190138e4dcaa9c793602eeb004d367ec2987c3ae
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 39,183
|
py
|
tst_scaling.py
|
from __future__ import absolute_import, division, print_function
## Peter Zwart July 5, 2005
from pytest import raises
from cctbx.array_family import flex
from cctbx import crystal
from cctbx import miller
from cctbx import xray
from cctbx import sgtbx
from cctbx import uctbx
from mmtbx import scaling
from libtbx.test_utils import approx_equal
from mmtbx.scaling import absolute_scaling
from mmtbx.scaling import twin_analyses as t_a
import mmtbx.scaling
from scitbx.python_utils import random_transform
import random
import math
import time
from libtbx.str_utils import StringIO
from libtbx.utils import format_cpu_times
from six.moves import zip
from six.moves import range
random.seed(0)
flex.set_random_seed(0)
import scitbx.math as sm
## testing quick erf and quick ei0
def test_luts():
qerf = mmtbx.scaling.very_quick_erf(0.001)
qeio = mmtbx.scaling.quick_ei0(5000)
for i in range(-1000,1000):
x=i/100.0
assert approx_equal( qerf.erf(x), sm.erf(x), eps=1e-5 )
if (x>=0):
assert approx_equal( qeio.ei0(x), math.exp(-x)*sm.bessel_i0(x) , eps=1e-5 )
number_of_iterations = 15000000
for optimized in [False, True]:
t0 = time.time()
zero = qerf.loop_for_timings(number_of_iterations, optimized=optimized)
print("very_quick_erf*%d optimized=%s: %.2f s" % (
number_of_iterations, str(optimized), time.time()-t0))
assert approx_equal(zero, 0)
number_of_iterations = 5000000
for optimized in [False, True]:
t0 = time.time()
zero = qeio.loop_for_timings(number_of_iterations, optimized=optimized)
print("quick_ei0*%d optimized=%s: %.2f s" % (
number_of_iterations, str(optimized), time.time()-t0))
assert approx_equal(zero, 0)
## Testing Wilson parameters
def test_gamma_prot():
gamma_prot_test = scaling.gamma_prot(0.011478)
assert approx_equal(gamma_prot_test,-0.349085)
gamma_prot_test = scaling.gamma_prot(0.028868)
assert approx_equal(gamma_prot_test,-0.585563)
d_star_sq = flex.double([0.011478,0.028868,1.0,0.0])
gamma_array_test = scaling.get_gamma_prot(d_star_sq)
assert approx_equal(gamma_array_test[0],-0.349085)
assert approx_equal(gamma_array_test[1],-0.585563)
assert approx_equal(gamma_array_test[2], 0.0)
assert approx_equal(gamma_array_test[3], 0.0)
def test_sigma_prot():
z_0 = scaling.sigma_prot_sq(0.0,1.0)
z_0_theory = + 8.0*1.0*1.0 \
+ 5.0*6.0*6.0 \
+ 1.5*7.0*7.0 \
+ 1.2*8.0*8.0
assert approx_equal(z_0,z_0_theory,eps=1e-0)
d_star_sq = flex.double([0.0])
z_0_array = scaling.get_sigma_prot_sq(d_star_sq,1.0)
assert approx_equal(z_0_array[0],z_0)
## Testing isotropic wilson scaling
def finite_diffs_iso(p_scale=0.0,p_B_wilson=0.0,centric=False,h=0.0001):
d_star_sq = flex.double(10,0.25)
f_obs = flex.double(10,1.0)
centric_array = flex.bool(10,centric)
sigma_f_obs = f_obs/10.0
sigma_sq = flex.double(10,1.0)
epsilon = flex.double(10,1.0)
    gamma = flex.double(10,0.0)
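    # central-difference check: d(nll)/dp ~ (nll(p + h) - nll(p - h)) / (2 h),
    # compared below against the analytical gradient returned by
    # scaling.wilson_total_nll_gradient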
stmp1 = scaling.wilson_total_nll(d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = centric_array,
p_scale = p_scale-h,
p_B_wilson = p_B_wilson )
stmp2 = scaling.wilson_total_nll(d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = centric_array,
p_scale = p_scale+h,
p_B_wilson = p_B_wilson)
s_grad_diff = (stmp1-stmp2)/(-2.0*h)
btmp1 = scaling.wilson_total_nll(d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = centric_array,
p_scale = p_scale,
p_B_wilson = p_B_wilson-h)
btmp2 = scaling.wilson_total_nll(d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = centric_array,
p_scale = p_scale,
p_B_wilson = p_B_wilson+h)
b_grad_diff = (btmp1-btmp2)/(-2.0*h)
grad = scaling.wilson_total_nll_gradient(d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = centric_array,
p_scale = p_scale,
p_B_wilson = p_B_wilson)
assert approx_equal(s_grad_diff, grad[0])
assert approx_equal(b_grad_diff, grad[1])
def test_likelihood_iso():
d_star_sq = flex.double(10,0.250)
f_obs = flex.double(10,1.0)
sigma_f_obs = flex.double(10,0.0000)
sigma_sq = flex.double(10,1.0)
epsilon = flex.double(10,1.0)
gamma = flex.double(10,0.0)
centric = flex.bool(10,True)
acentric = flex.bool(10,False)
p_scale = 0.0
p_B_wilson = 0.0
centric_single_trans = scaling.wilson_single_nll(
d_star_sq = d_star_sq[0],
f_obs = f_obs[0],
sigma_f_obs = sigma_f_obs[0],
epsilon = epsilon[0],
sigma_sq = sigma_sq[0],
gamma_prot = gamma[0],
centric = centric[0],
p_scale = p_scale,
p_B_wilson = p_B_wilson,
transform = True)
centric_single_no_trans = scaling.wilson_single_nll(
d_star_sq = d_star_sq[0],
f_obs = f_obs[0],
sigma_f_obs = sigma_f_obs[0],
epsilon = epsilon[0],
sigma_sq = sigma_sq[0],
gamma_prot = gamma[0],
centric = centric[0],
p_scale = 1.0,
p_B_wilson = p_B_wilson,
transform = False)
assert approx_equal(centric_single_trans, 1.072364 ) ## from Mathematica
assert approx_equal(centric_single_trans, centric_single_no_trans)
acentric_single_trans = scaling.wilson_single_nll(
d_star_sq = d_star_sq[0],
f_obs = f_obs[0],
sigma_f_obs = sigma_f_obs[0],
epsilon = epsilon[0],
sigma_sq = sigma_sq[0],
gamma_prot = gamma[0],
centric = acentric[0],
p_scale = p_scale,
p_B_wilson = p_B_wilson)
acentric_single_no_trans = scaling.wilson_single_nll(
d_star_sq = d_star_sq[0],
f_obs = f_obs[0],
sigma_f_obs = sigma_f_obs[0],
epsilon = epsilon[0],
sigma_sq = sigma_sq[0],
gamma_prot = gamma[0],
centric = acentric[0],
p_scale = 1.0,
p_B_wilson =p_B_wilson,
transform = False)
assert approx_equal(acentric_single_trans, 0.306853) ## from Mathematica
assert approx_equal(acentric_single_trans, acentric_single_no_trans)
centric_total = scaling.wilson_total_nll(
d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = centric,
p_scale = p_scale,
p_B_wilson = p_B_wilson)
acentric_total = scaling.wilson_total_nll(
d_star_sq = d_star_sq,
f_obs = f_obs,
sigma_f_obs = sigma_f_obs,
epsilon = epsilon,
sigma_sq = sigma_sq,
gamma_prot = gamma,
centric = acentric,
p_scale = p_scale,
p_B_wilson = p_B_wilson)
assert approx_equal(centric_total, centric_single_trans*10.0)
assert approx_equal(acentric_total, acentric_single_trans*10.0)
def test_gradients_iso():
## Centrics
finite_diffs_iso(p_scale=3.0,
p_B_wilson=10.0,
centric=True,h=0.000001)
finite_diffs_iso(p_scale=-3.0,
p_B_wilson=-10.0,
centric=True,h=0.000001)
finite_diffs_iso(p_scale=90.0,
p_B_wilson=-10.0,
centric=True,h=0.000001)
finite_diffs_iso(p_scale=-90.0,
p_B_wilson=10.0,
centric=True,h=0.000001)
## Acentrics
finite_diffs_iso(p_scale=3.0,
p_B_wilson=10.0,
centric=False,h=0.000001)
finite_diffs_iso(p_scale=-3.0,
p_B_wilson=-10.0,
centric=False,h=0.000001)
finite_diffs_iso(p_scale=90.0,
p_B_wilson=-10.0,
centric=True,h=0.000001)
finite_diffs_iso(p_scale=-90.0,
p_B_wilson=10.0,
centric=True,h=0.000001)
## Testing anisotropic wilson scaling
def finite_diffs_aniso(p_scale,
u_star,
centric=False,
h=0.0001):
d_star_sq = flex.double(2,0.25)
f_obs = flex.double(2,1.0)
centric_array = flex.bool(2,centric)
sigma_f_obs = f_obs/10.0
sigma_sq = flex.double(2,1.0)
epsilon = flex.double(2,1.0)
    gamma = flex.double(2,0.0)
unit_cell = uctbx.unit_cell('20, 30, 40, 90.0, 90.0, 90.0')
mi = flex.miller_index(((1,2,3), (1,2,3)))
xs = crystal.symmetry((20,30,40), "P 2 2 2")
ms = miller.set(xs, mi)
nll_norm = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
nll_scale = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale+h,
unit_cell,
u_star)
u_star[0]+=h
nll_u11 = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
u_star[0]-=h
u_star[1]+=h
nll_u22 = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
u_star[1]-=h
u_star[2]+=h
nll_u33 = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
u_star[2]-=h
u_star[3]+=h
nll_u12 = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
u_star[3]-=h
u_star[4]+=h
nll_u13 = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
u_star[4]-=h
u_star[5]+=h
nll_u23 = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
g = scaling.wilson_single_nll_aniso_gradient(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
p_scale,
unit_cell,
u_star)
g2 = scaling.wilson_total_nll_aniso_gradient(ms.indices(),
f_obs,
sigma_f_obs,
epsilon,
sigma_sq,
gamma,
centric_array,
p_scale,
unit_cell,
u_star)
ds=(nll_norm-nll_scale)/-h
du11=(nll_norm-nll_u11)/-h
du22=(nll_norm-nll_u22)/-h
du33=(nll_norm-nll_u33)/-h
du12=(nll_norm-nll_u12)/-h
du13=(nll_norm-nll_u13)/-h
du23=(nll_norm-nll_u23)/-h
assert approx_equal(ds,g[0]), (ds,g[0])
assert approx_equal(du11,g[1]), (du11,g[1])
assert approx_equal(du22,g[2])
assert approx_equal(du33,g[3])
assert approx_equal(du12,g[4])
assert approx_equal(du13,g[5])
assert approx_equal(du23,g[6])
assert approx_equal(ds,g2[0]/2.0)
assert approx_equal(du11,g2[1]/2.0)
assert approx_equal(du22,g2[2]/2.0)
assert approx_equal(du33,g2[3]/2.0)
assert approx_equal(du12,g2[4]/2.0)
assert approx_equal(du13,g2[5]/2.0)
assert approx_equal(du23,g2[6]/2.0)
def test_likelihood_aniso():
u_star = [0,0,0,0,0,0]
d_star_sq = flex.double(2,0.25)
f_obs = flex.double(2,1.0)
centric_array = flex.bool(2,True)
sigma_f_obs = f_obs/10.0
sigma_sq = flex.double(2,1.0)
epsilon = flex.double(2,1.0)
gamma =flex.double(2,0.0)
unit_cell = uctbx.unit_cell('20, 30, 40, 90.0, 90.0, 90.0')
mi = flex.miller_index(((1,2,3), (1,2,3)))
xs = crystal.symmetry((20,30,40), "P 2 2 2")
ms = miller.set(xs, mi)
nll_centric_aniso = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
0.0,
unit_cell,
u_star)
assert approx_equal(nll_centric_aniso, 1.07239 ) ## from Mathematica
  ## Acentric case: flip the centric flags before computing
  centric_array = flex.bool(2,False)
nll_acentric_aniso = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
0.0,
unit_cell,
u_star)
assert approx_equal(nll_acentric_aniso,0.306902 ) ## from Mathematica
centric_array = flex.bool(2,True)
u_star = [1,1,1,0,0,0]
nll_centric_aniso = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
0.0,
unit_cell,
u_star)
assert approx_equal(nll_centric_aniso, 1.535008 ) ## from Mathematica
centric_array = flex.bool(2,False)
nll_acentric_aniso = scaling.wilson_single_nll_aniso(ms.indices()[0],
f_obs[0],
sigma_f_obs[0],
epsilon[0],
sigma_sq[0],
gamma[0],
centric_array[0],
0.0,
unit_cell,
u_star)
assert approx_equal(nll_acentric_aniso, 0.900003 ) ## from Mathematica
centric_array[1]=True
nll_total_aniso = scaling.wilson_total_nll_aniso(ms.indices(),
f_obs,
sigma_f_obs,
epsilon,
sigma_sq,
gamma,
centric_array,
0.0,
unit_cell,
u_star)
assert approx_equal(nll_total_aniso, 2.435011)
def test_grads_aniso():
finite_diffs_aniso(0.0,[0.0,0.0,0.0,0.0,0.0,0.0],True, 0.0000001)
finite_diffs_aniso(0.0,[0.0,0.0,0.0,2.0,0.0,0.0],False, 0.0000001)
finite_diffs_aniso(0.0,[1.0,2.0,3.0,4.0,5.0,6.0],True, 0.0000001)
finite_diffs_aniso(0.0,[1.0,2.0,3.0,4.0,5.0,6.0],False, 0.0000001)
finite_diffs_aniso(-10.0,[1.0,2.0,3.0,4.0,5.0,6.0],True, 0.0000001)
finite_diffs_aniso(-10.0,[1.0,2.0,3.0,4.0,5.0,6.0],False, 0.0000001)
finite_diffs_aniso(10.0,[1.0,2.0,3.0,4.0,5.0,6.0],True, 0.0000001)
finite_diffs_aniso(10.0,[1.0,2.0,3.0,4.0,5.0,6.0],False, 0.0000001)
finite_diffs_aniso(10.0,[10.0,20.0,30.0,40.0,50.0,60.0],True, 0.0000001)
finite_diffs_aniso(10.0,[10.0,20.0,30.0,40.0,50.0,60.0],False, 0.0000001)
## Testing relative scaling summats
class scaling_tester(object):
def __init__(self):
self.data_obs1 = flex.double(2,1.0)
self.data_obs2 = flex.double(2,3.0)
self.sigma_obs1 = flex.double(2,0.1)
self.sigma_obs2 = flex.double(2,1)
self.unit_cell = uctbx.unit_cell('20, 30, 40, 90.0, 90.0, 90.0')
#mi = flex.miller_index(((1,2,3), (1,2,3)))
self.mi = flex.miller_index(((1,2,3), (5,6,7)))
self.xs = crystal.symmetry((20,30,40), "P 2 2 2")
self.ms = miller.set(self.xs, self.mi)
self.u = [1,2,3,4,5,6]
self.p_scale = 0.40
#self.u = [0,0,0,0,0,0]
#self.p_scale = 0.00
self.ls_i_wt = scaling.least_squares_on_i_wt(
self.mi,
self.data_obs1,
self.sigma_obs1,
self.data_obs2,
self.sigma_obs2,
self.p_scale,
self.unit_cell,
self.u)
self.ls_i = scaling.least_squares_on_i(
self.mi,
self.data_obs1,
self.sigma_obs1,
self.data_obs2,
self.sigma_obs2,
self.p_scale,
self.unit_cell,
self.u)
self.ls_f_wt = scaling.least_squares_on_f_wt(
self.mi,
self.data_obs1,
self.sigma_obs1,
self.data_obs2,
self.sigma_obs2,
self.p_scale,
self.unit_cell,
self.u)
self.ls_f = scaling.least_squares_on_f(
self.mi,
self.data_obs1,
self.sigma_obs1,
self.data_obs2,
self.sigma_obs2,
self.p_scale,
self.unit_cell,
self.u)
self.tst_ls_f_wt()
self.tst_ls_i_wt()
self.tst_ls_f()
self.tst_ls_i()
self.tst_hes_ls_i_wt()
self.tst_hes_ls_f_wt()
self.tst_hes_ls_i()
self.tst_hes_ls_f()
def tst_ls_i_wt(self, h=0.0000001):
## This function tests the gradients
tmp = self.ls_i_wt.get_function()
before = flex.double(7,tmp)
after = flex.double(7,0)
## Test the pscale
self.ls_i_wt.set_p_scale(self.p_scale+h)
tmp = self.ls_i_wt.get_function()
after[0]=tmp
self.ls_i_wt.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_i_wt.set_u_rwgk(u_tmp)
tmp = self.ls_i_wt.get_function()
after[ii+1]=tmp
self.ls_i_wt.set_u_rwgk(self.u)
grads=self.ls_i_wt.get_gradient()
f = max(1, flex.max(flex.abs(grads)))
assert approx_equal(grads/f, (after-before)/h/f)
def tst_ls_f_wt(self, h=0.0000001):
## This function tests the gradients
tmp = self.ls_f_wt.get_function()
before = flex.double(7,tmp)
after = flex.double(7,0)
## Test the pscale
self.ls_f_wt.set_p_scale(self.p_scale+h)
tmp = self.ls_f_wt.get_function()
after[0]=tmp
self.ls_f_wt.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_f_wt.set_u_rwgk(u_tmp)
tmp = self.ls_f_wt.get_function()
after[ii+1]=tmp
self.ls_f_wt.set_u_rwgk(self.u)
grads=self.ls_f_wt.get_gradient()
f = max(1, flex.max(flex.abs(grads)))
assert approx_equal(grads/f, (after-before)/h/f)
def tst_ls_f(self, h=0.0000001):
## This function tests the gradients
tmp = self.ls_f.get_function()
before = flex.double(7,tmp)
after = flex.double(7,0)
## Test the pscale
self.ls_f.set_p_scale(self.p_scale+h)
tmp = self.ls_f.get_function()
after[0]=tmp
self.ls_f.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_f.set_u_rwgk(u_tmp)
tmp = self.ls_f.get_function()
after[ii+1]=tmp
self.ls_f.set_u_rwgk(self.u)
grads=self.ls_f.get_gradient()
f = max(1, flex.max(flex.abs(grads)))
assert approx_equal(grads/f, (after-before)/h/f)
def tst_ls_i(self, h=0.0000001):
## This function tests the gradients
tmp = self.ls_i.get_function()
before = flex.double(7,tmp)
after = flex.double(7,0)
## Test the pscale
self.ls_i.set_p_scale(self.p_scale+h)
tmp = self.ls_i.get_function()
after[0]=tmp
self.ls_i.set_p_scale(self.p_scale)
#aniso tensor components
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_i.set_u_rwgk(u_tmp)
tmp = self.ls_i.get_function()
after[ii+1]=tmp
self.ls_i.set_u_rwgk(self.u)
grads=self.ls_i.get_gradient()
f = max(1, flex.max(flex.abs(grads)))
assert approx_equal(grads/f, (after-before)/h/f)
def tst_hes_ls_i_wt(self,h=0.0000001):
hes_anal = self.ls_i_wt.hessian_as_packed_u()
hes_anal=hes_anal.matrix_packed_u_as_symmetric()
grads = self.ls_i_wt.get_gradient()
self.ls_i_wt.set_p_scale(self.p_scale+h)
tmp = self.ls_i_wt.get_gradient()
tmp = list( (grads-tmp)/-h )
tmp_hess=[]
tmp_hess.append( tmp )
self.ls_i_wt.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_i_wt.set_u_rwgk(u_tmp)
tmp = self.ls_i_wt.get_gradient()
tmp = (grads - tmp)/-h
tmp_hess.append( list(tmp) )
self.ls_i_wt.set_u_rwgk(self.u)
f = max(1, flex.max(flex.abs(hes_anal)))
count=0
for ii in range(7):
for jj in range(7):
assert approx_equal(tmp_hess[ii][jj]/f, hes_anal[count]/f)
count+=1
def tst_hes_ls_f_wt(self,h=0.0000001):
hes_anal = self.ls_f_wt.hessian_as_packed_u()
hes_anal=hes_anal.matrix_packed_u_as_symmetric()
grads = self.ls_f_wt.get_gradient()
self.ls_f_wt.set_p_scale(self.p_scale+h)
tmp = self.ls_f_wt.get_gradient()
tmp = list( (grads-tmp)/-h )
tmp_hess=[]
tmp_hess.append( tmp )
self.ls_f_wt.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_f_wt.set_u_rwgk(u_tmp)
tmp = self.ls_f_wt.get_gradient()
tmp = (grads - tmp)/-h
tmp_hess.append( list(tmp) )
self.ls_f_wt.set_u_rwgk(self.u)
f = max(1, flex.max(flex.abs(hes_anal)))
count=0
for ii in range(7):
for jj in range(7):
assert approx_equal(tmp_hess[ii][jj]/f, hes_anal[count]/f)
count+=1
def tst_hes_ls_i(self,h=0.0000001):
hes_anal = self.ls_i.hessian_as_packed_u()
hes_anal=hes_anal.matrix_packed_u_as_symmetric()
grads = self.ls_i.get_gradient()
self.ls_i.set_p_scale(self.p_scale+h)
tmp = self.ls_i.get_gradient()
tmp = list( (grads-tmp)/-h )
tmp_hess=[]
tmp_hess.append( tmp )
self.ls_i.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_i.set_u_rwgk(u_tmp)
tmp = self.ls_i.get_gradient()
tmp = (grads - tmp)/-h
tmp_hess.append( list(tmp) )
self.ls_i.set_u_rwgk(self.u)
count=0
for ii in range(7):
for jj in range(7):
assert approx_equal(tmp_hess[ii][jj]/hes_anal[count], 1 , eps=1e-5)
count+=1
def tst_hes_ls_f(self,h=0.0000001):
hes_anal = self.ls_f.hessian_as_packed_u()
hes_anal=hes_anal.matrix_packed_u_as_symmetric()
grads = self.ls_f.get_gradient()
self.ls_f.set_p_scale(self.p_scale+h)
tmp = self.ls_f.get_gradient()
tmp = list( (grads-tmp)/-h )
tmp_hess=[]
tmp_hess.append( tmp )
self.ls_f.set_p_scale(self.p_scale)
for ii in range(6):
u_tmp=list(flex.double(self.u).deep_copy())
u_tmp[ii]+=h
self.ls_f.set_u_rwgk(u_tmp)
tmp = self.ls_f.get_gradient()
tmp = (grads - tmp)/-h
tmp_hess.append( list(tmp) )
self.ls_f.set_u_rwgk(self.u)
count=0
for ii in range(7):
for jj in range(7):
assert approx_equal(tmp_hess[ii][jj]/hes_anal[count], 1 , eps=1e-5)
count+=1
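# Editor's note (illustrative sketch, not original code): the tst_hes_*
# methods above validate a packed upper-triangle Hessian by differencing the
# gradient column by column.  The same idea on a toy quadratic
# f(x) = 0.5*x'Ax, whose Hessian is exactly A:
def _hessian_check_sketch():
  import numpy as np
  A = np.array([[4.0, 1.0], [1.0, 3.0]])
  grad = lambda x: A.dot(x)
  x0 = np.array([1.0, 2.0])
  h = 1.0e-7
  hess_numeric = np.empty((2, 2))
  for i in range(2):
    x_step = x0.copy()
    x_step[i] += h
    hess_numeric[:, i] = (grad(x_step) - grad(x0)) / h
  assert np.allclose(hess_numeric, A, atol=1.0e-4)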
def random_data(B_add=35,
n_residues=585.0,
d_min=3.5):
unit_cell = uctbx.unit_cell( (81.0, 81.0, 61.0, 90.0, 90.0, 120.0) )
xtal = crystal.symmetry(unit_cell, " P 3 ")
## In P3 I do not have to worry about centrics or reflections with different
## epsilons.
miller_set = miller.build_set(
crystal_symmetry = xtal,
anomalous_flag = False,
d_min = d_min)
## Now make an array with d_star_sq values
d_star_sq = miller_set.d_spacings().data()
d_star_sq = 1.0/(d_star_sq*d_star_sq)
asu = {"H":8.0*n_residues*1.0,
"C":5.0*n_residues*1.0,
"N":1.5*n_residues*1.0,
"O":1.2*n_residues*1.0}
scat_info = absolute_scaling.scattering_information(
asu_contents = asu,
fraction_protein=1.0,
fraction_nucleic=0.0)
scat_info.scat_data(d_star_sq)
gamma_prot = scat_info.gamma_tot
sigma_prot = scat_info.sigma_tot_sq
  ## The number of residues is multiplied by the Z of the spacegroup
protein_total = sigma_prot * (1.0+gamma_prot)
## add a B-value of 35 please
protein_total = protein_total*flex.exp(-B_add*d_star_sq/2.0)
## Now that has been done,
## We can make random structure factors
normalised_random_intensities = \
random_transform.wilson_intensity_variate(protein_total.size())
random_intensities = normalised_random_intensities*protein_total*math.exp(6)
std_dev = random_intensities*5.0/100.0
noise = random_transform.normal_variate(N=protein_total.size())
noise = noise*std_dev
random_intensities=noise+random_intensities
  ## Stuff the arrays into the miller array
miller_array = miller.array(miller_set,
data=random_intensities,
sigmas=std_dev)
miller_array=miller_array.set_observation_type(
xray.observation_types.intensity())
miller_array = miller_array.f_sq_as_f()
return (miller_array)
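# Editor's note (hedged sketch): for acentric reflections the Wilson
# distribution of normalised intensities is exponential with unit mean, which
# is presumably what random_transform.wilson_intensity_variate draws from;
# the same variates in plain numpy:
def _wilson_variate_sketch(n=100000):
  import numpy as np
  rng = np.random.default_rng(123)
  z = rng.exponential(scale=1.0, size=n)  # normalised intensities, <z> = 1
  assert abs(z.mean() - 1.0) < 0.02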
def test_scaling_on_random_data(B_add):
miller_array = random_data(B_add,n_residues=100.0)
scale_object_iso = absolute_scaling.ml_iso_absolute_scaling(
miller_array,
n_residues=100.0)
## compare the results please
assert approx_equal(B_add, scale_object_iso.b_wilson, eps=5)
scale_object_aniso = absolute_scaling.ml_aniso_absolute_scaling(
miller_array,
n_residues=100.0)
assert approx_equal(B_add, scale_object_aniso.b_cart[0], eps=5)
assert approx_equal(B_add, scale_object_aniso.b_cart[1], eps=5)
assert approx_equal(B_add, scale_object_aniso.b_cart[2], eps=5)
def test_scattering_info():
miller_array = random_data(35.0, d_min=2.5 )
d_star_sq = miller_array.d_spacings().data()
d_star_sq = 1.0/(d_star_sq*d_star_sq)
asu = {"H":8.0*585.0,"C":5.0*585.0,"N":1.5*585.0, "O":1.2*585.0}
scat_info = absolute_scaling.scattering_information(
asu_contents = asu,
fraction_protein=1.0,
fraction_nucleic=0.0)
scat_info.scat_data(d_star_sq)
scat_info2 = absolute_scaling.scattering_information(
n_residues=585.0)
scat_info2.scat_data(d_star_sq)
sigma_prot = scaling.get_sigma_prot_sq(d_star_sq,195.0*3.0)
# Testing for consistency
for ii in range(d_star_sq.size()):
assert approx_equal(scat_info.sigma_tot_sq[ii],
scat_info2.sigma_tot_sq[ii],
eps=1e-03)
assert approx_equal(scat_info.sigma_tot_sq[ii],
sigma_prot[ii],
eps=0.5)
def twin_the_data_and_analyse(twin_operator,twin_fraction=0.2):
out_string = StringIO()
miller_array = random_data(35).map_to_asu()
miller_array = miller_array.f_as_f_sq()
cb_op = sgtbx.change_of_basis_op( twin_operator )
miller_array_mod, miller_array_twin = miller_array.common_sets(
miller_array.change_basis( cb_op ).map_to_asu() )
twinned_miller = miller_array_mod.customized_copy(
data = (1.0-twin_fraction)*miller_array_mod.data()
+ twin_fraction*miller_array_twin.data(),
sigmas = flex.sqrt(
flex.pow( ((1.0-twin_fraction)*miller_array_mod.sigmas()),2.0)+\
flex.pow( ((twin_fraction)*miller_array_twin.sigmas()),2.0))
)
twinned_miller.set_observation_type( miller_array.observation_type())
twin_anal_object = t_a.twin_analyses(twinned_miller,
out=out_string,
verbose=-100)
index = twin_anal_object.twin_summary.most_worrysome_twin_law
assert approx_equal(
twin_anal_object.twin_summary.britton_alpha[index],
twin_fraction,eps=0.1)
assert approx_equal(twin_anal_object.twin_law_dependent_analyses[index].ml_murray_rust.estimated_alpha,
twin_fraction, eps=0.1)
## Untwinned data standards
if twin_fraction==0:
## L-test
assert approx_equal(twin_anal_object.l_test.mean_l, 0.50,eps=0.1)
## Wilson ratios
assert approx_equal(twin_anal_object.twin_summary.i_ratio,
2.00,
eps=0.1)
## H-test
assert approx_equal(
twin_anal_object.twin_law_dependent_analyses[index].h_test.mean_h,
0.50,eps=0.1)
## Perfect twin standards
if twin_fraction==0.5:
assert approx_equal(twin_anal_object.l_test.mean_l, 0.375,eps=0.1)
assert approx_equal(twin_anal_object.twin_summary.i_ratio,
1.50,eps=0.1)
assert approx_equal(
twin_anal_object.twin_law_dependent_analyses[index].h_test.mean_h,
0.00,eps=0.1)
## Just make sure we actually detect significant twinning
if twin_fraction > 0.10:
assert (twin_anal_object.twin_summary.maha_l > 3.0)
## The patterson origin peak should be smallish ...
assert (twin_anal_object.twin_summary.patterson_p_value > 0.01)
# and the brief test should be passed as well
answer = t_a.twin_analyses_brief( twinned_miller,out=out_string,verbose=-100 )
if twin_fraction > 0.10:
assert answer is True
def test_twin_r_value(twin_operator):
miller_array = random_data(35).map_to_asu()
miller_array = miller_array.f_as_f_sq()
for twin_fraction, expected_r_abs,expected_r_sq in zip(
[0,0.1,0.2,0.3,0.4,0.5],
[0.50,0.40,0.30,0.20,0.10,0.0],
[0.333,0.213,0.120,0.0533,0.0133,0.00]):
cb_op = sgtbx.change_of_basis_op( twin_operator )
miller_array_mod, miller_array_twin = miller_array.common_sets(
miller_array.change_basis( cb_op ).map_to_asu() )
twinned_miller = miller_array_mod.customized_copy(
data = (1.0-twin_fraction)*miller_array_mod.data()
+ twin_fraction*miller_array_twin.data(),
sigmas = flex.sqrt(
flex.pow( ((1.0-twin_fraction)*miller_array_mod.sigmas()),2.0)+\
flex.pow( ((twin_fraction)*miller_array_twin.sigmas()),2.0))
)
twinned_miller.set_observation_type( miller_array.observation_type())
twin_r = scaling.twin_r( twinned_miller.indices(),
twinned_miller.data(),
twinned_miller.space_group(),
twinned_miller.anomalous_flag(),
cb_op.c().r().as_double()[0:9] )
assert approx_equal(twin_r.r_abs_value(), expected_r_abs, 0.08)
assert approx_equal(twin_r.r_sq_value(), expected_r_sq, 0.08)
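# Editor's note (illustrative): the twin R value tested above is the classic
#   R_abs = sum |I(h) - I(h')| / sum (I(h) + I(h')),
# which is ~0.5 for unrelated acentric data and falls to 0 for a perfect
# twin.  A plain-numpy sketch under those assumptions:
def _twin_r_abs_sketch(alpha=0.2, n=100000):
  import numpy as np
  rng = np.random.default_rng(1)
  i1, i2 = rng.exponential(size=n), rng.exponential(size=n)
  t1 = (1.0 - alpha)*i1 + alpha*i2  # twinned observation of h
  t2 = (1.0 - alpha)*i2 + alpha*i1  # twinned observation of h'
  return np.sum(np.abs(t1 - t2))/np.sum(t1 + t2)  # ~0.30 for alpha = 0.2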
def test_constant():
  # make sure that the tmp_const in the symmetry_issues class
  # does not cause any overflow problems
math.log(1e-250)
def test_kernel_based_normalisation():
miller_array = random_data(35.0, d_min=2.5 )
normalizer = absolute_scaling.kernel_normalisation(
miller_array, auto_kernel=True)
z_values = normalizer.normalised_miller.data()/\
normalizer.normalised_miller.epsilons().data().as_double()
z_values = flex.mean(z_values)
assert approx_equal(1.0,z_values,eps=0.05)
# This should raise an error rather than enter an infinite loop
with raises(AssertionError) as e:
absolute_scaling.kernel_normalisation(
miller_array[:1].set_observation_type_xray_amplitude(), auto_kernel=True)
def test_ml_murray_rust():
miller_array = random_data(35.0, d_min=4.5 )
ml_mr_object = scaling.ml_murray_rust(
miller_array.data(),
miller_array.data(),
miller_array.indices(),
miller_array.space_group(),
miller_array.anomalous_flag(),
(1,0,0,0,1,0,0,0,1),
6 )
for ii in range(5,30):
for jj in range(5,30):
p1 = ml_mr_object.p_raw(ii/3.0, jj/3.0, 0.25)
p2 = ml_mr_object.num_int(ii/3.0, 1e-13, jj/3.0, 1e-13, -5, 5,0.25, 20)
assert approx_equal( p1, p2, eps=0.01)
def test_all():
test_luts()
test_ml_murray_rust()
test_likelihood_iso()
test_gradients_iso()
test_gamma_prot()
test_sigma_prot()
test_likelihood_aniso()
test_grads_aniso()
test_scaling_on_random_data(10)
test_scaling_on_random_data(20)
test_scaling_on_random_data(40)
test_scaling_on_random_data(70)
test_scaling_on_random_data(80)
scaling_tester()
twin_the_data_and_analyse('h+k,-k,-l',0.00)
twin_the_data_and_analyse('h+k,-k,-l',0.10)
twin_the_data_and_analyse('h+k,-k,-l',0.20)
twin_the_data_and_analyse('h+k,-k,-l',0.30)
twin_the_data_and_analyse('h+k,-k,-l',0.50)
test_scattering_info()
test_kernel_based_normalisation()
test_twin_r_value('h+k,-k,-l')
test_constant()
def run(args):
assert len(args) == 0
test_all()
print(format_cpu_times())
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
|
e18e58653c1ceba793b31dcab793a5d7a502910c
|
33975f46034a58482c5fd82dc5bd2e7c9c22e3c7
|
/python_bindings/test/correctness/addconstant_test.py
|
25b150ed9862d3ecbe8763e0c0e600cf1c2254c7
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-blas-2017"
] |
permissive
|
halide/Halide
|
8fafc15c2361a99d59e5d8146799fd570e7f062a
|
8188b42c50e4ed47a01bbfb43289453c6fc5b1c9
|
refs/heads/main
| 2023-09-04T09:24:42.156763
| 2023-09-01T17:38:19
| 2023-09-01T17:38:19
| 5,241,312
| 5,014
| 1,142
|
NOASSERTION
| 2023-09-14T19:38:55
| 2012-07-31T04:05:25
|
C++
|
UTF-8
|
Python
| false
| false
| 7,154
|
py
|
addconstant_test.py
|
from addconstantcpp import addconstantcpp
from addconstantpy import addconstantpy
from addconstantcpp_with_offset_42 import addconstantcpp_with_offset_42
from addconstantpy_with_offset_42 import addconstantpy_with_offset_42
from addconstantcpp_with_negative_offset import addconstantcpp_with_negative_offset
from addconstantpy_with_negative_offset import addconstantpy_with_negative_offset
import numpy
TESTS_AND_OFFSETS = [
(addconstantcpp, 0),
(addconstantpy, 0),
(addconstantcpp_with_offset_42, 42),
(addconstantpy_with_offset_42, 42),
(addconstantcpp_with_negative_offset, -1),
(addconstantpy_with_negative_offset, -1),
]
ERROR_THRESHOLD = 0.0001
def test(addconstant_impl_func, offset):
scalar_u1 = True
scalar_u8 = 3
scalar_u16 = 49153
scalar_u32 = 65537
scalar_u64 = 5724968371
scalar_i8 = -7
scalar_i16 = -30712
scalar_i32 = 98901
scalar_i64 = -8163465847
scalar_float = 3.14159
scalar_double = 1.61803
input_u8 = numpy.array([0, 1, 2], dtype=numpy.uint8)
input_u16 = numpy.array([0, 256, 512], dtype=numpy.uint16)
input_u32 = numpy.array([0, 65536, 131072], dtype=numpy.uint32)
input_u64 = numpy.array([0, 4294967296, 8589934592], dtype=numpy.uint64)
input_i8 = numpy.array([1, -2, 3], dtype=numpy.int8)
input_i16 = numpy.array([1, -256, 512], dtype=numpy.int16)
input_i32 = numpy.array([1, -65536, 131072], dtype=numpy.int32)
input_i64 = numpy.array([0, -4294967296, 8589934592], dtype=numpy.int64)
input_float = numpy.array([3.14, 2.718, 1.618], dtype=numpy.float32)
input_double = numpy.array([3.14, 2.718, 1.618], dtype=numpy.float64)
input_half = numpy.array([3.14, 2.718, 1.618], dtype=numpy.float16)
input_2d = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=numpy.int8, order="F")
input_3d = numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=numpy.int8)
output_u8 = numpy.zeros((3,), dtype=numpy.uint8)
output_u16 = numpy.zeros((3,), dtype=numpy.uint16)
output_u32 = numpy.zeros((3,), dtype=numpy.uint32)
output_u64 = numpy.zeros((3,), dtype=numpy.uint64)
output_i8 = numpy.zeros((3,), dtype=numpy.int8)
output_i16 = numpy.zeros((3,), dtype=numpy.int16)
output_i32 = numpy.zeros((3,), dtype=numpy.int32)
output_i64 = numpy.zeros((3,), dtype=numpy.int64)
output_float = numpy.zeros((3,), dtype=numpy.float32)
output_double = numpy.zeros((3,), dtype=numpy.float64)
output_half = numpy.zeros((3,), dtype=numpy.float16)
output_2d = numpy.zeros((2, 3), dtype=numpy.int8, order="F")
output_3d = numpy.zeros((2, 2, 2), dtype=numpy.int8)
addconstant_impl_func(
scalar_u1,
scalar_u8,
scalar_u16,
scalar_u32,
scalar_u64,
scalar_i8,
scalar_i16,
scalar_i32,
scalar_i64,
scalar_float,
scalar_double,
input_u8,
input_u16,
input_u32,
input_u64,
input_i8,
input_i16,
input_i32,
input_i64,
input_float,
input_double,
input_half,
input_2d,
input_3d,
output_u8,
output_u16,
output_u32,
output_u64,
output_i8,
output_i16,
output_i32,
output_i64,
output_float,
output_double,
output_half,
output_2d,
output_3d,
)
combinations = [
("u8", input_u8, output_u8, scalar_u8),
("u16", input_u16, output_u16, scalar_u16),
("u32", input_u32, output_u32, scalar_u32),
("u64", input_u64, output_u64, scalar_u64),
("i8", input_i8, output_i8, scalar_i8),
("i16", input_i16, output_i16, scalar_i16),
("i32", input_i32, output_i32, scalar_i32),
("i64", input_i64, output_i64, scalar_i64),
("float", input_float, output_float, scalar_float),
("double", input_double, output_double, scalar_double),
("half", input_half, output_half, scalar_float),
]
for _, input, output, scalar in combinations:
for i, o in zip(input, output):
scalar_as_numpy = numpy.array(scalar).astype(input.dtype)
assert abs(o - (i + scalar_as_numpy)) < ERROR_THRESHOLD
for x in range(input_2d.shape[0]):
for y in range(input_2d.shape[1]):
assert output_2d[x, y] == input_2d[x, y] + scalar_i8
for x in range(input_3d.shape[0]):
for y in range(input_3d.shape[1]):
for z in range(input_3d.shape[2]):
assert output_3d[x, y, z] == input_3d[x, y, z] + scalar_i8 + offset
try:
# Expected requirement failure #1
scalar_i32 = 0
addconstant_impl_func(
scalar_u1,
scalar_u8,
scalar_u16,
scalar_u32,
scalar_u64,
scalar_i8,
scalar_i16,
scalar_i32,
scalar_i64,
scalar_float,
scalar_double,
input_u8,
input_u16,
input_u32,
input_u64,
input_i8,
input_i16,
input_i32,
input_i64,
input_float,
input_double,
input_half,
input_2d,
input_3d,
output_u8,
output_u16,
output_u32,
output_u64,
output_i8,
output_i16,
output_i32,
output_i64,
output_float,
output_double,
output_half,
output_2d,
output_3d,
)
except RuntimeError as e:
assert str(e) == "Halide Runtime Error: -27", e
else:
assert False, "Did not see expected exception!"
try:
        # Expected requirement failure #2 -- note that for AOT-compiled
        # code in Python, the error message is strictly numeric (the text
        # of the error isn't currently propagated in the exception).
scalar_i32 = -1
addconstant_impl_func(
scalar_u1,
scalar_u8,
scalar_u16,
scalar_u32,
scalar_u64,
scalar_i8,
scalar_i16,
scalar_i32,
scalar_i64,
scalar_float,
scalar_double,
input_u8,
input_u16,
input_u32,
input_u64,
input_i8,
input_i16,
input_i32,
input_i64,
input_float,
input_double,
input_half,
input_2d,
input_3d,
output_u8,
output_u16,
output_u32,
output_u64,
output_i8,
output_i16,
output_i32,
output_i64,
output_float,
output_double,
output_half,
output_2d,
output_3d,
)
except RuntimeError as e:
assert str(e) == "Halide Runtime Error: -27", e
else:
assert False, "Did not see expected exception!"
if __name__ == "__main__":
for t, o in TESTS_AND_OFFSETS:
test(t, o)
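# Editor's note (illustrative): computing the reference value with
# scalar_as_numpy above matters because narrow dtypes round or wrap; e.g.
# 3.14159 is not exactly representable in float16:
def _float16_rounding_sketch():
    import numpy
    s = numpy.array(3.14159).astype(numpy.float16)
    assert float(s) != 3.14159  # rounds to ~3.140625 when stored as float16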
|
14e7dd7c63d2e2e7916aed1e817c2c1a64adb3ab
|
469c1754788d8637a74e6306ae3a4e536dba88b0
|
/astroML/plotting/scatter_contour.py
|
49e1e37b4f03444831d60e971f8227f59205aa7d
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
astroML/astroML
|
17bbbfdbd9da06407d727c6e032d23c5a63ec098
|
3ec75de08cddd59577e8c2a79be354c5eaebf9db
|
refs/heads/main
| 2023-08-07T11:54:17.311594
| 2022-12-12T08:52:31
| 2022-12-12T08:52:31
| 6,269,799
| 878
| 300
|
BSD-2-Clause
| 2023-05-09T14:41:47
| 2012-10-17T22:33:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,892
|
py
|
scatter_contour.py
|
import numpy as np
def scatter_contour(x, y,
levels=10,
threshold=100,
log_counts=False,
histogram2d_args=None,
plot_args=None,
contour_args=None,
filled_contour=True,
ax=None):
"""Scatter plot with contour over dense regions
Parameters
----------
x, y : arrays
x and y data for the contour plot
levels : integer or array (optional, default=10)
number of contour levels, or array of contour levels
threshold : float (default=100)
number of points per 2D bin at which to begin drawing contours
    log_counts : boolean (optional)
if True, contour levels are the base-10 logarithm of bin counts.
histogram2d_args : dict
keyword arguments passed to numpy.histogram2d
see doc string of numpy.histogram2d for more information
plot_args : dict
keyword arguments passed to plt.plot. By default it will use
dict(marker='.', linestyle='none').
see doc string of pylab.plot for more information
contour_args : dict
keyword arguments passed to plt.contourf or plt.contour
see doc string of pylab.contourf for more information
filled_contour : bool
If True (default) use filled contours. Otherwise, use contour outlines.
ax : pylab.Axes instance
the axes on which to plot. If not specified, the current
axes will be used
Returns
-------
points, contours :
points is the return value of ax.plot()
contours is the return value of ax.contour or ax.contourf
"""
x = np.asarray(x)
y = np.asarray(y)
default_contour_args = dict(zorder=2)
default_plot_args = dict(marker='.', linestyle='none', zorder=1)
if plot_args is not None:
default_plot_args.update(plot_args)
plot_args = default_plot_args
if contour_args is not None:
default_contour_args.update(contour_args)
contour_args = default_contour_args
    if histogram2d_args is None:
        histogram2d_args = {}
if ax is None:
# Import here so that testing with Agg will work
from matplotlib import pyplot as plt
ax = plt.gca()
H, xbins, ybins = np.histogram2d(x, y, **histogram2d_args)
if log_counts:
H = np.log10(1 + H)
threshold = np.log10(1 + threshold)
levels = np.asarray(levels)
if levels.size == 1:
levels = np.linspace(threshold, H.max(), levels)
extent = [xbins[0], xbins[-1], ybins[0], ybins[-1]]
i_min = np.argmin(levels)
# draw a zero-width line: this gives us the outer polygon to
# reduce the number of points we draw
# somewhat hackish... we could probably get the same info from
# the full contour plot below.
outline = ax.contour(H.T, levels[i_min:i_min + 1],
linewidths=0, extent=extent,
alpha=0)
if filled_contour:
contours = ax.contourf(H.T, levels, extent=extent, **contour_args)
else:
contours = ax.contour(H.T, levels, extent=extent, **contour_args)
X = np.hstack([x[:, None], y[:, None]])
if len(outline.allsegs[0]) > 0:
outer_poly = outline.allsegs[0][0]
try:
# this works in newer matplotlib versions
from matplotlib.path import Path
points_inside = Path(outer_poly).contains_points(X)
except ImportError:
# this works in older matplotlib versions
import matplotlib.nxutils as nx
points_inside = nx.points_inside_poly(X, outer_poly)
Xplot = X[~points_inside]
else:
Xplot = X
points = ax.plot(Xplot[:, 0], Xplot[:, 1], **plot_args)
return points, contours
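# Editor's note: a minimal usage sketch (assumes matplotlib is installed with
# an interactive or Agg backend available):
if __name__ == "__main__":
    from matplotlib import pyplot as plt
    rng = np.random.RandomState(0)
    x = rng.normal(size=10000)
    y = x + rng.normal(size=10000)
    fig, ax = plt.subplots()
    scatter_contour(x, y, threshold=200, log_counts=True, ax=ax)
    plt.show()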
|
996dcea6bf2041abec0a9f67918a8453fa2b6137
|
05b85a5260e6a7b236693300208b35bde1ca73ee
|
/fastapi_auth/fastapi_util/settings/base_api_settings.py
|
6c7b9fc32923c58a6f94af313b5910427d04922d
|
[
"MIT"
] |
permissive
|
dmontagu/fastapi-auth
|
d05e3440ee829e57cc7cdac1a987ac51b4e87f3a
|
d0e86774f66bd43e80376de19bdf034eb228dc07
|
refs/heads/master
| 2023-02-16T20:41:35.883202
| 2019-12-18T03:10:51
| 2019-12-18T04:11:46
| 228,746,139
| 131
| 10
|
MIT
| 2023-02-14T21:35:22
| 2019-12-18T03:00:54
|
Python
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
base_api_settings.py
|
from pydantic import BaseSettings
class BaseAPISettings(BaseSettings):
class Config:
env_prefix = ""
arbitrary_types_allowed = True
validate_assignment = True
|
c107f167de1adf2de4023b885f2039a5febceac0
|
c3542b98289c1ba85f62d08b5edbe1a3c18f3c80
|
/folder_size.py
|
7410de8da368af9f12f3d21e83c872d95dba35a8
|
[
"LicenseRef-scancode-unknown",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
geekcomputers/Python
|
16674289843f89f6cc287097f033b928f4181d84
|
bc55e2a2c5a98f4c7597e901a04457dfb9d5df0c
|
refs/heads/master
| 2023-08-18T21:04:18.163283
| 2023-08-17T17:38:16
| 2023-08-17T17:38:16
| 2,881,789
| 32,418
| 15,024
|
MIT
| 2023-09-02T18:40:33
| 2011-11-30T09:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
folder_size.py
|
# Script Name : folder_size.py
# Author : Craig Richards
# Created : 19th July 2012
# Last Modified : 22 February 2016
# Version : 1.0.1
# Modifications : Modified the Printing method and added a few comments
# Description : This will scan the current directory and all subdirectories and display the size.
import os
import sys  # Load the sys module for the argument vector
try:
directory = sys.argv[
1
] # Set the variable directory to be the argument supplied by user.
except IndexError:
sys.exit("Must provide an argument.")
dir_size = 0 # Set the size to 0
fsizedicr = {
"Bytes": 1,
"Kilobytes": float(1) / 1024,
"Megabytes": float(1) / (1024 * 1024),
"Gigabytes": float(1) / (1024 * 1024 * 1024),
}
for (path, dirs, files) in os.walk(
directory
): # Walk through all the directories. For each iteration, os.walk returns the folders, subfolders and files in the dir.
for file in files: # Get all the files
filename = os.path.join(path, file)
dir_size += os.path.getsize(
filename
) # Add the size of each file in the root dir to get the total size.
fsizeList = [
str(round(fsizedicr[key] * dir_size, 2)) + " " + key for key in fsizedicr
] # List of units
if dir_size == 0:
    print("Folder Empty")  # Sanity check to handle the corner case of an empty folder.
else:
for units in sorted(fsizeList)[
::-1
]: # Reverse sort list of units so smallest magnitude units print first.
print("Folder Size: " + units)
|
60ca8c6b4bc0d8804564ea09af685a55c5d94c52
|
8358c8d86600703663eb8a8f30493c20704cf586
|
/xcube/core/gen2/local/transformer.py
|
f7dddbc301b4a0a3c182ec87c20f3967ec13785f
|
[
"MIT"
] |
permissive
|
dcs4cop/xcube
|
612ffeb416dfee4e6a32677a719eab1a26aee990
|
a5a4da14bdc2dba80e0dd7d89b221fb30d148b77
|
refs/heads/master
| 2023-08-17T06:36:57.207806
| 2023-08-08T15:16:09
| 2023-08-08T15:16:09
| 130,693,090
| 149
| 21
|
MIT
| 2023-09-14T07:38:55
| 2018-04-23T12:27:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,217
|
py
|
transformer.py
|
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import ABC, abstractmethod
from typing import Tuple
import xarray as xr
from xcube.core.gridmapping import GridMapping
from xcube.util.progress import observe_progress
from .helpers import is_empty_cube
from .helpers import strip_cube
from ..config import CubeConfig
TransformedCube = Tuple[xr.Dataset, GridMapping, CubeConfig]
class CubeTransformer(ABC):
@abstractmethod
def transform_cube(self,
cube: xr.Dataset,
gm: GridMapping,
cube_config: CubeConfig) -> TransformedCube:
"""
Transform given *cube*, grid mapping *gm*, and cube
configuration *cube_config* into a potentially new cube,
a new grid mapping, and less restrictive cube configuration
*cube_config*.
The latter is achieved by returning a derived *cube_config* where
all properties that have been "consumed" by this transformer
are removed. See :meth:`CubeConfig.drop_property`.
"""
class CubeIdentity(CubeTransformer):
def transform_cube(self,
cube: xr.Dataset,
gm: GridMapping,
cube_config: CubeConfig) -> TransformedCube:
"""
Return *cube*, grid mapping *gm*, and parameters without change.
"""
return cube, gm, cube_config
def transform_cube(t_cube: TransformedCube,
transformer: CubeTransformer,
label: str = '') -> TransformedCube:
empty_cube = is_empty_cube(t_cube[0])
identity = isinstance(transformer, CubeIdentity)
if not label:
label = f'{type(transformer).__name__}'
if identity:
label += ' (step not applicable)'
elif empty_cube:
label += ' (step not applicable, empty cube)'
with observe_progress(label, 1) as progress:
if not (identity or empty_cube):
t_cube = transformer.transform_cube(*t_cube)
t_cube = strip_cube(t_cube[0]), t_cube[1], t_cube[2]
progress.worked(1)
return t_cube
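# Editor's note (illustrative sketch, not part of xcube): a concrete
# transformer only needs to implement transform_cube().  This one drops
# all-NaN data variables and passes gm and cube_config through unchanged;
# a real transformer would also drop any cube_config properties it consumed.
class DropEmptyVariables(CubeTransformer):
    def transform_cube(self,
                       cube: xr.Dataset,
                       gm: GridMapping,
                       cube_config: CubeConfig) -> TransformedCube:
        keep = [name for name, var in cube.data_vars.items()
                if int(var.count()) > 0]
        return cube[keep], gm, cube_config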
|
8b3a06112342bc518d7663210f0eab34707cd8ab
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/test/modeling/test_feature_extraction.py
|
3e85e7f93e88ecdb4c5859f7ed6a1b93daf7c347
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,283
|
py
|
test_feature_extraction.py
|
import pytest
from unittest.mock import MagicMock
from unittest import mock
from pathlib import Path
import haystack
from haystack.errors import ModelingError
from haystack.modeling.model.feature_extraction import FeatureExtractor
class MockedAutoTokenizer:
mocker: MagicMock = MagicMock()
@classmethod
def from_pretrained(cls, *args, **kwargs):
cls.mocker.from_pretrained(*args, **kwargs)
return cls()
class MockedAutoConfig:
mocker: MagicMock = MagicMock()
model_type: str = "mocked"
@classmethod
def from_pretrained(cls, *args, **kwargs):
cls.mocker.from_pretrained(*args, **kwargs)
return cls()
@pytest.fixture()
def mock_autotokenizer(monkeypatch):
monkeypatch.setattr(
haystack.modeling.model.feature_extraction, "FEATURE_EXTRACTORS", {"mocked": MockedAutoTokenizer}
)
monkeypatch.setattr(haystack.modeling.model.feature_extraction, "AutoConfig", MockedAutoConfig)
monkeypatch.setattr(haystack.modeling.model.feature_extraction, "AutoTokenizer", MockedAutoTokenizer)
@pytest.mark.unit
def test_get_tokenizer_from_HF():
with mock.patch("haystack.modeling.model.feature_extraction.AutoConfig") as mocked_ac:
from haystack.modeling.model.feature_extraction import FEATURE_EXTRACTORS
FEATURE_EXTRACTORS["test"] = mock.MagicMock()
FEATURE_EXTRACTORS["test"].__name__ = "Test"
mocked_ac.from_pretrained.return_value.model_type = "test"
FeatureExtractor(pretrained_model_name_or_path="test-model-name")
FEATURE_EXTRACTORS["test"].from_pretrained.assert_called_with(
pretrained_model_name_or_path="test-model-name", revision=None, use_fast=True, use_auth_token=None
)
# clean up
FEATURE_EXTRACTORS.pop("test")
@pytest.mark.unit
def test_get_tokenizer_from_HF_not_found():
with mock.patch("haystack.modeling.model.feature_extraction.AutoConfig") as mocked_ac:
mocked_ac.from_pretrained.return_value.model_type = "does_not_exist"
with pytest.raises(ModelingError):
FeatureExtractor(pretrained_model_name_or_path="test-model-name")
@pytest.mark.unit
def test_get_tokenizer_from_path_fast():
here = Path(__file__).resolve().parent
mocked_model_folder = here / "samples/test_get_tokenizer_from_path"
with mock.patch("haystack.modeling.model.feature_extraction.transformers") as mocked_tf:
mocked_tf.TestTokenizerFast.__class__.__name__ = "Test Class"
FeatureExtractor(pretrained_model_name_or_path=mocked_model_folder)
mocked_tf.TestTokenizerFast.from_pretrained.assert_called_with(
pretrained_model_name_or_path=str(mocked_model_folder), revision=None, use_fast=True, use_auth_token=None
)
@pytest.mark.unit
def test_get_tokenizer_from_path():
here = Path(__file__).resolve().parent
mocked_model_folder = here / "samples/test_get_tokenizer_from_path"
with mock.patch("haystack.modeling.model.feature_extraction.transformers") as mocked_tf:
mocked_tf.TestTokenizer.__class__.__name__ = "Test Class"
FeatureExtractor(pretrained_model_name_or_path=mocked_model_folder)
mocked_tf.TestTokenizerFast.from_pretrained.assert_called_with(
pretrained_model_name_or_path=str(mocked_model_folder), revision=None, use_fast=True, use_auth_token=None
)
@pytest.mark.unit
def test_get_tokenizer_from_path_class_doesnt_exist():
here = Path(__file__).resolve().parent
mocked_model_folder = here / "samples/test_get_tokenizer_from_path"
with pytest.raises(AttributeError, match="module transformers has no attribute TestTokenizer"):
FeatureExtractor(pretrained_model_name_or_path=mocked_model_folder)
@pytest.mark.unit
def test_get_tokenizer_keep_accents():
here = Path(__file__).resolve().parent
mocked_model_folder = here / "samples/test_get_tokenizer_from_path"
with mock.patch("haystack.modeling.model.feature_extraction.transformers") as mocked_tf:
mocked_tf.TestTokenizer.__class__.__name__ = "Test Class"
FeatureExtractor(pretrained_model_name_or_path=mocked_model_folder, keep_accents=True)
mocked_tf.TestTokenizerFast.from_pretrained.assert_called_with(
pretrained_model_name_or_path=str(mocked_model_folder),
revision=None,
use_fast=True,
use_auth_token=None,
keep_accents=True,
)
FEATURE_EXTRACTORS_TO_TEST = ["bert-base-cased"]
@pytest.mark.integration
@pytest.mark.parametrize("model_name", FEATURE_EXTRACTORS_TO_TEST)
def test_load_modify_save_load(tmp_path, model_name: str):
# Load base tokenizer
feature_extractor = FeatureExtractor(pretrained_model_name_or_path=model_name, do_lower_case=False)
# Add new tokens
feature_extractor.feature_extractor.add_tokens(new_tokens=["neverseentokens"])
# Save modified tokenizer
save_dir = tmp_path / "saved_tokenizer"
feature_extractor.feature_extractor.save_pretrained(save_dir)
# Load modified tokenizer
new_feature_extractor = FeatureExtractor(pretrained_model_name_or_path=save_dir)
# Assert the new tokenizer still has the added tokens
assert len(new_feature_extractor.feature_extractor) == len(feature_extractor.feature_extractor)
|
7ed6b495a4fdced335b1fb94f5cca60189b796e7
|
cea371c0b298635f2d843e15ffe2be7912a624fd
|
/peewee_asyncext.py
|
c1c79d88e950667cf6ca1534ff53ae01743d5c02
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
05bit/peewee-async
|
20bfc5eb611d608a04dbd19a295759d186965512
|
942813a7ebaaefaefe6f5260d0e5025c5e80edcf
|
refs/heads/master
| 2023-08-20T19:36:46.161859
| 2023-05-19T13:16:41
| 2023-05-19T13:16:41
| 24,544,015
| 773
| 129
|
MIT
| 2023-09-14T05:30:55
| 2014-09-27T22:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
peewee_asyncext.py
|
"""
peewee-async
============
Asynchronous interface for `peewee`_ ORM powered by `asyncio`_:
https://github.com/05bit/peewee-async
.. _peewee: https://github.com/coleifer/peewee
.. _asyncio: https://docs.python.org/3/library/asyncio.html
Licensed under The MIT License (MIT)
Copyright (c) 2014, Alexey Kinëv <rudy@05bit.com>
"""
from playhouse import postgres_ext as ext
from playhouse.db_url import register_database
from peewee_async import AsyncPostgresqlMixin, aiopg
class PostgresqlExtDatabase(AsyncPostgresqlMixin, ext.PostgresqlExtDatabase):
"""PosgreSQL database extended driver providing **single drop-in sync**
connection and **single async connection** interface.
JSON fields support is always enabled, HStore supports is enabled by
default, but can be disabled with ``register_hstore=False`` argument.
Example::
database = PostgresqlExtDatabase('test', register_hstore=False)
See also:
https://peewee.readthedocs.io/en/latest/peewee/
playhouse.html#PostgresqlExtDatabase
"""
def init(self, database, **kwargs):
self.min_connections = 1
self.max_connections = 1
super().init(database, **kwargs)
self.init_async(enable_json=True,
enable_hstore=self._register_hstore)
@property
def use_speedups(self):
return False
@use_speedups.setter
def use_speedups(self, value):
pass
register_database(PostgresqlExtDatabase, 'postgresext+async',
'postgresqlext+async')
class PooledPostgresqlExtDatabase(AsyncPostgresqlMixin,
ext.PostgresqlExtDatabase):
"""PosgreSQL database extended driver providing **single drop-in sync**
connection and **async connections pool** interface.
JSON fields support is always enabled, HStore supports is enabled by
default, but can be disabled with ``register_hstore=False`` argument.
:param max_connections: connections pool size
Example::
database = PooledPostgresqlExtDatabase('test', register_hstore=False,
max_connections=20)
See also:
https://peewee.readthedocs.io/en/latest/peewee/
playhouse.html#PostgresqlExtDatabase
"""
def init(self, database, **kwargs):
self.min_connections = kwargs.pop('min_connections', 1)
self.max_connections = kwargs.pop('max_connections', 20)
self._timeout = kwargs.pop('connection_timeout', aiopg.DEFAULT_TIMEOUT)
super().init(database, **kwargs)
self.init_async(enable_json=True,
enable_hstore=self._register_hstore)
@property
def use_speedups(self):
return False
@use_speedups.setter
def use_speedups(self, value):
pass
register_database(PooledPostgresqlExtDatabase, 'postgresext+pool+async',
'postgresqlext+pool+async')
|
7931f54570dc2435f34c5675a0bfce23075f2e88
|
a2cbddc824efc370cfc5f371ece96139d849fc71
|
/decentralized/utils/multi_robot_plot.py
|
82e6afeaca983ea9b314ca4c0b8ad4b4c1ec05da
|
[
"MIT"
] |
permissive
|
atb033/multi_agent_path_planning
|
28df529fccd3ce19f84aac4a8071c549d5419ab4
|
89f69e11944b172e11c3bcbbecc57344eaa4be9c
|
refs/heads/master
| 2023-04-17T22:06:20.711394
| 2022-10-22T23:00:29
| 2022-10-22T23:00:29
| 184,384,043
| 786
| 211
|
MIT
| 2023-04-05T15:02:43
| 2019-05-01T07:39:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,217
|
py
|
multi_robot_plot.py
|
"""
Plotting tool for 2D multi-robot system
author: Ashwin Bose (@atb033)
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Circle
import numpy as np
def plot_robot_and_obstacles(robot, obstacles, robot_radius, num_steps, sim_time, filename):
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(0, 10), ylim=(0, 10))
ax.set_aspect('equal')
ax.grid()
line, = ax.plot([], [], '--r')
robot_patch = Circle((robot[0, 0], robot[1, 0]),
robot_radius, facecolor='green', edgecolor='black')
obstacle_list = []
    for _ in range(np.shape(obstacles)[2]):
obstacle = Circle((0, 0), robot_radius,
facecolor='aqua', edgecolor='black')
obstacle_list.append(obstacle)
def init():
ax.add_patch(robot_patch)
for obstacle in obstacle_list:
ax.add_patch(obstacle)
line.set_data([], [])
return [robot_patch] + [line] + obstacle_list
def animate(i):
robot_patch.center = (robot[0, i], robot[1, i])
for j in range(len(obstacle_list)):
obstacle_list[j].center = (obstacles[0, i, j], obstacles[1, i, j])
line.set_data(robot[0, :i], robot[1, :i])
return [robot_patch] + [line] + obstacle_list
init()
step = (sim_time / num_steps)
for i in range(num_steps):
animate(i)
plt.pause(step)
# Save animation
if not filename:
return
ani = animation.FuncAnimation(
fig, animate, np.arange(1, num_steps), interval=200,
blit=True, init_func=init)
ani.save(filename, "ffmpeg", fps=30)
def plot_robot(robot, timestep, radius=1, is_obstacle=False):
if robot is None:
return
center = robot[:2, timestep]
x = center[0]
y = center[1]
if is_obstacle:
circle = plt.Circle((x, y), radius, color='aqua', ec='black')
plt.plot(robot[0, :timestep], robot[1, :timestep], '--r',)
else:
circle = plt.Circle((x, y), radius, color='green', ec='black')
plt.plot(robot[0, :timestep], robot[1, :timestep], 'blue')
plt.gcf().gca().add_artist(circle)
|
212951fd82b0cfad895a03e119c0d45f62ac6f10
|
f2034c76a11ce6296131d2bab89a5dae7d59edfe
|
/python/dllib/src/bigdl/dllib/utils/common.py
|
cda02f78db46a14a036638eb6424990dc089570a
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/BigDL
|
e22cd917eecc7340bda3df4356acba0623a62ef6
|
4ffa012a426e0d16ed13b707b03d8787ddca6aa4
|
refs/heads/main
| 2023-08-22T06:31:37.923091
| 2023-08-22T02:58:42
| 2023-08-22T02:58:42
| 66,823,715
| 4,913
| 1,327
|
Apache-2.0
| 2023-09-14T10:41:50
| 2016-08-29T07:59:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 29,388
|
py
|
common.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import sys
import six
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import ListConverter, JavaArray, JavaList, JavaMap, MapConverter
from py4j.java_gateway import JavaGateway, GatewayClient
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
from pyspark.files import SparkFiles
import numpy as np
import threading
import tempfile
import traceback
from bigdl.dllib.utils.engine import get_bigdl_classpath, is_spark_below_2_2
from bigdl.dllib.utils.log4Error import *
INTMAX = 2147483647
INTMIN = -2147483648
DOUBLEMAX = 1.7976931348623157E308
if sys.version >= '3':
long = int
unicode = str
class SingletonMixin(object):
_lock = threading.RLock()
_instance = None
@classmethod
def instance(cls,
bigdl_type, *args):
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = cls(bigdl_type, *args)
return cls._instance
class GatewayWrapper(SingletonMixin):
def __init__(self, bigdl_type, port=25333):
self.value = JavaGateway(GatewayClient(port=port), auto_convert=True)
class JavaCreator(SingletonMixin):
__creator_class = [
"com.intel.analytics.bigdl.dllib.utils.python.api.PythonBigDLKeras",
"com.intel.analytics.bigdl.dllib.utils.python.api.PythonBigDLOnnx",
"com.intel.analytics.bigdl.dllib.common.PythonZoo",
"com.intel.analytics.bigdl.dllib.nnframes.python.PythonNNFrames",
"com.intel.analytics.bigdl.dllib.nnframes.python.PythonTreeModel",
"com.intel.analytics.bigdl.dllib.feature.python.PythonImageFeature",
"com.intel.analytics.bigdl.dllib.feature.python.PythonTextFeature",
"com.intel.analytics.bigdl.dllib.feature.python.PythonFeatureSet",
"com.intel.analytics.bigdl.dllib.keras.python.PythonZooKeras",
"com.intel.analytics.bigdl.dllib.keras.python.PythonAutoGrad",
"com.intel.analytics.bigdl.dllib.net.python.PythonDllibNet",
"com.intel.analytics.bigdl.dllib.estimator.python.PythonEstimator"
]
@classmethod
def add_creator_class(cls, jinvoker):
with JavaCreator._lock:
JavaCreator.__creator_class.append(jinvoker)
JavaCreator._instance = None
@classmethod
def get_creator_class(cls):
with JavaCreator._lock:
return JavaCreator.__creator_class
@classmethod
def set_creator_class(cls, cclass):
if isinstance(cclass, six.string_types):
cclass = [cclass]
with JavaCreator._lock:
JavaCreator.__creator_class = cclass
JavaCreator._instance = None
def __init__(self, bigdl_type, gateway):
self.value = []
for creator_class in JavaCreator.get_creator_class():
jclass = getattr(gateway.jvm, creator_class)
if bigdl_type == "float":
self.value.append(getattr(jclass, "ofFloat")())
elif bigdl_type == "double":
self.value.append(getattr(jclass, "ofDouble")())
else:
invalidInputError(False, "Not supported bigdl_type: %s" % bigdl_type)
class JavaValue(object):
def jvm_class_constructor(self):
name = "create" + self.__class__.__name__
print("creating: " + name)
return name
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, self.jvm_class_constructor(), *args)
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString()
class EvaluatedResult():
"""
A testing result used to benchmark the model quality.
"""
def __init__(self, result, total_num, method):
"""
:param result: the validation result. i.e: top1 accuracy percentage.
:param total_num: the total processed records.
:param method: the validation method. i.e: Top1Accuracy
"""
self.result = result
self.total_num = total_num
self.method = method
def __reduce__(self):
return (EvaluatedResult, (self.result, self.total_num, self.method))
def __str__(self):
return "Evaluated result: %s, total_num: %s, method: %s" % (
self.result, self.total_num, self.method)
def get_dtype(bigdl_type):
# Always return float32 for now
return "float32"
class JActivity(object):
def __init__(self, value):
self.value = value
class JTensor(object):
"""
    A wrapper to ease our work when we need to pass or return a Tensor to/from Scala.
>>> import numpy as np
>>> from bigdl.dllib.utils.common import JTensor
>>> np.random.seed(123)
>>>
"""
def __init__(self, storage, shape, bigdl_type="float", indices=None):
"""
:param storage: values in this tensor
:param shape: shape of this tensor
:param bigdl_type: numeric type
:param indices: if indices is provided, means this is a SparseTensor;
if not provided, means this is a DenseTensor
"""
if isinstance(storage, bytes) and isinstance(shape, bytes):
self.storage = np.frombuffer(storage, dtype=get_dtype(bigdl_type))
self.shape = np.frombuffer(shape, dtype=np.int32)
else:
self.storage = np.array(storage, dtype=get_dtype(bigdl_type))
self.shape = np.array(shape, dtype=np.int32)
if indices is None:
self.indices = None
elif isinstance(indices, bytes):
self.indices = np.frombuffer(indices, dtype=np.int32)
else:
invalidInputError(isinstance(indices, np.ndarray),
f"indices should be a np.ndarray, not ${type(indices)},"
f" ${str(indices)}")
self.indices = np.array(indices, dtype=np.int32)
self.bigdl_type = bigdl_type
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert a ndarray to a DenseTensor which would be used in Java side.
>>> import numpy as np
>>> from bigdl.dllib.utils.common import JTensor
>>> from bigdl.dllib.utils.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477,
... 0.71946895, 0.42310646]])
>>> expected_shape = np.array([2, 3])
>>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
invalidInputError(isinstance(a_ndarray, np.ndarray),
f"input should be a np.ndarray, not ${type(a_ndarray)}")
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
@classmethod
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
Convert a three ndarray to SparseTensor which would be used in Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
Present a dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
        :param a_ndarray: non-zero elements in this SparseTensor
        :param i_ndarray: zero-based indices of the non-zero elements;
            i_ndarray's shape should be (shape.size, a_ndarray.size),
            and the indices of the i-th non-zero element are i_ndarray[:, i],
            which should be zero-based and ascending
        :param shape: shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.dllib.utils.common import JTensor
>>> from bigdl.dllib.utils.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> expected_storage = np.array([1., 2., 3., 4., 5., 6.])
>>> expected_shape = np.array([10])
>>> expected_indices = np.array([1, 2, 3, 4, 5, 6])
>>> np.testing.assert_allclose(result.storage, expected_storage)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> np.testing.assert_allclose(result.indices, expected_indices)
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
invalidInputError(isinstance(a_ndarray, np.ndarray),
f"input should be a np.ndarray, not ${type(a_ndarray)}")
invalidInputError(isinstance(i_ndarray, np.ndarray),
f"indices should be a np.ndarray, not ${type(i_ndarray)}")
invalidInputError(i_ndarray.size == a_ndarray.size * shape.size,
f"size of values ${a_ndarray.size * shape.size} and"
f" indices ${i_ndarray.size} should match")
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
        Since a SparseTensor may generate a very big ndarray, this function is
        not supported for SparseTensor.
:return: a ndarray
"""
invalidInputError(self.indices is None, "sparseTensor to ndarray is not supported")
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape) # noqa
def __reduce__(self):
if self.indices is None:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type)
else:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type,
self.indices.tostring())
def __str__(self):
return self.__repr__()
def __repr__(self):
indices = "" if self.indices is None else " ,indices %s" % str(self.indices)
return "JTensor: storage: %s, shape: %s%s, %s" % (
str(self.storage), str(self.shape), indices, self.bigdl_type)
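# Editor's note (illustrative, plain numpy): the (storage, indices, shape)
# triple documented in JTensor.sparse() maps onto a dense vector like this:
def _sparse_to_dense_sketch():
    import numpy as np
    storage = np.array([1., 2., 3., 4., 5., 6.], dtype="float32")
    indices = np.array([1, 2, 3, 4, 5, 6])  # zero-based positions
    shape = np.array([10])
    dense = np.zeros(shape[0], dtype=storage.dtype)
    dense[indices] = storage
    assert (dense == np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])).all()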
class Sample(object):
def __init__(self, features, labels, bigdl_type="float"):
"""
User should always use Sample.from_ndarray to construct Sample.
:param features: a list of JTensors
:param labels: a list of JTensors
:param bigdl_type: "double" or "float"
"""
self.feature = features[0]
self.features = features
self.label = labels[0]
self.bigdl_type = bigdl_type
self.labels = labels
@classmethod
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
Convert a ndarray of features and labels to Sample, which would be used in Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.dllib.utils.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477,
... 0.71946895, 0.42310646]]))
>>> expected_feature_shape = np.array([2, 3])
>>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753,
... 0.343178, 0.72904968]]))
>>> expected_label_shape = np.array([2, 3])
>>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6,
... atol=1e-6)
>>> assert_allclose(sample.features[0].shape, expected_feature_shape)
>>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.labels[0].shape, expected_label_shape)
"""
if isinstance(features, np.ndarray):
features = [features]
else:
invalidInputError(all(isinstance(feature, np.ndarray) for feature in features),
"features should be a list of np.ndarray, not %s" % type(features))
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
invalidInputError(all(isinstance(label, np.ndarray) for label in labels),
"labels should be a list of np.ndarray, not %s" % type(labels))
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
@classmethod
def from_jtensor(cls, features, labels, bigdl_type="float"):
"""
        Convert a sequence of JTensors to a Sample, which will be used on the Java side.
:param features: an JTensor or a list of JTensor
:param labels: an JTensor or a list of JTensor or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> data = np.random.uniform(0, 1, (6)).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> feature0 = JTensor.sparse(data, indices, shape)
>>> feature1 = JTensor.from_ndarray(np.random.uniform(0, 1, (2, 3)).astype("float32"))
>>> sample = Sample.from_jtensor([feature0, feature1], 1)
"""
if isinstance(features, JTensor):
features = [features]
else:
invalidInputError(all(isinstance(feature, JTensor) for feature in features),
"features should be a list of JTensor, not %s" % type(features))
if np.isscalar(labels): # in case labels is a scalar.
labels = [JTensor.from_ndarray(np.array(labels))]
elif isinstance(labels, JTensor):
labels = [labels]
else:
invalidInputError(all(isinstance(label, JTensor) for label in labels),
"labels should be a list of np.ndarray, not %s" % type(labels))
return cls(
features=features,
labels=labels,
bigdl_type=bigdl_type)
def __reduce__(self):
return Sample, (self.features, self.labels, self.bigdl_type)
def __str__(self):
return "Sample: features: %s, labels: %s," % (self.features, self.labels)
def __repr__(self):
return "Sample: features: %s, labels: %s" % (self.features, self.labels)
class RNG():
"""
    Generate tensor data with a seed.
"""
def __init__(self, bigdl_type="float"):
self.bigdl_type = bigdl_type
def set_seed(self, seed):
callBigDlFunc(self.bigdl_type, "setModelSeed", seed)
def uniform(self, a, b, size):
return callBigDlFunc(self.bigdl_type, "uniform", a, b, size).to_ndarray() # noqa
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'Sample',
'EvaluatedResult',
'JTensor',
'JActivity'
]
def init_engine(bigdl_type="float"):
callBigDlFunc(bigdl_type, "initEngine")
# Spark context is supposed to have been created when init_engine is called
get_spark_context()._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.initialize()
def get_bigdl_engine_type(bigdl_type="float"):
return callBigDlFunc(bigdl_type, "getEngineType")
def set_optimizer_version(optimizerVersion, bigdl_type="float"):
return callBigDlFunc(bigdl_type, "setOptimizerVersion", optimizerVersion)
def get_optimizer_version(bigdl_type="float"):
return callBigDlFunc(bigdl_type, "getOptimizerVersion")
def init_executor_gateway(sc, bigdl_type="float"):
callBigDlFunc(bigdl_type, "initExecutorGateway", sc, sc._gateway._gateway_client.port)
def get_node_and_core_number(bigdl_type="float"):
result = callBigDlFunc(bigdl_type, "getNodeAndCoreNumber")
return result[0], result[1]
def redire_spark_logs(bigdl_type="float", log_path=os.getcwd() + "/bigdl.log"):
"""
Redirect spark logs to the specified path.
:param bigdl_type: "double" or "float"
:param log_path: the file path to be redirected to; the default file is under the current
workspace named `bigdl.log`.
"""
callBigDlFunc(bigdl_type, "redirectSparkLogs", log_path)
def show_bigdl_info_logs(bigdl_type="float"):
"""
Set BigDL log level to INFO.
:param bigdl_type: "double" or "float"
"""
callBigDlFunc(bigdl_type, "showBigDlInfoLogs")
def get_bigdl_conf():
bigdl_conf_file = "spark-bigdl.conf"
bigdl_python_wrapper = "python-api.zip"
def load_conf(conf_str):
return dict(line.split() for line in conf_str.split("\n") if
"#" not in line and line.strip())
for p in sys.path:
if bigdl_conf_file in p and os.path.isfile(p):
with open(p) if sys.version_info < (3,) else open(p,
encoding='latin-1') as conf_file:
# noqa
return load_conf(conf_file.read())
if bigdl_python_wrapper in p and os.path.isfile(p):
import zipfile
with zipfile.ZipFile(p, 'r') as zip_conf:
if bigdl_conf_file in zip_conf.namelist():
content = zip_conf.read(bigdl_conf_file)
if sys.version_info >= (3,):
content = str(content, 'latin-1')
return load_conf(content)
return {}
def to_list(a):
if type(a) is list:
return a
return [a]
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into an RDD[Sample].
    :param x: ndarray and the first dimension should be batch
    :param y: ndarray and the first dimension should be batch
    :param numSlices: the number of partitions of the resulting RDD
    :return: an RDD of Sample
"""
sc = get_spark_context()
from bigdl.dllib.utils.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
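# Illustrative sketch (assumes a local Spark environment is available; the
# array shapes are arbitrary): build an RDD[Sample] from two NumPy arrays.
#
#   import numpy as np
#   x = np.random.random((4, 3))   # 4 samples, 3 features each
#   y = np.random.random((4, 1))   # 4 labels
#   sample_rdd = to_sample_rdd(x, y, numSlices=2)
#   first = sample_rdd.first()     # a Sample wrapping JTensor features/labels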
def extend_spark_driver_cp(sparkConf, path):
original_driver_classpath = ":" + sparkConf.get("spark.driver.extraClassPath") \
if sparkConf.contains("spark.driver.extraClassPath") else ""
sparkConf.set("spark.driver.extraClassPath", path + original_driver_classpath)
def create_spark_conf():
bigdl_conf = get_bigdl_conf()
sparkConf = SparkConf()
sparkConf.setAll(bigdl_conf.items())
if os.environ.get("BIGDL_JARS", None) and not is_spark_below_2_2():
for jar in os.environ["BIGDL_JARS"].split(":"):
extend_spark_driver_cp(sparkConf, jar)
# add content in PYSPARK_FILES in spark.submit.pyFiles
# This is a workaround for current Spark on k8s
python_lib = os.environ.get('PYSPARK_FILES', None)
if python_lib:
existing_py_files = sparkConf.get("spark.submit.pyFiles")
if existing_py_files:
sparkConf.set(key="spark.submit.pyFiles",
value="%s,%s" % (python_lib, existing_py_files))
else:
sparkConf.set(key="spark.submit.pyFiles", value=python_lib)
return sparkConf
def get_spark_context(conf=None):
"""
    Get the currently active SparkContext, creating one if there is no active instance.
    :param conf: a SparkConf combining the BigDL configs into the Spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
        # Might have a threading issue, but we can't add _lock here
        # as it's not an RLock in Spark 1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
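# Hedged usage sketch: obtain (or create) a SparkContext that already carries
# the BigDL configuration. The app name and master are illustrative choices.
#
#   conf = create_spark_conf().setAppName("bigdl-demo").setMaster("local[2]")
#   sc = get_spark_context(conf)
#   init_engine()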
def get_spark_sql_context(sc):
if "getOrCreate" in SQLContext.__dict__:
return SQLContext.getOrCreate(sc)
else:
return SQLContext(sc) # Compatible with Spark1.5.1
def _get_port():
root_dir = SparkFiles.getRootDirectory()
path = os.path.join(root_dir, "gateway_port")
try:
with open(path) as f:
port = int(f.readline())
except IOError as e:
traceback.print_exc()
invalidInputError(False,
"Could not open the file %s, which contains the listening port of"
" local Java Gateway, please make sure the init_executor_gateway()"
" function is called before any call of java function on the"
" executor side." % e.filename)
return port
def _get_gateway():
if SparkFiles._is_running_on_worker:
gateway_port = _get_port()
gateway = GatewayWrapper.instance(None, gateway_port).value
else:
sc = get_spark_context()
gateway = sc._gateway
return gateway
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
        # hasattr(jinvoker, name) always returns true here,
        # so you need to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
result = callJavaFunc(api, *args)
except Exception as e:
error = e
# if the invoked method exist but something else went wrong, throw the exception
if not re.match(r'.*Method.*does not exist', str(e), flags=re.DOTALL):
invalidOperationError(False, str(e), cause=e)
else:
return result
invalidOperationError(False, "Cannot find function: %s" % name, cause=error)
def _java2py(gateway, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.javaToPython(r)
return RDD(jrdd, get_spark_context())
if clsName == 'DataFrame':
return DataFrame(r, get_spark_sql_context(get_spark_context()))
if clsName == 'Dataset':
return DataFrame(r, get_spark_sql_context(get_spark_context()))
if clsName == "ImageFrame[]":
return r
if clsName in _picklable_classes:
r = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList)) and len(r) != 0 \
and isinstance(r[0], JavaObject) \
and r[0].getClass().getSimpleName() in ['DataFrame', 'Dataset']:
spark = get_spark_sql_context(get_spark_context())
r = list(map(lambda x: DataFrame(x, spark), r))
elif isinstance(r, (JavaArray, JavaList, JavaMap)):
try:
r = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(
r)
except Py4JJavaError:
pass # not pickable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
result = func(*args)
return _java2py(gateway, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(gateway, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, SQLContext):
obj = obj._jsqlContext
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(gateway, x) for x in obj],
gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(gateway, value)
obj = MapConverter().convert(result, gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
def create_tmp_path():
tmp_file = tempfile.NamedTemporaryFile(prefix="bigdl")
tmp_file.close()
return tmp_file.name
def text_from_path(path):
sc = get_spark_context()
return sc.textFile(path).collect()[0]
def get_local_file(a_path):
if not is_distributed(a_path):
return a_path
path, data = get_spark_context().binaryFiles(a_path).collect()[0]
local_file_path = create_tmp_path()
    with open(local_file_path, 'wb') as local_file:
local_file.write(data)
return local_file_path
def is_distributed(path):
return "://" in path
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.dllib.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
invalidInputError(False, "Unsupported activation type: %s" % activation_name)
    if activation_id:
activation.set_name(activation_id)
return activation
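# Example sketch: the names below mirror the branches handled above; any other
# name raises via invalidInputError. The id "my_relu" is a hypothetical label.
#
#   relu = get_activation_by_name("relu", "my_relu")  # BLayer.ReLU() named "my_relu"
#   tanh = get_activation_by_name("TANH")             # lookup is case-insensitive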
def _test():
import doctest
from pyspark import SparkContext
from bigdl.dllib.nn import layer
globs = layer.__dict__.copy()
sc = SparkContext(master="local[2]", appName="test common utility")
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
f208adabd337f18b94d7d2b9292c4a4b2ee36d5b
|
ebb592491b7a6e0a3e67e1badfb05f76fcb5658c
|
/strictyaml/ruamel/scalarstring.py
|
c4aa30c069ff7346232fa7ba8ba6de85397eafa8
|
[
"MIT"
] |
permissive
|
crdoconnor/strictyaml
|
fe4475043efc4516f5f49c9792bd937599d9c65f
|
dfd93f9740ebd5e7150029bc3d89ea102bcddf00
|
refs/heads/master
| 2023-08-11T13:32:03.154467
| 2023-05-25T11:08:41
| 2023-05-25T11:08:41
| 61,367,476
| 1,344
| 83
|
MIT
| 2023-06-28T09:37:45
| 2016-06-17T10:56:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,566
|
py
|
scalarstring.py
|
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
from strictyaml.ruamel.compat import text_type
from strictyaml.ruamel.anchor import Anchor
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = [
"ScalarString",
"LiteralScalarString",
"FoldedScalarString",
"SingleQuotedScalarString",
"DoubleQuotedScalarString",
"PlainScalarString",
    # PreservedScalarString is the old name; it was the first scalar type to
    # be preserved on round-trip. Use LiteralScalarString instead.
"PreservedScalarString",
]
class ScalarString(text_type):
__slots__ = Anchor.attrib
def __new__(cls, *args, **kw):
# type: (Any, Any) -> Any
anchor = kw.pop("anchor", None) # type: ignore
ret_val = text_type.__new__(cls, *args, **kw) # type: ignore
if anchor is not None:
ret_val.yaml_set_anchor(anchor, always_dump=True)
return ret_val
def replace(self, old, new, maxreplace=-1):
# type: (Any, Any, int) -> Any
return type(self)((text_type.replace(self, old, new, maxreplace)))
@property
def anchor(self):
# type: () -> Any
if not hasattr(self, Anchor.attrib):
setattr(self, Anchor.attrib, Anchor())
return getattr(self, Anchor.attrib)
def yaml_anchor(self, any=False):
# type: (bool) -> Any
if not hasattr(self, Anchor.attrib):
return None
if any or self.anchor.always_dump:
return self.anchor
return None
def yaml_set_anchor(self, value, always_dump=False):
# type: (Any, bool) -> None
self.anchor.value = value
self.anchor.always_dump = always_dump
class LiteralScalarString(ScalarString):
__slots__ = "comment" # the comment after the | on the first line
style = "|"
def __new__(cls, value, anchor=None):
# type: (Text, Any) -> Any
return ScalarString.__new__(cls, value, anchor=anchor)
PreservedScalarString = LiteralScalarString
class FoldedScalarString(ScalarString):
__slots__ = ("fold_pos", "comment") # the comment after the > on the first line
style = ">"
def __new__(cls, value, anchor=None):
# type: (Text, Any) -> Any
return ScalarString.__new__(cls, value, anchor=anchor)
class SingleQuotedScalarString(ScalarString):
__slots__ = ()
style = "'"
def __new__(cls, value, anchor=None):
# type: (Text, Any) -> Any
return ScalarString.__new__(cls, value, anchor=anchor)
class DoubleQuotedScalarString(ScalarString):
__slots__ = ()
style = '"'
def __new__(cls, value, anchor=None):
# type: (Text, Any) -> Any
return ScalarString.__new__(cls, value, anchor=anchor)
class PlainScalarString(ScalarString):
__slots__ = ()
style = ""
def __new__(cls, value, anchor=None):
# type: (Text, Any) -> Any
return ScalarString.__new__(cls, value, anchor=anchor)
def preserve_literal(s):
# type: (Text) -> Text
return LiteralScalarString(s.replace("\r\n", "\n").replace("\r", "\n"))
def walk_tree(base, map=None):
# type: (Any, Any) -> None
"""
    This routine walks over a simple YAML tree (recursing into dict values
    and list items) and converts strings that contain multiple lines into
    literal scalars.
You can also provide an explicit (ordered) mapping for multiple transforms
(first of which is executed):
map = strictyaml.ruamel.compat.ordereddict
map['\n'] = preserve_literal
map[':'] = SingleQuotedScalarString
walk_tree(data, map=map)
"""
from strictyaml.ruamel.compat import string_types
from strictyaml.ruamel.compat import MutableMapping, MutableSequence # type: ignore
if map is None:
map = {"\n": preserve_literal}
if isinstance(base, MutableMapping):
for k in base:
v = base[k] # type: Text
if isinstance(v, string_types):
for ch in map:
if ch in v:
base[k] = map[ch](v)
break
else:
walk_tree(v, map=map)
elif isinstance(base, MutableSequence):
for idx, elem in enumerate(base):
if isinstance(elem, string_types):
for ch in map:
if ch in elem: # type: ignore
base[idx] = map[ch](elem)
break
else:
walk_tree(elem, map=map)
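# Minimal demonstration sketch (illustrative only): strings containing
# newlines are converted in place to literal block scalars.
#
#   data = {"msg": "first line\nsecond line", "plain": "no newline"}
#   walk_tree(data)
#   isinstance(data["msg"], LiteralScalarString)    # True
#   isinstance(data["plain"], LiteralScalarString)  # False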
|
7a3cf93ce679a9552203e90929bca4ff74485e8d
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/mapdata/migrations/0040_access_permissions.py
|
d068fe17be9db72dd7a878dac327a5400c1221ec
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,078
|
py
|
0040_access_permissions.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-24 20:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mapdata', '0039_auto_20171024_2011'),
]
operations = [
migrations.CreateModel(
name='AccessPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('expire_date', models.DateTimeField(null=True, verbose_name='expires')),
],
options={
'verbose_name': 'Access Permission',
'verbose_name_plural': 'Access Permissions',
'default_related_name': 'accesspermissions',
},
),
migrations.AddField(
model_name='accessrestriction',
name='open',
field=models.BooleanField(default=False, verbose_name='open'),
),
migrations.AddField(
model_name='accesspermission',
name='access_restriction',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accesspermissions', to='mapdata.AccessRestriction'),
),
migrations.AddField(
model_name='accesspermission',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accesspermissions', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='accessrestriction',
name='users',
field=models.ManyToManyField(related_name='accessrestrictions', through='mapdata.AccessPermission', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='accesspermission',
unique_together=set([('user', 'access_restriction')]),
),
]
|
12427a5696d026c454fb02659ae42e0338f13cff
|
5a2715fb652ebc1b16a70cf76c8faff22ab94723
|
/utils/download-dependencies_distilbert.py
|
2cbac198a8906bd9564a91a8bf29b5540239fe4e
|
[
"Apache-2.0"
] |
permissive
|
guillaume-be/rust-bert
|
35ee29c22038e126b325f0b04cf51b572d481d4e
|
0a1c3dfeef78c0936892031cee4ecd406a1ef880
|
refs/heads/main
| 2023-08-31T11:08:30.296705
| 2023-08-27T13:15:11
| 2023-08-27T13:15:11
| 236,150,741
| 2,066
| 185
|
Apache-2.0
| 2023-09-11T15:34:48
| 2020-01-25T09:40:07
|
Rust
|
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
download-dependencies_distilbert.py
|
import os
import subprocess
from pathlib import Path
import numpy as np
import requests
import torch
if __name__ == "__main__":
target_path = Path.home() / "rustbert" / "distilbert"
os.makedirs(str(target_path), exist_ok=True)
weights_url = "https://huggingface.co/sshleifer/tiny-distilbert-base-cased/resolve/main/pytorch_model.bin"
r = requests.get(weights_url, allow_redirects=True)
(target_path / "pytorch_model.bin").open("wb").write(r.content)
weights = torch.load(target_path / "pytorch_model.bin", map_location="cpu")
nps = {}
for k, v in weights.items():
nps[k] = np.ascontiguousarray(v.cpu().numpy())
np.savez(target_path / "model.npz", **nps)
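    # Illustrative sanity check (not in the original script): confirm that the
    # saved archive round-trips the same parameter names as the state dict.
    #   archive = np.load(target_path / "model.npz")
    #   assert set(archive.files) == set(nps.keys())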
source = str(target_path / "model.npz")
target = str(target_path / "model.ot")
toml_location = (Path(__file__).resolve() / ".." / ".." / "Cargo.toml").resolve()
subprocess.call(
[
"cargo",
"run",
"--bin=convert-tensor",
"--features",
"download-libtorch",
"--manifest-path=%s" % toml_location,
"--",
source,
target,
]
)
os.remove(str(target_path / "pytorch_model.bin"))
os.remove(str(target_path / "model.npz"))
assert (target_path / "model.ot").exists(), "Conversion of the model failed."
|
35f0ec16569fa4aca115f16304efe15cd29cfb14
|
b26c41926fa3a7c2c061132d80e91a2750f2f468
|
/tensorflow_probability/python/math/psd_kernels/spectral_mixture.py
|
bfd6846a3b07af631b438094942a66a63db0af79
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/probability
|
22e679a4a883e408f8ef237cda56e3e3dfa42b17
|
42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5
|
refs/heads/main
| 2023-09-04T02:06:08.174935
| 2023-08-31T20:30:00
| 2023-08-31T20:31:33
| 108,053,674
| 4,055
| 1,269
|
Apache-2.0
| 2023-09-13T21:49:49
| 2017-10-23T23:50:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,071
|
py
|
spectral_mixture.py
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The SpectralMixture kernel."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math import generic as tfp_math
from tensorflow_probability.python.math.psd_kernels import positive_semidefinite_kernel as psd_kernel
from tensorflow_probability.python.math.psd_kernels.internal import util
__all__ = ['SpectralMixture']
class SpectralMixture(psd_kernel.AutoCompositeTensorPsdKernel):
"""The SpectralMixture kernel.
This kernel is derived from parameterizing the spectral density of a
stationary kernel by a mixture of `m` diagonal multivariate normal
distributions [1].
This in turn parameterizes the following kernel:
```none
k(x, y) = sum_j w[j] (prod_i
exp(-2 * (pi * (x[i] - y[i]) * s[j][i])**2) *
cos(2 * pi * (x[i] - y[i]) * m[j][i]))
```
where:
* `j` is the number of mixtures (as mentioned above).
* `w[j]` are the mixture weights.
* `m[j]` and `s[j]` parameterize a `MultivariateNormalDiag(m[j], s[j])`.
In other words, they are the mean and diagonal scale for each mixture
component.
NOTE: This kernel can result in negative off-diagonal entries.
#### References
[1]: A. Wilson, R. P. Adams.
Gaussian Process Kernels for Pattern Discovery and Extrapolation.
https://arxiv.org/abs/1302.4245
"""
def __init__(self,
logits,
locs,
scales,
feature_ndims=1,
validate_args=False,
name='SpectralMixture'):
"""Construct a SpectralMixture kernel instance.
Args:
logits: Floating-point `Tensor` of shape `[..., M]`, whose softmax
represents the mixture weights for the spectral density. Must
be broadcastable with `locs` and `scales`.
locs: Floating-point `Tensor` of shape `[..., M, F1, F2, ... FN]`, which
represents the location parameter of each of the `M` mixture components.
`N` is `feature_ndims`. Must be broadcastable with `logits` and
`scales`.
scales: Positive Floating-point `Tensor` of shape
`[..., M, F1, F2, ..., FN]`, which represents the scale parameter of
each of the `M` mixture components. `N` is `feature_ndims`. Must be
broadcastable with `locs` and `logits`. These parameters act like
inverse length scale parameters.
feature_ndims: Python `int` number of rightmost dims to include in the
squared difference norm in the exponential.
validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name):
dtype = util.maybe_get_common_dtype([logits, locs, scales])
self._logits = tensor_util.convert_nonref_to_tensor(
logits, name='logits', dtype=dtype)
self._locs = tensor_util.convert_nonref_to_tensor(
locs, name='locs', dtype=dtype)
self._scales = tensor_util.convert_nonref_to_tensor(
scales, name='scales', dtype=dtype)
super(SpectralMixture, self).__init__(
feature_ndims,
dtype=dtype,
name=name,
validate_args=validate_args,
parameters=parameters)
@property
def logits(self):
"""Logits parameter."""
return self._logits
@property
def locs(self):
"""Location parameter."""
return self._locs
@property
def scales(self):
"""Scale parameter."""
return self._scales
@classmethod
def _parameter_properties(cls, dtype):
from tensorflow_probability.python.bijectors import softplus # pylint:disable=g-import-not-at-top
return dict(
logits=parameter_properties.ParameterProperties(event_ndims=1),
locs=parameter_properties.ParameterProperties(
event_ndims=lambda self: self.feature_ndims + 1),
scales=parameter_properties.ParameterProperties(
event_ndims=lambda self: self.feature_ndims + 1,
default_constraining_bijector_fn=(
lambda: softplus.Softplus(low=dtype_util.eps(dtype)))))
def _apply_with_distance(
self, x1, x2, pairwise_square_distance, example_ndims=0):
exponent = -2. * pairwise_square_distance
locs = util.pad_shape_with_ones(
self.locs, ndims=example_ndims, start=-(self.feature_ndims + 1))
cos_coeffs = tf.math.cos(2 * np.pi * (x1 - x2) * locs)
feature_ndims = ps.cast(self.feature_ndims, ps.rank(cos_coeffs).dtype)
reduction_axes = ps.range(
ps.rank(cos_coeffs) - feature_ndims, ps.rank(cos_coeffs))
coeff_sign = tf.math.reduce_prod(
tf.math.sign(cos_coeffs), axis=reduction_axes)
log_cos_coeffs = tf.math.reduce_sum(
tf.math.log(tf.math.abs(cos_coeffs)), axis=reduction_axes)
logits = util.pad_shape_with_ones(
self.logits, ndims=example_ndims, start=-1)
log_result, sign = tfp_math.reduce_weighted_logsumexp(
exponent + log_cos_coeffs + logits,
coeff_sign, return_sign=True, axis=-(example_ndims + 1))
return sign * tf.math.exp(log_result)
def _apply(self, x1, x2, example_ndims=0):
    # Add an extra dimension to x1 and x2 so they broadcast with scales.
# [B1, ...., E1, ...., E2, M, F1, ..., F2]
x1 = util.pad_shape_with_ones(
x1, ndims=1, start=-(self.feature_ndims + example_ndims + 1))
x2 = util.pad_shape_with_ones(
x2, ndims=1, start=-(self.feature_ndims + example_ndims + 1))
scales = util.pad_shape_with_ones(
self.scales, ndims=example_ndims, start=-(self.feature_ndims + 1))
pairwise_square_distance = util.sum_rightmost_ndims_preserving_shape(
tf.math.square(np.pi * (x1 - x2) * scales), ndims=self.feature_ndims)
return self._apply_with_distance(
x1, x2, pairwise_square_distance, example_ndims=example_ndims)
def _matrix(self, x1, x2):
    # Add an extra dimension to x1 and x2 so they broadcast with scales.
x1 = util.pad_shape_with_ones(x1, ndims=1, start=-(self.feature_ndims + 2))
x2 = util.pad_shape_with_ones(x2, ndims=1, start=-(self.feature_ndims + 2))
scales = util.pad_shape_with_ones(
self.scales, ndims=1, start=-(self.feature_ndims + 1))
pairwise_square_distance = util.pairwise_square_distance_matrix(
np.pi * x1 * scales, np.pi * x2 * scales, self.feature_ndims)
x1 = util.pad_shape_with_ones(x1, ndims=1, start=-(self.feature_ndims + 1))
x2 = util.pad_shape_with_ones(x2, ndims=1, start=-(self.feature_ndims + 2))
    # Expand `x1` and `x2` so that they broadcast against each other.
return self._apply_with_distance(
x1, x2, pairwise_square_distance, example_ndims=2)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self._scales):
assertions.append(assert_util.assert_positive(
self._scales,
message='`scales` must be positive.'))
return assertions
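# Hedged construction sketch (not from the original file): a two-component
# spectral mixture over scalar features. Shapes follow the docstring above:
# `logits` has shape [M], `locs`/`scales` have shape [M, F1] with the default
# feature_ndims=1; all numeric values here are arbitrary.
#
#   kernel = SpectralMixture(
#       logits=[0.2, -0.1],
#       locs=[[1.0], [2.5]],
#       scales=[[0.3], [0.7]])
#   x = tf.random.normal([5, 1])
#   k_xx = kernel.matrix(x, x)  # shape [5, 5]; off-diagonals may be negative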
|
5445eecc4d5a6b61f9e2fef10a11f2f481599f1e
|
9b1eda0abdc5dea7c6e9695ff4e1098abe0a708b
|
/src/textual/_callback.py
|
abefeae5576e16d9df0312ca70d94d917e905088
|
[
"MIT"
] |
permissive
|
Textualize/textual
|
b8cf4b5d18069fccc7623b3116436f479e1ef446
|
b74ac1e47fdd16133ca567390c99ea19de278c5a
|
refs/heads/main
| 2023-08-30T21:40:21.563823
| 2023-08-30T10:18:27
| 2023-08-30T10:18:27
| 355,959,597
| 14,818
| 588
|
MIT
| 2023-09-14T20:22:02
| 2021-04-08T15:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
_callback.py
|
from __future__ import annotations
import asyncio
from functools import lru_cache
from inspect import isawaitable, signature
from typing import TYPE_CHECKING, Any, Callable
from . import active_app
if TYPE_CHECKING:
from .app import App
# Maximum seconds before warning about a slow callback
INVOKE_TIMEOUT_WARNING = 3
@lru_cache(maxsize=2048)
def count_parameters(func: Callable) -> int:
"""Count the number of parameters in a callable"""
return len(signature(func).parameters)
async def _invoke(callback: Callable, *params: object) -> Any:
"""Invoke a callback with an arbitrary number of parameters.
Args:
        callback: The callable to be invoked.
        *params: Candidate positional parameters; only as many as the callback
            accepts are forwarded.
Returns:
The return value of the invoked callable.
"""
_rich_traceback_guard = True
parameter_count = count_parameters(callback)
result = callback(*params[:parameter_count])
if isawaitable(result):
result = await result
return result
async def invoke(callback: Callable[[], Any], *params: object) -> Any:
"""Invoke a callback with an arbitrary number of parameters.
Args:
        callback: The callable to be invoked.
        *params: Candidate positional parameters; only as many as the callback
            accepts are forwarded.
Returns:
The return value of the invoked callable.
"""
app: App | None
try:
app = active_app.get()
except LookupError:
# May occur if this method is called outside of an app context (i.e. in a unit test)
app = None
if app is not None and "debug" in app.features:
# In debug mode we will warn about callbacks that may be stuck
def log_slow() -> None:
"""Log a message regarding a slow callback."""
assert app is not None
app.log.warning(
f"Callback {callback} is still pending after {INVOKE_TIMEOUT_WARNING} seconds"
)
call_later_handle = asyncio.get_running_loop().call_later(
INVOKE_TIMEOUT_WARNING, log_slow
)
try:
return await _invoke(callback, *params)
finally:
call_later_handle.cancel()
else:
return await _invoke(callback, *params)
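# Minimal usage sketch (illustrative; assumes it runs inside an asyncio event
# loop, e.g. via asyncio.run): `invoke` forwards only as many positional
# parameters as the callback accepts.
#
#   async def demo() -> None:
#       def on_event(name: str) -> str:  # accepts 1 of the 2 params offered
#           return f"got {name}"
#       result = await invoke(on_event, "ping", "extra-ignored")
#       assert result == "got ping"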
|
3ccb698be6f4e54a7d1b5133b77fab26b68e5bff
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/practise/learn-python/python_basic/function_arguments.py
|
01d22226809128bce733cb46291bd4b5aa20f926
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
function_arguments.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@FileName: function_arguments.py
@Function: python function arguments
@Author: Zhihe An
@Site: https://chegva.com
@Time: 2021/7/4
"""
"""一、函数的调用之位置实参"""
"""
调用函数时,可以根据每个形参在所有形参中的位置传递对应位置的实参,从而用每个实参初始化
对应位置的形参,这样的实参称为位置实参
"""
def f(a, b, c):
print('a =', a, 'b =', b, 'c = ', c)
f(2, 5, 8) # a = 2 b = 5 c = 8
f(5, 8, 2) # a = 5 b = 8 c = 2
f(8, 5, 2) # a = 8 b = 5 c = 2
"""二、函数的调用之关键字实参"""
"""
调用函数时,传递的实参的形式可以为:形参名 = 实参值,从而用指定的实参值初始化指定名称的形参,
这样的实参称为关键字实参
"""
def f(a, b, c):
print('a = ', a, 'b = ', b, 'c = ', c)
f(a = 2, b = 5, c = 8) # a = 2 b = 5 c = 8
"""
由于关键字实参中指定了形参名,所有实参和形参的匹配关系更加清晰,而且每个关键字实参在所有
关键字实参中的位置可以是任意的
"""
f(b = 5, c = 8, a = 2) # a = 2 b = 5 c = 8
f(c = 8, b = 5, a = 2) # a = 2 b = 5 c = 8
"""
调用函数时,可以组合使用位置实参和关键字实参。但是,位置实参必须位于关键字实参之前,否则,
无法根据位置来匹配实参和对应的形参
"""
f(2, 5, c = 8) # a = 2 b = 5 c = 8
# f(2, c = 8, 5) # SyntaxError: positional argument follows keyword argument
"""三、函数的调用之实参的传递"""
"""
前面学习过:"变量相当于标签。对于赋值语句:变量 = 对象,相当于给对象贴了一个标签,
标签名就是变量名"
调用函数时把实参传递给形参从而用实参初始化形参,本质上执行了赋值语句:形参 = 实参对象,
相当于给实参对象贴了一个标签,标签名就是形参名
如果实参对象是可变类型,在函数体内对形参对象的任何修改其实就是对实参对象的修改
"""
def f(arg1, arg2):
print('初始化形参后:arg1 =', arg1, 'arg2 =', arg2)
arg1 = arg1 * 2
arg2.append(4)
print('修改形参后:arg1 =', arg1, 'arg2 =', arg2)
i = 10
L = [1, 2, 3]
print('调用函数前: i =', i, 'L =', L) # 调用函数前: i = 10 L = [1, 2, 3]
f(i, L)
# 初始化形参后:arg1 = 10 arg2 = [1, 2, 3]
# 修改形参后:arg1 = 20 arg2 = [1, 2, 3, 4]
print('调用函数后:i =', i, 'L =', L) # 调用函数后:i = 10 L = [1, 2, 3, 4]
i = 10
L = [1, 2, 3]
print('调用函数前: i =', i, 'L =', L) # 调用函数前: i = 10 L = [1, 2, 3]
f(i, L[:])
# 初始化形参后:arg1 = 10 arg2 = [1, 2, 3]
# 修改形参后:arg1 = 20 arg2 = [1, 2, 3, 4]
print('调用函数后:i =', i, 'L =', L) # 调用函数后:i = 10 L = [1, 2, 3]
|
2b3eeeabfc22531a88e163d7fcaaab9f5fac71ee
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/socket/basic/version-4/client.py
|
6b15275b80a9172c5eafc957612bd10f26aa679b
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
client.py
|
#!/usr/bin/env python3
#
# https://docs.python.org/3.5/library/socket.html
#
import socket
import time
# --- constants ---
HOST = ''    # IP address of the remote server (local or external)
PORT = 8000  # port of the remote server (local or external)
# a server can have a local IP address - usable only inside the local network -
# or an external IP address - usable from the internet via an external router
# (the router then redirects the data to the internal IP address)
# --- create socket ---
print('[DEBUG] create socket')
#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = socket.socket() # default value is (socket.AF_INET, socket.SOCK_STREAM)
# so you don't have to use it in socket()
# --- connect to server ---
print('[DEBUG] connect:', HOST, PORT)
s.connect((HOST, PORT)) # one tuple (HOST, PORT), not two arguments
# --- send and receive many times ---
try:
while True:
now = int(time.time())
# --- send data ---
# if you don't use native characters
# then you can use 'ascii' instead of 'utf-8'
text = "Hello World of Sockets in Python"
data = text.encode('utf-8') # encode string to bytes
s.send(data)
print('[{}] send: {}'.format(now, text))
# --- receive data ---
# if you don't use native characters
# then you can use 'ascii' instead of 'utf-8'
data = s.recv(1024)
text = data.decode('utf-8') # decode bytes to string
print('[{}] recv: {}'.format(now, text))
        # --- wait a while ---
time.sleep(1)
except Exception as e:
print('[DEBUG] exception:', e)
# --- close socket ---
print('[DEBUG] close socket')
s.close()
|
9d98629409ec43ad999960cb1e326453c5582ca6
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/unit/test_master.py
|
96fe2a5459532b6f1d71679130c1d8a28662fd2c
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 27,305
|
py
|
test_master.py
|
import time
import pytest
import salt.config
import salt.master
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class TransportMethodsTest(TestCase):
def test_transport_methods(self):
class Foo(salt.master.TransportMethods):
expose_methods = ["bar"]
def bar(self):
pass
def bang(self):
pass
foo = Foo()
assert foo.get_method("bar") is not None
assert foo.get_method("bang") is None
def test_aes_funcs_white(self):
"""
Validate methods exposed on AESFuncs exist and are callable
"""
opts = salt.config.master_config(None)
aes_funcs = salt.master.AESFuncs(opts)
self.addCleanup(aes_funcs.destroy)
for name in aes_funcs.expose_methods:
func = getattr(aes_funcs, name, None)
assert callable(func)
def test_aes_funcs_black(self):
"""
Validate methods on AESFuncs that should not be called remotely
"""
opts = salt.config.master_config(None)
aes_funcs = salt.master.AESFuncs(opts)
self.addCleanup(aes_funcs.destroy)
# Any callable that should not explicitly be allowed should be added
# here.
blacklist_methods = [
"_AESFuncs__setup_fileserver",
"_AESFuncs__verify_load",
"_AESFuncs__verify_minion",
"_AESFuncs__verify_minion_publish",
"__class__",
"__delattr__",
"__dir__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getstate__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"destroy",
"get_method",
"run_func",
]
for name in dir(aes_funcs):
if name in aes_funcs.expose_methods:
continue
if not callable(getattr(aes_funcs, name)):
continue
assert name in blacklist_methods, name
def test_clear_funcs_white(self):
"""
Validate methods exposed on ClearFuncs exist and are callable
"""
opts = salt.config.master_config(None)
clear_funcs = salt.master.ClearFuncs(opts, {})
self.addCleanup(clear_funcs.destroy)
for name in clear_funcs.expose_methods:
func = getattr(clear_funcs, name, None)
assert callable(func)
def test_clear_funcs_black(self):
"""
Validate methods on ClearFuncs that should not be called remotely
"""
opts = salt.config.master_config(None)
clear_funcs = salt.master.ClearFuncs(opts, {})
self.addCleanup(clear_funcs.destroy)
blacklist_methods = [
"__class__",
"__delattr__",
"__dir__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getstate__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"_prep_auth_info",
"_prep_jid",
"_prep_pub",
"_send_pub",
"_send_ssh_pub",
"connect",
"destroy",
"get_method",
]
for name in dir(clear_funcs):
if name in clear_funcs.expose_methods:
continue
if not callable(getattr(clear_funcs, name)):
continue
assert name in blacklist_methods, name
class ClearFuncsTestCase(TestCase):
"""
TestCase for salt.master.ClearFuncs class
"""
@classmethod
def setUpClass(cls):
opts = salt.config.master_config(None)
cls.clear_funcs = salt.master.ClearFuncs(opts, {})
@classmethod
def tearDownClass(cls):
cls.clear_funcs.destroy()
del cls.clear_funcs
def test_get_method(self):
assert getattr(self.clear_funcs, "_send_pub", None) is not None
assert self.clear_funcs.get_method("_send_pub") is None
# runner tests
@pytest.mark.slow_test
def test_runner_token_not_authenticated(self):
"""
Asserts that a TokenAuthenticationError is returned when the token can't authenticate.
"""
mock_ret = {
"error": {
"name": "TokenAuthenticationError",
"message": 'Authentication failure of type "token" occurred.',
}
}
ret = self.clear_funcs.runner({"token": "asdfasdfasdfasdf"})
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_runner_token_authorization_error(self):
"""
Asserts that a TokenAuthenticationError is returned when the token authenticates, but is
not authorized.
"""
token = "asdfasdfasdfasdf"
clear_load = {"token": token, "fun": "test.arg"}
mock_token = {"token": token, "eauth": "foo", "name": "test"}
mock_ret = {
"error": {
"name": "TokenAuthenticationError",
"message": (
'Authentication failure of type "token" occurred for user test.'
),
}
}
with patch(
"salt.auth.LoadAuth.authenticate_token", MagicMock(return_value=mock_token)
), patch("salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=[])):
ret = self.clear_funcs.runner(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_runner_token_salt_invocation_error(self):
"""
Asserts that a SaltInvocationError is returned when the token authenticates, but the
command is malformed.
"""
token = "asdfasdfasdfasdf"
clear_load = {"token": token, "fun": "badtestarg"}
mock_token = {"token": token, "eauth": "foo", "name": "test"}
mock_ret = {
"error": {
"name": "SaltInvocationError",
"message": "A command invocation error occurred: Check syntax.",
}
}
with patch(
"salt.auth.LoadAuth.authenticate_token", MagicMock(return_value=mock_token)
), patch(
"salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=["testing"])
):
ret = self.clear_funcs.runner(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_runner_eauth_not_authenticated(self):
"""
Asserts that an EauthAuthenticationError is returned when the user can't authenticate.
"""
mock_ret = {
"error": {
"name": "EauthAuthenticationError",
"message": (
'Authentication failure of type "eauth" occurred for user UNKNOWN.'
),
}
}
ret = self.clear_funcs.runner({"eauth": "foo"})
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_runner_eauth_authorization_error(self):
"""
Asserts that an EauthAuthenticationError is returned when the user authenticates, but is
not authorized.
"""
clear_load = {"eauth": "foo", "username": "test", "fun": "test.arg"}
mock_ret = {
"error": {
"name": "EauthAuthenticationError",
"message": (
'Authentication failure of type "eauth" occurred for user test.'
),
}
}
with patch(
"salt.auth.LoadAuth.authenticate_eauth", MagicMock(return_value=True)
), patch("salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=[])):
ret = self.clear_funcs.runner(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_runner_eauth_salt_invocation_error(self):
"""
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
command is malformed.
"""
clear_load = {"eauth": "foo", "username": "test", "fun": "bad.test.arg.func"}
mock_ret = {
"error": {
"name": "SaltInvocationError",
"message": "A command invocation error occurred: Check syntax.",
}
}
with patch(
"salt.auth.LoadAuth.authenticate_eauth", MagicMock(return_value=True)
), patch(
"salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=["testing"])
):
ret = self.clear_funcs.runner(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_runner_user_not_authenticated(self):
"""
        Asserts that a UserAuthenticationError is returned when the user can't authenticate.
"""
mock_ret = {
"error": {
"name": "UserAuthenticationError",
"message": 'Authentication failure of type "user" occurred',
}
}
ret = self.clear_funcs.runner({})
self.assertDictEqual(mock_ret, ret)
# wheel tests
@pytest.mark.slow_test
def test_wheel_token_not_authenticated(self):
"""
Asserts that a TokenAuthenticationError is returned when the token can't authenticate.
"""
mock_ret = {
"error": {
"name": "TokenAuthenticationError",
"message": 'Authentication failure of type "token" occurred.',
}
}
ret = self.clear_funcs.wheel({"token": "asdfasdfasdfasdf"})
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_wheel_token_authorization_error(self):
"""
Asserts that a TokenAuthenticationError is returned when the token authenticates, but is
not authorized.
"""
token = "asdfasdfasdfasdf"
clear_load = {"token": token, "fun": "test.arg"}
mock_token = {"token": token, "eauth": "foo", "name": "test"}
mock_ret = {
"error": {
"name": "TokenAuthenticationError",
"message": (
'Authentication failure of type "token" occurred for user test.'
),
}
}
with patch(
"salt.auth.LoadAuth.authenticate_token", MagicMock(return_value=mock_token)
), patch("salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=[])):
ret = self.clear_funcs.wheel(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_wheel_token_salt_invocation_error(self):
"""
Asserts that a SaltInvocationError is returned when the token authenticates, but the
command is malformed.
"""
token = "asdfasdfasdfasdf"
clear_load = {"token": token, "fun": "badtestarg"}
mock_token = {"token": token, "eauth": "foo", "name": "test"}
mock_ret = {
"error": {
"name": "SaltInvocationError",
"message": "A command invocation error occurred: Check syntax.",
}
}
with patch(
"salt.auth.LoadAuth.authenticate_token", MagicMock(return_value=mock_token)
), patch(
"salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=["testing"])
):
ret = self.clear_funcs.wheel(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_wheel_eauth_not_authenticated(self):
"""
Asserts that an EauthAuthenticationError is returned when the user can't authenticate.
"""
mock_ret = {
"error": {
"name": "EauthAuthenticationError",
"message": (
'Authentication failure of type "eauth" occurred for user UNKNOWN.'
),
}
}
ret = self.clear_funcs.wheel({"eauth": "foo"})
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_wheel_eauth_authorization_error(self):
"""
Asserts that an EauthAuthenticationError is returned when the user authenticates, but is
not authorized.
"""
clear_load = {"eauth": "foo", "username": "test", "fun": "test.arg"}
mock_ret = {
"error": {
"name": "EauthAuthenticationError",
"message": (
'Authentication failure of type "eauth" occurred for user test.'
),
}
}
with patch(
"salt.auth.LoadAuth.authenticate_eauth", MagicMock(return_value=True)
), patch("salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=[])):
ret = self.clear_funcs.wheel(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_wheel_eauth_salt_invocation_error(self):
"""
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
command is malformed.
"""
clear_load = {"eauth": "foo", "username": "test", "fun": "bad.test.arg.func"}
mock_ret = {
"error": {
"name": "SaltInvocationError",
"message": "A command invocation error occurred: Check syntax.",
}
}
with patch(
"salt.auth.LoadAuth.authenticate_eauth", MagicMock(return_value=True)
), patch(
"salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=["testing"])
):
ret = self.clear_funcs.wheel(clear_load)
self.assertDictEqual(mock_ret, ret)
@pytest.mark.slow_test
def test_wheel_user_not_authenticated(self):
"""
        Asserts that a UserAuthenticationError is returned when the user can't authenticate.
"""
mock_ret = {
"error": {
"name": "UserAuthenticationError",
"message": 'Authentication failure of type "user" occurred',
}
}
ret = self.clear_funcs.wheel({})
self.assertDictEqual(mock_ret, ret)
# publish tests
@pytest.mark.slow_test
def test_publish_user_is_blacklisted(self):
"""
Asserts that an AuthorizationError is returned when the user has been blacklisted.
"""
mock_ret = {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=True)
):
self.assertEqual(
mock_ret, self.clear_funcs.publish({"user": "foo", "fun": "test.arg"})
)
@pytest.mark.slow_test
def test_publish_cmd_blacklisted(self):
"""
Asserts that an AuthorizationError is returned when the command has been blacklisted.
"""
mock_ret = {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=True)
):
self.assertEqual(
mock_ret, self.clear_funcs.publish({"user": "foo", "fun": "test.arg"})
)
@pytest.mark.slow_test
def test_publish_token_not_authenticated(self):
"""
Asserts that an AuthenticationError is returned when the token can't authenticate.
"""
mock_ret = {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
load = {
"user": "foo",
"fun": "test.arg",
"tgt": "test_minion",
"kwargs": {"token": "asdfasdfasdfasdf"},
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
@pytest.mark.slow_test
def test_publish_token_authorization_error(self):
"""
Asserts that an AuthorizationError is returned when the token authenticates, but is not
authorized.
"""
token = "asdfasdfasdfasdf"
load = {
"user": "foo",
"fun": "test.arg",
"tgt": "test_minion",
"arg": "bar",
"kwargs": {"token": token},
}
mock_token = {"token": token, "eauth": "foo", "name": "test"}
mock_ret = {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.auth.LoadAuth.authenticate_token", MagicMock(return_value=mock_token)
), patch(
"salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=[])
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
@pytest.mark.slow_test
def test_publish_eauth_not_authenticated(self):
"""
Asserts that an AuthenticationError is returned when the user can't authenticate.
"""
load = {
"user": "test",
"fun": "test.arg",
"tgt": "test_minion",
"kwargs": {"eauth": "foo"},
}
mock_ret = {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
@pytest.mark.slow_test
def test_publish_eauth_authorization_error(self):
"""
Asserts that an AuthorizationError is returned when the user authenticates, but is not
authorized.
"""
load = {
"user": "test",
"fun": "test.arg",
"tgt": "test_minion",
"kwargs": {"eauth": "foo"},
"arg": "bar",
}
mock_ret = {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.auth.LoadAuth.authenticate_eauth", MagicMock(return_value=True)
), patch(
"salt.auth.LoadAuth.get_auth_list", MagicMock(return_value=[])
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
@pytest.mark.slow_test
def test_publish_user_not_authenticated(self):
"""
Asserts that an AuthenticationError is returned when the user can't authenticate.
"""
load = {"user": "test", "fun": "test.arg", "tgt": "test_minion"}
mock_ret = {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
@pytest.mark.slow_test
def test_publish_user_authenticated_missing_auth_list(self):
"""
Asserts that an AuthenticationError is returned when the user has an effective user id and is
authenticated, but the auth_list is empty.
"""
load = {
"user": "test",
"fun": "test.arg",
"tgt": "test_minion",
"kwargs": {"user": "test"},
"arg": "foo",
}
mock_ret = {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.auth.LoadAuth.authenticate_key",
MagicMock(return_value="fake-user-key"),
), patch(
"salt.utils.master.get_values_of_matching_keys", MagicMock(return_value=[])
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
@pytest.mark.slow_test
def test_publish_user_authorization_error(self):
"""
Asserts that an AuthorizationError is returned when the user authenticates, but is not
authorized.
"""
load = {
"user": "test",
"fun": "test.arg",
"tgt": "test_minion",
"kwargs": {"user": "test"},
"arg": "foo",
}
mock_ret = {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
with patch(
"salt.acl.PublisherACL.user_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.acl.PublisherACL.cmd_is_blacklisted", MagicMock(return_value=False)
), patch(
"salt.auth.LoadAuth.authenticate_key",
MagicMock(return_value="fake-user-key"),
), patch(
"salt.utils.master.get_values_of_matching_keys",
MagicMock(return_value=["test"]),
), patch(
"salt.utils.minions.CkMinions.auth_check", MagicMock(return_value=False)
):
self.assertEqual(mock_ret, self.clear_funcs.publish(load))
class MaintenanceTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
TestCase for salt.master.Maintenance class
"""
def setUp(self):
opts = self.get_temp_config(
"master", git_pillar_update_interval=180, maintenance_interval=181
)
self.main_class = salt.master.Maintenance(opts)
self.main_class._after_fork_methods = self.main_class._finalize_methods = []
def tearDown(self):
del self.main_class
def test_run_func(self):
"""
Test the run function inside Maintenance class.
"""
class MockTime:
def __init__(self, max_duration):
self._start_time = time.time()
self._current_duration = 0
self._max_duration = max_duration
self._calls = []
def time(self):
return self._start_time + self._current_duration
def sleep(self, secs):
self._calls += [secs]
self._current_duration += secs
if self._current_duration >= self._max_duration:
raise RuntimeError("Time passes")
mocked_time = MockTime(60 * 4)
class MockTimedFunc:
def __init__(self):
self.call_times = []
def __call__(self, *args, **kwargs):
self.call_times += [mocked_time._current_duration]
mocked__post_fork_init = MockTimedFunc()
mocked_clean_old_jobs = MockTimedFunc()
mocked_clean_expired_tokens = MockTimedFunc()
mocked_clean_pub_auth = MockTimedFunc()
mocked_handle_git_pillar = MockTimedFunc()
mocked_handle_schedule = MockTimedFunc()
mocked_handle_key_cache = MockTimedFunc()
mocked_handle_presence = MockTimedFunc()
mocked_handle_key_rotate = MockTimedFunc()
mocked_check_max_open_files = MockTimedFunc()
with patch("salt.master.time", mocked_time), patch(
"salt.utils.process", autospec=True
), patch(
"salt.master.Maintenance._post_fork_init", mocked__post_fork_init
), patch(
"salt.daemons.masterapi.clean_old_jobs", mocked_clean_old_jobs
), patch(
"salt.daemons.masterapi.clean_expired_tokens", mocked_clean_expired_tokens
), patch(
"salt.daemons.masterapi.clean_pub_auth", mocked_clean_pub_auth
), patch(
"salt.master.Maintenance.handle_git_pillar", mocked_handle_git_pillar
), patch(
"salt.master.Maintenance.handle_schedule", mocked_handle_schedule
), patch(
"salt.master.Maintenance.handle_key_cache", mocked_handle_key_cache
), patch(
"salt.master.Maintenance.handle_presence", mocked_handle_presence
), patch(
"salt.master.Maintenance.handle_key_rotate", mocked_handle_key_rotate
), patch(
"salt.utils.verify.check_max_open_files", mocked_check_max_open_files
):
try:
self.main_class.run()
except RuntimeError as exc:
self.assertEqual(str(exc), "Time passes")
self.assertEqual(mocked_time._calls, [60] * 4)
self.assertEqual(mocked__post_fork_init.call_times, [0])
self.assertEqual(mocked_clean_old_jobs.call_times, [0, 120, 180])
self.assertEqual(mocked_clean_expired_tokens.call_times, [0, 120, 180])
self.assertEqual(mocked_clean_pub_auth.call_times, [0, 120, 180])
self.assertEqual(mocked_handle_git_pillar.call_times, [0])
self.assertEqual(mocked_handle_schedule.call_times, [0, 60, 120, 180])
self.assertEqual(mocked_handle_key_cache.call_times, [0, 60, 120, 180])
self.assertEqual(mocked_handle_presence.call_times, [0, 60, 120, 180])
self.assertEqual(mocked_handle_key_rotate.call_times, [0, 60, 120, 180])
self.assertEqual(mocked_check_max_open_files.call_times, [0, 60, 120, 180])
|
6603229b30a8c90e86622a108d541db95b7a9b46
|
a881bfadf8405ebbd3143cfb5cc5cb715b0898c6
|
/test/test_exceptions.py
|
c617b4b9d180d63b82217861c75d81fd2dea264b
|
[
"Apache-2.0"
] |
permissive
|
GIScience/openrouteservice-py
|
1f8f1140c9544195d250f79515e6f2390663d457
|
9fc22f378db8f9ef98a1675031055b1ae2bec97b
|
refs/heads/master
| 2023-07-10T17:16:52.547413
| 2023-06-01T11:42:58
| 2023-06-01T11:42:58
| 119,974,379
| 327
| 60
|
Apache-2.0
| 2023-07-04T12:23:11
| 2018-02-02T11:36:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
test_exceptions.py
|
from openrouteservice.exceptions import (
ValidationError,
ApiError,
HTTPError,
Timeout,
_RetriableRequest,
_OverQueryLimit,
)
import test as _test
from pprint import pprint
class ExceptionTest(_test.TestCase):
def test_ValidationError(self):
exception = ValidationError("hamspam")
pprint(exception.__dict__)
self.assertIsInstance(exception, Exception)
    def test_ApiError(self):
exception = ApiError(500, "hamspam")
pprint(exception.__dict__)
self.assertEqual(exception.status, 500)
self.assertEqual(exception.message, "hamspam")
self.assertEqual(str(exception), "500 (hamspam)")
exception = ApiError(500)
self.assertEqual(str(exception), "500")
def test_HTTPError(self):
exception = HTTPError(500)
self.assertEqual(exception.status_code, 500)
self.assertEqual(str(exception), "HTTP Error: 500")
def test_Timeout(self):
exception = Timeout()
self.assertIsInstance(exception, Exception)
def test_RetriableRequest(self):
exception = _RetriableRequest()
self.assertIsInstance(exception, Exception)
def test_OverQueryLimit(self):
exception = _OverQueryLimit(500, "hamspam")
self.assertIsInstance(exception, Exception)
self.assertIsInstance(exception, ApiError)
self.assertIsInstance(exception, _RetriableRequest)
self.assertEqual(str(exception), "500 (hamspam)")
|
b3bb935ed4a48773eef1087e13f372c9f69c480b
|
6d54a7b26d0eb82152a549a6a9dfde656687752c
|
/examples/platform/nxp/k32w/k32w0/scripts/sign-outdir.py
|
577cc2424fd5b93764a5a704026e2c81c2fa6717
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
project-chip/connectedhomeip
|
81a123d675cf527773f70047d1ed1c43be5ffe6d
|
ea3970a7f11cd227ac55917edaa835a2a9bc4fc8
|
refs/heads/master
| 2023-09-01T11:43:37.546040
| 2023-09-01T08:01:32
| 2023-09-01T08:01:32
| 244,694,174
| 6,409
| 1,789
|
Apache-2.0
| 2023-09-14T20:56:31
| 2020-03-03T17:05:10
|
C++
|
UTF-8
|
Python
| false
| false
| 711
|
py
|
sign-outdir.py
|
import os
import subprocess
if os.environ["NXP_K32W0_SDK_ROOT"] != "":
sign_images_path = os.environ["NXP_K32W0_SDK_ROOT"] + "/tools/imagetool/sign_images.sh"
else:
sign_images_path = os.getcwd() + "/../../../../../../third_party/nxp/k32w0_sdk/repo/core/tools/imagetool/sign_images.sh"
# Give execute permission if needed
if os.access(sign_images_path, os.X_OK) is False:
os.chmod(sign_images_path, 0o766)
# Convert script to unix format if needed
subprocess.call("(file " + sign_images_path + " | grep CRLF > /dev/null) && (dos2unix " + sign_images_path + ")", shell=True)
# Call sign_images.sh script with the output directory
subprocess.call(sign_images_path + " " + os.getcwd(), shell=True)
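# Editorial note: the calls above interpolate paths into shell strings. A
# simplified sketch without shell=True (an assumption, not part of this
# script; it skips the CRLF check and always converts) would pass argument
# lists instead, avoiding quoting issues in paths:
#
#     subprocess.call(["dos2unix", sign_images_path])
#     subprocess.call([sign_images_path, os.getcwd()])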
|
242b45613882735d5b92a2332f0be412e27a6094
|
6bbc02d382d78a2608a2e7674cf8e8e737f4aacc
|
/examples/Fastai/Image-classification/score.py
|
e4de435b4fc3851444f2af3116a2e69486bd7687
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/MLOps
|
8b12b0eaea14e28a5c55fe14cfc9552e6426f0fd
|
c29628b917b42a910d2ae232556b706b8f1bc0c5
|
refs/heads/master
| 2023-09-02T14:41:40.417962
| 2023-07-27T07:03:17
| 2023-07-27T07:03:17
| 184,819,127
| 1,455
| 474
|
MIT
| 2023-07-31T12:07:28
| 2019-05-03T20:55:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
score.py
|
import os
import json
from azureml.core.model import Model
from azureml.core import Workspace
import fastai
from fastai.vision import *
from fastai.metrics import accuracy
from fastai.metrics import error_rate
import urllib.request
def download_jpg(url):
file_path = "./breadpudding.jpg"
local_filename, header = urllib.request.urlretrieve(url, file_path)
return local_filename
def init():
global food_classification_model
# The AZUREML_MODEL_DIR environment variable indicates a directory containing the model file you registered.
    # Load the exported fastai learner from the registered model directory.
model_path=os.getenv('AZUREML_MODEL_DIR')
filename="export.pkl"
classes = ['apple-pie','breadpudding','padthai', 'ramen', 'waffles']
food_classification_model = load_learner(path=model_path, file=filename)
classes = food_classification_model.data.classes
print(classes)
def run(request):
candidate_url = json.loads(request)["url"]
file_path = download_jpg(candidate_url)
img = open_image(file_path)
prediction = food_classification_model.predict(img)
index = 0
pred = str(prediction[index])
print(pred)
return pred
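# Local smoke test (editorial sketch; the URL is a placeholder, and init()
# requires AZUREML_MODEL_DIR to point at a directory containing export.pkl):
#
#     if __name__ == "__main__":
#         init()
#         print(run(json.dumps({"url": "https://example.com/pudding.jpg"})))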
|
90b230a5cbe21348be47284bc6ba84be2cf9af11
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/twisted/test/test_sslverify.py
|
84761dc2c555237965f5acc93d29c1a41da5d057
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 116,121
|
py
|
test_sslverify.py
|
# Copyright 2005 Divmod, Inc. See LICENSE file for details
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sslverify}.
"""
from __future__ import division, absolute_import
import sys
import itertools
import datetime
from zope.interface import implementer
from twisted.python.reflect import requireModule
skipSSL = None
skipSNI = None
skipNPN = None
skipALPN = None
if requireModule("OpenSSL"):
import ipaddress
from twisted.internet import ssl
from OpenSSL import SSL
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.crypto import PKey, X509
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import (
PrivateFormat, NoEncryption
)
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives.serialization import Encoding
try:
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.set_npn_advertise_callback(lambda c: None)
except NotImplementedError:
skipNPN = "OpenSSL 1.0.1 or greater required for NPN support"
try:
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.set_alpn_select_callback(lambda c: None)
except NotImplementedError:
skipALPN = "OpenSSL 1.0.2 or greater required for ALPN support"
else:
skipSSL = "OpenSSL is required for SSL tests."
skipSNI = skipSSL
skipNPN = skipSSL
skipALPN = skipSSL
from twisted.test.test_twisted import SetAsideModule
from twisted.test.iosim import connectedServerAndClient
from twisted.internet.error import ConnectionClosed
from twisted.python.compat import nativeString
from twisted.python.filepath import FilePath
from twisted.python.modules import getModule
from twisted.trial import unittest, util
from twisted.internet import protocol, defer, reactor
from twisted.internet._idna import _idnaText
from twisted.internet.error import CertificateError, ConnectionLost
from twisted.internet import interfaces
from incremental import Version
if not skipSSL:
from twisted.internet.ssl import platformTrust, VerificationError
from twisted.internet import _sslverify as sslverify
from twisted.protocols.tls import TLSMemoryBIOFactory
# A couple of static PEM-format certificates to be used by various tests.
A_HOST_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC2jCCAkMCAjA5MA0GCSqGSIb3DQEBBAUAMIG0MQswCQYDVQQGEwJVUzEiMCAG
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u
MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo
dXNldHRzMScwJQYJKoZIhvcNAQkBFhhub2JvZHlAdHdpc3RlZG1hdHJpeC5jb20x
ETAPBgNVBAsTCFNlY3VyaXR5MB4XDTA2MDgxNjAxMDEwOFoXDTA3MDgxNjAxMDEw
OFowgbQxCzAJBgNVBAYTAlVTMSIwIAYDVQQDExlleGFtcGxlLnR3aXN0ZWRtYXRy
aXguY29tMQ8wDQYDVQQHEwZCb3N0b24xHDAaBgNVBAoTE1R3aXN0ZWQgTWF0cml4
IExhYnMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxJzAlBgkqhkiG9w0BCQEWGG5v
Ym9keUB0d2lzdGVkbWF0cml4LmNvbTERMA8GA1UECxMIU2VjdXJpdHkwgZ8wDQYJ
KoZIhvcNAQEBBQADgY0AMIGJAoGBAMzH8CDF/U91y/bdbdbJKnLgnyvQ9Ig9ZNZp
8hpsu4huil60zF03+Lexg2l1FIfURScjBuaJMR6HiMYTMjhzLuByRZ17KW4wYkGi
KXstz03VIKy4Tjc+v4aXFI4XdRw10gGMGQlGGscXF/RSoN84VoDKBfOMWdXeConJ
VyC4w3iJAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAviMT4lBoxOgQy32LIgZ4lVCj
JNOiZYg8GMQ6y0ugp86X80UjOvkGtNf/R7YgED/giKRN/q/XJiLJDEhzknkocwmO
S+4b2XpiaZYxRyKWwL221O7CGmtWYyZl2+92YYmmCiNzWQPfP6BOMlfax0AGLHls
fXzCWdG0O/3Lk2SRM0I=
-----END CERTIFICATE-----
"""
A_PEER_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC3jCCAkcCAjA6MA0GCSqGSIb3DQEBBAUAMIG2MQswCQYDVQQGEwJVUzEiMCAG
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u
MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo
dXNldHRzMSkwJwYJKoZIhvcNAQkBFhpzb21lYm9keUB0d2lzdGVkbWF0cml4LmNv
bTERMA8GA1UECxMIU2VjdXJpdHkwHhcNMDYwODE2MDEwMTU2WhcNMDcwODE2MDEw
MTU2WjCBtjELMAkGA1UEBhMCVVMxIjAgBgNVBAMTGWV4YW1wbGUudHdpc3RlZG1h
dHJpeC5jb20xDzANBgNVBAcTBkJvc3RvbjEcMBoGA1UEChMTVHdpc3RlZCBNYXRy
aXggTGFiczEWMBQGA1UECBMNTWFzc2FjaHVzZXR0czEpMCcGCSqGSIb3DQEJARYa
c29tZWJvZHlAdHdpc3RlZG1hdHJpeC5jb20xETAPBgNVBAsTCFNlY3VyaXR5MIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnm+WBlgFNbMlHehib9ePGGDXF+Nz4
CjGuUmVBaXCRCiVjg3kSDecwqfb0fqTksBZ+oQ1UBjMcSh7OcvFXJZnUesBikGWE
JE4V8Bjh+RmbJ1ZAlUPZ40bAkww0OpyIRAGMvKG+4yLFTO4WDxKmfDcrOb6ID8WJ
e1u+i3XGkIf/5QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAD4Oukm3YYkhedUepBEA
vvXIQhVDqL7mk6OqYdXmNj6R7ZMC8WWvGZxrzDI1bZuB+4aIxxd1FXC3UOHiR/xg
i9cDl1y8P/qRp4aEBNF6rI0D4AxTbfnHQx4ERDAOShJdYZs/2zifPJ6va6YvrEyr
yqDtGhklsWW3ZwBzEh5VEOUp
-----END CERTIFICATE-----
"""
A_KEYPAIR = getModule(__name__).filePath.sibling('server.pem').getContent()
def counter(counter=itertools.count()):
"""
Each time we're called, return the next integer in the natural numbers.
"""
return next(counter)
def makeCertificate(**kw):
keypair = PKey()
keypair.generate_key(TYPE_RSA, 1024)
certificate = X509()
certificate.gmtime_adj_notBefore(0)
certificate.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year
for xname in certificate.get_issuer(), certificate.get_subject():
for (k, v) in kw.items():
setattr(xname, k, nativeString(v))
certificate.set_serial_number(counter())
certificate.set_pubkey(keypair)
certificate.sign(keypair, "md5")
return keypair, certificate
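# Usage sketch (editorial addition, not part of the original suite): the
# keyword arguments become X509 name fields on both subject and issuer,
# yielding a self-signed certificate with a fresh 1024-bit RSA key.
#
#     key, cert = makeCertificate(O=b"Example Org", CN=b"example-host")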
def certificatesForAuthorityAndServer(serviceIdentity=u'example.com'):
"""
Create a self-signed CA certificate and server certificate signed by the
CA.
@param serviceIdentity: The identity (hostname) of the server.
@type serviceIdentity: L{unicode}
@return: a 2-tuple of C{(certificate_authority_certificate,
server_certificate)}
@rtype: L{tuple} of (L{sslverify.Certificate},
L{sslverify.PrivateCertificate})
"""
commonNameForCA = x509.Name(
[x509.NameAttribute(NameOID.COMMON_NAME, u'Testing Example CA')]
)
commonNameForServer = x509.Name(
[x509.NameAttribute(NameOID.COMMON_NAME, u'Testing Example Server')]
)
oneDay = datetime.timedelta(1, 0, 0)
privateKeyForCA = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
backend=default_backend()
)
publicKeyForCA = privateKeyForCA.public_key()
caCertificate = (
x509.CertificateBuilder()
.subject_name(commonNameForCA)
.issuer_name(commonNameForCA)
.not_valid_before(datetime.datetime.today() - oneDay)
.not_valid_after(datetime.datetime.today() + oneDay)
.serial_number(x509.random_serial_number())
.public_key(publicKeyForCA)
.add_extension(
x509.BasicConstraints(ca=True, path_length=9), critical=True,
)
.sign(
private_key=privateKeyForCA, algorithm=hashes.SHA256(),
backend=default_backend()
)
)
privateKeyForServer = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
backend=default_backend()
)
publicKeyForServer = privateKeyForServer.public_key()
try:
ipAddress = ipaddress.ip_address(serviceIdentity)
except ValueError:
subjectAlternativeNames = [
x509.DNSName(serviceIdentity.encode("idna").decode("ascii"))
]
else:
subjectAlternativeNames = [x509.IPAddress(ipAddress)]
serverCertificate = (
x509.CertificateBuilder()
.subject_name(commonNameForServer)
.issuer_name(commonNameForCA)
.not_valid_before(datetime.datetime.today() - oneDay)
.not_valid_after(datetime.datetime.today() + oneDay)
.serial_number(x509.random_serial_number())
.public_key(publicKeyForServer)
.add_extension(
x509.BasicConstraints(ca=False, path_length=None), critical=True,
)
.add_extension(
x509.SubjectAlternativeName(
subjectAlternativeNames
),
critical=True,
)
.sign(
private_key=privateKeyForCA, algorithm=hashes.SHA256(),
backend=default_backend()
)
)
caSelfCert = sslverify.Certificate.loadPEM(
caCertificate.public_bytes(Encoding.PEM)
)
serverCert = sslverify.PrivateCertificate.loadPEM(
b"\n".join([privateKeyForServer.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
NoEncryption(),
),
serverCertificate.public_bytes(Encoding.PEM)])
)
return caSelfCert, serverCert
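# Usage sketch (editorial addition): passing a DNS name produces a dNSName
# subjectAltName, while an IP literal produces an iPAddress one, matching
# the try/except ipaddress branch above.
#
#     ca, server = certificatesForAuthorityAndServer(u"example.com")
#     ipCA, ipServer = certificatesForAuthorityAndServer(u"127.0.0.1")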
def _loopbackTLSConnection(serverOpts, clientOpts):
"""
Common implementation code for both L{loopbackTLSConnection} and
L{loopbackTLSConnectionInMemory}. Creates a loopback TLS connection
using the provided server and client context factories.
@param serverOpts: An OpenSSL context factory for the server.
@type serverOpts: C{OpenSSLCertificateOptions}, or any class with an
equivalent API.
@param clientOpts: An OpenSSL context factory for the client.
@type clientOpts: C{OpenSSLCertificateOptions}, or any class with an
equivalent API.
@return: 5-tuple of server-tls-protocol, server-inner-protocol,
client-tls-protocol, client-inner-protocol and L{IOPump}
@rtype: L{tuple}
"""
class GreetingServer(protocol.Protocol):
greeting = b"greetings!"
def connectionMade(self):
self.transport.write(self.greeting)
class ListeningClient(protocol.Protocol):
data = b''
lostReason = None
def dataReceived(self, data):
self.data += data
def connectionLost(self, reason):
self.lostReason = reason
clientWrappedProto = ListeningClient()
serverWrappedProto = GreetingServer()
plainClientFactory = protocol.Factory()
plainClientFactory.protocol = lambda: clientWrappedProto
plainServerFactory = protocol.Factory()
plainServerFactory.protocol = lambda: serverWrappedProto
clientFactory = TLSMemoryBIOFactory(
clientOpts, isClient=True,
wrappedFactory=plainServerFactory
)
serverFactory = TLSMemoryBIOFactory(
serverOpts, isClient=False,
wrappedFactory=plainClientFactory
)
sProto, cProto, pump = connectedServerAndClient(
lambda: serverFactory.buildProtocol(None),
lambda: clientFactory.buildProtocol(None)
)
return sProto, cProto, serverWrappedProto, clientWrappedProto, pump
def loopbackTLSConnection(trustRoot, privateKeyFile, chainedCertFile=None):
"""
Create a loopback TLS connection with the given trust and keys.
@param trustRoot: the C{trustRoot} argument for the client connection's
context.
@type trustRoot: L{sslverify.IOpenSSLTrustRoot}
@param privateKeyFile: The name of the file containing the private key.
@type privateKeyFile: L{str} (native string; file name)
@param chainedCertFile: The name of the chained certificate file.
@type chainedCertFile: L{str} (native string; file name)
@return: 3-tuple of server-protocol, client-protocol, and L{IOPump}
@rtype: L{tuple}
"""
class ContextFactory(object):
def getContext(self):
"""
Create a context for the server side of the connection.
@return: an SSL context using a certificate and key.
@rtype: C{OpenSSL.SSL.Context}
"""
ctx = SSL.Context(SSL.TLSv1_METHOD)
if chainedCertFile is not None:
ctx.use_certificate_chain_file(chainedCertFile)
ctx.use_privatekey_file(privateKeyFile)
# Let the test author know if they screwed something up.
ctx.check_privatekey()
return ctx
serverOpts = ContextFactory()
clientOpts = sslverify.OpenSSLCertificateOptions(trustRoot=trustRoot)
return _loopbackTLSConnection(serverOpts, clientOpts)
def loopbackTLSConnectionInMemory(trustRoot, privateKey,
serverCertificate, clientProtocols=None,
serverProtocols=None,
clientOptions=None):
"""
Create a loopback TLS connection with the given trust and keys. Like
L{loopbackTLSConnection}, but using in-memory certificates and keys rather
than writing them to disk.
@param trustRoot: the C{trustRoot} argument for the client connection's
context.
@type trustRoot: L{sslverify.IOpenSSLTrustRoot}
@param privateKey: The private key.
@type privateKey: L{str} (native string)
@param serverCertificate: The certificate used by the server.
    @type serverCertificate: L{str} (native string)
@param clientProtocols: The protocols the client is willing to negotiate
using NPN/ALPN.
@param serverProtocols: The protocols the server is willing to negotiate
using NPN/ALPN.
@param clientOptions: The type of C{OpenSSLCertificateOptions} class to
use for the client. Defaults to C{OpenSSLCertificateOptions}.
@return: 3-tuple of server-protocol, client-protocol, and L{IOPump}
@rtype: L{tuple}
"""
if clientOptions is None:
clientOptions = sslverify.OpenSSLCertificateOptions
clientCertOpts = clientOptions(
trustRoot=trustRoot,
acceptableProtocols=clientProtocols
)
serverCertOpts = sslverify.OpenSSLCertificateOptions(
privateKey=privateKey,
certificate=serverCertificate,
acceptableProtocols=serverProtocols,
)
return _loopbackTLSConnection(serverCertOpts, clientCertOpts)
def pathContainingDumpOf(testCase, *dumpables):
"""
Create a temporary file to store some serializable-as-PEM objects in, and
return its name.
@param testCase: a test case to use for generating a temporary directory.
@type testCase: L{twisted.trial.unittest.TestCase}
@param dumpables: arguments are objects from pyOpenSSL with a C{dump}
method, taking a pyOpenSSL file-type constant, such as
L{OpenSSL.crypto.FILETYPE_PEM} or L{OpenSSL.crypto.FILETYPE_ASN1}.
@type dumpables: L{tuple} of L{object} with C{dump} method taking L{int}
returning L{bytes}
@return: the path to a file where all of the dumpables were dumped in PEM
format.
@rtype: L{str}
"""
fname = testCase.mktemp()
with open(fname, "wb") as f:
for dumpable in dumpables:
f.write(dumpable.dump(FILETYPE_PEM))
return fname
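# Usage sketch (editorial addition): inside a TestCase method, dump a server
# certificate followed by its CA into one chained PEM file for APIs that
# accept file names, such as loopbackTLSConnection above.
#
#     chainFile = pathContainingDumpOf(self, serverCert, caCert)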
class DataCallbackProtocol(protocol.Protocol):
def dataReceived(self, data):
d, self.factory.onData = self.factory.onData, None
if d is not None:
d.callback(data)
def connectionLost(self, reason):
d, self.factory.onLost = self.factory.onLost, None
if d is not None:
d.errback(reason)
class WritingProtocol(protocol.Protocol):
byte = b'x'
def connectionMade(self):
self.transport.write(self.byte)
def connectionLost(self, reason):
self.factory.onLost.errback(reason)
class FakeContext(object):
"""
Introspectable fake of an C{OpenSSL.SSL.Context}.
Saves call arguments for later introspection.
Necessary because C{Context} offers poor introspection. cf. this
U{pyOpenSSL bug<https://bugs.launchpad.net/pyopenssl/+bug/1173899>}.
@ivar _method: See C{method} parameter of L{__init__}.
@ivar _options: L{int} of C{OR}ed values from calls of L{set_options}.
@ivar _certificate: Set by L{use_certificate}.
@ivar _privateKey: Set by L{use_privatekey}.
@ivar _verify: Set by L{set_verify}.
@ivar _verifyDepth: Set by L{set_verify_depth}.
@ivar _mode: Set by L{set_mode}.
@ivar _sessionID: Set by L{set_session_id}.
@ivar _extraCertChain: Accumulated L{list} of all extra certificates added
by L{add_extra_chain_cert}.
@ivar _cipherList: Set by L{set_cipher_list}.
@ivar _dhFilename: Set by L{load_tmp_dh}.
@ivar _defaultVerifyPathsSet: Set by L{set_default_verify_paths}
@ivar _ecCurve: Set by L{set_tmp_ecdh}
"""
_options = 0
def __init__(self, method):
self._method = method
self._extraCertChain = []
self._defaultVerifyPathsSet = False
self._ecCurve = None
def set_options(self, options):
self._options |= options
def use_certificate(self, certificate):
self._certificate = certificate
def use_privatekey(self, privateKey):
self._privateKey = privateKey
def check_privatekey(self):
return None
def set_mode(self, mode):
"""
Set the mode. See L{SSL.Context.set_mode}.
@param mode: See L{SSL.Context.set_mode}.
"""
self._mode = mode
def set_verify(self, flags, callback):
self._verify = flags, callback
def set_verify_depth(self, depth):
self._verifyDepth = depth
def set_session_id(self, sessionID):
self._sessionID = sessionID
def add_extra_chain_cert(self, cert):
self._extraCertChain.append(cert)
def set_cipher_list(self, cipherList):
self._cipherList = cipherList
def load_tmp_dh(self, dhfilename):
self._dhFilename = dhfilename
def set_default_verify_paths(self):
"""
Set the default paths for the platform.
"""
self._defaultVerifyPathsSet = True
def set_tmp_ecdh(self, curve):
"""
Set an ECDH curve. Should only be called by OpenSSL 1.0.1
code.
@param curve: See L{OpenSSL.SSL.Context.set_tmp_ecdh}
"""
self._ecCurve = curve
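# Introspection pattern (editorial note) used by many tests below: swap the
# real Context class for FakeContext, then assert on the recorded attributes
# instead of live OpenSSL state. A sketch, assuming key/cert objects exist:
#
#     opts = sslverify.OpenSSLCertificateOptions(privateKey=key,
#                                                certificate=cert)
#     opts._contextFactory = FakeContext
#     ctx = opts.getContext()
#     assert ctx._options & SSL.OP_NO_SSLv2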
class ClientOptionsTests(unittest.SynchronousTestCase):
"""
Tests for L{sslverify.optionsForClientTLS}.
"""
if skipSSL:
skip = skipSSL
def test_extraKeywords(self):
"""
When passed a keyword parameter other than C{extraCertificateOptions},
L{sslverify.optionsForClientTLS} raises an exception just like a
normal Python function would.
"""
error = self.assertRaises(
TypeError,
sslverify.optionsForClientTLS,
hostname=u'alpha', someRandomThing=u'beta',
)
self.assertEqual(
str(error),
"optionsForClientTLS() got an unexpected keyword argument "
"'someRandomThing'"
)
def test_bytesFailFast(self):
"""
If you pass L{bytes} as the hostname to
L{sslverify.optionsForClientTLS} it immediately raises a L{TypeError}.
"""
error = self.assertRaises(
TypeError,
sslverify.optionsForClientTLS, b'not-actually-a-hostname.com'
)
expectedText = (
"optionsForClientTLS requires text for host names, not " +
bytes.__name__
)
self.assertEqual(str(error), expectedText)
def test_dNSNameHostname(self):
"""
If you pass a dNSName to L{sslverify.optionsForClientTLS}
L{_hostnameIsDnsName} will be True
"""
options = sslverify.optionsForClientTLS(u'example.com')
self.assertTrue(options._hostnameIsDnsName)
def test_IPv4AddressHostname(self):
"""
If you pass an IPv4 address to L{sslverify.optionsForClientTLS}
L{_hostnameIsDnsName} will be False
"""
options = sslverify.optionsForClientTLS(u'127.0.0.1')
self.assertFalse(options._hostnameIsDnsName)
def test_IPv6AddressHostname(self):
"""
If you pass an IPv6 address to L{sslverify.optionsForClientTLS}
L{_hostnameIsDnsName} will be False
"""
options = sslverify.optionsForClientTLS(u'::1')
self.assertFalse(options._hostnameIsDnsName)
class FakeChooseDiffieHellmanEllipticCurve(object):
"""
A fake implementation of L{_ChooseDiffieHellmanEllipticCurve}
"""
def __init__(self, versionNumber, openSSLlib, openSSLcrypto):
"""
A no-op constructor.
"""
def configureECDHCurve(self, ctx):
"""
A null configuration.
@param ctx: An L{OpenSSL.SSL.Context} that would be
configured.
"""
class OpenSSLOptionsTestsMixin(object):
"""
    A mixin for L{OpenSSLOptions} test cases that creates client and server
    certificates, signs them with a CA, and provides a L{loopback} method
    that creates TLS connections with them.
"""
if skipSSL:
skip = skipSSL
serverPort = clientConn = None
onServerLost = onClientLost = None
def setUp(self):
"""
Create class variables of client and server certificates.
"""
self.sKey, self.sCert = makeCertificate(
O=b"Server Test Certificate",
CN=b"server")
self.cKey, self.cCert = makeCertificate(
O=b"Client Test Certificate",
CN=b"client")
self.caCert1 = makeCertificate(
O=b"CA Test Certificate 1",
CN=b"ca1")[1]
self.caCert2 = makeCertificate(
O=b"CA Test Certificate",
CN=b"ca2")[1]
self.caCerts = [self.caCert1, self.caCert2]
self.extraCertChain = self.caCerts
def tearDown(self):
if self.serverPort is not None:
self.serverPort.stopListening()
if self.clientConn is not None:
self.clientConn.disconnect()
L = []
if self.onServerLost is not None:
L.append(self.onServerLost)
if self.onClientLost is not None:
L.append(self.onClientLost)
return defer.DeferredList(L, consumeErrors=True)
def loopback(self, serverCertOpts, clientCertOpts,
onServerLost=None, onClientLost=None, onData=None):
if onServerLost is None:
self.onServerLost = onServerLost = defer.Deferred()
if onClientLost is None:
self.onClientLost = onClientLost = defer.Deferred()
if onData is None:
onData = defer.Deferred()
serverFactory = protocol.ServerFactory()
serverFactory.protocol = DataCallbackProtocol
serverFactory.onLost = onServerLost
serverFactory.onData = onData
clientFactory = protocol.ClientFactory()
clientFactory.protocol = WritingProtocol
clientFactory.onLost = onClientLost
self.serverPort = reactor.listenSSL(0, serverFactory, serverCertOpts)
self.clientConn = reactor.connectSSL('127.0.0.1',
self.serverPort.getHost().port, clientFactory, clientCertOpts)
class OpenSSLOptionsTests(OpenSSLOptionsTestsMixin, unittest.TestCase):
"""
Tests for L{sslverify.OpenSSLOptions}.
"""
def setUp(self):
"""
Same as L{OpenSSLOptionsTestsMixin.setUp}, but it also patches
L{sslverify._ChooseDiffieHellmanEllipticCurve}.
"""
super(OpenSSLOptionsTests, self).setUp()
self.patch(sslverify, "_ChooseDiffieHellmanEllipticCurve",
FakeChooseDiffieHellmanEllipticCurve)
def test_constructorWithOnlyPrivateKey(self):
"""
        C{privateKey} and C{certificate} only make sense if both are set.
"""
self.assertRaises(
ValueError,
sslverify.OpenSSLCertificateOptions, privateKey=self.sKey
)
def test_constructorWithOnlyCertificate(self):
"""
        C{privateKey} and C{certificate} only make sense if both are set.
"""
self.assertRaises(
ValueError,
sslverify.OpenSSLCertificateOptions, certificate=self.sCert
)
def test_constructorWithCertificateAndPrivateKey(self):
"""
Specifying C{privateKey} and C{certificate} initializes correctly.
"""
opts = sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert)
self.assertEqual(opts.privateKey, self.sKey)
self.assertEqual(opts.certificate, self.sCert)
self.assertEqual(opts.extraCertChain, [])
def test_constructorDoesNotAllowVerifyWithoutCACerts(self):
"""
C{verify} must not be C{True} without specifying C{caCerts}.
"""
self.assertRaises(
ValueError,
sslverify.OpenSSLCertificateOptions,
privateKey=self.sKey, certificate=self.sCert, verify=True
)
def test_constructorDoesNotAllowLegacyWithTrustRoot(self):
"""
C{verify}, C{requireCertificate}, and C{caCerts} must not be specified
by the caller (to be I{any} value, even the default!) when specifying
C{trustRoot}.
"""
self.assertRaises(
TypeError,
sslverify.OpenSSLCertificateOptions,
privateKey=self.sKey, certificate=self.sCert,
verify=True, trustRoot=None, caCerts=self.caCerts,
)
self.assertRaises(
TypeError,
sslverify.OpenSSLCertificateOptions,
privateKey=self.sKey, certificate=self.sCert,
trustRoot=None, requireCertificate=True,
)
def test_constructorAllowsCACertsWithoutVerify(self):
"""
It's currently a NOP, but valid.
"""
opts = sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert,
caCerts=self.caCerts)
self.assertFalse(opts.verify)
self.assertEqual(self.caCerts, opts.caCerts)
def test_constructorWithVerifyAndCACerts(self):
"""
Specifying C{verify} and C{caCerts} initializes correctly.
"""
opts = sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert,
verify=True,
caCerts=self.caCerts)
self.assertTrue(opts.verify)
self.assertEqual(self.caCerts, opts.caCerts)
def test_constructorSetsExtraChain(self):
"""
Setting C{extraCertChain} works if C{certificate} and C{privateKey} are
set along with it.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
extraCertChain=self.extraCertChain,
)
self.assertEqual(self.extraCertChain, opts.extraCertChain)
def test_constructorDoesNotAllowExtraChainWithoutPrivateKey(self):
"""
        An C{extraCertChain} without C{privateKey} doesn't make sense and is
thus rejected.
"""
self.assertRaises(
ValueError,
sslverify.OpenSSLCertificateOptions,
certificate=self.sCert,
extraCertChain=self.extraCertChain,
)
    def test_constructorDoesNotAllowExtraChainWithoutCertificate(self):
        """
        An C{extraCertChain} without C{certificate} doesn't make sense and is
thus rejected.
"""
self.assertRaises(
ValueError,
sslverify.OpenSSLCertificateOptions,
privateKey=self.sKey,
extraCertChain=self.extraCertChain,
)
def test_extraChainFilesAreAddedIfSupplied(self):
"""
If C{extraCertChain} is set and all prerequisites are met, the
specified chain certificates are added to C{Context}s that get
created.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
extraCertChain=self.extraCertChain,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
self.assertEqual(self.sKey, ctx._privateKey)
self.assertEqual(self.sCert, ctx._certificate)
self.assertEqual(self.extraCertChain, ctx._extraCertChain)
def test_extraChainDoesNotBreakPyOpenSSL(self):
"""
C{extraCertChain} doesn't break C{OpenSSL.SSL.Context} creation.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
extraCertChain=self.extraCertChain,
)
ctx = opts.getContext()
self.assertIsInstance(ctx, SSL.Context)
def test_acceptableCiphersAreAlwaysSet(self):
"""
If the user doesn't supply custom acceptable ciphers, a shipped secure
default is used. We can't check directly for it because the effective
cipher string we set varies with platforms.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
self.assertEqual(opts._cipherString.encode('ascii'), ctx._cipherList)
def test_givesMeaningfulErrorMessageIfNoCipherMatches(self):
"""
If there is no valid cipher that matches the user's wishes,
a L{ValueError} is raised.
"""
self.assertRaises(
ValueError,
sslverify.OpenSSLCertificateOptions,
privateKey=self.sKey,
certificate=self.sCert,
acceptableCiphers=
sslverify.OpenSSLAcceptableCiphers.fromOpenSSLCipherString('')
)
def test_honorsAcceptableCiphersArgument(self):
"""
If acceptable ciphers are passed, they are used.
"""
@implementer(interfaces.IAcceptableCiphers)
class FakeAcceptableCiphers(object):
def selectCiphers(self, _):
return [sslverify.OpenSSLCipher(u'sentinel')]
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
acceptableCiphers=FakeAcceptableCiphers(),
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
self.assertEqual(b'sentinel', ctx._cipherList)
def test_basicSecurityOptionsAreSet(self):
"""
Every context must have C{OP_NO_SSLv2}, C{OP_NO_COMPRESSION}, and
C{OP_CIPHER_SERVER_PREFERENCE} set.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE)
self.assertEqual(options, ctx._options & options)
def test_modeIsSet(self):
"""
Every context must be in C{MODE_RELEASE_BUFFERS} mode.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
self.assertEqual(SSL.MODE_RELEASE_BUFFERS, ctx._mode)
def test_singleUseKeys(self):
"""
If C{singleUseKeys} is set, every context must have
C{OP_SINGLE_DH_USE} and C{OP_SINGLE_ECDH_USE} set.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
enableSingleUseKeys=True,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = SSL.OP_SINGLE_DH_USE | SSL.OP_SINGLE_ECDH_USE
self.assertEqual(options, ctx._options & options)
def test_methodIsDeprecated(self):
"""
Passing C{method} to L{sslverify.OpenSSLCertificateOptions} is
deprecated.
"""
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv23_METHOD,
)
message = ("Passing method to twisted.internet.ssl.CertificateOptions "
"was deprecated in Twisted 17.1.0. Please use a "
"combination of insecurelyLowerMinimumTo, raiseMinimumTo, "
"and lowerMaximumSecurityTo instead, as Twisted will "
"correctly configure the method.")
warnings = self.flushWarnings([self.test_methodIsDeprecated])
self.assertEqual(1, len(warnings))
self.assertEqual(DeprecationWarning, warnings[0]['category'])
self.assertEqual(message, warnings[0]['message'])
def test_tlsv1ByDefault(self):
"""
L{sslverify.OpenSSLCertificateOptions} will make the default minimum
TLS version v1.0, if no C{method}, or C{insecurelyLowerMinimumTo} is
given.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsAtLeastWithMinimum(self):
"""
Passing C{insecurelyLowerMinimumTo} along with C{raiseMinimumTo} to
L{sslverify.OpenSSLCertificateOptions} will cause it to raise an
exception.
"""
with self.assertRaises(TypeError) as e:
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
raiseMinimumTo=sslverify.TLSVersion.TLSv1_2,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_2,
)
self.assertIn('raiseMinimumTo', e.exception.args[0])
self.assertIn('insecurelyLowerMinimumTo', e.exception.args[0])
self.assertIn('exclusive', e.exception.args[0])
def test_tlsProtocolsNoMethodWithAtLeast(self):
"""
Passing C{raiseMinimumTo} along with C{method} to
L{sslverify.OpenSSLCertificateOptions} will cause it to raise an
exception.
"""
with self.assertRaises(TypeError) as e:
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv23_METHOD,
raiseMinimumTo=sslverify.TLSVersion.TLSv1_2,
)
self.assertIn('method', e.exception.args[0])
self.assertIn('raiseMinimumTo', e.exception.args[0])
self.assertIn('exclusive', e.exception.args[0])
def test_tlsProtocolsNoMethodWithMinimum(self):
"""
Passing C{insecurelyLowerMinimumTo} along with C{method} to
L{sslverify.OpenSSLCertificateOptions} will cause it to raise an
exception.
"""
with self.assertRaises(TypeError) as e:
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv23_METHOD,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_2,
)
self.assertIn('method', e.exception.args[0])
self.assertIn('insecurelyLowerMinimumTo', e.exception.args[0])
self.assertIn('exclusive', e.exception.args[0])
def test_tlsProtocolsNoMethodWithMaximum(self):
"""
Passing C{lowerMaximumSecurityTo} along with C{method} to
L{sslverify.OpenSSLCertificateOptions} will cause it to raise an
exception.
"""
with self.assertRaises(TypeError) as e:
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv23_METHOD,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_2,
)
self.assertIn('method', e.exception.args[0])
self.assertIn('lowerMaximumSecurityTo', e.exception.args[0])
self.assertIn('exclusive', e.exception.args[0])
def test_tlsVersionRangeInOrder(self):
"""
Passing out of order TLS versions to C{insecurelyLowerMinimumTo} and
C{lowerMaximumSecurityTo} will cause it to raise an exception.
"""
with self.assertRaises(ValueError) as e:
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_0,
lowerMaximumSecurityTo=sslverify.TLSVersion.SSLv3)
self.assertEqual(e.exception.args, (
("insecurelyLowerMinimumTo needs to be lower than "
"lowerMaximumSecurityTo"),))
def test_tlsVersionRangeInOrderAtLeast(self):
"""
Passing out of order TLS versions to C{raiseMinimumTo} and
C{lowerMaximumSecurityTo} will cause it to raise an exception.
"""
with self.assertRaises(ValueError) as e:
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
raiseMinimumTo=sslverify.TLSVersion.TLSv1_0,
lowerMaximumSecurityTo=sslverify.TLSVersion.SSLv3)
self.assertEqual(e.exception.args, (
("raiseMinimumTo needs to be lower than "
"lowerMaximumSecurityTo"),))
    def test_tlsProtocolsReduceToMaxWithoutMin(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
C{lowerMaximumSecurityTo} but no C{raiseMinimumTo} or
C{insecurelyLowerMinimumTo} set, and C{lowerMaximumSecurityTo} is
below the minimum default, the minimum will be made the new maximum.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
lowerMaximumSecurityTo=sslverify.TLSVersion.SSLv3,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_TLSv1 |
SSL.OP_NO_TLSv1_1 | SSL.OP_NO_TLSv1_2 | opts._OP_NO_TLSv1_3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsSSLv3Only(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
C{insecurelyLowerMinimumTo} and C{lowerMaximumSecurityTo} set to
SSLv3, it will exclude all others.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.SSLv3,
lowerMaximumSecurityTo=sslverify.TLSVersion.SSLv3,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_TLSv1 |
SSL.OP_NO_TLSv1_1 | SSL.OP_NO_TLSv1_2 | opts._OP_NO_TLSv1_3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsTLSv1Point0Only(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
C{insecurelyLowerMinimumTo} and C{lowerMaximumSecurityTo} set to v1.0,
it will exclude all others.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_0,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_0,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3 |
SSL.OP_NO_TLSv1_1 | SSL.OP_NO_TLSv1_2 | opts._OP_NO_TLSv1_3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsTLSv1Point1Only(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
C{insecurelyLowerMinimumTo} and C{lowerMaximumSecurityTo} set to v1.1,
it will exclude all others.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_1,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_1,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3 |
SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_2 | opts._OP_NO_TLSv1_3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsTLSv1Point2Only(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
C{insecurelyLowerMinimumTo} and C{lowerMaximumSecurityTo} set to v1.2,
it will exclude all others.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_2,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_2,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3 |
SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1 | opts._OP_NO_TLSv1_3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsAllModernTLS(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
C{insecurelyLowerMinimumTo} set to TLSv1.0 and
C{lowerMaximumSecurityTo} to TLSv1.2, it will exclude both SSLs and
the (unreleased) TLSv1.3.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_0,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_2,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3 |
opts._OP_NO_TLSv1_3)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsAtLeastAllSecureTLS(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
        C{raiseMinimumTo} set to TLSv1.2, it will exclude all TLS versions
        below 1.2, as well as SSL.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
raiseMinimumTo=sslverify.TLSVersion.TLSv1_2
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3 |
SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1)
self.assertEqual(options, ctx._options & options)
def test_tlsProtocolsAtLeastWillAcceptHigherDefault(self):
"""
        Calling L{sslverify.OpenSSLCertificateOptions} with C{raiseMinimumTo}
        set to a value lower than Twisted's default causes it to use the more
        secure default.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
raiseMinimumTo=sslverify.TLSVersion.SSLv3
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
# Future maintainer warning: this will break if we change our default
# up, so you should change it to add the relevant OP_NO flags when we
# do make that change and this test fails.
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3)
self.assertEqual(options, ctx._options & options)
self.assertEqual(opts._defaultMinimumTLSVersion,
sslverify.TLSVersion.TLSv1_0)
def test_tlsProtocolsAllSecureTLS(self):
"""
When calling L{sslverify.OpenSSLCertificateOptions} with
        C{insecurelyLowerMinimumTo} set to TLSv1.2, it will exclude all TLS
        versions below 1.2, as well as SSL.
"""
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_2
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |
SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3 |
SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1)
self.assertEqual(options, ctx._options & options)
def test_dhParams(self):
"""
If C{dhParams} is set, they are loaded into each new context.
"""
class FakeDiffieHellmanParameters(object):
_dhFile = FilePath(b'dh.params')
dhParams = FakeDiffieHellmanParameters()
opts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
dhParameters=dhParams,
)
opts._contextFactory = FakeContext
ctx = opts.getContext()
self.assertEqual(
FakeDiffieHellmanParameters._dhFile.path,
ctx._dhFilename
)
def test_abbreviatingDistinguishedNames(self):
"""
Check that abbreviations used in certificates correctly map to
complete names.
"""
self.assertEqual(
sslverify.DN(CN=b'a', OU=b'hello'),
sslverify.DistinguishedName(commonName=b'a',
organizationalUnitName=b'hello'))
self.assertNotEqual(
sslverify.DN(CN=b'a', OU=b'hello'),
sslverify.DN(CN=b'a', OU=b'hello', emailAddress=b'xxx'))
dn = sslverify.DN(CN=b'abcdefg')
self.assertRaises(AttributeError, setattr, dn, 'Cn', b'x')
self.assertEqual(dn.CN, dn.commonName)
dn.CN = b'bcdefga'
self.assertEqual(dn.CN, dn.commonName)
def testInspectDistinguishedName(self):
n = sslverify.DN(commonName=b'common name',
organizationName=b'organization name',
organizationalUnitName=b'organizational unit name',
localityName=b'locality name',
stateOrProvinceName=b'state or province name',
countryName=b'country name',
emailAddress=b'email address')
s = n.inspect()
for k in [
'common name',
'organization name',
'organizational unit name',
'locality name',
'state or province name',
'country name',
'email address']:
self.assertIn(k, s, "%r was not in inspect output." % (k,))
self.assertIn(k.title(), s, "%r was not in inspect output." % (k,))
def testInspectDistinguishedNameWithoutAllFields(self):
n = sslverify.DN(localityName=b'locality name')
s = n.inspect()
for k in [
'common name',
'organization name',
'organizational unit name',
'state or province name',
'country name',
'email address']:
self.assertNotIn(k, s, "%r was in inspect output." % (k,))
self.assertNotIn(k.title(), s, "%r was in inspect output." % (k,))
self.assertIn('locality name', s)
self.assertIn('Locality Name', s)
def test_inspectCertificate(self):
"""
Test that the C{inspect} method of L{sslverify.Certificate} returns
a human-readable string containing some basic information about the
certificate.
"""
c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
pk = c.getPublicKey()
keyHash = pk.keyHash()
# Maintenance Note: the algorithm used to compute the "public key hash"
# is highly dubious and can differ between underlying versions of
# OpenSSL (and across versions of Twisted), since it is not actually
# the hash of the public key by itself. If we can get the appropriate
# APIs to get the hash of the key itself out of OpenSSL, then we should
# be able to make it statically declared inline below again rather than
# computing it here.
self.assertEqual(
c.inspect().split('\n'),
["Certificate For Subject:",
" Common Name: example.twistedmatrix.com",
" Country Name: US",
" Email Address: nobody@twistedmatrix.com",
" Locality Name: Boston",
" Organization Name: Twisted Matrix Labs",
" Organizational Unit Name: Security",
" State Or Province Name: Massachusetts",
"",
"Issuer:",
" Common Name: example.twistedmatrix.com",
" Country Name: US",
" Email Address: nobody@twistedmatrix.com",
" Locality Name: Boston",
" Organization Name: Twisted Matrix Labs",
" Organizational Unit Name: Security",
" State Or Province Name: Massachusetts",
"",
"Serial Number: 12345",
"Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18",
"Public Key with Hash: " + keyHash])
def test_publicKeyMatching(self):
"""
L{PublicKey.matches} returns L{True} for keys from certificates with
the same key, and L{False} for keys from certificates with different
keys.
"""
hostA = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
hostB = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
peerA = sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM)
self.assertTrue(hostA.getPublicKey().matches(hostB.getPublicKey()))
self.assertFalse(peerA.getPublicKey().matches(hostA.getPublicKey()))
def test_certificateOptionsSerialization(self):
"""
Test that __setstate__(__getstate__()) round-trips properly.
"""
firstOpts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv23_METHOD,
verify=True,
caCerts=[self.sCert],
verifyDepth=2,
requireCertificate=False,
verifyOnce=False,
enableSingleUseKeys=False,
enableSessions=False,
fixBrokenPeers=True,
enableSessionTickets=True)
context = firstOpts.getContext()
self.assertIs(context, firstOpts._context)
self.assertIsNotNone(context)
state = firstOpts.__getstate__()
self.assertNotIn("_context", state)
opts = sslverify.OpenSSLCertificateOptions()
opts.__setstate__(state)
self.assertEqual(opts.privateKey, self.sKey)
self.assertEqual(opts.certificate, self.sCert)
self.assertEqual(opts.method, SSL.SSLv23_METHOD)
self.assertTrue(opts.verify)
self.assertEqual(opts.caCerts, [self.sCert])
self.assertEqual(opts.verifyDepth, 2)
self.assertFalse(opts.requireCertificate)
self.assertFalse(opts.verifyOnce)
self.assertFalse(opts.enableSingleUseKeys)
self.assertFalse(opts.enableSessions)
self.assertTrue(opts.fixBrokenPeers)
self.assertTrue(opts.enableSessionTickets)
test_certificateOptionsSerialization.suppress = [
util.suppress(category = DeprecationWarning,
message='twisted\.internet\._sslverify\.*__[gs]etstate__')]
def test_certificateOptionsSessionTickets(self):
"""
Enabling session tickets should not set the OP_NO_TICKET option.
"""
opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=True)
ctx = opts.getContext()
self.assertEqual(0, ctx.set_options(0) & 0x00004000)
def test_certificateOptionsSessionTicketsDisabled(self):
"""
        Disabling session tickets should set the OP_NO_TICKET option.
"""
opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=False)
ctx = opts.getContext()
self.assertEqual(0x00004000, ctx.set_options(0) & 0x00004000)
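    # Editorial note: 0x00004000 is OpenSSL's SSL_OP_NO_TICKET flag; the two
    # tests above check its absence/presence via set_options(0), which
    # returns the accumulated option bits.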
def test_allowedAnonymousClientConnection(self):
"""
Check that anonymous connections are allowed when certificates aren't
required on the server.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, requireCertificate=False),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_refusedAnonymousClientConnection(self):
"""
Check that anonymous connections are refused when certificates are
required on the server.
"""
onServerLost = defer.Deferred()
onClientLost = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=True,
caCerts=[self.sCert], requireCertificate=True),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False),
onServerLost=onServerLost,
onClientLost=onClientLost)
d = defer.DeferredList([onClientLost, onServerLost],
consumeErrors=True)
def afterLost(result):
((cSuccess, cResult), (sSuccess, sResult)) = result
self.assertFalse(cSuccess)
self.assertFalse(sSuccess)
# Win32 fails to report the SSL Error, and report a connection lost
# instead: there is a race condition so that's not totally
# surprising (see ticket #2877 in the tracker)
self.assertIsInstance(cResult.value, (SSL.Error, ConnectionLost))
self.assertIsInstance(sResult.value, SSL.Error)
return d.addCallback(afterLost)
def test_failedCertificateVerification(self):
"""
Check that connecting with a certificate not accepted by the server CA
fails.
"""
onServerLost = defer.Deferred()
onClientLost = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=False,
requireCertificate=False),
sslverify.OpenSSLCertificateOptions(verify=True,
requireCertificate=False, caCerts=[self.cCert]),
onServerLost=onServerLost,
onClientLost=onClientLost)
d = defer.DeferredList([onClientLost, onServerLost],
consumeErrors=True)
def afterLost(result):
((cSuccess, cResult), (sSuccess, sResult)) = result
self.assertFalse(cSuccess)
self.assertFalse(sSuccess)
return d.addCallback(afterLost)
def test_successfulCertificateVerification(self):
"""
Test a successful connection with client certificate validation on
server side.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=False,
requireCertificate=False),
sslverify.OpenSSLCertificateOptions(verify=True,
requireCertificate=True, caCerts=[self.sCert]),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_successfulSymmetricSelfSignedCertificateVerification(self):
"""
Test a successful connection with validation on both server and client
sides.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=True,
requireCertificate=True, caCerts=[self.cCert]),
sslverify.OpenSSLCertificateOptions(privateKey=self.cKey,
certificate=self.cCert, verify=True,
requireCertificate=True, caCerts=[self.sCert]),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_verification(self):
"""
        Check certificate verification while building custom certificate data.
"""
clientDN = sslverify.DistinguishedName(commonName='client')
clientKey = sslverify.KeyPair.generate()
clientCertReq = clientKey.certificateRequest(clientDN)
serverDN = sslverify.DistinguishedName(commonName='server')
serverKey = sslverify.KeyPair.generate()
serverCertReq = serverKey.certificateRequest(serverDN)
clientSelfCertReq = clientKey.certificateRequest(clientDN)
clientSelfCertData = clientKey.signCertificateRequest(
clientDN, clientSelfCertReq, lambda dn: True, 132)
clientSelfCert = clientKey.newCertificate(clientSelfCertData)
serverSelfCertReq = serverKey.certificateRequest(serverDN)
serverSelfCertData = serverKey.signCertificateRequest(
serverDN, serverSelfCertReq, lambda dn: True, 516)
serverSelfCert = serverKey.newCertificate(serverSelfCertData)
clientCertData = serverKey.signCertificateRequest(
serverDN, clientCertReq, lambda dn: True, 7)
clientCert = clientKey.newCertificate(clientCertData)
serverCertData = clientKey.signCertificateRequest(
clientDN, serverCertReq, lambda dn: True, 42)
serverCert = serverKey.newCertificate(serverCertData)
onData = defer.Deferred()
serverOpts = serverCert.options(serverSelfCert)
clientOpts = clientCert.options(clientSelfCert)
self.loopback(serverOpts,
clientOpts,
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
class OpenSSLOptionsECDHIntegrationTests(
OpenSSLOptionsTestsMixin, unittest.TestCase):
"""
ECDH-related integration tests for L{OpenSSLOptions}.
"""
def test_ellipticCurveDiffieHellman(self):
"""
Connections use ECDH when OpenSSL supports it.
"""
if not get_elliptic_curves():
raise unittest.SkipTest("OpenSSL does not support ECDH.")
onData = defer.Deferred()
# TLS 1.3 cipher suites do not specify the key exchange
# mechanism:
# https://wiki.openssl.org/index.php/TLS1.3#Differences_with_TLS1.2_and_below
#
        # and OpenSSL only supports ECDHE groups with TLS 1.3:
# https://wiki.openssl.org/index.php/TLS1.3#Groups
#
# so TLS 1.3 implies ECDHE. Force this test to use TLS 1.2 to
# ensure ECDH is selected when it might not be.
self.loopback(
sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
requireCertificate=False,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_2
),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False,
lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_2,
),
onData=onData,
)
@onData.addCallback
def assertECDH(_):
self.assertEqual(len(self.clientConn.factory.protocols), 1)
[clientProtocol] = self.clientConn.factory.protocols
cipher = clientProtocol.getHandle().get_cipher_name()
self.assertIn(u"ECDH", cipher)
return onData
class DeprecationTests(unittest.SynchronousTestCase):
"""
Tests for deprecation of L{sslverify.OpenSSLCertificateOptions}'s support
of the pickle protocol.
"""
if skipSSL:
skip = skipSSL
def test_getstateDeprecation(self):
"""
L{sslverify.OpenSSLCertificateOptions.__getstate__} is deprecated.
"""
self.callDeprecated(
(Version("Twisted", 15, 0, 0), "a real persistence system"),
sslverify.OpenSSLCertificateOptions().__getstate__)
def test_setstateDeprecation(self):
"""
L{sslverify.OpenSSLCertificateOptions.__setstate__} is deprecated.
"""
self.callDeprecated(
(Version("Twisted", 15, 0, 0), "a real persistence system"),
sslverify.OpenSSLCertificateOptions().__setstate__, {})
class TrustRootTests(unittest.TestCase):
"""
Tests for L{sslverify.OpenSSLCertificateOptions}' C{trustRoot} argument,
L{sslverify.platformTrust}, and their interactions.
"""
if skipSSL:
skip = skipSSL
def setUp(self):
"""
Patch L{sslverify._ChooseDiffieHellmanEllipticCurve}.
"""
self.patch(sslverify, "_ChooseDiffieHellmanEllipticCurve",
FakeChooseDiffieHellmanEllipticCurve)
def test_caCertsPlatformDefaults(self):
"""
Specifying a C{trustRoot} of L{sslverify.OpenSSLDefaultPaths} when
initializing L{sslverify.OpenSSLCertificateOptions} loads the
platform-provided trusted certificates via C{set_default_verify_paths}.
"""
opts = sslverify.OpenSSLCertificateOptions(
trustRoot=sslverify.OpenSSLDefaultPaths(),
)
fc = FakeContext(SSL.TLSv1_METHOD)
opts._contextFactory = lambda method: fc
opts.getContext()
self.assertTrue(fc._defaultVerifyPathsSet)
def test_trustRootPlatformRejectsUntrustedCA(self):
"""
Specifying a C{trustRoot} of L{platformTrust} when initializing
L{sslverify.OpenSSLCertificateOptions} causes certificates issued by a
newly created CA to be rejected by an SSL connection using these
options.
Note that this test should I{always} pass, even on platforms where the
CA certificates are not installed, as long as L{platformTrust} rejects
completely invalid / unknown root CA certificates. This is simply a
smoke test to make sure that verification is happening at all.
"""
caSelfCert, serverCert = certificatesForAuthorityAndServer()
chainedCert = pathContainingDumpOf(self, serverCert, caSelfCert)
privateKey = pathContainingDumpOf(self, serverCert.privateKey)
sProto, cProto, sWrapped, cWrapped, pump = loopbackTLSConnection(
trustRoot=platformTrust(),
privateKeyFile=privateKey,
chainedCertFile=chainedCert,
)
# No data was received.
self.assertEqual(cWrapped.data, b'')
# It was an L{SSL.Error}.
self.assertEqual(cWrapped.lostReason.type, SSL.Error)
# Some combination of OpenSSL and PyOpenSSL is bad at reporting errors.
err = cWrapped.lostReason.value
self.assertEqual(err.args[0][0][2], 'tlsv1 alert unknown ca')
def test_trustRootSpecificCertificate(self):
"""
Specifying a L{Certificate} object for L{trustRoot} will result in that
certificate being the only trust root for a client.
"""
caCert, serverCert = certificatesForAuthorityAndServer()
otherCa, otherServer = certificatesForAuthorityAndServer()
sProto, cProto, sWrapped, cWrapped, pump = loopbackTLSConnection(
trustRoot=caCert,
privateKeyFile=pathContainingDumpOf(self, serverCert.privateKey),
chainedCertFile=pathContainingDumpOf(self, serverCert),
)
pump.flush()
self.assertIsNone(cWrapped.lostReason)
self.assertEqual(cWrapped.data,
sWrapped.greeting)
class ServiceIdentityTests(unittest.SynchronousTestCase):
"""
Tests for the verification of the peer's service's identity via the
C{hostname} argument to L{sslverify.OpenSSLCertificateOptions}.
"""
if skipSSL:
skip = skipSSL
def serviceIdentitySetup(self, clientHostname, serverHostname,
serverContextSetup=lambda ctx: None,
validCertificate=True,
clientPresentsCertificate=False,
validClientCertificate=True,
serverVerifies=False,
buggyInfoCallback=False,
fakePlatformTrust=False,
useDefaultTrust=False):
"""
Connect a server and a client.
@param clientHostname: The I{client's idea} of the server's hostname;
passed as the C{hostname} to the
L{sslverify.OpenSSLCertificateOptions} instance.
@type clientHostname: L{unicode}
@param serverHostname: The I{server's own idea} of the server's
hostname; present in the certificate presented by the server.
@type serverHostname: L{unicode}
@param serverContextSetup: a 1-argument callable invoked with the
L{OpenSSL.SSL.Context} after it's produced.
@type serverContextSetup: L{callable} taking L{OpenSSL.SSL.Context}
returning L{None}.
@param validCertificate: Is the server's certificate valid? L{True} if
so, L{False} otherwise.
@type validCertificate: L{bool}
@param clientPresentsCertificate: Should the client present a
certificate to the server? Defaults to 'no'.
@type clientPresentsCertificate: L{bool}
@param validClientCertificate: If the client presents a certificate,
should it actually be a valid one, i.e. signed by the same CA that
the server is checking? Defaults to 'yes'.
@type validClientCertificate: L{bool}
@param serverVerifies: Should the server verify the client's
certificate? Defaults to 'no'.
@type serverVerifies: L{bool}
        @param buggyInfoCallback: Should we patch the implementation so that
            the C{info_callback} passed to OpenSSL has a bug and raises an
            exception (L{ZeroDivisionError})? Defaults to 'no'.
@type buggyInfoCallback: L{bool}
@param fakePlatformTrust: Should we fake the platformTrust to be the
same as our fake server certificate authority, so that we can test
it's being used? Defaults to 'no' and we just pass platform trust.
@type fakePlatformTrust: L{bool}
@param useDefaultTrust: Should we avoid passing the C{trustRoot} to
L{ssl.optionsForClientTLS}? Defaults to 'no'.
@type useDefaultTrust: L{bool}
@return: the client TLS protocol, the client wrapped protocol,
the server TLS protocol, the server wrapped protocol and
an L{IOPump} which, when its C{pump} and C{flush} methods are
called, will move data between the created client and server
protocol instances
@rtype: 5-L{tuple} of 4 L{IProtocol}s and L{IOPump}
"""
serverCA, serverCert = certificatesForAuthorityAndServer(
serverHostname
)
other = {}
passClientCert = None
clientCA, clientCert = certificatesForAuthorityAndServer(u'client')
if serverVerifies:
other.update(trustRoot=clientCA)
if clientPresentsCertificate:
if validClientCertificate:
passClientCert = clientCert
else:
bogusCA, bogus = certificatesForAuthorityAndServer(u'client')
passClientCert = bogus
serverOpts = sslverify.OpenSSLCertificateOptions(
privateKey=serverCert.privateKey.original,
certificate=serverCert.original,
**other
)
serverContextSetup(serverOpts.getContext())
if not validCertificate:
serverCA, otherServer = certificatesForAuthorityAndServer(
serverHostname
)
if buggyInfoCallback:
def broken(*a, **k):
"""
Raise an exception.
@param a: Arguments for an C{info_callback}
@param k: Keyword arguments for an C{info_callback}
"""
1 / 0
self.patch(
sslverify.ClientTLSOptions, "_identityVerifyingInfoCallback",
broken,
)
signature = {'hostname': clientHostname}
if passClientCert:
signature.update(clientCertificate=passClientCert)
if not useDefaultTrust:
signature.update(trustRoot=serverCA)
if fakePlatformTrust:
self.patch(sslverify, "platformTrust", lambda: serverCA)
clientOpts = sslverify.optionsForClientTLS(**signature)
class GreetingServer(protocol.Protocol):
greeting = b"greetings!"
lostReason = None
data = b''
def connectionMade(self):
self.transport.write(self.greeting)
def dataReceived(self, data):
self.data += data
def connectionLost(self, reason):
self.lostReason = reason
class GreetingClient(protocol.Protocol):
greeting = b'cheerio!'
data = b''
lostReason = None
def connectionMade(self):
self.transport.write(self.greeting)
def dataReceived(self, data):
self.data += data
def connectionLost(self, reason):
self.lostReason = reason
serverWrappedProto = GreetingServer()
clientWrappedProto = GreetingClient()
clientFactory = protocol.Factory()
clientFactory.protocol = lambda: clientWrappedProto
serverFactory = protocol.Factory()
serverFactory.protocol = lambda: serverWrappedProto
self.serverOpts = serverOpts
self.clientOpts = clientOpts
clientTLSFactory = TLSMemoryBIOFactory(
clientOpts, isClient=True,
wrappedFactory=clientFactory
)
serverTLSFactory = TLSMemoryBIOFactory(
serverOpts, isClient=False,
wrappedFactory=serverFactory
)
cProto, sProto, pump = connectedServerAndClient(
lambda: serverTLSFactory.buildProtocol(None),
lambda: clientTLSFactory.buildProtocol(None),
)
return cProto, sProto, clientWrappedProto, serverWrappedProto, pump
def test_invalidHostname(self):
"""
When a certificate containing an invalid hostname is received from the
server, the connection is immediately dropped.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"wrong-host.example.com",
u"correct-host.example.com",
)
self.assertEqual(cWrapped.data, b'')
self.assertEqual(sWrapped.data, b'')
cErr = cWrapped.lostReason.value
sErr = sWrapped.lostReason.value
self.assertIsInstance(cErr, VerificationError)
self.assertIsInstance(sErr, ConnectionClosed)
def test_validHostname(self):
"""
Whenever a valid certificate containing a valid hostname is received,
connection proceeds normally.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
)
self.assertEqual(cWrapped.data,
b'greetings!')
cErr = cWrapped.lostReason
sErr = sWrapped.lostReason
self.assertIsNone(cErr)
self.assertIsNone(sErr)
def test_validHostnameInvalidCertificate(self):
"""
When an invalid certificate containing a perfectly valid hostname is
received, the connection is aborted with an OpenSSL error.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
validCertificate=False,
)
self.assertEqual(cWrapped.data, b'')
self.assertEqual(sWrapped.data, b'')
cErr = cWrapped.lostReason.value
sErr = sWrapped.lostReason.value
self.assertIsInstance(cErr, SSL.Error)
self.assertIsInstance(sErr, SSL.Error)
def test_realCAsBetterNotSignOurBogusTestCerts(self):
"""
If we use the default trust from the platform, our dinky certificate
should I{really} fail.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
validCertificate=False,
useDefaultTrust=True,
)
self.assertEqual(cWrapped.data, b'')
self.assertEqual(sWrapped.data, b'')
cErr = cWrapped.lostReason.value
sErr = sWrapped.lostReason.value
self.assertIsInstance(cErr, SSL.Error)
self.assertIsInstance(sErr, SSL.Error)
def test_butIfTheyDidItWouldWork(self):
"""
L{ssl.optionsForClientTLS} should be using L{ssl.platformTrust} by
default, so if we fake that out then it should trust ourselves again.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
useDefaultTrust=True,
fakePlatformTrust=True,
)
self.assertEqual(cWrapped.data,
b'greetings!')
cErr = cWrapped.lostReason
sErr = sWrapped.lostReason
self.assertIsNone(cErr)
self.assertIsNone(sErr)
def test_clientPresentsCertificate(self):
"""
When the server verifies and the client presents a valid certificate
for that verification by passing it to
L{sslverify.optionsForClientTLS}, communication proceeds.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
validCertificate=True,
serverVerifies=True,
clientPresentsCertificate=True,
)
self.assertEqual(cWrapped.data,
b'greetings!')
cErr = cWrapped.lostReason
sErr = sWrapped.lostReason
self.assertIsNone(cErr)
self.assertIsNone(sErr)
def test_clientPresentsBadCertificate(self):
"""
When the server verifies and the client presents an invalid certificate
for that verification by passing it to
L{sslverify.optionsForClientTLS}, the connection cannot be established
with an SSL error.
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
validCertificate=True,
serverVerifies=True,
validClientCertificate=False,
clientPresentsCertificate=True,
)
self.assertEqual(cWrapped.data,
b'')
cErr = cWrapped.lostReason.value
sErr = sWrapped.lostReason.value
self.assertIsInstance(cErr, SSL.Error)
self.assertIsInstance(sErr, SSL.Error)
def test_hostnameIsIndicated(self):
"""
Specifying the C{hostname} argument to L{CertificateOptions} also sets
the U{Server Name Extension
<https://en.wikipedia.org/wiki/Server_Name_Indication>} TLS indication
field to the correct value.
"""
names = []
def setupServerContext(ctx):
def servername_received(conn):
names.append(conn.get_servername().decode("ascii"))
ctx.set_tlsext_servername_callback(servername_received)
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"valid.example.com",
u"valid.example.com",
setupServerContext
)
self.assertEqual(names, [u"valid.example.com"])
if skipSNI is not None:
test_hostnameIsIndicated.skip = skipSNI
def test_hostnameEncoding(self):
"""
Hostnames are encoded as IDNA.
"""
names = []
hello = u"h\N{LATIN SMALL LETTER A WITH ACUTE}llo.example.com"
def setupServerContext(ctx):
def servername_received(conn):
serverIDNA = _idnaText(conn.get_servername())
names.append(serverIDNA)
ctx.set_tlsext_servername_callback(servername_received)
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
hello, hello, setupServerContext
)
self.assertEqual(names, [hello])
self.assertEqual(cWrapped.data,
b'greetings!')
cErr = cWrapped.lostReason
sErr = sWrapped.lostReason
self.assertIsNone(cErr)
self.assertIsNone(sErr)
if skipSNI is not None:
test_hostnameEncoding.skip = skipSNI
def test_fallback(self):
"""
L{sslverify.simpleVerifyHostname} checks string equality on the
commonName of a connection's certificate's subject, doing nothing if it
matches and raising L{VerificationError} if it doesn't.
"""
name = 'something.example.com'
class Connection(object):
def get_peer_certificate(self):
"""
Fake of L{OpenSSL.SSL.Connection.get_peer_certificate}.
@return: A certificate with a known common name.
@rtype: L{OpenSSL.crypto.X509}
"""
cert = X509()
cert.get_subject().commonName = name
return cert
conn = Connection()
self.assertIs(
sslverify.simpleVerifyHostname(conn, u'something.example.com'),
None
)
self.assertRaises(
sslverify.SimpleVerificationError,
sslverify.simpleVerifyHostname, conn, u'nonsense'
)
def test_surpriseFromInfoCallback(self):
"""
pyOpenSSL isn't always so great about reporting errors. If one occurs
in the verification info callback, it should be logged and the
connection should be shut down (if possible, anyway; the app_data could
be clobbered but there's no point testing for that).
"""
cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(
u"correct-host.example.com",
u"correct-host.example.com",
buggyInfoCallback=True,
)
self.assertEqual(cWrapped.data, b'')
self.assertEqual(sWrapped.data, b'')
cErr = cWrapped.lostReason.value
sErr = sWrapped.lostReason.value
self.assertIsInstance(cErr, ZeroDivisionError)
self.assertIsInstance(sErr, (ConnectionClosed, SSL.Error))
errors = self.flushLoggedErrors(ZeroDivisionError)
self.assertTrue(errors)
def negotiateProtocol(serverProtocols,
clientProtocols,
clientOptions=None):
"""
Create the TLS connection and negotiate a next protocol.
@param serverProtocols: The protocols the server is willing to negotiate.
@param clientProtocols: The protocols the client is willing to negotiate.
@param clientOptions: The type of C{OpenSSLCertificateOptions} class to
use for the client. Defaults to C{OpenSSLCertificateOptions}.
@return: A L{tuple} of the negotiated protocol and the reason the
connection was lost.
"""
caCertificate, serverCertificate = certificatesForAuthorityAndServer()
trustRoot = sslverify.OpenSSLCertificateAuthorities([
caCertificate.original,
])
sProto, cProto, sWrapped, cWrapped, pump = loopbackTLSConnectionInMemory(
trustRoot=trustRoot,
privateKey=serverCertificate.privateKey.original,
serverCertificate=serverCertificate.original,
clientProtocols=clientProtocols,
serverProtocols=serverProtocols,
clientOptions=clientOptions,
)
pump.flush()
return (cProto.negotiatedProtocol, cWrapped.lostReason)
class NPNOrALPNTests(unittest.TestCase):
"""
NPN and ALPN protocol selection.
These tests only run on platforms that have a PyOpenSSL version >= 0.15,
and OpenSSL version 1.0.1 or later.
"""
if skipSSL:
skip = skipSSL
elif skipNPN:
skip = skipNPN
def test_nextProtocolMechanismsNPNIsSupported(self):
"""
When at least NPN is available on the platform, NPN is in the set of
supported negotiation protocols.
"""
supportedProtocols = sslverify.protocolNegotiationMechanisms()
self.assertTrue(
sslverify.ProtocolNegotiationSupport.NPN in supportedProtocols
)
def test_NPNAndALPNSuccess(self):
"""
When both ALPN and NPN are used, and both the client and server have
overlapping protocol choices, a protocol is successfully negotiated.
Further, the negotiated protocol is the first one in the list.
"""
protocols = [b'h2', b'http/1.1']
negotiatedProtocol, lostReason = negotiateProtocol(
clientProtocols=protocols,
serverProtocols=protocols,
)
self.assertEqual(negotiatedProtocol, b'h2')
self.assertIsNone(lostReason)
def test_NPNAndALPNDifferent(self):
"""
Client and server have different protocol lists: only the common
element is chosen.
"""
serverProtocols = [b'h2', b'http/1.1', b'spdy/2']
clientProtocols = [b'spdy/3', b'http/1.1']
negotiatedProtocol, lostReason = negotiateProtocol(
clientProtocols=clientProtocols,
serverProtocols=serverProtocols,
)
self.assertEqual(negotiatedProtocol, b'http/1.1')
self.assertIsNone(lostReason)
def test_NPNAndALPNNoAdvertise(self):
"""
When one peer does not advertise any protocols, the connection is set
up with no next protocol.
"""
protocols = [b'h2', b'http/1.1']
negotiatedProtocol, lostReason = negotiateProtocol(
clientProtocols=protocols,
serverProtocols=[],
)
self.assertIsNone(negotiatedProtocol)
self.assertIsNone(lostReason)
def test_NPNAndALPNNoOverlap(self):
"""
When the client and server have no overlap of protocols, the connection
fails.
"""
clientProtocols = [b'h2', b'http/1.1']
serverProtocols = [b'spdy/3']
negotiatedProtocol, lostReason = negotiateProtocol(
            serverProtocols=serverProtocols,
            clientProtocols=clientProtocols,
)
self.assertIsNone(negotiatedProtocol)
self.assertEqual(lostReason.type, SSL.Error)
class ALPNTests(unittest.TestCase):
"""
ALPN protocol selection.
These tests only run on platforms that have a PyOpenSSL version >= 0.15,
and OpenSSL version 1.0.2 or later.
    This covers only the ALPN specific logic, as any platform that has ALPN
    will also have NPN and so will run the NPNOrALPNTests suite as well.
"""
if skipSSL:
skip = skipSSL
elif skipALPN:
skip = skipALPN
def test_nextProtocolMechanismsALPNIsSupported(self):
"""
When ALPN is available on a platform, protocolNegotiationMechanisms
        includes ALPN in the supported protocols.
"""
supportedProtocols = sslverify.protocolNegotiationMechanisms()
self.assertTrue(
sslverify.ProtocolNegotiationSupport.ALPN in
supportedProtocols
)
class NPNAndALPNAbsentTests(unittest.TestCase):
"""
NPN/ALPN operations fail on platforms that do not support them.
These tests only run on platforms that have a PyOpenSSL version < 0.15,
an OpenSSL version earlier than 1.0.1, or an OpenSSL/cryptography built
without NPN support.
"""
if skipSSL:
skip = skipSSL
elif not skipNPN or not skipALPN:
skip = "NPN and/or ALPN is present on this platform"
def test_nextProtocolMechanismsNoNegotiationSupported(self):
"""
When neither NPN or ALPN are available on a platform, there are no
supported negotiation protocols.
"""
supportedProtocols = sslverify.protocolNegotiationMechanisms()
self.assertFalse(supportedProtocols)
def test_NPNAndALPNNotImplemented(self):
"""
A NotImplementedError is raised when using acceptableProtocols on a
platform that does not support either NPN or ALPN.
"""
protocols = [b'h2', b'http/1.1']
self.assertRaises(
NotImplementedError,
negotiateProtocol,
serverProtocols=protocols,
clientProtocols=protocols,
)
def test_NegotiatedProtocolReturnsNone(self):
"""
        negotiatedProtocol returns L{None} even when NPN/ALPN aren't supported.
        This works because, as neither is supported, negotiation isn't even
        attempted.
"""
serverProtocols = None
clientProtocols = None
negotiatedProtocol, lostReason = negotiateProtocol(
clientProtocols=clientProtocols,
serverProtocols=serverProtocols,
)
self.assertIsNone(negotiatedProtocol)
self.assertIsNone(lostReason)
class _NotSSLTransport:
def getHandle(self):
return self
class _MaybeSSLTransport:
def getHandle(self):
return self
def get_peer_certificate(self):
return None
def get_host_certificate(self):
return None
class _ActualSSLTransport:
def getHandle(self):
return self
def get_host_certificate(self):
return sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM).original
def get_peer_certificate(self):
return sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM).original
class ConstructorsTests(unittest.TestCase):
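    """
    Tests for L{sslverify.Certificate}'s alternate constructors that build
    certificates from transports.
    """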
if skipSSL:
skip = skipSSL
def test_peerFromNonSSLTransport(self):
"""
Verify that peerFromTransport raises an exception if the transport
passed is not actually an SSL transport.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.peerFromTransport,
_NotSSLTransport())
self.assertTrue(str(x).startswith("non-TLS"))
def test_peerFromBlankSSLTransport(self):
"""
Verify that peerFromTransport raises an exception if the transport
passed is an SSL transport, but doesn't have a peer certificate.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.peerFromTransport,
_MaybeSSLTransport())
self.assertTrue(str(x).startswith("TLS"))
def test_hostFromNonSSLTransport(self):
"""
Verify that hostFromTransport raises an exception if the transport
passed is not actually an SSL transport.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.hostFromTransport,
_NotSSLTransport())
self.assertTrue(str(x).startswith("non-TLS"))
def test_hostFromBlankSSLTransport(self):
"""
Verify that hostFromTransport raises an exception if the transport
passed is an SSL transport, but doesn't have a host certificate.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.hostFromTransport,
_MaybeSSLTransport())
self.assertTrue(str(x).startswith("TLS"))
def test_hostFromSSLTransport(self):
"""
Verify that hostFromTransport successfully creates the correct
certificate if passed a valid SSL transport.
"""
self.assertEqual(
sslverify.Certificate.hostFromTransport(
_ActualSSLTransport()).serialNumber(),
12345)
def test_peerFromSSLTransport(self):
"""
Verify that peerFromTransport successfully creates the correct
certificate if passed a valid SSL transport.
"""
self.assertEqual(
sslverify.Certificate.peerFromTransport(
_ActualSSLTransport()).serialNumber(),
12346)
class MultipleCertificateTrustRootTests(unittest.TestCase):
"""
Test the behavior of the trustRootFromCertificates() API call.
"""
if skipSSL:
skip = skipSSL
def test_trustRootFromCertificatesPrivatePublic(self):
"""
L{trustRootFromCertificates} accepts either a L{sslverify.Certificate}
or a L{sslverify.PrivateCertificate} instance.
"""
privateCert = sslverify.PrivateCertificate.loadPEM(A_KEYPAIR)
cert = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
mt = sslverify.trustRootFromCertificates([privateCert, cert])
# Verify that the returned object acts correctly when used as a
# trustRoot= param to optionsForClientTLS.
sProto, cProto, sWrap, cWrap, pump = loopbackTLSConnectionInMemory(
trustRoot=mt,
privateKey=privateCert.privateKey.original,
serverCertificate=privateCert.original,
)
# This connection should succeed
self.assertEqual(cWrap.data, b'greetings!')
self.assertIsNone(cWrap.lostReason)
def test_trustRootSelfSignedServerCertificate(self):
"""
L{trustRootFromCertificates} called with a single self-signed
certificate will cause L{optionsForClientTLS} to accept client
connections to a server with that certificate.
"""
key, cert = makeCertificate(O=b"Server Test Certificate", CN=b"server")
selfSigned = sslverify.PrivateCertificate.fromCertificateAndKeyPair(
sslverify.Certificate(cert),
sslverify.KeyPair(key),
)
trust = sslverify.trustRootFromCertificates([selfSigned])
# Since we trust this exact certificate, connections to this server
# should succeed.
sProto, cProto, sWrap, cWrap, pump = loopbackTLSConnectionInMemory(
trustRoot=trust,
privateKey=selfSigned.privateKey.original,
serverCertificate=selfSigned.original,
)
self.assertEqual(cWrap.data, b'greetings!')
self.assertIsNone(cWrap.lostReason)
def test_trustRootCertificateAuthorityTrustsConnection(self):
"""
L{trustRootFromCertificates} called with certificate A will cause
L{optionsForClientTLS} to accept client connections to a server with
certificate B where B is signed by A.
"""
caCert, serverCert = certificatesForAuthorityAndServer()
trust = sslverify.trustRootFromCertificates([caCert])
# Since we've listed the CA's certificate as a trusted cert, a
# connection to the server certificate it signed should succeed.
sProto, cProto, sWrap, cWrap, pump = loopbackTLSConnectionInMemory(
trustRoot=trust,
privateKey=serverCert.privateKey.original,
serverCertificate=serverCert.original,
)
self.assertEqual(cWrap.data, b'greetings!')
self.assertIsNone(cWrap.lostReason)
def test_trustRootFromCertificatesUntrusted(self):
"""
L{trustRootFromCertificates} called with certificate A will cause
L{optionsForClientTLS} to disallow any connections to a server with
certificate B where B is not signed by A.
"""
key, cert = makeCertificate(O=b"Server Test Certificate", CN=b"server")
serverCert = sslverify.PrivateCertificate.fromCertificateAndKeyPair(
sslverify.Certificate(cert),
sslverify.KeyPair(key),
)
untrustedCert = sslverify.Certificate(
makeCertificate(O=b"CA Test Certificate", CN=b"unknown CA")[1]
)
trust = sslverify.trustRootFromCertificates([untrustedCert])
# Since we only trust 'untrustedCert' which has not signed our
# server's cert, we should reject this connection
sProto, cProto, sWrap, cWrap, pump = loopbackTLSConnectionInMemory(
trustRoot=trust,
privateKey=serverCert.privateKey.original,
serverCertificate=serverCert.original,
)
# This connection should fail, so no data was received.
self.assertEqual(cWrap.data, b'')
# It was an L{SSL.Error}.
self.assertEqual(cWrap.lostReason.type, SSL.Error)
# Some combination of OpenSSL and PyOpenSSL is bad at reporting errors.
err = cWrap.lostReason.value
self.assertEqual(err.args[0][0][2], 'tlsv1 alert unknown ca')
def test_trustRootFromCertificatesOpenSSLObjects(self):
"""
L{trustRootFromCertificates} rejects any L{OpenSSL.crypto.X509}
instances in the list passed to it.
"""
private = sslverify.PrivateCertificate.loadPEM(A_KEYPAIR)
certX509 = private.original
exception = self.assertRaises(
TypeError,
sslverify.trustRootFromCertificates, [certX509],
)
self.assertEqual(
"certificates items must be twisted.internet.ssl.CertBase "
"instances",
exception.args[0],
)
class OpenSSLCipherTests(unittest.TestCase):
"""
Tests for twisted.internet._sslverify.OpenSSLCipher.
"""
if skipSSL:
skip = skipSSL
cipherName = u'CIPHER-STRING'
def test_constructorSetsFullName(self):
"""
The first argument passed to the constructor becomes the full name.
"""
self.assertEqual(
self.cipherName,
sslverify.OpenSSLCipher(self.cipherName).fullName
)
def test_repr(self):
"""
C{repr(cipher)} returns a valid constructor call.
"""
cipher = sslverify.OpenSSLCipher(self.cipherName)
self.assertEqual(
cipher,
eval(repr(cipher), {'OpenSSLCipher': sslverify.OpenSSLCipher})
)
def test_eqSameClass(self):
"""
Equal type and C{fullName} means that the objects are equal.
"""
cipher1 = sslverify.OpenSSLCipher(self.cipherName)
cipher2 = sslverify.OpenSSLCipher(self.cipherName)
self.assertEqual(cipher1, cipher2)
def test_eqSameNameDifferentType(self):
"""
If ciphers have the same name but different types, they're still
different.
"""
class DifferentCipher(object):
fullName = self.cipherName
self.assertNotEqual(
sslverify.OpenSSLCipher(self.cipherName),
DifferentCipher(),
)
class ExpandCipherStringTests(unittest.TestCase):
"""
Tests for twisted.internet._sslverify._expandCipherString.
"""
if skipSSL:
skip = skipSSL
def test_doesNotStumbleOverEmptyList(self):
"""
If the expanded cipher list is empty, an empty L{list} is returned.
"""
self.assertEqual(
[],
sslverify._expandCipherString(u'', SSL.SSLv23_METHOD, 0)
)
def test_doesNotSwallowOtherSSLErrors(self):
"""
        Only "no cipher matches" errors are swallowed; every other SSL error
        is propagated.
"""
def raiser(_):
# Unfortunately, there seems to be no way to trigger a real SSL
# error artificially.
raise SSL.Error([['', '', '']])
ctx = FakeContext(SSL.SSLv23_METHOD)
ctx.set_cipher_list = raiser
self.patch(sslverify.SSL, 'Context', lambda _: ctx)
self.assertRaises(
SSL.Error,
sslverify._expandCipherString, u'ALL', SSL.SSLv23_METHOD, 0
)
def test_returnsListOfICiphers(self):
"""
L{sslverify._expandCipherString} always returns a L{list} of
L{interfaces.ICipher}.
"""
ciphers = sslverify._expandCipherString(u'ALL', SSL.SSLv23_METHOD, 0)
self.assertIsInstance(ciphers, list)
bogus = []
for c in ciphers:
if not interfaces.ICipher.providedBy(c):
bogus.append(c)
self.assertEqual([], bogus)
class AcceptableCiphersTests(unittest.TestCase):
"""
Tests for twisted.internet._sslverify.OpenSSLAcceptableCiphers.
"""
if skipSSL:
skip = skipSSL
def test_selectOnEmptyListReturnsEmptyList(self):
"""
If no ciphers are available, nothing can be selected.
"""
ac = sslverify.OpenSSLAcceptableCiphers([])
self.assertEqual([], ac.selectCiphers([]))
def test_selectReturnsOnlyFromAvailable(self):
"""
Select only returns a cross section of what is available and what is
desirable.
"""
ac = sslverify.OpenSSLAcceptableCiphers([
sslverify.OpenSSLCipher('A'),
sslverify.OpenSSLCipher('B'),
])
self.assertEqual([sslverify.OpenSSLCipher('B')],
ac.selectCiphers([sslverify.OpenSSLCipher('B'),
sslverify.OpenSSLCipher('C')]))
def test_fromOpenSSLCipherStringExpandsToListOfCiphers(self):
"""
If L{sslverify.OpenSSLAcceptableCiphers.fromOpenSSLCipherString} is
called it expands the string to a list of ciphers.
"""
ac = sslverify.OpenSSLAcceptableCiphers.fromOpenSSLCipherString('ALL')
self.assertIsInstance(ac._ciphers, list)
self.assertTrue(all(sslverify.ICipher.providedBy(c)
for c in ac._ciphers))
class DiffieHellmanParametersTests(unittest.TestCase):
"""
Tests for twisted.internet._sslverify.OpenSSLDHParameters.
"""
if skipSSL:
skip = skipSSL
filePath = FilePath(b'dh.params')
def test_fromFile(self):
"""
Calling C{fromFile} with a filename returns an instance with that file
name saved.
"""
params = sslverify.OpenSSLDiffieHellmanParameters.fromFile(
self.filePath
)
self.assertEqual(self.filePath, params._dhFile)
class FakeLibState(object):
"""
State for L{FakeLib}
@param setECDHAutoRaises: An exception
L{FakeLib.SSL_CTX_set_ecdh_auto} should raise; if L{None},
nothing is raised.
@ivar ecdhContexts: A list of SSL contexts with which
L{FakeLib.SSL_CTX_set_ecdh_auto} was called
@type ecdhContexts: L{list} of L{OpenSSL.SSL.Context}s
@ivar ecdhValues: A list of boolean values with which
L{FakeLib.SSL_CTX_set_ecdh_auto} was called
@type ecdhValues: L{list} of L{boolean}s
"""
__slots__ = ("setECDHAutoRaises", "ecdhContexts", "ecdhValues")
def __init__(self, setECDHAutoRaises):
self.setECDHAutoRaises = setECDHAutoRaises
self.ecdhContexts = []
self.ecdhValues = []
class FakeLib(object):
"""
An introspectable fake of cryptography's lib object.
@param state: A L{FakeLibState} instance that contains this fake's
state.
"""
def __init__(self, state):
self._state = state
def SSL_CTX_set_ecdh_auto(self, ctx, value):
"""
        Record the context and value in the C{_state} instance
        variable.
@see: L{FakeLibState}
@param ctx: An SSL context.
@type ctx: L{OpenSSL.SSL.Context}
@param value: A boolean value
@type value: L{bool}
"""
self._state.ecdhContexts.append(ctx)
self._state.ecdhValues.append(value)
if self._state.setECDHAutoRaises is not None:
raise self._state.setECDHAutoRaises
class FakeLibTests(unittest.TestCase):
"""
Tests for L{FakeLib}.
"""
def test_SSL_CTX_set_ecdh_auto(self):
"""
        L{FakeLib.SSL_CTX_set_ecdh_auto} records the context and value it
        was called with.
"""
state = FakeLibState(setECDHAutoRaises=None)
lib = FakeLib(state)
self.assertNot(state.ecdhContexts)
self.assertNot(state.ecdhValues)
context, value = "CONTEXT", True
lib.SSL_CTX_set_ecdh_auto(context, value)
self.assertEqual(state.ecdhContexts, [context])
self.assertEqual(state.ecdhValues, [True])
def test_SSL_CTX_set_ecdh_autoRaises(self):
"""
L{FakeLib.SSL_CTX_set_ecdh_auto} raises the exception provided
by its state, while still recording its arguments.
"""
state = FakeLibState(setECDHAutoRaises=ValueError)
lib = FakeLib(state)
self.assertNot(state.ecdhContexts)
self.assertNot(state.ecdhValues)
context, value = "CONTEXT", True
self.assertRaises(
ValueError, lib.SSL_CTX_set_ecdh_auto, context, value
)
self.assertEqual(state.ecdhContexts, [context])
self.assertEqual(state.ecdhValues, [True])
class FakeCryptoState(object):
"""
State for L{FakeCrypto}
    @param getEllipticCurveRaises: The exception
        L{FakeCrypto.get_elliptic_curve} should raise; if L{None},
        nothing is raised.
@param getEllipticCurveReturns: What
L{FakeCrypto.get_elliptic_curve} should return.
@ivar getEllipticCurveCalls: The arguments with which
L{FakeCrypto.get_elliptic_curve} has been called.
@type getEllipticCurveCalls: L{list}
"""
__slots__ = (
"getEllipticCurveRaises",
"getEllipticCurveReturns",
"getEllipticCurveCalls",
)
def __init__(
self,
getEllipticCurveRaises,
getEllipticCurveReturns,
):
self.getEllipticCurveRaises = getEllipticCurveRaises
self.getEllipticCurveReturns = getEllipticCurveReturns
self.getEllipticCurveCalls = []
class FakeCrypto(object):
"""
An introspectable fake of pyOpenSSL's L{OpenSSL.crypto} module.
@ivar state: A L{FakeCryptoState} instance
"""
def __init__(self, state):
self._state = state
def get_elliptic_curve(self, curve):
"""
A fake that records the curve with which it was called.
@param curve: see L{crypto.get_elliptic_curve}
@return: see L{FakeCryptoState.getEllipticCurveReturns}
@raises: see L{FakeCryptoState.getEllipticCurveRaises}
"""
self._state.getEllipticCurveCalls.append(curve)
if self._state.getEllipticCurveRaises is not None:
raise self._state.getEllipticCurveRaises
return self._state.getEllipticCurveReturns
class FakeCryptoTests(unittest.SynchronousTestCase):
"""
Tests for L{FakeCrypto}.
"""
def test_get_elliptic_curveRecordsArgument(self):
"""
        L{FakeCrypto.get_elliptic_curve} records the curve with
which it was called.
"""
state = FakeCryptoState(
getEllipticCurveRaises=None,
getEllipticCurveReturns=None,
)
crypto = FakeCrypto(state)
crypto.get_elliptic_curve("a curve name")
self.assertEqual(state.getEllipticCurveCalls, ["a curve name"])
def test_get_elliptic_curveReturns(self):
"""
        L{FakeCrypto.get_elliptic_curve} returns the value
specified by its state object and records what it was called
with.
"""
returnValue = "object"
state = FakeCryptoState(
getEllipticCurveRaises=None,
getEllipticCurveReturns=returnValue,
)
crypto = FakeCrypto(state)
self.assertIs(
crypto.get_elliptic_curve("another curve name"),
returnValue,
)
self.assertEqual(
state.getEllipticCurveCalls,
["another curve name"]
)
def test_get_elliptic_curveRaises(self):
"""
        L{FakeCrypto.get_elliptic_curve} raises the exception
specified by its state object.
"""
state = FakeCryptoState(
getEllipticCurveRaises=ValueError,
getEllipticCurveReturns=None
)
crypto = FakeCrypto(state)
self.assertRaises(
ValueError,
crypto.get_elliptic_curve, "yet another curve name",
)
self.assertEqual(
state.getEllipticCurveCalls,
["yet another curve name"],
)
class ChooseDiffieHellmanEllipticCurveTests(unittest.SynchronousTestCase):
"""
Tests for L{sslverify._ChooseDiffieHellmanEllipticCurve}.
@cvar OPENSSL_110: A version number for OpenSSL 1.1.0
@cvar OPENSSL_102: A version number for OpenSSL 1.0.2
@cvar OPENSSL_101: A version number for OpenSSL 1.0.1
@see:
U{https://wiki.openssl.org/index.php/Manual:OPENSSL_VERSION_NUMBER(3)}
"""
if skipSSL:
skip = skipSSL
OPENSSL_110 = 0x1010007f
OPENSSL_102 = 0x100020ef
OPENSSL_101 = 0x1000114f
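    # OpenSSL packs version numbers as 0xMNNFFPPS (major, minor, fix,
    # patch, status), so 0x1010007f is a 1.1.0 release, 0x100020ef is
    # 1.0.2, and 0x1000114f is 1.0.1.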
def setUp(self):
        self.libState = FakeLibState(setECDHAutoRaises=None)
self.lib = FakeLib(self.libState)
self.cryptoState = FakeCryptoState(
getEllipticCurveReturns=None,
getEllipticCurveRaises=None
)
self.crypto = FakeCrypto(self.cryptoState)
self.context = FakeContext(SSL.SSLv23_METHOD)
def test_openSSL110(self):
"""
No configuration of contexts occurs under OpenSSL 1.1.0 and
later, because they create contexts with secure ECDH curves.
@see: U{http://twistedmatrix.com/trac/ticket/9210}
"""
chooser = sslverify._ChooseDiffieHellmanEllipticCurve(
self.OPENSSL_110,
openSSLlib=self.lib,
openSSLcrypto=self.crypto,
)
chooser.configureECDHCurve(self.context)
self.assertFalse(self.libState.ecdhContexts)
self.assertFalse(self.libState.ecdhValues)
self.assertFalse(self.cryptoState.getEllipticCurveCalls)
self.assertIsNone(self.context._ecCurve)
def test_openSSL102(self):
"""
OpenSSL 1.0.2 does not set ECDH curves by default, but
        C{SSL_CTX_set_ecdh_auto} requests that a context choose a
        secure set of curves automatically.
"""
context = SSL.Context(SSL.SSLv23_METHOD)
chooser = sslverify._ChooseDiffieHellmanEllipticCurve(
self.OPENSSL_102,
openSSLlib=self.lib,
openSSLcrypto=self.crypto,
)
chooser.configureECDHCurve(context)
self.assertEqual(self.libState.ecdhContexts, [context._context])
self.assertEqual(self.libState.ecdhValues, [True])
self.assertFalse(self.cryptoState.getEllipticCurveCalls)
self.assertIsNone(self.context._ecCurve)
def test_openSSL102SetECDHAutoRaises(self):
"""
An exception raised by C{SSL_CTX_set_ecdh_auto} under OpenSSL
1.0.2 is suppressed because ECDH is best-effort.
"""
self.libState.setECDHAutoRaises = BaseException
context = SSL.Context(SSL.SSLv23_METHOD)
chooser = sslverify._ChooseDiffieHellmanEllipticCurve(
self.OPENSSL_102,
openSSLlib=self.lib,
openSSLcrypto=self.crypto,
)
chooser.configureECDHCurve(context)
self.assertEqual(self.libState.ecdhContexts, [context._context])
self.assertEqual(self.libState.ecdhValues, [True])
self.assertFalse(self.cryptoState.getEllipticCurveCalls)
def test_openSSL101(self):
"""
OpenSSL 1.0.1 does not set ECDH curves by default, nor does
it expose L{SSL_CTX_set_ecdh_auto}. Instead, a single ECDH
curve can be set with L{OpenSSL.SSL.Context.set_tmp_ecdh}.
"""
self.cryptoState.getEllipticCurveReturns = curve = "curve object"
chooser = sslverify._ChooseDiffieHellmanEllipticCurve(
self.OPENSSL_101,
openSSLlib=self.lib,
openSSLcrypto=self.crypto,
)
chooser.configureECDHCurve(self.context)
self.assertFalse(self.libState.ecdhContexts)
self.assertFalse(self.libState.ecdhValues)
self.assertEqual(
self.cryptoState.getEllipticCurveCalls,
[sslverify._defaultCurveName],
)
self.assertIs(self.context._ecCurve, curve)
def test_openSSL101SetECDHRaises(self):
"""
An exception raised by L{OpenSSL.SSL.Context.set_tmp_ecdh}
        under OpenSSL 1.0.1 is suppressed because ECDHE is best-effort.
"""
def set_tmp_ecdh(ctx):
raise BaseException
self.context.set_tmp_ecdh = set_tmp_ecdh
chooser = sslverify._ChooseDiffieHellmanEllipticCurve(
self.OPENSSL_101,
openSSLlib=self.lib,
openSSLcrypto=self.crypto,
)
chooser.configureECDHCurve(self.context)
self.assertFalse(self.libState.ecdhContexts)
self.assertFalse(self.libState.ecdhValues)
self.assertEqual(
self.cryptoState.getEllipticCurveCalls,
[sslverify._defaultCurveName],
)
def test_openSSL101NoECC(self):
"""
Contexts created under an OpenSSL 1.0.1 that doesn't support
ECC have no configuration applied.
"""
self.cryptoState.getEllipticCurveRaises = ValueError
chooser = sslverify._ChooseDiffieHellmanEllipticCurve(
self.OPENSSL_101,
openSSLlib=self.lib,
openSSLcrypto=self.crypto,
)
chooser.configureECDHCurve(self.context)
self.assertFalse(self.libState.ecdhContexts)
self.assertFalse(self.libState.ecdhValues)
self.assertIsNone(self.context._ecCurve)
class KeyPairTests(unittest.TestCase):
"""
Tests for L{sslverify.KeyPair}.
"""
if skipSSL:
skip = skipSSL
def setUp(self):
"""
Create test certificate.
"""
self.sKey = makeCertificate(
O=b"Server Test Certificate",
CN=b"server")[0]
def test_getstateDeprecation(self):
"""
L{sslverify.KeyPair.__getstate__} is deprecated.
"""
self.callDeprecated(
(Version("Twisted", 15, 0, 0), "a real persistence system"),
sslverify.KeyPair(self.sKey).__getstate__)
def test_setstateDeprecation(self):
"""
        L{sslverify.KeyPair.__setstate__} is deprecated.
"""
state = sslverify.KeyPair(self.sKey).dump()
self.callDeprecated(
(Version("Twisted", 15, 0, 0), "a real persistence system"),
sslverify.KeyPair(self.sKey).__setstate__, state)
def test_noTrailingNewlinePemCert(self):
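        """
        A PEM certificate that lacks a trailing newline still loads.
        """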
noTrailingNewlineKeyPemPath = getModule(
"twisted.test").filePath.sibling(
"cert.pem.no_trailing_newline")
certPEM = noTrailingNewlineKeyPemPath.getContent()
ssl.Certificate.loadPEM(certPEM)
class SelectVerifyImplementationTests(unittest.SynchronousTestCase):
"""
Tests for L{_selectVerifyImplementation}.
"""
if skipSSL is not None:
skip = skipSSL
def test_dependencyMissing(self):
"""
If I{service_identity} cannot be imported then
L{_selectVerifyImplementation} returns L{simpleVerifyHostname} and
L{SimpleVerificationError}.
"""
with SetAsideModule("service_identity"):
sys.modules["service_identity"] = None
result = sslverify._selectVerifyImplementation()
expected = (
sslverify.simpleVerifyHostname,
sslverify.simpleVerifyIPAddress,
sslverify.SimpleVerificationError)
self.assertEqual(expected, result)
test_dependencyMissing.suppress = [
util.suppress(
message=(
"You do not have a working installation of the "
"service_identity module"),
),
]
def test_dependencyMissingWarning(self):
"""
If I{service_identity} cannot be imported then
L{_selectVerifyImplementation} emits a L{UserWarning} advising the user
of the exact error.
"""
with SetAsideModule("service_identity"):
sys.modules["service_identity"] = None
sslverify._selectVerifyImplementation()
[warning] = list(
warning
for warning
in self.flushWarnings()
if warning["category"] == UserWarning)
importErrors = [
# Python 3.6.3
"'import of service_identity halted; None in sys.modules'",
# Python 3
"'import of 'service_identity' halted; None in sys.modules'",
# Python 2
"'No module named service_identity'"
]
expectedMessages = []
for importError in importErrors:
expectedMessages.append(
"You do not have a working installation of the "
"service_identity module: {message}. Please install it from "
"<https://pypi.python.org/pypi/service_identity> "
"and make sure all of its dependencies are satisfied. "
"Without the service_identity module, Twisted can perform only"
" rudimentary TLS client hostname verification. Many valid "
"certificate/hostname mappings may be rejected.".format(
message=importError))
self.assertIn(warning["message"], expectedMessages)
# Make sure we're abusing the warning system to a sufficient
# degree: there is no filename or line number that makes sense for
# this warning to "blame" for the problem. It is a system
# misconfiguration. So the location information should be blank
# (or as blank as we can make it).
self.assertEqual(warning["filename"], "")
self.assertEqual(warning["lineno"], 0)
|
6bb1b3ecb94f352e2ec3436962d9806a3c7861b7
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowLispInstanceIdServiceStatistics/cli/equal/golden_output2_expected.py
|
83b1d0ea2437aee2775cf9547794c28967c40899
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 12,512
|
py
|
golden_output2_expected.py
|
expected_output = {
'lisp_id': {
0: {
'instance_id': {
4100: {
'last_cleared': 'never',
'control_packets': {
'map_requests': {
'in': 0,
'out': 0,
'5_sec': 0,
'1_min': 0,
'5_min': 0,
'encapsulated': {
'in': 0,
'out': 0
},
'rloc_probe': {
'in': 0,
'out': 0
},
'smr_based': {
'in': 0,
'out': 0
},
'expired': {
'on_queue': 0,
'no_reply': 0
},
'map_resolver_forwarded': 0,
'map_server_forwarded': 0
},
'map_reply': {
'in': 0,
'out': 0,
'authoritative': {
'in': 0,
'out': 0
},
'non_authoritative': {
'in': 0,
'out': 0
},
'negative': {
'in': 0,
'out': 0
},
'rloc_probe': {
'in': 0,
'out': 0
},
'map_server_proxy_reply': {
'out': 0
}
},
'wlc_map_subscribe': {
'in': 0,
'out': 2,
'failures': {
'in': 0,
'out': 0
}
},
'wlc_map_unsubscribe': {
'in': 0,
'out': 0,
'failures': {
'in': 0,
'out': 0
}
},
'map_register': {
'in': 0,
'out': 6,
'5_sec': 0,
'1_min': 0,
'5_min': 0,
'map_server_af_disabled': 0,
'not_valid_site_eid_prefix': 0,
'authentication_failures': 0,
'disallowed_locators': 0,
'misc': 0
},
'wlc_map_registers': {
'in': 0,
'out': 0,
'ap': {
'in': 0,
'out': 0
},
'client': {
'in': 0,
'out': 0
},
'failures': {
'in': 0,
'out': 0
}
},
'map_notify': {
'in': 8,
'out': 0,
'authentication_failures': 0
},
'wlc_map_notify': {
'in': 0,
'out': 0,
'ap': {
'in': 0,
'out': 0
},
'client': {
'in': 0,
'out': 0
},
'failures': {
'in': 0,
'out': 0
}
},
'publish_subscribe': {
'subscription_request': {
'in': 0,
'out': 0,
'iid': {
'in': 0,
'out': 0
},
'pub_refresh': {
'in': 0,
'out': 0
},
'policy': {
'in': 0,
'out': 0
},
'failures': {
'in': 0,
'out': 0
}
},
'subscription_status': {
'in': 0,
'out': 0,
'end_of_publication': {
'in': 0,
'out': 0
},
'subscription_rejected': {
'in': 0,
'out': 0
},
'subscription_removed': {
'in': 0,
'out': 0
},
'failures': {
'in': 0,
'out': 0
}
},
'solicit_subscription': {
'in': 3,
'out': 0,
'failures': {
'in': 0,
'out': 0
}
},
'publication': {
'in': 0,
'out': 0,
'failures': {
'in': 0,
'out': 0
}
}
}
},
'errors': {
'mapping_rec_ttl_alerts': 0,
'map_request_invalid_source_rloc_drops': 0,
'map_register_invalid_source_rloc_drops': 0,
'ddt_requests_failed': 0,
'ddt_itr_map_requests': {
'dropped': 0,
'nonce_collision': 0,
'bad_xtr_nonce': 0
}
},
'cache_related': {
'cache_entries': {
'created': 3,
'deleted': 1
},
'nsf_cef_replay_entry_count': 0,
'eid_prefix_map_cache': 2,
'rejected_eid_prefix_due_to_limit': 0,
'times_signal_suppresion_turned_on': 0,
'time_since_last_signal_suppressed': 'never',
'negative_entries_map_cache': 2,
'total_rlocs_map_cache': 0,
'average_rlocs_per_eid_prefix': 0,
'policy_active_entries': 0
},
'forwarding': {
'data_signals': {
'processed': 0,
'dropped': 0
},
'reachability_reports': {
'count': 0,
'dropped': 0
},
'smr_signals': {
'dropped': 0
}
},
'itr_map_resolvers': {
'44.44.44.44': {
'last_reply': 'never',
'metric': 1,
'req_sent': 0,
'positive': 0,
'negative': 0,
'no_reply': 0,
'avgrtt': {
'5_sec': 0,
'1_min': 0,
'5_min': 0
}
},
'100.100.100.100': {
'last_reply': 'never',
'metric': 1,
'req_sent': 0,
'positive': 0,
'negative': 0,
'no_reply': 0,
'avgrtt': {
'5_sec': 0,
'1_min': 0,
'5_min': 0
}
}
},
'etr_map_servers': {
'44.44.44.44': {
'avgrtt': {
'5_sec': 0,
'1_min': 0,
'5_min': 0
}
},
'100.100.100.100': {
'avgrtt': {
'5_sec': 0,
'1_min': 0,
'5_min': 0
}
}
},
'rloc_statistics': {
'last_cleared': 'never',
'control_packets': {
'rtr': {
'map_requests_forwarded': 0,
'map_notifies_forwarded': 0
},
'ddt': {
'map_requests': {
'in': 0,
'out': 0
},
'map_referrals': {
'in': 0,
'out': 0
}
}
},
'errors': {
'map_request_format': 0,
'map_reply_format': 0,
'map_referral': 0
}
},
'misc_statistics': {
'invalid': {
'ip_version_drops': 0,
'ip_header_drops': 0,
'ip_proto_field_drops': 0,
'packet_size_drops': 0,
'lisp_control_port_drops': 0,
'lisp_checksum_drops': 0
},
'unsupported_lisp_packet_drops': 0,
'unknown_packet_drops': 0
}
}
}
}
}
}
|
210180c860f5315fd69c1014ed015f9458612163
|
9c0224ff346cd8ad79f754756a09c93eefc4d463
|
/padatious/entity_edge.py
|
83de6b8c2bda620beb53e9514210a2ae33b7c9e3
|
[
"Apache-2.0"
] |
permissive
|
MycroftAI/padatious
|
243d2fc085e2563090b2f2d7d7ae6fd3c3fae467
|
9d5a22f2b42f6fb1162e3b6143dfdd6ad9725fc9
|
refs/heads/dev
| 2021-11-30T09:49:08.144447
| 2021-11-10T00:47:30
| 2021-11-10T00:47:30
| 98,936,064
| 169
| 51
|
Apache-2.0
| 2021-11-10T00:47:31
| 2017-07-31T22:41:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
entity_edge.py
|
# Copyright 2017 Mycroft AI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fann2 import libfann as fann
from padatious.id_manager import IdManager
from padatious.util import StrEnum, resolve_conflicts
class Ids(StrEnum):
end = ':end'
class EntityEdge(object):
"""
Represents the left or right side of an entity (a PosIntent)
Args:
direction (int): -1 for left and +1 for right
token (str): token to attach to (something like {word})
intent_name (str): name of parent intent
"""
def __init__(self, direction, token, intent_name):
self.ids = IdManager(Ids)
self.intent_name = intent_name
self.token = token
self.dir = direction
self.net = None
def get_end(self, sent):
return len(sent) if self.dir > 0 else -1
def vectorize(self, sent, pos):
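        # Weight each known token by the inverse of its distance from the
        # entity position, so nearer context contributes more.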
unknown = 0
vector = self.ids.vector()
end_pos = self.get_end(sent)
for i in range(pos + self.dir, end_pos, self.dir):
if sent[i] in self.ids:
self.ids.assign(vector, sent[i], 1.0 / abs(i - pos))
else:
unknown += 1
self.ids.assign(vector, Ids.end, 1.0 / abs(end_pos - pos))
return vector
def match(self, sent, pos):
return self.net.run(self.vectorize(sent, pos))[0]
def configure_net(self):
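        # A small FANN net: one input per known token id, three hidden
        # units, and a single output giving the match confidence.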
layers = [len(self.ids), 3, 1]
self.net = fann.neural_net()
self.net.create_standard_array(layers)
self.net.set_activation_function_hidden(fann.SIGMOID_SYMMETRIC_STEPWISE)
self.net.set_activation_function_output(fann.SIGMOID_STEPWISE)
self.net.set_train_stop_function(fann.STOPFUNC_BIT)
self.net.set_bit_fail_limit(0.1)
def save(self, prefix):
prefix += '.' + {-1: 'l', +1: 'r'}[self.dir]
self.net.save(str(prefix + '.net')) # Must have str()
self.ids.save(prefix)
def load(self, prefix):
prefix += '.' + {-1: 'l', +1: 'r'}[self.dir]
self.net = fann.neural_net()
if not self.net.create_from_file(str(prefix + '.net')): # Must have str()
raise FileNotFoundError(str(prefix + '.net'))
self.ids.load(prefix)
def train(self, train_data):
for sent in train_data.my_sents(self.intent_name):
if self.token in sent:
for i in range(sent.index(self.token) + self.dir,
self.get_end(sent), self.dir):
if sent[i][0] != '{':
self.ids.add_token(sent[i])
inputs, outputs = [], []
def pollute(sent, i, out_val):
"""Simulates multiple token words in adjacent entities"""
for j, check_token in enumerate(sent):
d = j - i
if int(d > 0) - int(d < 0) == self.dir and check_token.startswith('{'):
for pol_len in range(1, 4):
s = sent[:j] + [':0'] * pol_len + sent[j + 1:]
p = i + (pol_len - 1) * int(self.dir < 0)
inputs.append(self.vectorize(s, p))
outputs.append([out_val])
def add_sents(sents, out_fn):
for sent in sents:
for i, token in enumerate(sent):
out_val = out_fn(token)
inputs.append(self.vectorize(sent, i))
outputs.append([out_val])
if out_val == 1.0:
pollute(sent, i, 1.0)
add_sents(train_data.my_sents(self.intent_name), lambda x: float(x == self.token))
add_sents(train_data.other_sents(self.intent_name), lambda x: 0.0)
inputs, outputs = resolve_conflicts(inputs, outputs)
data = fann.training_data()
data.set_train_data(inputs, outputs)
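        # Retrain with fresh random weights (up to 10 attempts) until no
        # training sample exceeds the bit-fail limit.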
for _ in range(10):
self.configure_net()
self.net.train_on_data(data, 1000, 0, 0)
self.net.test_data(data)
if self.net.get_bit_fail() == 0:
break
|
e6abda255a03ff6e968e38fcd0f9885930d2581c
|
da1500e0d3040497614d5327d2461a22e934b4d8
|
/cobalt/black_box_tests/tests/http_cache.py
|
43958ec63f5c8602066e58a998892d0a9b318f97
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
youtube/cobalt
|
34085fc93972ebe05b988b15410e99845efd1968
|
acefdaaadd3ef46f10f63d1acae2259e4024d383
|
refs/heads/main
| 2023-09-01T13:09:47.225174
| 2023-09-01T08:54:54
| 2023-09-01T08:54:54
| 50,049,789
| 169
| 80
|
BSD-3-Clause
| 2023-09-14T21:50:50
| 2016-01-20T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
http_cache.py
|
# Copyright 2022 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests if Cobalt properly caches resources that were previously loaded."""
import os
from six.moves import SimpleHTTPServer
from six.moves.urllib.parse import urlparse
import time
from cobalt.black_box_tests import black_box_tests
from cobalt.black_box_tests.threaded_web_server import MakeRequestHandlerClass
from cobalt.black_box_tests.threaded_web_server import ThreadedWebServer
# The base path of the requested assets is the parent directory.
_SERVER_ROOT_PATH = os.path.join(os.path.dirname(__file__), os.pardir)
class DelayedHttpRequestHandler(MakeRequestHandlerClass(_SERVER_ROOT_PATH)):
"""Handles HTTP requests but adds a delay before serving a response."""
def do_GET(self): # pylint: disable=invalid-name
"""Handles HTTP GET requests for resources."""
parsed_path = urlparse(self.path)
if parsed_path.path.startswith('/testdata/http_cache_test_resources/'):
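      # Delay uncached test resources so the page's JS can tell a cache
      # hit (fast) apart from a fresh network fetch (slow).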
time.sleep(3)
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
class HttpCacheTest(black_box_tests.BlackBoxTestCase):
"""Load resources, then reload the page and verify."""
def test_http_cache(self):
with ThreadedWebServer(
binding_address=self.GetBindingAddress(),
handler=DelayedHttpRequestHandler) as server:
url = server.GetURL(file_name='testdata/http_cache.html')
with self.CreateCobaltRunner(url=url) as runner:
self.assertTrue(runner.JSTestsSucceeded())
|
ab1ed592305e57fe256ee1c27d75e3508fd329d6
|
e7f38fa0aea00207bc94c542524bcf36a540f902
|
/tests/test_readme.py
|
203a32d31e10477d9f19cb65d9e5092300de2dc5
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lettucecfd/lettuce
|
c5cf4c27ee66022e9b6d94ff0380c6190ec59fae
|
63be9197efeb88b843fd349b9e8d1b13d1634c1c
|
refs/heads/master
| 2023-04-28T15:48:42.200866
| 2023-04-13T12:54:10
| 2023-04-13T12:54:10
| 185,307,088
| 106
| 22
|
MIT
| 2023-04-18T07:38:28
| 2019-05-07T02:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 757
|
py
|
test_readme.py
|
def test_readme():
"""Whenever you have to change this test, the example in README.rst has to change, too.
Note differences in the device + number of steps.
"""
import torch
from lettuce import BGKCollision, StandardStreaming, Lattice, D2Q9, TaylorGreenVortex2D, Simulation
device = "cpu"
dtype = torch.float32
lattice = Lattice(D2Q9, device, dtype)
flow = TaylorGreenVortex2D(resolution=256, reynolds_number=10, mach_number=0.05, lattice=lattice)
collision = BGKCollision(lattice, tau=flow.units.relaxation_parameter_lu)
streaming = StandardStreaming(lattice)
simulation = Simulation(flow, lattice, collision, streaming)
mlups = simulation.step(num_steps=1)
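    # MLUPS = million lattice updates per second, the usual lattice
    # Boltzmann performance metric.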
print("Performance in MLUPS:", mlups)
|
ddfdd5e39911c6a9873dc1d3de9f35383346a160
|
2871a5c3d1e885ee72332dbd8ff2c015dbcb1200
|
/CLIFF/common/constants.py
|
f6269e94e5d70f37f7c26ba1ddc904f988df961c
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
huawei-noah/noah-research
|
297476299ad040552e44656541858145de72d141
|
82c49c36b76987a46dec8479793f7cf0150839c6
|
refs/heads/master
| 2023-08-16T19:29:25.439701
| 2023-08-14T03:11:49
| 2023-08-14T03:11:49
| 272,853,727
| 816
| 171
| null | 2023-09-12T01:28:36
| 2020-06-17T01:53:20
|
Python
|
UTF-8
|
Python
| false
| false
| 849
|
py
|
constants.py
|
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the terms of the MIT license.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.
import os
from os.path import join
curr_dir = os.path.dirname(os.path.abspath(__file__))
SMPL_MEAN_PARAMS = join(curr_dir, '../data/smpl_mean_params.npz')
SMPL_MODEL_DIR = join(curr_dir, '../data')
CROP_IMG_HEIGHT = 256
CROP_IMG_WIDTH = 192
CROP_ASPECT_RATIO = CROP_IMG_HEIGHT / float(CROP_IMG_WIDTH)
# Mean and standard deviation for normalizing input image
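# (the standard ImageNet normalization statistics)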
IMG_NORM_MEAN = [0.485, 0.456, 0.406]
IMG_NORM_STD = [0.229, 0.224, 0.225]
|
0467a35598b335e02f30e5e13b010409fb141c22
|
933728f0079ec76ffd4a5c15d9cab8c9639a3197
|
/tests/performance/performance_test.py
|
8fe5a502731df4b64170646bd448e9a1add9f7d5
|
[
"MIT"
] |
permissive
|
gavincyi/LightMatchingEngine
|
344636d4414b5d6455a810dc5305285ef7f866eb
|
5e210a809e62a802107831d0ca12498ed32d4717
|
refs/heads/develop
| 2022-02-07T14:15:36.998128
| 2021-04-04T14:28:15
| 2021-04-04T14:28:15
| 81,263,233
| 310
| 90
|
MIT
| 2022-09-06T23:01:58
| 2017-02-07T22:53:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,326
|
py
|
performance_test.py
|
"""Performance test for light matching engine.
Usage:
perf-test-light-matching-engine --freq <freq> [options]
Options:
-h --help Show help.
--freq=<freq> Order frequency per second. [Default: 10]
--num-orders=<num_orders> Number of orders. [Default: 100]
--add-order-prob=<prob> Add order probability. [Default: 0.6]
--mean-price=<mean-price> Mean price in the standard normal distribution.
[Default: 100]
    --std-price=<std-price>       Standard deviation of the price in the standard
                                  normal distribution. [Default: 0.5]
--tick-size=<tick-size> Tick size. [Default: 0.1]
--gamma-quantity=<gamma> Gamma value in the gamma distribution for the
order quantity. [Default: 2]
"""
from docopt import docopt
import logging
from math import log
from random import uniform, seed
from time import sleep, time
from tabulate import tabulate
from tqdm import tqdm
import numpy as np
import pandas as pd
from lightmatchingengine.lightmatchingengine import (
LightMatchingEngine, Side)
LOGGER = logging.getLogger(__name__)
class Timer:
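    """Context manager that measures the wall-clock time of its block."""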
def __enter__(self):
self.start = time()
return self
def __exit__(self, *args):
self.end = time()
self.interval = self.end - self.start
def run(args):
engine = LightMatchingEngine()
symbol = "EUR/USD"
add_order_prob = float(args['--add-order-prob'])
num_of_orders = int(args['--num-orders'])
gamma_quantity = float(args['--gamma-quantity'])
mean_price = float(args['--mean-price'])
std_price = float(args['--std-price'])
tick_size = float(args['--tick-size'])
freq = float(args['--freq'])
orders = {}
add_statistics = []
cancel_statistics = []
# Initialize random seed
seed(42)
    progress_bar = tqdm(total=num_of_orders)
while num_of_orders > 0:
if uniform(0, 1) <= add_order_prob or len(orders) == 0:
price = np.random.standard_normal() * std_price + mean_price
price = int(price / tick_size) * tick_size
quantity = np.random.gamma(gamma_quantity) + 1
side = Side.BUY if uniform(0, 1) <= 0.5 else Side.SELL
# Add the order
with Timer() as timer:
order, trades = engine.add_order(symbol, price, quantity, side)
LOGGER.debug('Order %s is added at side %s, price %s '
'and quantity %s',
order.order_id, order.side, order.price, order.qty)
# Save the order if there is any quantity left
if order.leaves_qty > 0.0:
orders[order.order_id] = order
# Remove the trades
for trade in trades:
if (trade.order_id != order.order_id and
orders[trade.order_id].leaves_qty < 1e-9):
del orders[trade.order_id]
# Save the statistics
add_statistics.append((order, len(trades), timer))
num_of_orders -= 1
progress_bar.update(1)
else:
index = int(uniform(0, 1) * len(orders))
if index == len(orders):
index -= 1
            order_id = list(orders.keys())[index]
            cancelled_order = orders[order_id]
            with Timer() as timer:
                engine.cancel_order(order_id, cancelled_order.instmt)
            LOGGER.debug('Order %s is deleted', order_id)
            del orders[order_id]
            # Save the statistics
            cancel_statistics.append((cancelled_order, timer))
# Next time = -ln(U) / lambda
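        # Inverse-transform sampling: with U ~ Uniform(0, 1), -ln(U) / lambda
        # is exponentially distributed, i.e. the inter-arrival time of a
        # Poisson order stream with rate `freq` per second.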
sleep(-log(uniform(0, 1)) / freq)
return add_statistics, cancel_statistics
def describe_statistics(add_statistics, cancel_statistics):
add_statistics = pd.DataFrame([
(trade_num, timer.interval * 1e6)
for _, trade_num, timer in add_statistics],
columns=['trade_num', 'interval'])
# Trade statistics
trade_statistics = add_statistics['trade_num'].describe()
LOGGER.info('Trade statistics:\n%s',
tabulate(trade_statistics.to_frame(name='trade'),
tablefmt='pipe'))
cancel_statistics = pd.Series([
timer.interval * 1e6 for _, timer in cancel_statistics],
name='interval')
statistics = pd.concat([
add_statistics['interval'].describe(),
cancel_statistics.describe()],
keys=['add', 'cancel'],
axis=1)
statistics['add (trade > 0)'] = (
add_statistics.loc[
add_statistics['trade_num'] > 0, 'interval'].describe())
percentile_75 = trade_statistics['75%']
statistics['add (trade > %s)' % percentile_75] = (
add_statistics.loc[add_statistics['trade_num'] > percentile_75,
'interval'].describe())
    LOGGER.info('Matching engine latency (microseconds):\n%s',
tabulate(statistics,
headers=statistics.columns,
tablefmt='pipe'))
if __name__ == '__main__':
args = docopt(__doc__, version='1.0.0')
logging.basicConfig(level=logging.INFO)
LOGGER.info('Running the performance benchmark')
add_statistics, cancel_statistics = run(args)
describe_statistics(add_statistics, cancel_statistics)
|
25a94f282accccce08ad47bf9287848bb0f43255
|
359250410360763cd9e1736017b0130d1803600a
|
/wifipumpkin3/modules/wifi/wifiscan.py
|
7609c9f31c437beeddd43a6e217c2d167c9d5f44
|
[
"Apache-2.0"
] |
permissive
|
P0cL4bs/wifipumpkin3
|
268dde11a7395c5cd92cdd90b58d8bb8f090430a
|
5da92d6c329f9c63d44042163edac410cb027758
|
refs/heads/main
| 2023-08-25T13:51:53.004863
| 2023-08-22T00:00:57
| 2023-08-22T00:00:57
| 185,832,947
| 1,540
| 261
|
Apache-2.0
| 2023-05-20T01:35:43
| 2019-05-09T16:15:31
|
Python
|
UTF-8
|
Python
| false
| false
| 10,328
|
py
|
wifiscan.py
|
from wifipumpkin3.core.common.terminal import ModuleUI
from wifipumpkin3.core.config.globalimport import *
from wifipumpkin3.core.utility.printer import (
display_messages,
setcolor,
display_tabulate,
)
from random import randrange
import time, sys
from multiprocessing import Process
from scapy.all import *
from wifipumpkin3.core.common.platforms import Linux
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROBE_REQUEST_TYPE = 0
PROBE_REQUEST_SUBTYPE = 4
DOT11_REQUEST_SUBTYPE = 2
class ModPump(ModuleUI):
"""Scan WiFi networks and detect devices"""
name = "wifiscan"
options = {
"interface": ["wlanx", "Name network interface wireless "],
"timeout": [0, "Time duration of scan network wireless (ex: 0 infinty)"],
}
completions = list(options.keys())
def __init__(self, parse_args=None, root=None):
self.parse_args = parse_args
self.root = root
self.name_module = self.name
self.whitelist = ["00:00:00:00:00:00", "ff:ff:ff:ff:ff:ff"]
self.aps = {}
self.clients = {}
self.table_headers_wifi = [
"CH",
"SSID",
"BSSID",
"RSSI",
"Privacy",
]
self.table_headers_STA = ["BSSID", "STATION", "PWR", "Frames", "Probe"]
self.table_output = []
super(ModPump, self).__init__(parse_args=self.parse_args, root=self.root)
def do_run(self, args):
"""execute module"""
print(
display_messages(
"setting interface: {} monitor momde".format(
setcolor(self.options.get("interface")[0], color="green")
),
info=True,
)
)
self.set_monitor_mode("monitor")
print(display_messages("starting Channel Hopping ", info=True))
self.p = Process(
target=self.channel_hopper, args=(self.options.get("interface")[0],)
)
self.p.daemon = True
self.p.start()
print(display_messages("sniffing... ", info=True))
sniff(
iface=self.options.get("interface")[0],
prn=self.sniffAp,
timeout=None
if int(self.options.get("timeout")[0]) == 0
else int(self.options.get("timeout")[0]),
)
self.p.terminate()
self.set_monitor_mode()
print(display_messages("thread sniffing successfully stopped", info=True))
def channel_hopper(self, interface):
while True:
try:
channel = randrange(1, 11)
os.system("iw dev %s set channel %d" % (interface, channel))
time.sleep(1)
except KeyboardInterrupt:
break
def handle_probe(self, pkt):
if (
pkt.haslayer(Dot11ProbeReq)
and "\x00".encode() not in pkt[Dot11ProbeReq].info
):
essid = pkt[Dot11ProbeReq].info
try:
essid = pkt[Dot11ProbeReq].info.decode('utf8')
except UnicodeDecodeError:
try:
essid = pkt[Dot11ProbeReq].info.decode('unicode-escape')
except Exception:
try:
essid = pkt[Dot11ProbeReq].info.decode('latin1')
except Exception:
essid = "Not decoded ssid"
else:
essid = "Hidden SSID"
client = pkt[Dot11].addr2
if client in self.whitelist or essid in self.whitelist:
return
if client not in self.clients:
self.clients[client] = []
if essid not in self.clients[client]:
self.clients[client].append(essid)
self.aps["(not associated)"] = {}
self.aps["(not associated)"]["STA"] = {
"Frames": 1,
"BSSID": "(not associated)",
"Station": client,
"Probe": essid,
"PWR": self.getRSSIPacketClients(pkt),
}
def getRSSIPacket(self, pkt):
rssi = -100
if pkt.haslayer(Dot11):
if pkt.type == 0 and pkt.subtype == 8:
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
rssi = pkt[RadioTap].dBm_AntSignal
return rssi
def getRSSIPacketClients(self, pkt):
rssi = -100
if pkt.haslayer(RadioTap):
rssi = pkt[RadioTap].dBm_AntSignal
return rssi
def getStationTrackFrame(self, pkt):
if (
pkt.haslayer(Dot11)
and pkt.getlayer(Dot11).type == DOT11_REQUEST_SUBTYPE
and not pkt.haslayer(EAPOL)
):
sender = pkt.getlayer(Dot11).addr2
receiver = pkt.getlayer(Dot11).addr1
if sender in self.aps.keys():
if Linux.check_is_mac(receiver):
                    if receiver not in self.whitelist:
self.aps[sender]["STA"] = {
"Frames": 1,
"BSSID": sender,
"Station": receiver,
"Probe": "",
"PWR": self.getRSSIPacketClients(pkt),
}
if "STA" in self.aps[sender]:
self.aps[sender]["STA"]["Frames"] += 1
self.aps[sender]["STA"]["PWR"] = self.getRSSIPacketClients(pkt)
elif receiver in self.aps.keys():
if Linux.check_is_mac(sender):
                    if sender not in self.whitelist:
self.aps[receiver]["STA"] = {
"Frames": 1,
"BSSID": receiver,
"Station": sender,
"Probe": "",
"PWR": self.getRSSIPacketClients(pkt),
}
if "STA" in self.aps[receiver]:
self.aps[receiver]["STA"]["Frames"] += 1
self.aps[receiver]["STA"]["PWR"] = self.getRSSIPacketClients(
pkt
)
def handle_beacon(self, pkt):
if not pkt.haslayer(Dot11Elt):
return
essid = (
pkt[Dot11Elt].info
if "\x00".encode() not in pkt[Dot11Elt].info and pkt[Dot11Elt].info != ""
else "Hidden SSID"
)
bssid = pkt[Dot11].addr3
client = pkt[Dot11].addr2
if (
client in self.whitelist
or essid in self.whitelist
or bssid in self.whitelist
):
return
try:
channel = int(ord(pkt[Dot11Elt:3].info))
except:
channel = 0
rssi = self.getRSSIPacket(pkt)
p = pkt[Dot11Elt]
capability = p.sprintf(
"{Dot11Beacon:%Dot11Beacon.cap%}\
{Dot11ProbeResp:%Dot11ProbeResp.cap%}"
)
crypto = set()
while isinstance(p, Dot11Elt):
if p.ID == 48:
crypto.add("WPA2")
elif p.ID == 221 and p.info.startswith("\x00P\xf2\x01\x01\x00".encode()):
crypto.add("WPA")
p = p.payload
if not crypto:
if "privacy" in capability:
crypto.add("WEP")
else:
crypto.add("OPN")
enc = "/".join(crypto)
self.aps[bssid] = {
"ssid": essid,
"channel": channel,
"capability": capability,
"enc": enc,
"rssi": rssi,
}
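        # Note on the element IDs checked above: ID 48 is the RSN
        # information element (advertised by WPA2/WPA3 networks), while
        # ID 221 is a vendor-specific element; the 00:50:F2 (Microsoft OUI)
        # prefix with type 1 marks the original pre-802.11i WPA element.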
def showDataOutputScan(self):
os.system("clear")
self.table_output = []
self.table_station = []
for bssid, info in self.aps.items():
if not "(not associated)" in bssid:
self.table_output.append(
[info["channel"], info["ssid"], bssid, info["rssi"], info["enc"]]
)
display_tabulate(self.table_headers_wifi, self.table_output)
print("\n")
for bssid, info in self.aps.items():
if "STA" in info:
self.table_station.append(
[
info["STA"]["BSSID"],
info["STA"]["Station"],
info["STA"]["PWR"],
info["STA"]["Frames"],
info["STA"]["Probe"],
]
)
if len(self.table_station) > 0:
display_tabulate(self.table_headers_STA, self.table_station)
print(display_messages("press CTRL+C to stop scanning", info=True))
def sniffAp(self, pkt):
self.getStationTrackFrame(pkt)
if (
pkt.haslayer(Dot11Beacon)
or pkt.haslayer(Dot11ProbeResp)
or pkt.haslayer(Dot11ProbeReq)
):
if pkt.type == PROBE_REQUEST_TYPE and pkt.subtype == PROBE_REQUEST_SUBTYPE:
self.handle_probe(pkt)
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
self.handle_beacon(pkt)
self.showDataOutputScan()
    def set_monitor_mode(self, mode="managed"):  # "managed" is the iwconfig keyword for client mode
if not self.options.get("interface")[0] in Linux.get_interfaces().get("all"):
print(display_messages("the interface not found!", error=True))
sys.exit(1)
os.system("ifconfig {} down".format(self.options.get("interface")[0]))
os.system("iwconfig {} mode {}".format(self.options.get("interface")[0], mode))
os.system("ifconfig {} up".format(self.options.get("interface")[0]))
|
d27d333fa0347cb788ea1ee36e53754f85acf939
|
71970d42295dc3bf7ce6ac2b07cee23e5e9fda1c
|
/roles/splunk_universal_forwarder/molecule/systemd/tests/test_systemd.py
|
c774455477e2623cfa282c17880c979817808390
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
splunk/splunk-ansible
|
bd89e81d4debd40a9a74fdb1f15791a4fa9ed712
|
553e614f4c36ce4d5b429cf26c9ebf3b216b3d70
|
refs/heads/develop
| 2023-08-31T07:30:31.389474
| 2023-08-30T23:43:24
| 2023-08-30T23:43:24
| 148,547,478
| 337
| 209
| null | 2023-09-06T22:08:09
| 2018-09-12T22:02:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,520
|
py
|
test_systemd.py
|
#!/usr/bin/env python
#
# These tests specifically exercise the following:
# - UF version 8.0.3/7.3.2 through rpm installation
# - Systemd-enabled, UF started via `enable bootstrap` command
# - Checks for system unit file for SplunkForwarder
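# A hypothetical way to run this scenario locally (the scenario name comes
# from the molecule/systemd directory; the exact command depends on your
# molecule setup):
#   molecule test -s systemd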
from __future__ import absolute_import
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
SPLUNK_HOME = "/opt/splunkforwarder"
SPLUNK_EXEC = "{}/bin/splunk".format(SPLUNK_HOME)
SPLUNK_USER = SPLUNK_GROUP = "splunk"
def test_splunk_user_group(host):
user = host.user(SPLUNK_USER)
assert user.name == SPLUNK_USER
assert user.group == SPLUNK_GROUP
def test_splunk_installation(host):
d = host.file(SPLUNK_HOME)
assert d.is_directory
assert d.user == SPLUNK_USER
assert d.group == SPLUNK_GROUP
f = host.file(SPLUNK_EXEC)
assert f.is_file
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
def test_splunk_running(host):
output = host.run("{} status".format(SPLUNK_EXEC))
assert "running" in output.stdout
def test_user_seed(host):
f = host.file("{}/etc/system/local/user-seed.conf".format(SPLUNK_HOME))
assert not f.exists
def test_ui_login(host):
f = host.file("{}/etc/.ui_login".format(SPLUNK_HOME))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
def test_splunk_version(host):
f = host.file("{}/etc/splunk.version".format(SPLUNK_HOME))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
hostname = host.check_output("hostname -s")
if "splunk-uf-732-systemd-centos8" in hostname:
assert f.contains("VERSION=7.3.2")
else:
assert f.contains("VERSION=8.0.3")
def test_splunk_pid(host):
f = host.file("{}/var/run/splunk/splunkd.pid".format(SPLUNK_HOME))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
def test_mongod_lock(host):
f = host.file("{}/var/lib/splunk/kvstore/mongo/mongod.lock".format(SPLUNK_HOME))
assert not f.exists
def test_bin_splunk(host):
f = host.file("{}".format(SPLUNK_EXEC))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
def test_splunk_hec_inputs(host):
f = host.file("{}/etc/apps/splunk_httpinput/local/inputs.conf".format(SPLUNK_HOME))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
assert f.contains("[http]")
assert f.contains("disabled = 0")
assert f.contains("[http://splunk_hec_token]")
assert f.contains("token = abcd1234")
def test_inputs_conf(host):
f = host.file("{}/etc/system/local/inputs.conf".format(SPLUNK_HOME))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
assert f.contains("[splunktcp://9997]")
assert f.contains("disabled = 0")
def test_splunk_ports(host):
output = host.run("netstat -tuln")
assert "0.0.0.0:8089" in output.stdout
assert "0.0.0.0:8088" in output.stdout
assert "0.0.0.0:9997" in output.stdout
def test_splunk_hec(host):
output = host.run('curl -k https://localhost:8088/services/collector/event \
-H "Authorization: Splunk abcd1234" -d \'{"event": "helloworld"}\'')
assert "Success" in output.stdout
def test_splunkd(host):
output = host.run("curl -k https://localhost:8089/services/server/info \
-u admin:helloworld")
assert "Splunk" in output.stdout
def test_service(host):
s = host.service('SplunkForwarder')
assert s.is_running
assert s.is_enabled
def test_splunkforwarder_systemd_file(host):
    # Splunk UF 8.0.x unit files no longer include ExecStartPost directives (the 7.3.2 host still has them)
f = host.file('/etc/systemd/system/SplunkForwarder.service')
assert f.is_file
assert f.user == "root"
assert f.group == "root"
hostname = host.check_output("hostname -s")
if "splunk-uf-732-systemd-centos8" in hostname:
assert "ExecStartPost" in f.content_string
else:
assert "ExecStartPost" not in f.content_string
def test_custom_user_prefs(host):
f = host.file("{}/etc/users/admin/user-prefs/local/user-prefs.conf".format(SPLUNK_HOME))
assert f.exists
assert f.user == SPLUNK_USER
assert f.group == SPLUNK_GROUP
assert f.contains("\\[general\\]")
assert f.contains("default_namespace = appboilerplate")
assert f.contains("search_syntax_highlighting = dark")
|
f30f05074cfbfc0c6d775e68ab0b9ce7a085e0b3
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/network/_validators.py
|
ce83c461e78d75662346829ca33e32861f5d0cba
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 37,965
|
py
|
_validators.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
import argparse
import base64
import socket
import os
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.azclierror import ValidationError
from azure.cli.core.commands.validators import validate_tags, get_default_location_from_resource_group
from azure.cli.core.commands.template_create import get_folded_parameter_validator
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.commands.validators import validate_parameter_set
from azure.cli.core.profiles import ResourceType
logger = get_logger(__name__)
def _resolve_api_version(rcf, resource_provider_namespace, parent_resource_path, resource_type):
"""
This is copied from src/azure-cli/azure/cli/command_modules/resource/custom.py in Azure/azure-cli
"""
from azure.cli.core.parser import IncorrectUsageError
provider = rcf.providers.get(resource_provider_namespace)
# If available, we will use parent resource's api-version
resource_type_str = (parent_resource_path.split('/')[0] if parent_resource_path else resource_type)
rt = [t for t in provider.resource_types if t.resource_type.lower() == resource_type_str.lower()]
if not rt:
raise IncorrectUsageError('Resource type {} not found.'.format(resource_type_str))
if len(rt) == 1 and rt[0].api_versions:
npv = [v for v in rt[0].api_versions if 'preview' not in v.lower()]
return npv[0] if npv else rt[0].api_versions[0]
raise IncorrectUsageError(
'API version is required and could not be resolved for resource {}'.format(resource_type))
def get_vnet_validator(dest):
from msrestazure.tools import is_valid_resource_id, resource_id
def _validate_vnet_name_or_id(cmd, namespace):
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK_DNS)
subscription_id = get_subscription_id(cmd.cli_ctx)
resource_group = namespace.resource_group_name
names_or_ids = getattr(namespace, dest)
ids = []
if names_or_ids == [''] or not names_or_ids:
return
for val in names_or_ids:
if not is_valid_resource_id(val):
val = resource_id(
subscription=subscription_id,
resource_group=resource_group,
namespace='Microsoft.Network', type='virtualNetworks',
name=val
)
ids.append(SubResource(id=val))
setattr(namespace, dest, ids)
return _validate_vnet_name_or_id
def _validate_vpn_gateway_generation(namespace):
if namespace.gateway_type != 'Vpn' and namespace.vpn_gateway_generation:
raise CLIError('vpn_gateway_generation should not be provided if gateway_type is not Vpn.')
def validate_ddos_name_or_id(cmd, namespace):
if namespace.ddos_protection_plan:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(namespace.ddos_protection_plan):
namespace.ddos_protection_plan = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network', type='ddosProtectionPlans',
name=namespace.ddos_protection_plan
)
# pylint: disable=inconsistent-return-statements
def dns_zone_name_type(value):
if value:
return value[:-1] if value[-1] == '.' else value
def _generate_ag_subproperty_id(cli_ctx, namespace, child_type, child_name, subscription=None):
from msrestazure.tools import resource_id
return resource_id(
subscription=subscription or get_subscription_id(cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=namespace.application_gateway_name,
child_type_1=child_type,
child_name_1=child_name)
def _generate_lb_subproperty_id(cli_ctx, namespace, child_type, child_name, subscription=None):
from msrestazure.tools import resource_id
return resource_id(
subscription=subscription or get_subscription_id(cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='loadBalancers',
name=namespace.load_balancer_name,
child_type_1=child_type,
child_name_1=child_name)
def _generate_lb_id_list_from_names_or_ids(cli_ctx, namespace, prop, child_type):
from msrestazure.tools import is_valid_resource_id
raw = getattr(namespace, prop)
if not raw:
return
raw = raw if isinstance(raw, list) else [raw]
result = []
for item in raw:
if is_valid_resource_id(item):
result.append({'id': item})
else:
if not namespace.load_balancer_name:
raise CLIError('Unable to process {}. Please supply a well-formed ID or '
'--lb-name.'.format(item))
result.append({'id': _generate_lb_subproperty_id(
cli_ctx, namespace, child_type, item)})
setattr(namespace, prop, result)
def validate_address_pool_id_list(cmd, namespace):
_generate_lb_id_list_from_names_or_ids(
cmd.cli_ctx, namespace, 'load_balancer_backend_address_pool_ids', 'backendAddressPools')
def validate_address_pool_name_or_id(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
address_pool = namespace.backend_address_pool
lb_name = namespace.load_balancer_name
gateway_name = namespace.application_gateway_name
usage_error = CLIError('usage error: --address-pool ID | --lb-name NAME --address-pool NAME '
'| --gateway-name NAME --address-pool NAME')
if is_valid_resource_id(address_pool):
if lb_name or gateway_name:
raise usage_error
parts = parse_resource_id(address_pool)
if parts['type'] == 'loadBalancers':
namespace.load_balancer_name = parts['name']
elif parts['type'] == 'applicationGateways':
namespace.application_gateway_name = parts['name']
else:
raise usage_error
else:
if bool(lb_name) == bool(gateway_name):
raise usage_error
if lb_name:
namespace.backend_address_pool = _generate_lb_subproperty_id(
cmd.cli_ctx, namespace, 'backendAddressPools', address_pool)
elif gateway_name:
namespace.backend_address_pool = _generate_ag_subproperty_id(
cmd.cli_ctx, namespace, 'backendAddressPools', address_pool)
def validate_address_prefixes(namespace):
if namespace.subnet_type != 'new':
validate_parameter_set(namespace,
required=[],
forbidden=['subnet_address_prefix', 'vnet_address_prefix'],
description='existing subnet')
def read_base_64_file(filename):
with open(filename, 'rb') as f:
contents = f.read()
base64_data = base64.b64encode(contents)
try:
return base64_data.decode('utf-8')
except UnicodeDecodeError:
return str(base64_data)
def validate_ssl_cert(namespace):
params = [namespace.cert_data, namespace.cert_password]
if all(not x for x in params) and not namespace.key_vault_secret_id:
# no cert supplied -- use HTTP
if not namespace.frontend_port:
namespace.frontend_port = 80
else:
if namespace.key_vault_secret_id:
return
# cert supplied -- use HTTPS
        if not namespace.cert_data:
            raise CLIError(
                None, 'To use an SSL certificate, you must specify the certificate file.')
# extract the certificate data from the provided file
namespace.cert_data = read_base_64_file(namespace.cert_data)
try:
# change default to frontend port 443 for https
if not namespace.frontend_port:
namespace.frontend_port = 443
except AttributeError:
# app-gateway ssl-cert create does not have these fields and that is okay
pass
def validate_dns_record_type(namespace):
tokens = namespace.command.split(' ')
types = ['a', 'aaaa', 'caa', 'cname', 'ds', 'mx', 'ns', 'ptr', 'soa', 'srv', 'tlsa', 'txt']
for token in tokens:
if token in types:
if hasattr(namespace, 'record_type'):
namespace.record_type = token
else:
namespace.record_set_type = token
return
def validate_user_assigned_identity(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.user_assigned_identity and not is_valid_resource_id(namespace.user_assigned_identity):
namespace.user_assigned_identity = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.ManagedIdentity',
type='userAssignedIdentities',
name=namespace.user_assigned_identity
)
def validate_waf_policy(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.firewall_policy and not is_valid_resource_id(namespace.firewall_policy):
namespace.firewall_policy = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='ApplicationGatewayWebApplicationFirewallPolicies',
name=namespace.firewall_policy
)
def validate_inbound_nat_rule_id_list(cmd, namespace):
_generate_lb_id_list_from_names_or_ids(
cmd.cli_ctx, namespace, 'load_balancer_inbound_nat_rule_ids', 'inboundNatRules')
def validate_inbound_nat_rule_name_or_id(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
rule_name = namespace.inbound_nat_rule
lb_name = namespace.load_balancer_name
if is_valid_resource_id(rule_name):
if lb_name:
raise CLIError('Please omit --lb-name when specifying an inbound NAT rule ID.')
else:
if not lb_name:
raise CLIError('Please specify --lb-name when specifying an inbound NAT rule name.')
namespace.inbound_nat_rule = _generate_lb_subproperty_id(
cmd.cli_ctx, namespace, 'inboundNatRules', rule_name)
def validate_ip_tags(namespace):
""" Extracts multiple space-separated tags in TYPE=VALUE format """
if namespace.ip_tags:
ip_tags = []
for item in namespace.ip_tags:
tag_type, tag_value = item.split('=', 1)
ip_tags.append({"ip_tag_type": tag_type, "tag": tag_value})
namespace.ip_tags = ip_tags
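# For example (illustrative values), `--ip-tags RoutingPreference=Internet`
# is parsed into [{"ip_tag_type": "RoutingPreference", "tag": "Internet"}].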
def validate_frontend_ip_configs(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
if namespace.frontend_ip_configurations:
config_ids = []
for item in namespace.frontend_ip_configurations:
if not is_valid_resource_id(item):
config_ids.append(_generate_lb_subproperty_id(
cmd.cli_ctx, namespace, 'frontendIpConfigurations', item))
else:
config_ids.append(item)
namespace.frontend_ip_configurations = config_ids
def validate_local_gateway(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.gateway_default_site and not is_valid_resource_id(namespace.gateway_default_site):
namespace.gateway_default_site = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
name=namespace.gateway_default_site,
namespace='Microsoft.Network',
type='localNetworkGateways')
def validate_metadata(namespace):
if namespace.metadata:
namespace.metadata = dict(x.split('=', 1) for x in namespace.metadata)
def validate_peering_type(namespace):
if namespace.peering_type and namespace.peering_type == 'MicrosoftPeering':
if not namespace.advertised_public_prefixes:
raise CLIError(
'missing required MicrosoftPeering parameter --advertised-public-prefixes')
def validate_public_ip_prefix(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.public_ip_prefix and not is_valid_resource_id(namespace.public_ip_prefix):
namespace.public_ip_prefix = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
name=namespace.public_ip_prefix,
namespace='Microsoft.Network',
type='publicIPPrefixes')
def validate_nat_gateway(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.nat_gateway and not is_valid_resource_id(namespace.nat_gateway):
namespace.nat_gateway = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
name=namespace.nat_gateway,
namespace='Microsoft.Network',
type='natGateways')
def validate_private_ip_address(namespace):
if namespace.private_ip_address and hasattr(namespace, 'private_ip_address_allocation'):
namespace.private_ip_address_allocation = 'static'
def get_public_ip_validator(has_type_field=False, allow_none=False, allow_new=False,
default_none=False):
""" Retrieves a validator for public IP address. Accepting all defaults will perform a check
for an existing name or ID with no ARM-required -type parameter. """
from msrestazure.tools import is_valid_resource_id, resource_id
def simple_validator(cmd, namespace):
if namespace.public_ip_address:
is_list = isinstance(namespace.public_ip_address, list)
def _validate_name_or_id(public_ip):
# determine if public_ip_address is name or ID
is_id = is_valid_resource_id(public_ip)
return public_ip if is_id else resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='publicIPAddresses',
name=public_ip)
if is_list:
for i, public_ip in enumerate(namespace.public_ip_address):
namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
else:
namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)
def complex_validator_with_type(cmd, namespace):
get_folded_parameter_validator(
'public_ip_address', 'Microsoft.Network/publicIPAddresses', '--public-ip-address',
allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
return complex_validator_with_type if has_type_field else simple_validator
def get_subnet_validator(has_type_field=False, allow_none=False, allow_new=False,
default_none=False):
from msrestazure.tools import is_valid_resource_id, resource_id
def simple_validator(cmd, namespace):
if namespace.virtual_network_name is None and namespace.subnet is None:
return
if namespace.subnet == '':
return
usage_error = ValueError('incorrect usage: ( --subnet ID | --subnet NAME --vnet-name NAME)')
# error if vnet-name is provided without subnet
if namespace.virtual_network_name and not namespace.subnet:
raise usage_error
# determine if subnet is name or ID
is_id = is_valid_resource_id(namespace.subnet)
# error if vnet-name is provided along with a subnet ID
if is_id and namespace.virtual_network_name:
raise usage_error
if not is_id and not namespace.virtual_network_name:
raise usage_error
if not is_id:
namespace.subnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=namespace.virtual_network_name,
child_type_1='subnets',
child_name_1=namespace.subnet)
def complex_validator_with_type(cmd, namespace):
get_folded_parameter_validator(
'subnet', 'subnets', '--subnet',
'virtual_network_name', 'Microsoft.Network/virtualNetworks', '--vnet-name',
allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
return complex_validator_with_type if has_type_field else simple_validator
def get_nsg_validator(has_type_field=False, allow_none=False, allow_new=False, default_none=False):
from msrestazure.tools import is_valid_resource_id, resource_id
def simple_validator(cmd, namespace):
if namespace.network_security_group:
# determine if network_security_group is name or ID
is_id = is_valid_resource_id(namespace.network_security_group)
if not is_id:
namespace.network_security_group = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='networkSecurityGroups',
name=namespace.network_security_group)
def complex_validator_with_type(cmd, namespace):
get_folded_parameter_validator(
'network_security_group', 'Microsoft.Network/networkSecurityGroups', '--nsg',
allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
return complex_validator_with_type if has_type_field else simple_validator
def validate_service_endpoint_policy(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.service_endpoint_policy:
policy_ids = []
for policy in namespace.service_endpoint_policy:
if not is_valid_resource_id(policy):
policy = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
name=policy,
namespace='Microsoft.Network',
type='serviceEndpointPolicies')
policy_ids.append(policy)
namespace.service_endpoint_policy = policy_ids
def get_servers_validator(camel_case=False):
def validate_servers(namespace):
servers = []
for item in namespace.servers if namespace.servers else []:
try:
socket.inet_aton(item) # pylint:disable=no-member
servers.append({'ipAddress' if camel_case else 'ip_address': item})
except socket.error: # pylint:disable=no-member
servers.append({'fqdn': item})
namespace.servers = servers if servers else None
return validate_servers
def validate_private_dns_zone(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.private_dns_zone and not is_valid_resource_id(namespace.private_dns_zone):
namespace.private_dns_zone = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
name=namespace.private_dns_zone,
namespace='Microsoft.Network',
type='privateDnsZones')
def get_virtual_network_validator(has_type_field=False, allow_none=False, allow_new=False,
default_none=False):
from msrestazure.tools import is_valid_resource_id, resource_id
def simple_validator(cmd, namespace):
if namespace.virtual_network:
# determine if vnet is name or ID
is_id = is_valid_resource_id(namespace.virtual_network)
if not is_id:
namespace.virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=namespace.virtual_network)
def complex_validator_with_type(cmd, namespace):
get_folded_parameter_validator(
'virtual_network', 'Microsoft.Network/virtualNetworks', '--vnet',
allow_none=allow_none, allow_new=allow_new, default_none=default_none)(cmd, namespace)
return complex_validator_with_type if has_type_field else simple_validator
# COMMAND NAMESPACE VALIDATORS
def process_ag_create_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
get_servers_validator(camel_case=True)(namespace)
# process folded parameters
if namespace.subnet or namespace.virtual_network_name:
get_subnet_validator(has_type_field=True, allow_new=True)(cmd, namespace)
validate_address_prefixes(namespace)
if namespace.public_ip_address:
get_public_ip_validator(
has_type_field=True, allow_none=True, allow_new=True, default_none=True)(cmd, namespace)
validate_ssl_cert(namespace)
validate_tags(namespace)
validate_custom_error_pages(namespace)
validate_waf_policy(cmd, namespace)
validate_user_assigned_identity(cmd, namespace)
def process_lb_create_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
validate_tags(namespace)
if namespace.subnet and namespace.public_ip_address:
raise ValueError(
'incorrect usage: --subnet NAME --vnet-name NAME | '
'--subnet ID | --public-ip-address NAME_OR_ID')
if namespace.subnet:
# validation for an internal load balancer
get_subnet_validator(
has_type_field=True, allow_new=True, allow_none=True, default_none=True)(cmd, namespace)
namespace.public_ip_address_type = None
namespace.public_ip_address = None
else:
# validation for internet facing load balancer
get_public_ip_validator(has_type_field=True, allow_none=True, allow_new=True)(cmd, namespace)
if namespace.public_ip_dns_name and namespace.public_ip_address_type != 'new':
raise CLIError(
'specify --public-ip-dns-name only if creating a new public IP address.')
namespace.subnet_type = None
namespace.subnet = None
namespace.virtual_network_name = None
def process_cross_region_lb_create_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
validate_tags(namespace)
# validation for internet facing load balancer
get_public_ip_validator(has_type_field=True, allow_none=True, allow_new=True)(cmd, namespace)
if namespace.public_ip_dns_name and namespace.public_ip_address_type != 'new':
raise CLIError(
'specify --public-ip-dns-name only if creating a new public IP address.')
def process_public_ip_create_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
validate_public_ip_prefix(cmd, namespace)
validate_ip_tags(namespace)
validate_tags(namespace)
_inform_coming_breaking_change_for_public_ip(namespace)
def _inform_coming_breaking_change_for_public_ip(namespace):
if namespace.sku == 'Standard' and not namespace.zone:
logger.warning('[Coming breaking change] In the coming release, the default behavior will be changed as follows'
' when sku is Standard and zone is not provided:'
' For zonal regions, you will get a zone-redundant IP indicated by zones:["1","2","3"];'
' For non-zonal regions, you will get a non zone-redundant IP indicated by zones:null.')
def _validate_cert(namespace, param_name):
attr = getattr(namespace, param_name)
if attr and os.path.isfile(attr):
setattr(namespace, param_name, read_base_64_file(attr))
def process_vpn_connection_create_namespace(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
get_default_location_from_resource_group(cmd, namespace)
validate_tags(namespace)
args = [a for a in [namespace.express_route_circuit2,
namespace.local_gateway2,
namespace.vnet_gateway2]
if a]
if len(args) != 1:
raise ValueError('usage error: --vnet-gateway2 NAME_OR_ID | --local-gateway2 NAME_OR_ID '
'| --express-route-circuit2 NAME_OR_ID')
def _validate_name_or_id(value, resource_type):
if not is_valid_resource_id(value):
subscription = getattr(namespace, 'subscription', get_subscription_id(cmd.cli_ctx))
return resource_id(
subscription=subscription,
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type=resource_type,
name=value)
return value
if (namespace.local_gateway2 or namespace.vnet_gateway2) and not namespace.shared_key:
raise CLIError('--shared-key is required for VNET-to-VNET or Site-to-Site connections.')
if namespace.express_route_circuit2 and namespace.shared_key:
raise CLIError('--shared-key cannot be used with an ExpressRoute connection.')
namespace.vnet_gateway1 = \
_validate_name_or_id(namespace.vnet_gateway1, 'virtualNetworkGateways')
if namespace.express_route_circuit2:
namespace.express_route_circuit2 = \
_validate_name_or_id(
namespace.express_route_circuit2, 'expressRouteCircuits')
namespace.connection_type = 'ExpressRoute'
elif namespace.local_gateway2:
namespace.local_gateway2 = \
_validate_name_or_id(namespace.local_gateway2, 'localNetworkGateways')
namespace.connection_type = 'IPSec'
elif namespace.vnet_gateway2:
namespace.vnet_gateway2 = \
_validate_name_or_id(namespace.vnet_gateway2, 'virtualNetworkGateways')
namespace.connection_type = 'Vnet2Vnet'
def load_cert_file(param_name):
def load_cert_validator(namespace):
attr = getattr(namespace, param_name)
if attr and os.path.isfile(attr):
setattr(namespace, param_name, read_base_64_file(attr))
return load_cert_validator
def get_network_watcher_from_location(remove=False, watcher_name='watcher_name',
rg_name='watcher_rg'):
def _validator(cmd, namespace):
from msrestazure.tools import parse_resource_id
from .aaz.latest.network.watcher import List
location = namespace.location
watcher_list = List(cli_ctx=cmd.cli_ctx)(command_args={})
watcher = next((w for w in watcher_list if w["location"].lower() == location.lower()), None)
if not watcher:
raise ValidationError(f"network watcher is not enabled for region {location}.")
id_parts = parse_resource_id(watcher['id'])
setattr(namespace, rg_name, id_parts['resource_group'])
setattr(namespace, watcher_name, id_parts['name'])
if remove:
del namespace.location
return _validator
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
from msrestazure.tools import is_valid_resource_id, resource_id
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
from azure.cli.core.azclierror import UnrecognizedArgumentError
from msrestazure.tools import is_valid_resource_id
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
raise UnrecognizedArgumentError('vnet should be provided when input subnet name instead of subnet id')
subnet = vnet + f'/subnets/{subnet}'
return subnet
def process_nw_flow_log_show_namespace(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
from azure.cli.core.commands.arm import get_arm_resource_by_id
if hasattr(namespace, 'nsg') and namespace.nsg is not None:
if not is_valid_resource_id(namespace.nsg):
namespace.nsg = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='networkSecurityGroups',
name=namespace.nsg)
nsg = get_arm_resource_by_id(cmd.cli_ctx, namespace.nsg)
namespace.location = nsg.location # pylint: disable=no-member
get_network_watcher_from_location(remove=True)(cmd, namespace)
elif namespace.flow_log_name is not None and namespace.location is not None:
get_network_watcher_from_location(remove=False)(cmd, namespace)
else:
        raise CLIError('usage error: --nsg NSG | --location NETWORK_WATCHER_LOCATION --name FLOW_LOG_NAME')
def process_lb_outbound_rule_namespace(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
validate_frontend_ip_configs(cmd, namespace)
if namespace.backend_address_pool:
if not is_valid_resource_id(namespace.backend_address_pool):
namespace.backend_address_pool = _generate_lb_subproperty_id(
cmd.cli_ctx, namespace, 'backendAddressPools', namespace.backend_address_pool)
def validate_ag_address_pools(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
address_pools = namespace.app_gateway_backend_address_pools
gateway_name = namespace.application_gateway_name
delattr(namespace, 'application_gateway_name')
if not address_pools:
return
ids = []
for item in address_pools:
if not is_valid_resource_id(item):
if not gateway_name:
raise CLIError('usage error: --app-gateway-backend-pools IDS | --gateway-name NAME '
'--app-gateway-backend-pools NAMES')
item = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=gateway_name,
child_type_1='backendAddressPools',
child_name_1=item)
ids.append(item)
namespace.app_gateway_backend_address_pools = ids
def validate_custom_error_pages(namespace):
if not namespace.custom_error_pages:
return
values = []
for item in namespace.custom_error_pages:
try:
            (code, url) = item.split('=', 1)  # maxsplit=1, since URLs may themselves contain '='
values.append({'statusCode': code, 'customErrorPageUrl': url})
except (ValueError, TypeError):
raise CLIError('usage error: --custom-error-pages STATUS_CODE=URL [STATUS_CODE=URL ...]')
namespace.custom_error_pages = values
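# For example (illustrative values), `--custom-error-pages 502=https://contoso.com/502.html`
# is parsed into [{'statusCode': '502',
# 'customErrorPageUrl': 'https://contoso.com/502.html'}].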
def validate_custom_headers(namespace):
if not namespace.monitor_custom_headers:
return
values = []
for item in namespace.monitor_custom_headers:
try:
item_split = item.split('=', 1)
values.append({'name': item_split[0], 'value': item_split[1]})
except IndexError:
raise CLIError('usage error: --custom-headers KEY=VALUE')
namespace.monitor_custom_headers = values
def validate_status_code_ranges(namespace):
if not namespace.status_code_ranges:
return
values = []
for item in namespace.status_code_ranges:
item_split = item.split('-', 1)
usage_error = CLIError('usage error: --status-code-ranges VAL | --status-code-ranges MIN-MAX')
try:
if len(item_split) == 1:
values.append({'min': int(item_split[0]), 'max': int(item_split[0])})
elif len(item_split) == 2:
values.append({'min': int(item_split[0]), 'max': int(item_split[1])})
else:
raise usage_error
except ValueError:
raise usage_error
namespace.status_code_ranges = values
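# For example, `--status-code-ranges 200 301-308` is parsed into
# [{'min': 200, 'max': 200}, {'min': 301, 'max': 308}].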
def validate_subnet_ranges(namespace):
if not namespace.subnets:
return
values = []
for item in namespace.subnets:
try:
item_split = item.split('-', 1)
if len(item_split) == 2:
values.append({'first': item_split[0], 'last': item_split[1]})
continue
except ValueError:
pass
try:
item_split = item.split(':', 1)
if len(item_split) == 2:
values.append({'first': item_split[0], 'scope': int(item_split[1])})
continue
except ValueError:
pass
values.append({'first': item})
namespace.subnets = values
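# For example (illustrative values), `--subnets 10.0.0.0-10.0.0.255 10.0.1.0:24 10.0.2.1`
# is parsed into [{'first': '10.0.0.0', 'last': '10.0.0.255'},
# {'first': '10.0.1.0', 'scope': 24}, {'first': '10.0.2.1'}].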
# pylint: disable=too-few-public-methods
class WafConfigExclusionAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not namespace.exclusions:
namespace.exclusions = []
if isinstance(values, list):
values = ' '.join(values)
try:
variable, op, selector = values.split(' ')
except (ValueError, TypeError):
raise CLIError('usage error: --exclusion VARIABLE OPERATOR VALUE')
namespace.exclusions.append({
"match_variable": variable,
"selector_match_operator": op,
"selector": selector
})
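# For example (illustrative values), --exclusion "RequestHeaderNames StartsWith x-custom"
# appends {"match_variable": "RequestHeaderNames",
# "selector_match_operator": "StartsWith", "selector": "x-custom"}
# to namespace.exclusions.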
def process_private_link_resource_id_argument(cmd, namespace):
if all([namespace.resource_group_name,
namespace.name,
namespace.resource_provider]):
logger.warning("Resource ID will be ignored since other three arguments have been provided.")
del namespace.id
return
if not (namespace.id or all([namespace.resource_group_name,
namespace.name,
namespace.resource_provider])):
raise CLIError("usage error: --id / -g -n --type")
from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if not is_valid_resource_id(namespace.id):
        raise CLIError("Resource ID is invalid. Please check it.")
split_resource_id = parse_resource_id(namespace.id)
cmd.cli_ctx.data['subscription_id'] = split_resource_id['subscription']
namespace.resource_group_name = split_resource_id['resource_group']
namespace.name = split_resource_id['name']
namespace.resource_provider = '{}/{}'.format(split_resource_id['namespace'], split_resource_id['type'])
del namespace.id
def process_private_endpoint_connection_id_argument(cmd, namespace):
from azure.cli.core.util import parse_proxy_resource_id
if all([namespace.resource_group_name,
namespace.name,
namespace.resource_provider,
namespace.resource_name]):
logger.warning("Resource ID will be ignored since other three arguments have been provided.")
del namespace.connection_id
return
if not (namespace.connection_id or all([namespace.resource_group_name,
namespace.name,
namespace.resource_provider,
namespace.resource_name])):
raise CLIError("usage error: --id / -g -n --type --resource-name")
result = parse_proxy_resource_id(namespace.connection_id)
cmd.cli_ctx.data['subscription_id'] = result['subscription']
namespace.resource_group_name = result['resource_group']
namespace.resource_name = result['name']
namespace.resource_provider = '{}/{}'.format(result['namespace'], result['type'])
namespace.name = result['child_name_1']
del namespace.connection_id
def process_vnet_name_or_id(cmd, namespace):
from azure.mgmt.core.tools import is_valid_resource_id, resource_id
if namespace.vnet and not is_valid_resource_id(namespace.vnet):
namespace.vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=namespace.vnet)
def process_appgw_waf_policy_update(cmd, namespace): # pylint: disable=unused-argument
rule_group_name = namespace.rule_group_name
rules = namespace.rules
    # Providing exactly one of the two options is the error case.
    if (rules is None) != (rule_group_name is None):
        raise CLIError('--rules and --rule-group-name must be provided at the same time')
|
7e7c11149479f27c8ac436bdf1e315d41b42003e
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/fledge/tentative/resources/trusted-bidding-signals.py
|
9602bd22f9667057c46d3900909ec9bcda832341
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 4,992
|
py
|
trusted-bidding-signals.py
|
import json
from urllib.parse import unquote_plus
# Script to generate trusted bidding signals. The response depends on the
# keys and interestGroupNames - some result in entire response failures,
# others affect only their own value. Keys are preferentially used over
# interestGroupName, since keys are composable, but some tests need to cover
# there being no keys.
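# For example, a request such as
#   trusted-bidding-signals.py?hostname=a.test&keys=num-value,string-value&interestGroupNames=group1
# (illustrative values) yields the body
# {"keys": {"num-value": 1, "string-value": "1"}} together with the
# Ad-Auction-Allowed and X-fledge-bidding-signals-format-version headers.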
def main(request, response):
hostname = None
keys = None
interestGroupNames = None
# Manually parse query params. Can't use request.GET because it unescapes as well as splitting,
# and commas mean very different things from escaped commas.
for param in request.url_parts.query.split("&"):
pair = param.split("=", 1)
if len(pair) != 2:
return fail(response, "Bad query parameter: " + param)
# Browsers should escape query params consistently.
if "%20" in pair[1]:
return fail(response, "Query parameter should escape using '+': " + param)
# Hostname can't be empty. The empty string can be a key or interest group name, though.
if pair[0] == "hostname" and hostname == None and len(pair[1]) > 0:
hostname = pair[1]
continue
if pair[0] == "keys" and keys == None:
keys = list(map(unquote_plus, pair[1].split(",")))
continue
if pair[0] == "interestGroupNames" and interestGroupNames == None:
interestGroupNames = list(map(unquote_plus, pair[1].split(",")))
continue
return fail(response, "Unexpected query parameter: " + param)
# "interestGroupNames" and "hostname" are mandatory.
if not hostname:
return fail(response, "hostname missing")
if not interestGroupNames:
return fail(response, "interestGroupNames missing")
response.status = (200, b"OK")
# The JSON representation of this is used as the response body. This does
# not currently include a "perInterestGroupData" object.
responseBody = {"keys": {}}
# Set when certain special keys are observed, used in place of the JSON
# representation of `responseBody`, when set.
body = None
contentType = "application/json"
adAuctionAllowed = "true"
dataVersion = None
if keys:
for key in keys:
value = "default value"
if key == "close-connection":
# Close connection without writing anything, to simulate a
# network error. The write call is needed to avoid writing the
# default headers.
response.writer.write("")
response.close_connection = True
return
elif key.startswith("replace-body:"):
# Replace entire response body. Continue to run through other
# keys, to allow them to modify request headers.
body = key.split(':', 1)[1]
elif key.startswith("data-version:"):
dataVersion = key.split(':', 1)[1]
elif key == "http-error":
response.status = (404, b"Not found")
elif key == "no-content-type":
contentType = None
elif key == "wrong-content-type":
contentType = 'text/plain'
elif key == "bad-ad-auction-allowed":
adAuctionAllowed = "sometimes"
elif key == "ad-auction-not-allowed":
adAuctionAllowed = "false"
elif key == "no-ad-auction-allow":
adAuctionAllowed = None
elif key == "no-value":
continue
elif key == "wrong-value":
responseBody["keys"]["another-value"] = "another-value"
continue
elif key == "null-value":
value = None
elif key == "num-value":
value = 1
elif key == "string-value":
value = "1"
elif key == "array-value":
value = [1, "foo", None]
elif key == "object-value":
value = {"a":"b", "c":["d"]}
elif key == "interest-group-names":
value = json.dumps(interestGroupNames)
elif key == "hostname":
value = request.GET.first(b"hostname", b"not-found").decode("ASCII")
responseBody["keys"][key] = value
if "data-version" in interestGroupNames:
dataVersion = "4"
if contentType:
response.headers.set("Content-Type", contentType)
if adAuctionAllowed:
response.headers.set("Ad-Auction-Allowed", adAuctionAllowed)
if dataVersion:
response.headers.set("Data-Version", dataVersion)
response.headers.set("X-fledge-bidding-signals-format-version", "2")
    if body is not None:
return body
return json.dumps(responseBody)
def fail(response, body):
response.status = (400, "Bad Request")
response.headers.set(b"Content-Type", b"text/plain")
return body
|
ffc48caabaa5dff29b65f60389b2c8be37a3e1b9
|
6e235014528acc05996e6a4ef2b33e348bbc5114
|
/testing_tools/print_open_files.py
|
2d5c1d0e34b8eadfaf81256b703ed512466a3adf
|
[
"MIT"
] |
permissive
|
bogdandm/json2python-models
|
f7cf02417c38de587d86aa045756c6b61aa42cb1
|
e2606e8f2c22d3bc11b09f5eb2bc73323ce151c5
|
refs/heads/master
| 2023-01-13T03:47:47.135160
| 2023-01-02T12:28:47
| 2023-01-02T12:28:47
| 144,019,032
| 153
| 12
|
MIT
| 2023-01-02T12:18:26
| 2018-08-08T13:40:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
print_open_files.py
|
import builtins
import io
import sys
import traceback
import weakref
from functools import wraps
open_files = weakref.WeakSet()
def opener(old_open):
@wraps(old_open)
def tracking_open(*args, **kw):
file = old_open(*args, **kw)
old_close = file.close
@wraps(old_close)
        def close():
            old_close()
            # discard() tolerates a double close(); remove() would raise KeyError
            open_files.discard(file)
        file.close = close
try:
file.stack = traceback.extract_stack()
except Exception as e:
print(e)
open_files.add(file)
return file
return tracking_open
io.open = opener(io.open)
builtins.open = opener(builtins.open)
def print_open_files():
if not open_files:
print("No files are opened", file=sys.stderr)
return
print("Opened files:", file=sys.stderr)
for file in open_files:
print(
f'{file.name}:\n'
f'{"".join(traceback.format_list(file.stack))}',
file=sys.stderr
)
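
# Example usage (a sketch; assumes this file is importable as
# `print_open_files`):
#   import print_open_files   # installs the tracking wrappers as a side effect
#   f = open("/tmp/example.txt", "w")
#   print_open_files.print_open_files()  # reports the file and its opening stack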
|
99f1539646b3a65bb0dc4dbfa7039282dd22987b
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/PfemFluidDynamicsApplication/python_scripts/pfem_fluid_dynamics_analysis.py
|
3a7cf5ca3cee23fc5a777fc32875c69a931dcf56
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 21,109
|
py
|
pfem_fluid_dynamics_analysis.py
|
import time as timer
import os
from importlib import import_module
# Import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.PfemFluidDynamicsApplication as KratosPfemFluid
import KratosMultiphysics.DelaunayMeshingApplication as KratosDelaunay
from KratosMultiphysics.analysis_stage import AnalysisStage
from KratosMultiphysics.PfemFluidDynamicsApplication import python_solvers_wrapper_pfem_fluid as solver_wrapper
class PfemFluidDynamicsAnalysis(AnalysisStage):
"""The base class for the PfemFluidDynamicsAnalysis
"""
def __init__(self, model, parameters):
"""The constructor of the AnalysisStage-Object.
Keyword arguments:
self -- It signifies an instance of a class.
model -- The Model to be used
parameters -- The ProjectParameters used
"""
self.model = model
#### TIME MONITORING START ####
# Time control starts
self.KratosPrintInfo(timer.ctime())
# Measure process time
self.t0p = timer.process_time()
# Measure wall time
self.t0w = timer.time()
#### TIME MONITORING END ####
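        # (timer.process_time counts CPU seconds consumed by this process,
        # while timer.time counts elapsed wall-clock seconds; both start
        # points are kept so deltas can be reported at the end of the run.)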
#### PARSING THE PARAMETERS ####
#set echo level
self.echo_level = parameters["problem_data"]["echo_level"].GetInt()
# Print solving time
self.report = False
if( self.echo_level > 0 ):
self.report = True
self.KratosPrintInfo(" ")
# defining the number of threads:
num_threads = parameters["problem_data"]["threads"].GetInt()
self.SetParallelSize(num_threads)
self.KratosPrintInfo("::[KPFEM Simulation]:: [OMP USING" + str(num_threads) + "THREADS ]")
#parallel.PrintOMPInfo()
self.KratosPrintInfo(" ")
self.KratosPrintInfo("::[KPFEM Simulation]:: [Time Step:" + str(parameters["solver_settings"]["time_stepping"]["time_step"].GetDouble()) + " echo:" + str(self.echo_level) + "]")
#### Model_part settings start ####
super(PfemFluidDynamicsAnalysis,self).__init__(model,parameters)
# Defining the model_part
self.main_model_part = self.model.GetModelPart(parameters["solver_settings"]["model_part_name"].GetString())
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.SPACE_DIMENSION, parameters["solver_settings"]["domain_size"].GetInt())
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, parameters["solver_settings"]["domain_size"].GetInt())
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DELTA_TIME, parameters["solver_settings"]["time_stepping"]["time_step"].GetDouble())
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.TIME, parameters["problem_data"]["start_time"].GetDouble())
if parameters["problem_data"].Has("gravity_vector"):
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.GRAVITY_X, parameters["problem_data"]["gravity_vector"][0].GetDouble())
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.GRAVITY_Y, parameters["problem_data"]["gravity_vector"][1].GetDouble())
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.GRAVITY_Z, parameters["problem_data"]["gravity_vector"][2].GetDouble())
self.problem_path = os.getcwd()
self.problem_name = parameters["problem_data"]["problem_name"].GetString()
#print model_part and properties
if (self.echo_level>-1):
for properties in self.main_model_part.Properties:
self.KratosPrintInfo(properties)
self.AddPfemVariables()
if parameters["solver_settings"].Has("constitutive_laws_list"):
self.constitutive_laws_names = parameters["solver_settings"]["constitutive_laws_list"]
self.AddMaterialVariables()
#else:
# self.AddAllMaterialVariables()
def _CreateSolver(self):
"""Create the solver
"""
return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters["solver_settings"],self.project_parameters["problem_data"]["parallel_type"].GetString())
def Initialize(self):
"""This function initializes the AnalysisStage
Usage: It is designed to be called ONCE, BEFORE the execution of the solution-loop
This function has to be implemented in deriving classes!
"""
# Read model_part from mdpa file
self._solver.ImportModelPart()
# Prepare model_part (note: the buffer_size is set here) (restart is read here)
self._solver.PrepareModelPart()
# Add dofs (always after importing the model part)
self._solver.AddDofs()
#print model_part and properties
if (self.echo_level>1):
self.KratosPrintInfo("")
self.KratosPrintInfo(self.main_model_part)
for properties in self.main_model_part.Properties:
self.KratosPrintInfo(properties)
#### Processes settings start ####
# obtain the list of the processes to be applied
from KratosMultiphysics.PfemFluidDynamicsApplication.process_handler import ProcessHandler
process_parameters = KratosMultiphysics.Parameters("{}")
process_parameters.AddValue("echo_level", self.project_parameters["problem_data"]["echo_level"])
if( self.project_parameters.Has("problem_process_list") ):
process_parameters.AddValue("problem_process_list", self.project_parameters["problem_process_list"])
self.model_processes = ProcessHandler(self.model, process_parameters)
self.model_processes.ExecuteInitialize()
## here we initialize user-provided processes
order_processes_initialization = self._GetOrderOfProcessesInitialization()
self._list_of_processes = self._CreateProcesses("processes", order_processes_initialization)
self._list_of_output_processes = self._CreateProcesses("output_processes", order_processes_initialization)
self._list_of_processes.extend(self._list_of_output_processes)
for process in self._GetListOfProcesses():
process.ExecuteInitialize()
#### processes settings end ####
#### START SOLUTION ####
self.computing_model_part = self._solver.GetComputingModelPart()
self.graphical_output = self.SetGraphicalOutput()
## Sets strategies, builders, linear solvers, schemes and solving info, and fills the buffer
self._solver.Initialize()
self._solver.InitializeStrategy()
self._solver.SetEchoLevel(self.echo_level)
# Initialize GiD I/O (gid outputs, file_lists)
self.GraphicalOutputExecuteInitialize()
self.KratosPrintInfo(" ")
self.KratosPrintInfo("::[KPFEM Simulation]:: Analysis -START- ")
self.model_processes.ExecuteBeforeSolutionLoop()
for process in self._GetListOfProcesses():
process.ExecuteBeforeSolutionLoop()
self.GraphicalOutputExecuteBeforeSolutionLoop()
# write output results GiD: (frequency writing is controlled internally)
self.GraphicalOutputPrintOutput()
# Set time settings
self.step = self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]
self.time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
self.end_time = self.project_parameters["problem_data"]["end_time"].GetDouble()
self.delta_time = self.project_parameters["solver_settings"]["time_stepping"]["time_step"].GetDouble()
def InitializeSolutionStep(self):
"""This function performs all the required operations that should be executed
(for each step) BEFORE solving the solution step.
"""
self.clock_time = self.StartTimeMeasuring()
# processes to be executed at the beginning of the solution step
self.model_processes.ExecuteInitializeSolutionStep()
for process in self._GetListOfProcesses():
process.ExecuteInitializeSolutionStep()
self.GraphicalOutputExecuteInitializeSolutionStep()
# solve time step
self._solver.InitializeSolutionStep()
self.StopTimeMeasuring(self.clock_time,"Initialize Step" , self.report)
def FinalizeSolutionStep(self):
"""This function performs all the required operations that should be executed
(for each step) AFTER solving the solution step.
"""
self.clock_time = self.StartTimeMeasuring()
self._GetSolver().FinalizeSolutionStep()
self.GraphicalOutputExecuteFinalizeSolutionStep()
# processes to be executed at the end of the solution step
self.model_processes.ExecuteFinalizeSolutionStep()
for process in self._GetListOfProcesses():
process.ExecuteFinalizeSolutionStep()
self.model_processes.ExecuteBeforeOutputStep()
for process in self._GetListOfProcesses():
process.ExecuteBeforeOutputStep()
# write output results GiD: (frequency writing is controlled internally)
self.GraphicalOutputPrintOutput()
# processes to be executed after writing the output
self.model_processes.ExecuteAfterOutputStep()
for process in self._GetListOfProcesses():
process.ExecuteAfterOutputStep()
self.StopTimeMeasuring(self.clock_time,"Finalize Step" , self.report);
def Finalize(self):
"""This function finalizes the AnalysisStage
Usage: It is designed to be called ONCE, AFTER the execution of the solution-loop
"""
# Ending the problem (time integration finished)
self.GraphicalOutputExecuteFinalize()
self.model_processes.ExecuteFinalize()
for process in self._GetListOfProcesses():
process.ExecuteFinalize()
self.KratosPrintInfo("::[KPFEM Simulation]:: Analysis -END- ")
self.KratosPrintInfo(" ")
#### END SOLUTION ####
# Measure process time
tfp = timer.process_time()
# Measure wall time
tfw = timer.time()
print("::[KPFEM Simulation]:: [Elapsed Time = %.2f" % (tfw - self.t0w),"seconds] (%.2f" % (tfp - self.t0p),"seconds of cpu/s time)")
self.KratosPrintInfo(timer.ctime())
def SetGraphicalOutput(self):
"""This function sets the settings for the graphical
output
"""
if( self.project_parameters.Has("output_configuration") ):
from KratosMultiphysics.PfemFluidDynamicsApplication.pfem_fluid_gid_output_process import GiDOutputProcess
self.output_settings = self.project_parameters["output_configuration"]
self.post_process_model_part = self.model.CreateModelPart("output_model_part")
return GiDOutputProcess(self.post_process_model_part,
"gid_output/" + self.problem_name,
self.output_settings)
else:
return (KratosMultiphysics.Process())
def GraphicalOutputExecuteInitialize(self):
"""This function performs the initialize of the graphical output
"""
self.graphical_output.ExecuteInitialize()
def GraphicalOutputExecuteBeforeSolutionLoop(self):
"""This function performs the ExecuteBeforeSolutionLoop
of the graphical_output
"""
# writing an initial state results file or single file
self.graphical_output.ExecuteBeforeSolutionLoop()
def GraphicalOutputExecuteInitializeSolutionStep(self):
"""This function performs the ExecuteInitializeSolutionStep
of the graphical_output
"""
self.graphical_output.ExecuteInitializeSolutionStep()
def GraphicalOutputExecuteFinalizeSolutionStep(self):
"""This function performs the ExecuteFinalizeSolutionStep
of the graphical_output
"""
self.graphical_output.ExecuteFinalizeSolutionStep()
def GraphicalOutputPrintOutput(self):
"""This function prints the output for this time step
"""
if( self.project_parameters.Has("output_configuration") ):
self.post_process_model_part.ProcessInfo[KratosMultiphysics.TIME] = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
if(self.graphical_output.IsOutputStep()):
time=self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
delta_time=self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]
step=self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]
KratosMultiphysics.PfemFluidDynamicsApplication.PostProcessUtilities().RebuildPostProcessModelPart(self.post_process_model_part, self.main_model_part)
self.KratosPrintInfo("")
self.KratosPrintInfo("**********************************************************")
self.KratosPrintInfo("---> Print Output at [STEP:" + str(step) + " TIME:" + str(time) + " DT:" + str(delta_time) + "]")
self.KratosPrintInfo("**********************************************************")
self.KratosPrintInfo("")
self.graphical_output.PrintOutput()
def GraphicalOutputExecuteFinalize(self):
"""This function performs the ExecuteFinalize
of the graphical_output
"""
self.graphical_output.ExecuteFinalize()
def SetParallelSize(self, num_threads):
"""This function sets the number of threads
"""
KratosMultiphysics.ParallelUtilities.SetNumThreads(int(num_threads))
def GetParallelSize(self):
"""This function returns the number of threads
"""
return KratosMultiphysics.ParallelUtilities.GetNumThreads()
def StartTimeMeasuring(self):
"""This function starts time calculation
"""
# Measure process time
time_ip = timer.process_time()
return time_ip
def StopTimeMeasuring(self, time_ip, process, report):
"""This function ends time calculation
"""
# Measure process time
time_fp = timer.process_time()
if report:
used_time = time_fp - time_ip
print("::[PFEM Simulation]:: [ %.2f" % round(used_time,2),"s", process," ] ")
def _GetOrderOfProcessesInitialization(self):
"""This function can be overridden in derived classes if the order of
initialization for the processes matters
"""
return ["constraints_process_list",
"loads_process_list",
"auxiliar_process_list"]
def KratosPrintInfo(self, message):
"""This function prints info on screen
"""
KratosMultiphysics.Logger.Print(message, label="")
KratosMultiphysics.Logger.Flush()
def AddMaterialVariables(self):
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BULK_MODULUS)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DYNAMIC_VISCOSITY)
for i in range(self.constitutive_laws_names.size()):
if (self.constitutive_laws_names[i].GetString()=="FrictionalViscoplastic2DLaw" or self.constitutive_laws_names[i].GetString()=="FrictionalViscoplastic3DLaw"):
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.INTERNAL_FRICTION_ANGLE)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.COHESION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.ADAPTIVE_EXPONENT)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.REGULARIZATION_COEFFICIENT)
elif (self.constitutive_laws_names[i].GetString()=="Hypoelastic2DLaw" or self.constitutive_laws_names[i].GetString()=="Hypoelastic3DLaw"):
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.POISSON_RATIO)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.YOUNG_MODULUS)
elif (self.constitutive_laws_names[i].GetString()=="Bingham2DLaw" or self.constitutive_laws_names[i].GetString()=="Bingham3DLaw" or
self.constitutive_laws_names[i].GetString()=="HerschelBulkley2DLaw" or self.constitutive_laws_names[i].GetString()=="HerschelBulkley3DLaw"):
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.FLOW_INDEX)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.YIELD_SHEAR)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.YIELDED)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.ADAPTIVE_EXPONENT)
elif (self.constitutive_laws_names[i].GetString()=="MuIRheology2DLaw" or self.constitutive_laws_names[i].GetString()=="MuIRheology3DLaw"):
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.STATIC_FRICTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.DYNAMIC_FRICTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.INERTIAL_NUMBER_ZERO)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.GRAIN_DIAMETER)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.GRAIN_DENSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.REGULARIZATION_COEFFICIENT)
elif (self.constitutive_laws_names[i].GetString()!="None" and self.constitutive_laws_names[i].GetString()!="Newtonian2DLaw" and self.constitutive_laws_names[i].GetString()!="Newtonian3DLaw"):
print("ERROR: THE CONSTITUTIVE LAW PROVIDED FOR THIS SUBMODEL PART IS NOT IN THE PFEM FLUID DATABASE")
def AddAllMaterialVariables(self):
print("ATTENTION! YOU ARE ADDING ALL MATERIAL VARIABLES, PLEASE UPDATE YOUR PROJECTPARAMETERS.JSON")
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BULK_MODULUS)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DYNAMIC_VISCOSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.INTERNAL_FRICTION_ANGLE)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.COHESION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.ADAPTIVE_EXPONENT)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.REGULARIZATION_COEFFICIENT)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.POISSON_RATIO)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.YOUNG_MODULUS)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.FLOW_INDEX)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.YIELD_SHEAR)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.YIELDED)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.STATIC_FRICTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.DYNAMIC_FRICTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.INERTIAL_NUMBER_ZERO)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.GRAIN_DIAMETER)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.GRAIN_DENSITY)
def AddPfemVariables(self):
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MESH_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BODY_FORCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_MASS)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NORMAL)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VOLUME_ACCELERATION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.PRESSURE_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.PRESSURE_ACCELERATION)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.ISOLATED_NODE)
self.main_model_part.AddNodalSolutionStepVariable(KratosPfemFluid.NODAL_H_WALL)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H)
self.main_model_part.AddNodalSolutionStepVariable(KratosDelaunay.SHRINK_FACTOR)
self.main_model_part.AddNodalSolutionStepVariable(KratosDelaunay.PROPERTY_ID)
if __name__ == "__main__":
parameter_file_name = "ProjectParameters.json"
with open(parameter_file_name,'r') as parameter_file:
parameters = KratosMultiphysics.Parameters(parameter_file.read())
model = KratosMultiphysics.Model()
simulation = PfemFluidDynamicsAnalysis(model,parameters)
simulation.Run()
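# For reference, a minimal sketch of the ProjectParameters.json this script
# expects, assembled only from the keys read above; every value below is a
# placeholder, not a recommended setting:
#
# {
#     "problem_data": {
#         "problem_name":  "example",
#         "parallel_type": "OpenMP",
#         "echo_level":    1,
#         "threads":       1,
#         "start_time":    0.0,
#         "end_time":      1.0
#     },
#     "solver_settings": {
#         "model_part_name": "PfemFluidModelPart",
#         "domain_size":     2,
#         "time_stepping":   {"time_step": 0.001},
#         "constitutive_laws_list": ["Newtonian2DLaw"]   # optional, see AddMaterialVariables
#     }
# }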
|
335661d0db788495841007bd02d21b076d7e9cb9
|
95753e92497eb499f1a20262e9639e4309cc8378
|
/demo_free_integration_long_time.py
|
1c5ce6676de431e775f517bf5a5cc7a014dd0725
|
[
"MIT"
] |
permissive
|
Aceinna/gnss-ins-sim
|
cf9cf890048cae832c39980424196f96c64df020
|
020ca3798e931813c6e394ba822d4d3c43218a0f
|
refs/heads/master
| 2023-08-19T19:35:20.285793
| 2023-03-17T03:15:23
| 2023-03-17T03:15:23
| 115,900,361
| 999
| 298
|
MIT
| 2023-03-17T03:15:24
| 2018-01-01T05:02:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,800
|
py
|
demo_free_integration_long_time.py
|
# -*- coding: utf-8 -*-
# Filename: demo_free_integration_long_time.py
"""
A simple free integration (strapdown inertial navigation) demo of Sim.
Created on 2018-01-23
@author: dongxiaoguang
"""
import os
import math
import numpy as np
from gnss_ins_sim.sim import imu_model
from gnss_ins_sim.sim import ins_sim
# globals
D2R = math.pi/180
motion_def_path = os.path.abspath('.//demo_motion_def_files//')
fs = 200.0 # IMU sample frequency
def test_free_integration():
'''
test Sim
'''
#### IMU model, typical for IMU381
imu_err = {'gyro_b': np.array([0.0, 0.0, 0.0]),
'gyro_arw': np.array([0.25, 0.25, 0.25]) * 0.0,
'gyro_b_stability': np.array([3.5, 3.5, 3.5]) * 0.0,
'gyro_b_corr': np.array([100.0, 100.0, 100.0]),
'accel_b': np.array([0.0e-3, 0.0e-3, 0.0e-3]),
'accel_vrw': np.array([0.03119, 0.03009, 0.04779]) * 0.0,
'accel_b_stability': np.array([4.29e-5, 5.72e-5, 8.02e-5]) * 0.0,
'accel_b_corr': np.array([200.0, 200.0, 200.0]),
'mag_std': np.array([0.2, 0.2, 0.2]) * 0.0
}
# do not generate GPS and magnetometer data
imu = imu_model.IMU(accuracy=imu_err, axis=6, gps=False)
#### Algorithm
# Free integration in a virtual inertial frame
from demo_algorithms import free_integration
'''
Free integration requires initial states (position, velocity and attitude).
You should provide these values when you create the algorithm object.
'''
ini_pos_vel_att = np.genfromtxt(motion_def_path+"//motion_def-long_drive.csv",\
delimiter=',', skip_header=1, max_rows=1)
ini_pos_vel_att[0] = ini_pos_vel_att[0] * D2R
ini_pos_vel_att[1] = ini_pos_vel_att[1] * D2R
ini_pos_vel_att[6:9] = ini_pos_vel_att[6:9] * D2R
# add initial states error if needed
ini_vel_err = np.array([0.0, 0.0, 0.0]) # initial velocity error in the body frame, m/s
ini_att_err = np.array([0.0, 0.0, 0.0]) # initial Euler angles error, deg
ini_pos_vel_att[3:6] += ini_vel_err
ini_pos_vel_att[6:9] += ini_att_err * D2R
# create the algorithm object
algo = free_integration.FreeIntegration(ini_pos_vel_att)
#### start simulation
sim = ins_sim.Sim([fs, 0.0, 0.0],
motion_def_path+"//motion_def-long_drive.csv",
ref_frame=0,
imu=imu,
mode=None,
env=None,
algorithm=algo)
# run the simulation once
sim.run(1)
# generate simulation results, summary
# do not save data, generate .kml file
sim.results('', err_stats_start=-1, gen_kml=True)
if __name__ == '__main__':
test_free_integration()
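# For reference, a sketch of the first data row that ini_pos_vel_att unpacks
# above, inferred from the index usage ([0:2] converted deg->rad, [3:6] treated
# as velocity, [6:9] converted deg->rad as attitude); check the motion_def file
# for the authoritative column order:
#
#   lat (deg), lon (deg), alt (m), vx (m/s), vy (m/s), vz (m/s),
#   yaw (deg), pitch (deg), roll (deg)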
|
3e883027074ea2c64e5df9a8373378b46b517a39
|
b4afb834fc3a3e2c128b1bf825700031e3df519a
|
/pyroms_toolbox/pyroms_toolbox/PCA.py
|
81d137ca31f4429ae9a7ddafc8737e6071db5361
|
[
"BSD-3-Clause"
] |
permissive
|
ESMG/pyroms
|
e4e5e9d70d66907a992846b06d61db31afcd24f3
|
5ea501ef904b01036dd2a0909b7bdc61a56e7eff
|
refs/heads/python3
| 2023-03-19T11:11:09.143443
| 2023-03-10T00:22:13
| 2023-03-10T00:22:13
| 1,012,779
| 102
| 63
|
NOASSERTION
| 2023-03-10T00:23:20
| 2010-10-21T17:22:48
|
Python
|
UTF-8
|
Python
| false
| false
| 7,234
|
py
|
PCA.py
|
import numpy as np
from matplotlib.mlab import psd
from scipy.signal import get_window
class PCA(object):
def __init__(self, dataset, algorithm='svd'):
"""
This class carries out Principal Component Analysis on dataset.
Arguments:
'dataset' -- NumPy array containing the field to be decomposed.
First dimension must be time.
The optional algorithm parameter can be either 'svd' to perform PCA with
the singular value decomposition, or 'eig' to use a symmetric eigenvalue
decomposition.
Notes:
PCA does not center or scale dataset; you usually want to first
dataset = center(dataset) or
dataset = standardize(dataset)
with the functions provided.
"""
self.dataset = dataset
self.packedata, self.mask = self.pack(self.dataset)
if algorithm == 'svd':
self.packedEOFs, self.ECs, self.L = self._pca_svd(self.packedata)
elif algorithm == 'eig':
self.packedEOFs, self.ECs, self.L = self._pca_eig(self.packedata)
else:
raise RuntimeError('Algorithm %s not known.'%algorithm)
self.EOFs = self.unpack(self.packedEOFs, self.mask)
def GetEOFs(self):
"""
Returns the Empirical Orthogonal Functions EOFs.
"""
return self.EOFs
def GetECs(self):
"""
Returns the Temporal Expansion Coefficients ECs.
"""
return self.ECs
def GetL(self):
"""
Returns Covariances matrix L.
"""
return self.L
def GetPSD(self, blocks_length=0, fs=1, window='boxcar', overlap=0):
"""
Returns the power spectral density estimated by Welch's average
periodogram method.
ECs are divided into blocks of length "blocks_length". If "blocks_length" is 0,
a single block spanning the whole time series is used. Each block is windowed
by the window function "window". "overlap" gives the length of the overlap
between blocks. The PSDs of the segments are then averaged.
"""
nECs, nt = self.ECs.shape
if blocks_length == 0:
    blocks_length = nt
window_vector = get_window(window, blocks_length)
PSD = np.zeros((nECs, blocks_length//2 + 1))
freq = np.zeros((nECs, blocks_length//2 + 1))
for n in range(nECs):
P, f = psd(self.ECs[n], NFFT=blocks_length, Fs=fs, window=window_vector, noverlap=overlap)
PSD[n] = np.squeeze(P)
freq[n] = f
return PSD, freq
def pack(self, dataset):
"""
"""
nt = dataset.shape[0]
mask = np.ma.getmaskarray(dataset[0])
npt = np.sum(~mask)
packedata = np.zeros((nt,npt))
for t in range(nt):
packedata[t] = np.ma.compressed(dataset[t])
return packedata, mask
def unpack(self, packedeofs, mask):
"""
"""
neof = packedeofs.shape[1]
shape = mask.shape
if len(mask.shape) == 2:
ny, nx = mask.shape
dims = nx * ny
elif len(mask.shape) == 3:
nz, ny, nx = mask.shape
dims = nx * ny * nz
eofs = np.ma.masked_all((neof,dims))
for n in range(neof):
eofs[n,~mask.flatten()] = packedeofs[:,n]
if len(mask.shape) == 2:
eofs = eofs.reshape(neof,ny,nx)
elif len(mask.shape) == 3:
eofs = eofs.reshape(neof,nz,ny,nx)
return eofs
def _pca_svd(self, packedata):
"""
Calculates EOF decomposition of a field by means
of the SVD decomposition.
Returns EOFs, ECs, eigenvalues, eigenvectors
Use Singular Value Decomposition (SVD) to break up
M(mxn) into 3 matrices:
M = U * S * V.T
where U and V are orthonormal and S is diagonal. Then,
EOFs = V
ECs = U * S
eigenvalues = ECs.T * ECs / (n-1) = S**2 / (n-1)
"""
u, s, vt = np.linalg.svd(packedata, full_matrices = False)
packedpcs = np.transpose(vt)
ecs = u * s
ecs = ecs.T
eigenval = s**2 / (len(s)-1)
return packedpcs, ecs, eigenval
def _pca_eig(self, packedata):
"""
Calculates EOF decomposition of a field by means
of the symmetric eigenvalue decomposition method
"""
vals, vecs = self._sym_eigh(packedata)
packedpcs = vecs
ecs = np.dot(packedata, vecs)
ecs = ecs.T
eigenval = vals / (len(vals)-1)
return packedpcs, ecs, eigenval
def _sym_eigh(self, a):
"""
Return the eigenvectors and eigenvalues of the symmetric matrix a'a. If
a has more columns than rows, then that matrix will be rank-deficient,
and the non-zero eigenvalues and eigenvectors can be more easily extracted
from the matrix aa'.
From the properties of the SVD:
if a of shape (m,n) has SVD u*s*v', then:
a'a = v*s's*v'
aa' = u*ss'*u'
let s_hat, an array of shape (m,n), be such that s * s_hat = I(m,m)
and s_hat * s = I(n,n). (Note that s_hat is just the elementwise
reciprocal of s, as s is zero except on the main diagonal.)
Thus, we can solve for u or v in terms of the other:
v = a'*u*s_hat'
u = a*v*s_hat
"""
m, n = a.shape
if m >= n:
# just return the eigenvalues and eigenvectors of a'a
vals, vecs = self._eigh(np.dot(np.transpose(a), a))
vecs = np.where(vecs < 0, 0, vecs)
return vals, vecs
else:
# figure out the eigenvalues and vectors based on aa', which is smaller
w, v = self._eigh(np.dot(a, a.transpose()))
# in case due to numerical instabilities we have w < 0 anywhere,
# peg them to zero
vals = np.where(w < 0, 0, w)
# now get the inverse square root of the diagonal, which will form the
# main diagonal of s_hat
err = np.seterr(divide='ignore', invalid='ignore')
s_hat = 1/np.sqrt(w)
np.seterr(**err)
s_hat[~np.isfinite(s_hat)] = 0
# s_hat is a list of length m, a'u is (n,m), so we can just use
# numpy's broadcasting instead of matrix multiplication, and only create
# the upper mxm block of a'u, since that's all we'll use anyway...
vecs = np.dot(np.transpose(a), v[:,:m]) * s_hat
return vals, vecs
def _eigh(self, a):
vals, vecs = np.linalg.eigh(a)
order = np.flipud(vals.argsort())
return vals[order], vecs[:,order]
def center(dataset):
"""
Returns a centered version (mean along _first_ axis removed) of an array
"""
return dataset - dataset.mean(axis=0)
def standardize(dataset):
"""
Returns a standardized (centered and unit variance) of an array
"""
residual = center(dataset)
return residual / residual.std(axis=0)
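if __name__ == '__main__':
    # A minimal, self-contained usage sketch (not part of the original module):
    # decompose a small synthetic space-time field with one masked grid point.
    # The data and mask are hypothetical; any masked array whose first axis is
    # time works the same way.
    nt, ny, nx = 50, 4, 5
    rng = np.random.default_rng(0)
    field = np.ma.masked_array(rng.standard_normal((nt, ny, nx)))
    field[:, 0, 0] = np.ma.masked        # e.g. a land point, masked at all times
    pca = PCA(center(field), algorithm='svd')
    eofs = pca.GetEOFs()                 # (n_modes, ny, nx), masked where input was
    ecs = pca.GetECs()                   # (n_modes, nt) temporal expansion coefficients
    print(eofs.shape, ecs.shape, pca.GetL()[:3])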
|
9e0aecbf870f9322f52d06b48f8ead9a24a1273b
|
d36dda5de08015658409cf93bc6aec85fa8c24f6
|
/RPLCD/codecs/st7066_0b.py
|
3e323f7efe21cc8e059bb551017b6379f6c4acd0
|
[
"MIT",
"Python-2.0"
] |
permissive
|
dbrgn/RPLCD
|
f1dc3027ddb328b850dc3481a12ce688f76f10b2
|
486533e25c423a884c8d73486d2b8e53bbaabb97
|
refs/heads/master
| 2023-04-07T11:22:17.872890
| 2023-03-25T00:56:21
| 2023-03-25T00:56:21
| 9,896,961
| 246
| 76
|
MIT
| 2023-03-25T00:54:51
| 2013-05-06T21:09:10
|
Python
|
UTF-8
|
Python
| false
| false
| 12,048
|
py
|
st7066_0b.py
|
"""
The ST7066_0B character table.
"""
# Character shown if no mapping was found
replacement_char = 0x20 # SPACE
# Table with 1:1 mapping
encoding_table = {
'\u0000': 0x00, # User defined (CGRAM)
'\u0001': 0x01, # User defined (CGRAM)
'\u0002': 0x02, # User defined (CGRAM)
'\u0003': 0x03, # User defined (CGRAM)
'\u0004': 0x04, # User defined (CGRAM)
'\u0005': 0x05, # User defined (CGRAM)
'\u0006': 0x06, # User defined (CGRAM)
'\u0007': 0x07, # User defined (CGRAM)
'±': 0x10, # PLUS-MINUS SIGN
'≡': 0x11, # IDENTICAL TO
'\u23B2': 0x12, # SUMMATION TOP
'\u23B3': 0x13, # SUMMATION BOTTOM
'\u239B': 0x14, # LEFT PARENTHESIS UPPER HOOK
'\u239D': 0x15, # LEFT PARENTHESIS LOWER HOOK
'\u239E': 0x16, # RIGHT PARENTHESIS UPPER HOOK
'\u23A0': 0x17, # RIGHT PARENTHESIS LOWER HOOK
'\u23B0': 0x18, # UPPER LEFT OR LOWER RIGHT CURLY BRACKET SECTION
'\u23B1': 0x19, # UPPER RIGHT OR LOWER LEFT CURLY BRACKET SECTION
'\u2248': 0x1a, # ALMOST EQUAL TO
'\u222B': 0x1b, # INTEGRAL
'\u208C': 0x1c, # SUBSCRIPT EQUALS SIGN
'\u02F7': 0x1d, # MODIFIER LETTER LOW TILDE
'²': 0x1e, # SUPERSCRIPT TWO
'³': 0x1f, # SUPERSCRIPT THREE
'\u0020': 0x20, # SPACE
'\u00A0': 0x20, # NO-BREAK SPACE
'!': 0x21, # EXCLAMATION MARK
'"': 0x22, # QUOTATION MARK
'#': 0x23, # NUMBER SIGN
'$': 0x24, # DOLLAR SIGN
'%': 0x25, # PERCENT SIGN
'&': 0x26, # AMPERSAND
"'": 0x27, # APOSTROPHE
'(': 0x28, # LEFT PARENTHESIS
')': 0x29, # RIGHT PARENTHESIS
'*': 0x2a, # ASTERISK
'+': 0x2b, # PLUS SIGN
',': 0x2c, # COMMA
'\u002d': 0x2d, # HYPHEN-MINUS
'\u2010': 0x2d, # HYPHEN
'\u2011': 0x2d, # NON-BREAKING HYPHEN
'\u2012': 0x2d, # FIGURE DASH
'\u2013': 0x2d, # EN DASH
'\u2014': 0x2d, # EM DASH
'\u2015': 0x2d, # HORIZONTAL BAR
'.': 0x2e, # FULL STOP
'/': 0x2f, # SOLIDUS
'0': 0x30, # DIGIT ZERO
'1': 0x31, # DIGIT ONE
'2': 0x32, # DIGIT TWO
'3': 0x33, # DIGIT THREE
'4': 0x34, # DIGIT FOUR
'5': 0x35, # DIGIT FIVE
'6': 0x36, # DIGIT SIX
'7': 0x37, # DIGIT SEVEN
'8': 0x38, # DIGIT EIGHT
'9': 0x39, # DIGIT NINE
':': 0x3a, # COLON
';': 0x3b, # SEMICOLON
'<': 0x3c, # LESS-THAN SIGN
'=': 0x3d, # EQUALS SIGN
'>': 0x3e, # GREATER-THAN SIGN
'?': 0x3f, # QUESTION MARK
'@': 0x40, # COMMERCIAL AT
'A': 0x41, # LATIN CAPITAL LETTER A
'B': 0x42, # LATIN CAPITAL LETTER B
'C': 0x43, # LATIN CAPITAL LETTER C
'D': 0x44, # LATIN CAPITAL LETTER D
'E': 0x45, # LATIN CAPITAL LETTER E
'F': 0x46, # LATIN CAPITAL LETTER F
'G': 0x47, # LATIN CAPITAL LETTER G
'H': 0x48, # LATIN CAPITAL LETTER H
'I': 0x49, # LATIN CAPITAL LETTER I
'J': 0x4a, # LATIN CAPITAL LETTER J
'K': 0x4b, # LATIN CAPITAL LETTER K
'L': 0x4c, # LATIN CAPITAL LETTER L
'M': 0x4d, # LATIN CAPITAL LETTER M
'N': 0x4e, # LATIN CAPITAL LETTER N
'O': 0x4f, # LATIN CAPITAL LETTER O
'P': 0x50, # LATIN CAPITAL LETTER P
'Q': 0x51, # LATIN CAPITAL LETTER Q
'R': 0x52, # LATIN CAPITAL LETTER R
'S': 0x53, # LATIN CAPITAL LETTER S
'T': 0x54, # LATIN CAPITAL LETTER T
'U': 0x55, # LATIN CAPITAL LETTER U
'V': 0x56, # LATIN CAPITAL LETTER V
'W': 0x57, # LATIN CAPITAL LETTER W
'X': 0x58, # LATIN CAPITAL LETTER X
'Y': 0x59, # LATIN CAPITAL LETTER Y
'Z': 0x5a, # LATIN CAPITAL LETTER Z
'[': 0x5b, # LEFT SQUARE BRACKET
'\\': 0x5c, # REVERSE SOLIDUS
']': 0x5d, # RIGHT SQUARE BRACKET
'^': 0x5e, # CIRCUMFLEX ACCENT
'_': 0x5f, # LOW LINE
'`': 0x60, # GRAVE ACCENT
'a': 0x61, # LATIN SMALL LETTER A
'b': 0x62, # LATIN SMALL LETTER B
'c': 0x63, # LATIN SMALL LETTER C
'd': 0x64, # LATIN SMALL LETTER D
'e': 0x65, # LATIN SMALL LETTER E
'f': 0x66, # LATIN SMALL LETTER F
'g': 0x67, # LATIN SMALL LETTER G
'h': 0x68, # LATIN SMALL LETTER H
'i': 0x69, # LATIN SMALL LETTER I
'j': 0x6a, # LATIN SMALL LETTER J
'k': 0x6b, # LATIN SMALL LETTER K
'l': 0x6c, # LATIN SMALL LETTER L
'm': 0x6d, # LATIN SMALL LETTER M
'n': 0x6e, # LATIN SMALL LETTER N
'o': 0x6f, # LATIN SMALL LETTER O
'p': 0x70, # LATIN SMALL LETTER P
'q': 0x71, # LATIN SMALL LETTER Q
'r': 0x72, # LATIN SMALL LETTER R
's': 0x73, # LATIN SMALL LETTER S
't': 0x74, # LATIN SMALL LETTER T
'u': 0x75, # LATIN SMALL LETTER U
'v': 0x76, # LATIN SMALL LETTER V
'w': 0x77, # LATIN SMALL LETTER W
'x': 0x78, # LATIN SMALL LETTER X
'y': 0x79, # LATIN SMALL LETTER Y
'z': 0x7a, # LATIN SMALL LETTER Z
'{': 0x7b, # LEFT CURLY BRACKET
'|': 0x7c, # VERTICAL LINE
'}': 0x7d, # RIGHT CURLY BRACKET
'~': 0x7e, # TILDE
'⌂': 0x7f, # HOUSE
'Ç': 0x80, # LATIN CAPITAL LETTER C WITH CEDILLA
'ü': 0x81, # LATIN SMALL LETTER U WITH DIAERESIS
'é': 0x82, # LATIN SMALL LETTER E WITH ACUTE
'â': 0x83, # LATIN SMALL LETTER A WITH CIRCUMFLEX
'ä': 0x84, # LATIN SMALL LETTER A WITH DIAERESIS
'à': 0x85, # LATIN SMALL LETTER A WITH GRAVE
'å': 0x86, # LATIN SMALL LETTER A WITH RING ABOVE
'ç': 0x87, # LATIN SMALL LETTER C WITH CEDILLA
'ê': 0x88, # LATIN SMALL LETTER E WITH CIRCUMFLEX
'ë': 0x89, # LATIN SMALL LETTER E WITH DIAERESIS
'è': 0x8a, # LATIN SMALL LETTER E WITH GRAVE
'ï': 0x8b, # LATIN SMALL LETTER I WITH DIAERESIS
'î': 0x8c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
'ì': 0x8d, # LATIN SMALL LETTER I WITH GRAVE
'Ä': 0x8e, # LATIN CAPITAL LETTER A WITH DIAERESIS
'Å': 0x8f, # LATIN CAPITAL LETTER A WITH RING ABOVE
'É': 0x90, # LATIN CAPITAL LETTER E WITH ACUTE
'æ': 0x91, # LATIN SMALL LETTER AE
'Æ': 0x92, # LATIN CAPITAL LETTER AE
'ô': 0x93, # LATIN SMALL LETTER O WITH CIRCUMFLEX
'ö': 0x94, # LATIN SMALL LETTER O WITH DIAERESIS
'ò': 0x95, # LATIN SMALL LETTER O WITH GRAVE
'û': 0x96, # LATIN SMALL LETTER U WITH CIRCUMFLEX
'ù': 0x97, # LATIN SMALL LETTER U WITH GRAVE
'ÿ': 0x98, # LATIN SMALL LETTER Y WITH DIAERESIS
'Ö': 0x99, # LATIN CAPITAL LETTER O WITH DIAERESIS
'Ü': 0x9a, # LATIN CAPITAL LETTER U WITH DIAERESIS
'ñ': 0x9b, # LATIN SMALL LETTER N WITH TILDE
'Ñ': 0x9c, # LATIN CAPITAL LETTER N WITH TILDE
'ª': 0x9d, # FEMININE ORDINAL INDICATOR
'º': 0x9e, # MASCULINE ORDINAL INDICATOR
'¿': 0x9f, # INVERTED QUESTION MARK
'á': 0xa0, # LATIN SMALL LETTER A WITH ACUTE
'í': 0xa1, # LATIN SMALL LETTER I WITH ACUTE
'ó': 0xa2, # LATIN SMALL LETTER O WITH ACUTE
'ú': 0xa3, # LATIN SMALL LETTER U WITH ACUTE
'¢': 0xa4, # CENT SIGN
'£': 0xa5, # POUND SIGN
'¥': 0xa6, # YEN SIGN
'Я': 0xa7, # CYRILLIC CAPITAL LETTER YA, WRONG
'ƒ': 0xa8, # LATIN SMALL LETTER F WITH HOOK
'¡': 0xa9, # INVERTED EXCLAMATION MARK
'Ã': 0xaa, # LATIN CAPITAL LETTER A WITH TILDE
'ã': 0xab, # LATIN SMALL LETTER A WITH TILDE
'Õ': 0xac, # LATIN CAPITAL LETTER O WITH TILDE
'õ': 0xad, # LATIN SMALL LETTER O WITH TILDE
'Ø': 0xae, # LATIN CAPITAL LETTER O WITH STROKE
'ø': 0xaf, # LATIN SMALL LETTER O WITH STROKE
'˙': 0xb0, # DOT ABOVE
'¨': 0xb1, # DIAERESIS
'°': 0xb2, # DEGREE SIGN
'\u02CB': 0xb3, # MODIFIER LETTER GRAVE ACCENT (ASCII '`' already maps to 0x60 above)
'´': 0xb4, # ACUTE ACCENT
'½': 0xb5, # VULGAR FRACTION ONE HALF
'¼': 0xb6, # VULGAR FRACTION ONE QUARTER
'×': 0xb7, # MULTIPLICATION SIGN
'÷': 0xb8, # DIVISION SIGN
'≤': 0xb9, # LESS-THAN OR EQUAL TO
'≥': 0xba, # GREATER-THAN OR EQUAL TO
'«': 0xbb, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'»': 0xbc, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'≠': 0xbd, # NOT EQUAL TO
'√': 0xbe, # SQUARE ROOT
'⁻': 0xbf, # SUPERSCRIPT MINUS
'⌠': 0xc0, # TOP HALF INTEGRAL
'⌡': 0xc1, # BOTTOM HALF INTEGRAL
'∞': 0xc2, # INFINITY
'\u25F8': 0xc3, # UPPER LEFT TRIANGLE
'↲': 0xc4, # DOWNWARDS ARROW WITH TIP LEFTWARDS
'↑': 0xc5, # UPWARDS ARROW
'↓': 0xc6, # DOWNWARDS ARROW
'→': 0xc7, # RIGHTWARDS ARROW
'←': 0xc8, # LEFTWARDS ARROW
'┌': 0xc9, # BOX DRAWINGS LIGHT DOWN AND RIGHT
'┐': 0xca, # BOX DRAWINGS LIGHT DOWN AND LEFT
'└': 0xcb, # BOX DRAWINGS LIGHT UP AND RIGHT
'┘': 0xcc, # BOX DRAWINGS LIGHT UP AND LEFT
'●': 0xcd, # BLACK CIRCLE
'®': 0xce, # REGISTERED SIGN
'©': 0xcf, # COPYRIGHT SIGN
'™': 0xd0, # TRADE MARK SIGN
'†': 0xd1, # DAGGER
'§': 0xd2, # SECTION SIGN
'¶': 0xd3, # PILCROW SIGN
'Γ': 0xd4, # GREEK CAPITAL LETTER GAMMA
'◿': 0xd5, # LOWER RIGHT TRIANGLE
'Δ': 0xd5, # GREEK CAPITAL LETTER DELTA
'Θ': 0xd6, # GREEK CAPITAL LETTER THETA
'Λ': 0xd7, # GREEK CAPITAL LETTER LAMBDA
'Ξ': 0xd8, # GREEK CAPITAL LETTER XI
'Π': 0xd9, # GREEK CAPITAL LETTER PI
'Σ': 0xda, # GREEK CAPITAL LETTER SIGMA
'Υ': 0xdb, # GREEK CAPITAL LETTER UPSILON
'Φ': 0xdc, # GREEK CAPITAL LETTER PHI
'Ψ': 0xdd, # GREEK CAPITAL LETTER PSI
'Ω': 0xde, # GREEK CAPITAL LETTER OMEGA
'α': 0xdf, # GREEK SMALL LETTER ALPHA
'ß': 0xe0, # LATIN SMALL LETTER SHARP S (FAKE)
'β': 0xe0, # GREEK SMALL LETTER BETA
'γ': 0xe1, # GREEK SMALL LETTER GAMMA
'δ': 0xe2, # GREEK SMALL LETTER DELTA
'ε': 0xe3, # GREEK SMALL LETTER EPSILON
'ζ': 0xe4, # GREEK SMALL LETTER ZETA
'η': 0xe5, # GREEK SMALL LETTER ETA
'θ': 0xe6, # GREEK SMALL LETTER THETA
'ι': 0xe7, # GREEK SMALL LETTER IOTA
'κ': 0xe8, # GREEK SMALL LETTER KAPPA
'λ': 0xe9, # GREEK SMALL LETTER LAMBDA
'μ': 0xea, # GREEK SMALL LETTER MU
'ν': 0xeb, # GREEK SMALL LETTER NU
'ξ': 0xec, # GREEK SMALL LETTER XI
'π': 0xed, # GREEK SMALL LETTER PI
'ρ': 0xee, # GREEK SMALL LETTER RHO
'σ': 0xef, # GREEK SMALL LETTER SIGMA
'τ': 0xf0, # GREEK SMALL LETTER TAU
'υ': 0xf1, # GREEK SMALL LETTER UPSILON
'χ': 0xf2, # GREEK SMALL LETTER CHI
'ψ': 0xf3, # GREEK SMALL LETTER PSI
'ω': 0xf4, # GREEK SMALL LETTER OMEGA
'▼': 0xf5, # BLACK DOWN-POINTING TRIANGLE
'▶': 0xf6, # BLACK RIGHT-POINTING TRIANGLE
'◀': 0xf7, # BLACK LEFT-POINTING TRIANGLE
'\U0001D411': 0xf8, # MATHEMATICAL BOLD CAPITAL R
'↤': 0xf9, # LEFTWARDS ARROW FROM BAR
'\U0001D405': 0xfa, # MATHEMATICAL BOLD CAPITAL F
'⇥': 0xfb, # RIGHTWARDS ARROW FROM BAR
'☐': 0xfc, # BALLOT BOX
'━': 0xfd, # BOX DRAWINGS HEAVY HORIZONTAL
'\U0001F142': 0xfe, # SQUARED LATIN CAPITAL LETTER S
'\U0001F13F': 0xff # SQUARED LATIN CAPITAL LETTER P
}
# Table with combined mappings
combined_chars_lookahead = 0
combined_chars = {}
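# A minimal sketch (not part of the original codec) of how a table like this is
# typically applied: look each character up in encoding_table and fall back to
# replacement_char for anything unmapped.
def _encode_example(text):
    """Return the list of ST7066-0B code points for ``text``."""
    return [encoding_table.get(char, replacement_char) for char in text]
# _encode_example('±2°C') == [0x10, 0x32, 0xb2, 0x43]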
|
40c9d69a501150430de3bc52d23efee5570a7c0e
|
982a904a83e2caa7acd8b2ac19cfc5a4fb75bde1
|
/examples/ch13_TwitterV2/snippets_ipynb/locationlistener.py
|
f154d61ae633577677253b7fa2516d9d5d88edf9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
pdeitel/IntroToPython
|
73bc349fe40701b51f49d17d7fbc5b9985885e48
|
978093febf2ed849a2049e0b0860d2c4998306f7
|
refs/heads/master
| 2023-02-09T08:04:15.313698
| 2023-02-03T23:23:42
| 2023-02-03T23:23:42
| 173,331,130
| 249
| 371
| null | 2022-12-04T06:52:26
| 2019-03-01T16:08:37
| null |
UTF-8
|
Python
| false
| false
| 2,725
|
py
|
locationlistener.py
|
# locationlistener.py
"""Receives tweets matching a search string and stores a list of
dictionaries containing each tweet's username/text/location."""
import tweepy
from tweetutilities import get_tweet_content
class LocationListener(tweepy.StreamingClient):
"""Handles incoming Tweet stream to get location data."""
def __init__(self, bearer_token, counts_dict,
tweets_list, topic, limit=10):
"""Configure the LocationListener."""
self.tweets_list = tweets_list
self.counts_dict = counts_dict
self.topic = topic
self.TWEET_LIMIT = limit
super().__init__(bearer_token, wait_on_rate_limit=True)
def on_response(self, response):
"""Called when Twitter pushes a new tweet to you."""
# get tweet's username, text and location
tweet_data = get_tweet_content(response)
# ignore retweets and tweets that do not contain the topic
if (tweet_data['text'].startswith('RT') or
self.topic.lower() not in tweet_data['text'].lower()):
return
self.counts_dict['total_tweets'] += 1 # it's an original tweet
# ignore tweets with no location
if not tweet_data.get('location'):
return
self.counts_dict['locations'] += 1 # user account has location
self.tweets_list.append(tweet_data) # store the tweet
print(f"{tweet_data['username']}: {tweet_data['text']}\n")
# if TWEET_LIMIT is reached, terminate streaming
if self.counts_dict['locations'] == self.TWEET_LIMIT:
self.disconnect()
##########################################################################
# (C) Copyright 2022 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
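# A usage sketch under assumed names (a `bearer_token` variable and Tweepy's
# v2 filtered-stream API; this is not part of the book's original file):
#
# counts = {'total_tweets': 0, 'locations': 0}
# tweets = []
# listener = LocationListener(bearer_token, counts, tweets, topic='football')
# listener.add_rules(tweepy.StreamRule('football'))  # server-side filter rule
# listener.filter(expansions=['author_id'], user_fields=['location'])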
|
9effd9b649efe48e282135e06eaf2d7d57aa85e2
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoParticleFlow/PFClusterProducer/python/particleFlowClusterHO_cfi.py
|
b04e9ea0360578ede70d9e9d25b9985ea6d29603
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,717
|
py
|
particleFlowClusterHO_cfi.py
|
import FWCore.ParameterSet.Config as cms
# Use this object to modify parameters specifically for Run 2
#### PF CLUSTER HO ####
#cleaning
#seeding
_localMaxSeeds_HO = cms.PSet(
algoName = cms.string("LocalMaximumSeedFinder"),
thresholdsByDetector = cms.VPSet(
cms.PSet( detector = cms.string("HCAL_BARREL2_RING0"),
seedingThreshold = cms.double(1.0),
seedingThresholdPt = cms.double(0.0)
),
cms.PSet( detector = cms.string("HCAL_BARREL2_RING1"),
seedingThreshold = cms.double(3.1),
seedingThresholdPt = cms.double(0.0)
)
),
nNeighbours = cms.int32(4)
)
#topo clusters
_topoClusterizer_HO = cms.PSet(
algoName = cms.string("Basic2DGenericTopoClusterizer"),
thresholdsByDetector = cms.VPSet(
cms.PSet( detector = cms.string("HCAL_BARREL2_RING0"),
gatheringThreshold = cms.double(0.5),
gatheringThresholdPt = cms.double(0.0)
),
cms.PSet( detector = cms.string("HCAL_BARREL2_RING1"),
gatheringThreshold = cms.double(1.0),
gatheringThresholdPt = cms.double(0.0)
)
),
useCornerCells = cms.bool(True)
)
#position calc
_positionCalcHO_cross_nodepth = cms.PSet(
algoName = cms.string("Basic2DGenericPFlowPositionCalc"),
##
minFractionInCalc = cms.double(1e-9),
posCalcNCrystals = cms.int32(5),
logWeightDenominator = cms.double(0.5), # same as gathering threshold
minAllowedNormalization = cms.double(1e-9)
)
_positionCalcHO_all_nodepth = _positionCalcHO_cross_nodepth.clone(
posCalcNCrystals = -1
)
#pf clusters
_pfClusterizer_HO = cms.PSet(
algoName = cms.string("Basic2DGenericPFlowClusterizer"),
#pf clustering parameters
minFractionToKeep = cms.double(1e-7),
positionCalc = _positionCalcHO_cross_nodepth,
allCellsPositionCalc = _positionCalcHO_all_nodepth,
showerSigma = cms.double(10.0),
stoppingTolerance = cms.double(1e-8),
maxIterations = cms.uint32(50),
excludeOtherSeeds = cms.bool(True),
minFracTot = cms.double(1e-20), ## numerical stabilization
recHitEnergyNorms = cms.VPSet(
cms.PSet( detector = cms.string("HCAL_BARREL2_RING0"),
recHitEnergyNorm = cms.double(0.5)
),
cms.PSet( detector = cms.string("HCAL_BARREL2_RING1"),
recHitEnergyNorm = cms.double(1.0)
)
)
)
particleFlowClusterHO = cms.EDProducer(
"PFClusterProducer",
recHitsSource = cms.InputTag("particleFlowRecHitHO"),
recHitCleaners = cms.VPSet(),
seedCleaners = cms.VPSet(),
seedFinder = _localMaxSeeds_HO,
initialClusteringStep = _topoClusterizer_HO,
pfClusterBuilder = _pfClusterizer_HO,
positionReCalc = cms.PSet(),
energyCorrector = cms.PSet()
)
#
# Need to change the quality tests for Run 2
#
def _modifyParticleFlowClusterHOForRun2(object):
"""
Customises PFClusterProducer for Run 2.
"""
for p in object.seedFinder.thresholdsByDetector:
p.seedingThreshold = cms.double(0.08)
for p in object.initialClusteringStep.thresholdsByDetector:
p.gatheringThreshold = cms.double(0.05)
for p in object.pfClusterBuilder.recHitEnergyNorms:
p.recHitEnergyNorm = cms.double(0.05)
object.pfClusterBuilder.positionCalc.logWeightDenominator = cms.double(0.05)
object.pfClusterBuilder.allCellsPositionCalc.logWeightDenominator = cms.double(0.05)
# Call the function above to modify particleFlowClusterHO only if the run2 era is active
from Configuration.Eras.Modifier_run2_common_cff import run2_common
run2_common.toModify( particleFlowClusterHO, func=_modifyParticleFlowClusterHOForRun2 )
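# A usage sketch (not part of this fragment): in a top-level configuration the
# producer is typically pulled in with the standard cfi-loading pattern, e.g.
#
# process.load("RecoParticleFlow.PFClusterProducer.particleFlowClusterHO_cfi")
# process.hoClustering = cms.Path(process.particleFlowClusterHO)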
|
fe7f29f6c113e2bddbc38ef8fce1bdc2edd0b032
|
362196f32e8248e025cb2f6cf0b88f812c9a059c
|
/juriscraper/opinions/united_states/state/oklaag.py
|
3f4a38ae1be7e8e2c71b0240572a799c40eaade9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
freelawproject/juriscraper
|
0fea8d4bb512808cb1e036aaaf819e9cc0847a6b
|
d2c6672696e13e33ec9981a1901b87047d8108c5
|
refs/heads/main
| 2023-08-09T13:27:21.357915
| 2023-07-06T22:33:01
| 2023-07-06T22:33:01
| 22,757,589
| 283
| 97
|
BSD-2-Clause
| 2023-09-08T22:59:36
| 2014-08-08T12:50:35
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
oklaag.py
|
# Scraper for Oklahoma Attorney General Opinions
# CourtID: oklaag
# Court Short Name: OK
# Author: Andrei Chelaru
# Reviewer: mlr
# Date: 2014-07-05
from datetime import date
from lxml import html
from juriscraper.opinions.united_states.state import okla
## WARNING: THIS SCRAPER IS FAILING:
## This scraper is succeeding in development, but
## is failing in production. We are not exactly
## sure why, and suspect that the hosting court
## site may be blocking our production IP and/or
## throttling/manipulating requests from production.
class Site(okla.Site):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
d = date.today()
self.url = "http://www.oscn.net/applications/oscn/Index.asp?ftdb=STOKAG&year={year}&level=1".format(
year=d.year
)
self.court_id = self.__module__
def _get_precedential_statuses(self):
return ["Unpublished"] * len(self.case_names)
@staticmethod
def cleanup_content(content):
tree = html.fromstring(content)
core_element = tree.xpath(
'//div[contains(concat(" ", normalize-space(@class), " "), " main ")]'
)[0]
return html.tostring(
core_element, pretty_print=True, encoding="unicode"
)
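# A standalone sketch (not part of the scraper) of the class-matching XPath
# idiom used in cleanup_content above; it matches class="main" even when other
# classes are present on the element:
#
# doc = html.fromstring('<div class="wrap main extra">opinion text</div>')
# doc.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " main ")]')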
|
6ed7cb0dc8895fd1baad820d6471018a058ba817
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/regionmove/azure-mgmt-regionmove/azure/mgmt/regionmove/aio/operations_async/__init__.py
|
2d5f1a4233916034b5023e6be377e947f05a0ef0
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
__init__.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._move_collections_operations_async import MoveCollectionsOperations
from ._move_resources_operations_async import MoveResourcesOperations
from ._unresolved_dependencies_operations_async import UnresolvedDependenciesOperations
from ._operations_discovery_operations_async import OperationsDiscoveryOperations
__all__ = [
'MoveCollectionsOperations',
'MoveResourcesOperations',
'UnresolvedDependenciesOperations',
'OperationsDiscoveryOperations',
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.